diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index d77e6693b27c08da5c60f5410406a08e4863f1c4..7b6a3191543e375dba54d51eee08265e881695db 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -40,6 +40,7 @@
 #include "aidge/operator/ArgMax.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/BatchNorm.hpp"
+#include "aidge/operator/BitShift.hpp"
 #include "aidge/operator/Concat.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..ad96f6f55596daea14ef83c616dad6557d485b33
--- /dev/null
+++ b/include/aidge/operator/BitShift.hpp
@@ -0,0 +1,124 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_BITSHIFT_H_
+#define AIDGE_CORE_OPERATOR_BITSHIFT_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/StaticAttributes.hpp"
+
+namespace Aidge {
+
+enum class BitShiftAttr { BitShiftdirection };
+
+/**
+ * @brief Tensor BitShift Operator.
+ */
+class BitShift_Op : public OperatorTensor,
+    public Registrable<BitShift_Op, std::string, std::shared_ptr<OperatorImpl>(const BitShift_Op&)> {
+public:
+    enum BitShiftDirection { left, right };
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<BitShiftAttr, BitShiftDirection>;
+    template <BitShiftAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    BitShift_Op(BitShiftDirection direction)
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+              attr<BitShiftAttr::BitShiftdirection>(direction)))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    BitShift_Op(const BitShift_Op& op)
+        : OperatorTensor(op), mAttributes(op.mAttributes)
+    {
+        if (op.mImpl) {
+            SET_IMPL_MACRO(BitShift_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::BitShift_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<BitShift_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    /**
+     * @brief Set the backend used to run this operator.
+     * @param name Name of the backend.
+     * @param device Device index.
+     */
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Get the attributes of the operator.
+     * @return Attributes
+     */
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Retrieve the direction in which the shift is applied (left or right).
+     * @return BitShiftDirection
+     */
+    inline BitShiftDirection& direction() const noexcept { return mAttributes->template getAttr<BitShiftAttr::BitShiftdirection>(); }
+
+    static const std::vector<std::string> getInputsName() {
+        return {"InputTensor", "ShiftAmount"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"OutputTensor"};
+    }
+};
+
+/**
+ * @brief The bitwise shift operator performs an element-wise shift between the input tensor and the shift tensor,
+ * in the direction specified by "direction".
+ * @param[in] direction Direction of the bitshift (left or right)
+ * @param[in] name Name of the node
+ * @return std::shared_ptr<Node>
+ */
+inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direction, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<BitShift_Op>(direction), name);
+}
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::BitShiftAttr>::data[] = {"BitShiftdirection"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_BITSHIFT_H_ */
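Usage sketch (illustration only, not part of the patch): constructing a BitShift node and propagating dimensions in C++, mirroring the pattern used by the unit tests added at the end of this diff. The helper name buildBitShiftExample, the node name "bitshift" and the {2, 3} shapes are arbitrary choices for the sketch; tensor contents and backend selection are omitted.

    #include <memory>
    #include "aidge/data/Tensor.hpp"
    #include "aidge/operator/BitShift.hpp"
    #include "aidge/operator/OperatorTensor.hpp"

    void buildBitShiftExample() {
        using namespace Aidge;
        // Create a node that right-shifts "InputTensor" by "ShiftAmount", element-wise.
        std::shared_ptr<Node> node = BitShift(BitShift_Op::BitShiftDirection::right, "bitshift");
        auto op = std::static_pointer_cast<OperatorTensor>(node->getOperator());

        // Associate the two inputs declared by getInputsName(): {"InputTensor", "ShiftAmount"}.
        auto input = std::make_shared<Tensor>();
        auto amount = std::make_shared<Tensor>();
        input->resize({2, 3});
        amount->resize({2, 3});
        op->associateInput(0, input);
        op->associateInput(1, amount);

        // Output dimensions follow the broadcasting rule implemented in forwardDims().
        op->forwardDims();   // op->getOutput(0)->dims() == {2, 3}
    }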
diff --git a/python_binding/operator/pybind_BitShift.cpp b/python_binding/operator/pybind_BitShift.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b4f6c90e54e781b011459be6e8e6e252e7347b00
--- /dev/null
+++ b/python_binding/operator/pybind_BitShift.cpp
@@ -0,0 +1,58 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/BitShift.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_BitShift(py::module &m) {
+    // Binding for the BitShift_Op class
+    auto pyBitShiftOp = py::class_<BitShift_Op, std::shared_ptr<BitShift_Op>, OperatorTensor>(m, "BitShiftOp", py::multiple_inheritance(), R"mydelimiter(
+        BitShiftOp is a tensor operator that performs bitwise shifts on tensor elements.
+        This class allows shifting tensor values either to the left or to the right, based on the
+        specified direction. The direction can be accessed and controlled using the
+        BitShiftDirection enum.
+        :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right)
+        :type direction: BitShiftDirection
+        :param name: name of the node.
+        :type name: str
+        )mydelimiter")
+        .def(py::init<BitShift_Op::BitShiftDirection>(), py::arg("direction"))
+        .def("direction", &BitShift_Op::direction, "Get the direction of the bit shift (left or right).")
+        .def_static("get_inputs_name", &BitShift_Op::getInputsName, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", &BitShift_Op::getOutputsName, "Get the names of the output tensors.");
+
+    // Enum binding under the BitShiftOp class
+    py::enum_<BitShift_Op::BitShiftDirection>(pyBitShiftOp, "BitShiftDirection")
+        .value("Right", BitShift_Op::BitShiftDirection::right)
+        .value("Left", BitShift_Op::BitShiftDirection::left)
+        .export_values();
+
+    // Binding for the BitShift factory function
+    m.def("BitShift", &BitShift, py::arg("direction") = BitShift_Op::BitShiftDirection::right, py::arg("name") = "",
+        R"mydelimiter(
+        BitShift is a tensor operator that performs bitwise shifts on tensor elements.
+        It shifts tensor values either to the left or to the right, based on the
+        specified direction. The direction can be accessed and controlled using the
+        BitShiftDirection enum.
+        :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right)
+        :type direction: BitShiftDirection
+        :param name: name of the node.
+        :type name: str
+        )mydelimiter");
+}
+} // namespace Aidge
\ No newline at end of file
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index d8fa37507731c490b04010b6eea857ed4d7c8b55..52c8cc8a0199ac64b0f7bae97442178614ea5622 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -33,6 +33,7 @@ void init_And(py::module&);
 void init_ArgMax(py::module&);
 void init_AvgPooling(py::module&);
 void init_BatchNorm(py::module&);
+void init_BitShift(py::module&);
 void init_Concat(py::module&);
 void init_ConstantOfShape(py::module&);
 void init_Conv(py::module&);
@@ -115,6 +116,7 @@ void init_Aidge(py::module& m) {
     init_ArgMax(m);
     init_AvgPooling(m);
     init_BatchNorm(m);
+    init_BitShift(m);
     init_Concat(m);
     init_Conv(m);
     init_ConvDepthWise(m);
diff --git a/src/operator/BitShift.cpp b/src/operator/BitShift.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b21722a6e55ec6370ba8d30ab2a781466a1a38be
--- /dev/null
+++ b/src/operator/BitShift.cpp
@@ -0,0 +1,60 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>    // std::size_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/operator/BitShift.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::BitShift_Op::Type = "BitShift";
+
+bool Aidge::BitShift_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (!inputsAssociated()) {
+        return false;
+    }
+
+    const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+    const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+    // Broadcasting: start from the input with the most dimensions and align the
+    // shorter shape on the trailing dimensions.
+    std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+    const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+    std::size_t out_id = outDims.size() - 1;
+    std::size_t low_id = lowDims.size() - 1;
+    std::size_t i = 0;
+
+    while (i++ < lowDims.size()) {
+        if (outDims[out_id] == 1) {
+            outDims[out_id] = lowDims[low_id];
+        }
+        else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for BitShift Operation: {} for input#0 vs {} for input#1",
+                inputsDims0, inputsDims1);
+        }
+        --out_id;
+        --low_id;
+    }
+    mOutputs[0]->resize(outDims);
+    return true;
+}
+
+void Aidge::BitShift_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(BitShift_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
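Shape rule illustration (not part of the patch): forwardDims() aligns the two input shapes from their last dimension and accepts each pair of sizes when they match or when one of them is 1. A standalone C++ sketch of the same rule, with two worked shapes; the function name broadcastDims is only for this sketch.

    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    // Same alignment rule as BitShift_Op::forwardDims(), written in isolation:
    // shapes are matched from the trailing dimension; a size of 1 broadcasts.
    std::vector<std::size_t> broadcastDims(std::vector<std::size_t> a, const std::vector<std::size_t>& b) {
        std::vector<std::size_t> out = (a.size() >= b.size()) ? a : b;
        const std::vector<std::size_t>& low = (a.size() < b.size()) ? a : b;
        std::size_t out_id = out.size() - 1;
        std::size_t low_id = low.size() - 1;
        for (std::size_t i = 0; i < low.size(); ++i, --out_id, --low_id) {
            if (out[out_id] == 1) {
                out[out_id] = low[low_id];   // a size of 1 takes the other input's size
            } else if (low[low_id] != 1 && low[low_id] != out[out_id]) {
                throw std::runtime_error("incompatible shapes");
            }
        }
        return out;
    }

    // broadcastDims({2, 3, 4}, {3, 1}) == {2, 3, 4}
    // broadcastDims({2, 3, 4}, {2, 4}) throws: 3 and 2 cannot be aligned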
diff --git a/unit_tests/operator/Test_BitShift_Op.cpp b/unit_tests/operator/Test_BitShift_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..39916e4e75779ecc63680b43ece8ccd2bdc667c9
--- /dev/null
+++ b/unit_tests/operator/Test_BitShift_Op.cpp
@@ -0,0 +1,133 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <algorithm>  // std::min
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::uint16_t
+#include <memory>
+#include <random>     // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/BitShift.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] BitShift_Op(forwardDims)", "[BitShift][forwardDims]")
+{
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create the BitShift operator
+    std::shared_ptr<Node> myShift = BitShift(BitShift_Op::BitShiftDirection::right);
+    auto op = std::static_pointer_cast<OperatorTensor>(myShift->getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op->associateInput(0, T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op->associateInput(1, T1);
+
+    SECTION("BitShift_Op Test dimensions [Scalar]") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+
+    SECTION("BitShift_Op Test dimensions [Same Size]") {
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+    }
+
+    SECTION("BitShift_Op Test dimensions [Broadcast]") {
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+        }
+    }
+
+    SECTION("BitShift_Op Test dimensions [Wrong Dimensions]") {
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);
+            REQUIRE_THROWS(op->forwardDims());
+        }
+    }
+}
+} // namespace Aidge
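For context (not part of this patch, which only adds the core operator, its Python binding and shape tests): the element-wise semantics a backend kernel is expected to implement, sketched for the contiguous, same-shape case. The function name bitShiftKernel and the std::int32_t element type are assumptions for illustration; real backend kernels would also handle broadcasting and other data types.

    #include <cstddef>
    #include <cstdint>

    // Illustration only: shift each element of `input` by the matching element of
    // `shiftAmount`, to the left or to the right as selected by the operator attribute.
    void bitShiftKernel(const std::int32_t* input, const std::int32_t* shiftAmount,
                        std::int32_t* output, std::size_t size, bool shiftLeft) {
        for (std::size_t i = 0; i < size; ++i) {
            output[i] = shiftLeft ? (input[i] << shiftAmount[i])
                                  : (input[i] >> shiftAmount[i]);
        }
    }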