diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..4aacfafd7d51830dc89b7b30ea5ebf521a13fe30 --- /dev/null +++ b/aidge_core/unit_tests/test_impl.py @@ -0,0 +1,72 @@ +""" +Copyright (c) 2023 CEA-List + +This program and the accompanying materials are made available under the +terms of the Eclipse Public License 2.0 which is available at +http://www.eclipse.org/legal/epl-2.0. + +SPDX-License-Identifier: EPL-2.0 +""" + +import unittest +import aidge_core +from functools import reduce + +import numpy as np + +GLOBAL_CPT = 0 + +class testImpl(aidge_core.OperatorImpl): + def __init__(self, op: aidge_core.Operator): + aidge_core.OperatorImpl.__init__(self, op, 'cpu') # Required to avoid type error ! + + def forward(self): + global GLOBAL_CPT + GLOBAL_CPT += 1 + +class test_OperatorImpl(unittest.TestCase): + """Test Op + """ + def setUp(self): + global GLOBAL_CPT + GLOBAL_CPT = 0 + def tearDown(self): + pass + + def test_setImplementation(self): + """Test setting an implementation manually + """ + global GLOBAL_CPT + matmul = aidge_core.GenericOperator("MatMul", 1, 0, 1, name="MatMul0") + generic_matmul_op = matmul.get_operator() + generic_matmul_op.set_compute_output_dims(lambda x: x) + generic_matmul_op.set_impl(testImpl(generic_matmul_op)) + generic_matmul_op.forward() + self.assertEqual(GLOBAL_CPT, 1) + + def test_Registrar_setOp(self): + """Test registering an implementation + """ + global GLOBAL_CPT + aidge_core.register_ConvOp2D("cpu", testImpl) + self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D()) + conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0") + conv.get_operator().set_backend("cpu") + conv.get_operator().forward() + self.assertEqual(GLOBAL_CPT, 1) + + def test_Registrar_setGraphView(self): + """Test registering an implementation + """ + global GLOBAL_CPT + aidge_core.register_ConvOp2D("cpu", testImpl) + aidge_core.register_ProducerOp("cpu", testImpl) + 
self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D()) + conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0") + model = aidge_core.sequential([conv]) + model.set_backend("cpu") + conv.get_operator().forward() + self.assertEqual(GLOBAL_CPT, 1) + +if __name__ == '__main__': + unittest.main() diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py index c541ae0e03459a0a7200795bc2d3c6b70c13be3b..c94960733b24444218b1209463adbda11b89f6e8 100644 --- a/aidge_core/unit_tests/test_operator_binding.py +++ b/aidge_core/unit_tests/test_operator_binding.py @@ -108,7 +108,7 @@ class test_operator_binding(unittest.TestCase): """Dummy implementation to test that C++ call python code """ def __init__(self, op: aidge_core.Operator): - aidge_core.OperatorImpl.__init__(self, op) # Recquired to avoid type error ! + aidge_core.OperatorImpl.__init__(self, op, 'test_impl') # Recquired to avoid type error ! self.idx = 0 def forward(self): diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp index 3c27e118a384debdaf5505aec4ab993f260a97de..4ed6f4cbfcf390582e231c9392043207f6f19de9 100644 --- a/include/aidge/aidge.hpp +++ b/include/aidge/aidge.hpp @@ -23,15 +23,17 @@ #include "aidge/data/Tensor.hpp" #include "aidge/data/Database.hpp" #include "aidge/data/DataProvider.hpp" + #include "aidge/graph/Connector.hpp" #include "aidge/graph/GraphView.hpp" #include "aidge/graph/Node.hpp" #include "aidge/graph/OpArgs.hpp" -#include "aidge/graphmatching/Match.hpp" -#include "aidge/graphmatching/NodeRegex.hpp" -#include "aidge/graphmatching/SeqStm.hpp" -#include "aidge/graphmatching/StmFactory.hpp" -#include "aidge/graphmatching/Utile.hpp" + +#include "aidge/graphRegex/GraphRegex.hpp" + +#include "aidge/filler/Filler.hpp" + +#include "aidge/nodeTester/ConditionalInterpreter.hpp" #include "aidge/operator/Add.hpp" #include "aidge/operator/AvgPooling.hpp" diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp 
index 8b5aba10dbc2691b5d607cda28eba621335881d1..04044ed1c77915ec10b5af5b660cf8e6b20c81b2 100644 --- a/include/aidge/backend/OperatorImpl.hpp +++ b/include/aidge/backend/OperatorImpl.hpp @@ -9,12 +9,12 @@ * ********************************************************************************/ -#ifndef AIDGE_OPERATORIMPL_H_ -#define AIDGE_OPERATORIMPL_H_ +#ifndef AIDGE_BACKEND_OPERATORIMPL_H_ +#define AIDGE_BACKEND_OPERATORIMPL_H_ -#include <cstddef> +#include <string> #include <vector> -#include <memory> + #include "aidge/utils/Types.h" namespace Aidge { @@ -22,10 +22,13 @@ class Operator; class OperatorImpl { public: - OperatorImpl(const Operator& op); + OperatorImpl(const Operator& op, const std::string& backend); virtual void forward(); virtual void backward(); + const std::string& backend() const noexcept { + return mBackend; + } /** * @brief Minimum amount of data from a specific input required by the * implementation to be run. @@ -73,9 +76,10 @@ public: protected: const Operator &mOp; + const std::string mBackend; std::vector<NbElts_t> mNbConsumedData; std::vector<NbElts_t> mNbProducedData; }; } // namespace Aidge -#endif /* AIDGE_OPERATORIMPL_H_ */ +#endif /* AIDGE_BACKEND_OPERATORIMPL_H_ */ diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp index 509c11691047604fbce959cfb29649aac75b5a1e..f3fa4ef5164a2eed7caaa7baa7f83e7ed00403b8 100644 --- a/include/aidge/backend/TensorImpl.hpp +++ b/include/aidge/backend/TensorImpl.hpp @@ -72,7 +72,7 @@ private: class TensorImpl { protected: - const char *mBackend; + const std::string mBackend; /// @brief Device id. const DeviceIdx_t mDevice; /// Number of elements (to be) stored. 
@@ -81,7 +81,7 @@ protected: public: TensorImpl() = delete; - TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims) + TensorImpl(const std::string& backend, DeviceIdx_t device, std::vector<DimSize_t> dims) : mBackend(backend), mDevice(device) { @@ -97,7 +97,7 @@ public: * Return the (backend, device) pair for this implementation. */ std::pair<std::string, DeviceIdx_t> device() const noexcept { - return std::make_pair(std::string(mBackend), mDevice); + return std::make_pair(mBackend, mDevice); } /** @@ -171,22 +171,30 @@ public: }; /** - * Set the size, in number of elements, that must be stored. + * @brief Set the size, in number of elements, that must be stored. */ virtual void resize(std::vector<DimSize_t> dims) { mNbElts = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>()); } /** - * Return the number of elements stored. + * @brief Return the number of elements stored. */ inline std::size_t size() const noexcept { return mNbElts; } /** - * Return the size (in bytes) of one element (scalar). + * @brief Return the size (in bytes) of one element (scalar). */ virtual std::size_t scalarSize() const noexcept = 0; - constexpr const char *backend() const { return mBackend; } + + /** + * @brief Set every element of the implementation to zero. + */ + virtual void zeros() { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Function not implented"); + } + + const std::string backend() const { return mBackend; } /** * @brief Copy from another backend. 
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp index 549232b2635f48b979208bb2f91b845dacef6f8b..922acacb070c745b2924d1fb787602326ec9d05a 100644 --- a/include/aidge/backend/cpu/data/TensorImpl.hpp +++ b/include/aidge/backend/cpu/data/TensorImpl.hpp @@ -14,7 +14,6 @@ #include "aidge/backend/TensorImpl.hpp" #include "aidge/data/Tensor.hpp" -#include "aidge/data/half.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" @@ -31,21 +30,12 @@ private: std::unique_ptr<T[]> mDataOwner; public: - static constexpr const char *Backend = "cpu"; + static const std::string Backend; +public: TensorImpl_cpu(DeviceIdx_t device, std::vector<DimSize_t> dims) : TensorImpl(Backend, device, dims) {} - bool operator==(const TensorImpl &otherImpl) const override final { - const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl); - AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts); - - std::size_t i = 0; - for (; i < mNbElts && - *static_cast<const T*>(rawPtr(i)) == *static_cast<const T*>(typedOtherImpl.rawPtr(i)); - ++i) { - } - return i == mNbElts; - } + bool operator==(const TensorImpl &other) const override final; static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, std::vector<DimSize_t> dims) { return std::make_shared<TensorImpl_cpu<T>>(device, dims); @@ -53,6 +43,8 @@ public: inline std::size_t scalarSize() const noexcept override final { return sizeof(T); } + void zeros() override final; + void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final { const T* srcT = static_cast<const T *>(src); T* dstT = static_cast<T *>(rawPtr(offset)); @@ -62,64 +54,7 @@ public: std::copy(srcT, srcT + length, dstT); } - void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final { - if (length == 0) { - return; - } - - T* dstT = static_cast<T *>(rawPtr(offset)); - 
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity"); - switch (srcDt) - { - case DataType::Float64: - std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length, - dstT); - break; - case DataType::Float32: - std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length, - dstT); - break; - case DataType::Float16: - std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length, - dstT); - break; - case DataType::Int64: - std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length, - dstT); - break; - case DataType::UInt64: - std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length, - dstT); - break; - case DataType::Int32: - std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length, - dstT); - break; - case DataType::UInt32: - std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length, - dstT); - break; - case DataType::Int16: - std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length, - dstT); - break; - case DataType::UInt16: - std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length, - dstT); - break; - case DataType::Int8: - std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length, - dstT); - break; - case DataType::UInt8: - std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length, - dstT); - break; - default: - AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type."); - break; - } - } + void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final; void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final { AIDGE_ASSERT(device.first == Backend, "backend must match"); @@ -176,6 
+111,10 @@ private: } }; + +template <typename T> +const std::string TensorImpl_cpu<T>::Backend = "cpu"; + namespace { static Registrar<Tensor> registrarTensorImpl_cpu_Float64( {"cpu", DataType::Float64}, Aidge::TensorImpl_cpu<double>::create); diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp index d8412dbd4ddb4ec371649d180bce10a80dd624f3..a6ff03d36b662f4420424f930401844de25036d2 100644 --- a/include/aidge/data/Data.hpp +++ b/include/aidge/data/Data.hpp @@ -52,6 +52,7 @@ public: return mType; } virtual ~Data() = default; + virtual std::string toString() const = 0; private: const std::string mType; @@ -84,4 +85,4 @@ namespace Aidge { inline auto format_as(DataType dt) { return EnumStrings<Aidge::DataType>::data[static_cast<int>(dt)]; } } -#endif /* AIDGE_DATA_H_ */ \ No newline at end of file +#endif /* AIDGE_DATA_H_ */ diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp index 95101bb3ad1704f4acb8dd3e46ef7ee450f1f91f..2503c01b385a7a28eda0490a0715ef2de3a1f1db 100644 --- a/include/aidge/data/Tensor.hpp +++ b/include/aidge/data/Tensor.hpp @@ -12,16 +12,22 @@ #ifndef AIDGE_CORE_DATA_TENSOR_H_ #define AIDGE_CORE_DATA_TENSOR_H_ +#include <cstddef> // std::size_t #include <cstring> +#include <functional> // std::multiplies #include <set> #include <memory> -#include <numeric> // std::accumulate +#include <numeric> // std::accumulate #include <string> #include <type_traits> // std::is_arithmetic #include <vector> #include "aidge/backend/TensorImpl.hpp" #include "aidge/data/Data.hpp" +#include "aidge/operator/Add.hpp" +#include "aidge/operator/Div.hpp" +#include "aidge/operator/Mul.hpp" +#include "aidge/operator/Sub.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ArrayHelpers.hpp" @@ -35,15 +41,17 @@ namespace Aidge { class Tensor : public Data, public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> { 
private: - DataType mDataType; /** enum to specify data type. */ + DataType mDataType = DataType::Float32; /** enum to specify data type. */ std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */ std::vector<DimSize_t> mStrides; /** Stride dimensions of the tensor. */ - std::shared_ptr<TensorImpl> mImpl; /** Pointer to the actual data implementation. */ + std::shared_ptr<TensorImpl> mImpl = nullptr; /** Pointer to the actual data implementation. */ std::size_t mImplOffset = 0; - std::shared_ptr<Tensor> mGrad; /** Pointer to the associated gradient Tensor instance. */ + std::shared_ptr<Tensor> mGrad = nullptr; /** Pointer to the associated gradient Tensor instance. */ // Cached data - std::size_t mSize = 0; /** Number of elements in the Tensor. */ + /// @brief Number of elements in the Tensor. + std::size_t mSize; + /// @brief Whether or not data are contiguous in memory. bool mContiguous = true; public: @@ -51,64 +59,48 @@ class Tensor : public Data, /** * @brief Construct a new empty Tensor object. - * @param dataType Sets the type of inserted data. + * It has the features of an undefined scalar. */ - Tensor(DataType dataType = DataType::Float32) + Tensor(DataType dtype = DataType::Float32) : Data(Type), - mDataType(dataType) + mDataType(dtype), + mDims(std::vector<DimSize_t>({})), + mStrides({1}), + mSize(1) { // ctor } /** - * @brief Construct a new Tensor object from dimensions. + * @brief Construct a new Tensor object from an arithmetic parameter. * - * @param dims dimensions of the tensor - * @param dataType datatype of the tensor (default = DataType::Float32) + * @tparam T Type of the input parameter. + * @tparam VT Decayed type of the input paramter. + * @param val Input value. 
*/ - Tensor(const std::vector<DimSize_t>& dims, DataType dataType = DataType::Float32) + template<typename T, + typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>> + Tensor(T val) : Data(Type), - mDataType(dataType), - mDims(dims) + mDataType(NativeType<VT>::type), + mDims({}), + mStrides({1}), + mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())), + mSize(1) { - computeSize(); + *static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val); } /** - * @brief Construct a new Tensor object from another one (shallow copy). - * Data memory is not copied, but shared between the new Tensor and the - * initial one. + * @brief Construct a new Tensor object from dimensions. * - * @param otherTensor + * @param dims dimensions of the tensor */ - Tensor(const Tensor&) = default; - Tensor(Tensor&&) = default; - - /** - * Perform a deep copy of the tensor. - */ - Tensor clone() const { - Tensor newTensor(*this); - if (!newTensor.isContiguous()) { - newTensor.makeContiguous(); - } - else { - std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims); - newImpl->copy(mImpl->rawPtr(mImplOffset), mSize); - newTensor.setImpl(newImpl); - } - return newTensor; - } - - template<typename T, - typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>> - Tensor(T val) - : Data(Type), - mDataType(NativeType<VT>::type), - mDims({}), mStrides({1}), - mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())), - mSize(1) { - *static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val); + Tensor(const std::vector<DimSize_t>& dims) + : Data(Type) + { + // set mDims, mStrides, mContiguous, mSize + resize(dims); } /** @@ -123,20 +115,11 @@ class Tensor : public Data, mDims({SIZE_0}), mStrides({1}), mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})), - mSize(SIZE_0) { + mSize(SIZE_0) + { 
mImpl->copyFromHost(&arr.data[0], SIZE_0); } - template <typename T, std::size_t SIZE_0> - constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) { - resize({SIZE_0}); - if (!mImpl) { - mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0}); - } - mImpl->copyFromHost(&arr.data[0], SIZE_0, mImplOffset); - return *this; - } - /** * @brief Construct a new Tensor object from the 2-dimensions Array helper. * @tparam T datatype @@ -154,16 +137,6 @@ class Tensor : public Data, mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1); } - template <typename T, std::size_t SIZE_0, std::size_t SIZE_1> - constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) { - resize({SIZE_0, SIZE_1}); - if (!mImpl) { - mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1}); - } - mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1, mImplOffset); - return *this; - } - /** * @brief Construct a new Tensor object from the 3-dimensions Array helper. * @tparam T datatype @@ -182,16 +155,6 @@ class Tensor : public Data, mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2); } - template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2> - constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) { - resize({SIZE_0, SIZE_1, SIZE_2}); - if (!mImpl) { - mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2}); - } - mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2, mImplOffset); - return *this; - } - /** * @brief Construct a new Tensor object from the 4-dimensions Array helper. 
* @tparam T datatype @@ -211,15 +174,19 @@ class Tensor : public Data, mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3); } - template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3> - constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) { - resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3}); - if (!mImpl) { - mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3}); - } - mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3, mImplOffset); - return *this; - } + /** + * @brief Copy constructor. Construct a new Tensor object from another one + * (shallow copy). Data memory is not copied, but shared between the new + * Tensor and the initial one. + * @param other + */ + Tensor(const Tensor& other) = default; + + /** + * @brief Move constructor. + * @param other + */ + Tensor(Tensor&& other) = default; /** * @brief Copy dimensions, datatype and data from another Tensor. @@ -227,24 +194,32 @@ class Tensor : public Data, * existing implementation. Tensor backend/device remain untouched. * If current Tensor does not have an implementation, only a shallow copy * is performed and the Tensor will share data with t. - * @param t other Tensor object. + * @param other other Tensor object. 
* @return Tensor& */ - Tensor &operator=(const Tensor &t) { - resize(t.dims(), t.strides()); - setDataType(t.dataType(), false); // do not convert existing data - if (t.hasImpl()) { - if (hasImpl()) { - copyFrom(t); - } - else { - // Perform a shallow copy only - setImpl(t.mImpl, t.mImplOffset); - } - } - else { - setImpl(nullptr); - } + Tensor &operator=(const Tensor& other); + + template <typename T, std::size_t SIZE_0> + constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) { + *this = Tensor(std::move(arr)); + return *this; + } + + template <typename T, std::size_t SIZE_0, std::size_t SIZE_1> + constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) { + *this = Tensor(std::move(arr)); + return *this; + } + + template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2> + constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) { + *this = Tensor(std::move(arr)); + return *this; + } + + template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3> + constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) { + *this = Tensor(std::move(arr)); return *this; } @@ -260,6 +235,123 @@ class Tensor : public Data, return *mImpl == *(otherTensor.mImpl); } + /** + * @brief Element-wise addition operation for two ``Tensor``s. + * @note ``Tensor``s should be stored on the same backend. + * @todo If input ``Tensor``s have a different dataType, the output should + * have the dataType of the ``Tensor`` with the highest precision. 
+ * + * @param other + * @return Tensor + */ + Tensor operator+(const Tensor& other) const { + AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); + AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); + AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend"); + auto add_ = Add_Op(2); + add_.associateInput(0, std::make_shared<Tensor>(*this)); + add_.associateInput(1, std::make_shared<Tensor>(other)); + add_.computeOutputDims(); + add_.setDataType(dataType()); + add_.setBackend(mImpl->backend()); + add_.forward(); + // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>; + return add_.getOutput(0)->clone(); + } + + /** + * @brief Element-wise substraction operation for two ``Tensor``s. + * @note ``Tensor``s should be stored on the same backend. + * @todo If input ``Tensor``s have a different dataType, the output should + * have the dataType of the ``Tensor`` with the highest precision. 
+ * + * @param other + * @return Tensor + */ + Tensor operator-(const Tensor& other) const { + AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); + AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); + AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend"); + auto sub_ = Sub_Op(); + sub_.associateInput(0, std::make_shared<Tensor>(*this)); + sub_.associateInput(1, std::make_shared<Tensor>(other)); + sub_.computeOutputDims(); + sub_.setDataType(dataType()); + sub_.setBackend(mImpl->backend()); + sub_.forward(); + // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>; + return sub_.getOutput(0)->clone(); + } + + /** + * @brief Element-wise multiplication operation for two ``Tensor``s. + * @note ``Tensor``s should be stored on the same backend. + * @todo If input ``Tensor``s have a different dataType, the output should + * have the dataType of the ``Tensor`` with the highest precision. 
+ * + * @param other + * @return Tensor + */ + Tensor operator*(const Tensor& other) const { + AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); + AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); + AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend"); + auto mul_ = Mul_Op(); + mul_.associateInput(0, std::make_shared<Tensor>(*this)); + mul_.associateInput(1, std::make_shared<Tensor>(other)); + mul_.computeOutputDims(); + mul_.setDataType(dataType()); + mul_.setBackend(mImpl->backend()); + mul_.forward(); + // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>; + return mul_.getOutput(0)->clone(); + } + + /** + * @brief Element-wise division operation for two ``Tensor``s. + * @note ``Tensor``s should be stored on the same backend. + * @todo If input ``Tensor``s have a different dataType, the output should + * have the dataType of the ``Tensor`` with the highest precision. 
+ * + * @param other + * @return Tensor + */ + Tensor operator/(const Tensor& other) const { + AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); + AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); + AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend"); + auto div_ = Div_Op(); + div_.associateInput(0, std::make_shared<Tensor>(*this)); + div_.associateInput(1, std::make_shared<Tensor>(other)); + div_.computeOutputDims(); + div_.setDataType(dataType()); + div_.setBackend(mImpl->backend()); + div_.forward(); + // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>; + return div_.getOutput(0)->clone(); + } + +public: + /** + * @brief Perform a deep copy of the tensor. + */ + Tensor clone() const { + Tensor newTensor(*this); + if (!newTensor.isContiguous()) { + newTensor.makeContiguous(); + } + else { + std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims); + newImpl->copy(mImpl->rawPtr(mImplOffset), mSize); + newTensor.setImpl(newImpl); + } + return newTensor; + } + + const std::string backend() const { + return hasImpl() ? getImpl()->backend() : ""; + } + /** * @brief Set the backend of the Tensor associated implementation. If there * was no previous implementation set, data will be allocated, but it will @@ -292,12 +384,7 @@ class Tensor : public Data, * @brief Get a list of available backends. * @return std::set<std::string> */ - static std::set<std::string> getAvailableBackends(){ - std::set<std::string> backendsList; - for(std::tuple<std::string, DataType> tupleKey : Registrar<Tensor>::getKeys()) - backendsList.insert(std::get<0>(tupleKey)); - return backendsList; - } + static std::set<std::string> getAvailableBackends(); /** * @brief Get the data type enum. 
@@ -327,8 +414,8 @@ class Tensor : public Data, * @brief Get the Impl object * @return constexpr const std::shared_ptr<TensorImpl>& */ - constexpr const std::shared_ptr<TensorImpl> &getImpl() const { return mImpl; } - constexpr std::size_t getImplOffset() const { return mImplOffset; } + constexpr const std::shared_ptr<TensorImpl>& getImpl() const noexcept { return mImpl; } + constexpr std::size_t getImplOffset() const noexcept { return mImplOffset; } /** * @brief Set the Impl object @@ -369,13 +456,13 @@ class Tensor : public Data, * @brief Get dimensions of the Tensor object. * @return constexpr const std::vector<DimSize_t>& */ - constexpr const std::vector<DimSize_t> &dims() const { return mDims; } + constexpr inline const std::vector<DimSize_t>& dims() const noexcept { return mDims; } /** * @brief Get strides of the Tensor object. * @return constexpr const std::vector<DimSize_t>& */ - constexpr const std::vector<DimSize_t> &strides() const { return mStrides; } + constexpr inline const std::vector<DimSize_t>& strides() const noexcept { return mStrides; } /** * @brief Return true if Tensor is contiguous in memory. @@ -424,6 +511,18 @@ class Tensor : public Data, * @return false */ bool empty() const { return mDims.empty(); } + // bool newempty() const noexcept { + // return mSize == 0; + // } + + /** + * @brief Set each element of the tensor to zero. 
+ */ + void zeros() const { + if (mImpl) { + mImpl->zeros(); + } + } template <typename expectedType> const expectedType& get(std::size_t idx) const { @@ -450,21 +549,42 @@ class Tensor : public Data, set<expectedType>(getStorageIdx(coordIdx), value); } - std::string toString() const; + std::string toString() const override; inline void print() const { fmt::print("{}\n", toString()); } std::shared_ptr<Tensor> grad() { - if (!mGrad) { - mGrad = std::make_shared<Tensor>(mDataType); - mGrad->resize(mDims); + // if (!mGrad && mImpl) { + // mGrad = std::make_shared<Tensor>(mDims); + // mGrad->setDataType(mDataType); + // mGrad->setBackend(mImpl->backend()); - if (mImpl) mGrad->setBackend(mImpl->backend()); - } + // // if (mImpl) mGrad->setBackend(mImpl->backend()); + // } return mGrad; } + /** + * @brief Associate the gradient with a Tensor instance and set its implementation + * if none was previously set. + * @note Dimensions for the Tensor instance are copied from the original current Tensor. + * @note If a Tensor instance was already associated, only the implementation is created + * with values set to 0. + * @note If Tensor instance and implementation already existed for the gradient + * nothing is done. + */ + void initGradient() { + if (!mGrad) { + mGrad = std::make_shared<Tensor>(mDims); + } + if (!mGrad->hasImpl()) { + mGrad->setDataType(dataType()); + mGrad->setBackend(hasImpl() ? mImpl->backend() : "cpu"); + mGrad->zeros(); + } + } + /** * @brief From the the 1D contiguous index, return the coordinate of an element in the tensor. * Beware: do not use this function with the storage index! 
@@ -473,13 +593,13 @@ class Tensor : public Data, * @return std::vector<DimSize_t> */ std::vector<std::size_t> getCoord(std::size_t flatIdx) const { - std::vector<std::size_t> coordIdx = std::vector<std::size_t>(mDims.size()); - std::size_t idx = flatIdx; - for (std::size_t i = mDims.size() - 1; i > 0; --i){ - coordIdx[i] = (idx % mDims[i]); - idx/=mDims[i]; + std::vector<std::size_t> coordIdx(mDims.size()); + std::size_t i = mDims.size(); + + while (i-- > 0) { + coordIdx[i] = (flatIdx % mDims[i]); + flatIdx/=mDims[i]; } - coordIdx[0] = idx % mDims[0]; return coordIdx; } @@ -497,7 +617,7 @@ class Tensor : public Data, AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions"); std::size_t flatIdx = 0; std::size_t i = 0; - for(; i < coordIdx.size() - 1; ++i){ + for(; i < coordIdx.size() - 1; ++i) { AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor"); flatIdx = (flatIdx + coordIdx[i]) * mDims[i + 1]; } @@ -513,20 +633,24 @@ class Tensor : public Data, * @return DimSize_t Storage index */ std::size_t getStorageIdx(const std::vector<std::size_t>& coordIdx) const { + for(std::size_t i = 0; i < coordIdx.size(); ++i) { + AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor"); + } AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions"); - return std::inner_product(coordIdx.begin(), coordIdx.end(), mStrides.begin(), DimSize_t(0)); + return std::inner_product(coordIdx.cbegin(), coordIdx.cend(), mStrides.cbegin(), DimSize_t(0)); } /** - * @brief Returns a sub-tensor with one or more dimension less. - * For instance, t.extract({1}) on a CHW tensor will return the HW tensor + * @brief Returns a sub-tensor with equal or lower number of dimensions. + * + * @note For instance, ``t.extract({1})`` on a CHW tensor will return the HW tensor * of channel #1. 
- * Likewise, t.extract({0, 1}) on a NCHW tensor will return the HW tensor + * Likewise, ``t.extract({0, 1})`` on a NCHW tensor will return the HW tensor * of batch #0 and channel #1. - * No memory copy is performed, the returned tensor does not own the memory. - * If the number of coordinates matches the number of dimensions, an empty + * @note No memory copy is performed, the returned tensor does not own the memory. + * @note If the number of coordinates matches the number of dimensions, a scalar * tensor is returned. - * It current tensor was contiguous, the returned tensor is garanteed to be + * @note If current tensor was contiguous, the returned tensor is guaranteed to be * contiguous as well. * * @param coordIdx Coordinates of the sub-tensor to extract @@ -537,6 +661,8 @@ class Tensor : public Data, /** * @brief Returns a sub-tensor at some coordinate and with some dimension. * + * @note Data contiguity of the returned Tensor is not guaranteed. + * * @param coordIdx First coordinates of the sub-tensor to extract * @param dims Dimensions of the sub-tensor to extract * @return Tensor Sub-tensor. diff --git a/include/aidge/filler/Filler.hpp b/include/aidge/filler/Filler.hpp new file mode 100644 index 0000000000000000000000000000000000000000..51d01d87f338d1c8eb33b7b3ec6194390bfe13bf --- /dev/null +++ b/include/aidge/filler/Filler.hpp @@ -0,0 +1,63 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0.
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CORE_FILLER_H_ +#define AIDGE_CORE_FILLER_H_ + +#include <memory> +#include <random> // normal_distribution, uniform_real_distribution + +#include "aidge/data/Tensor.hpp" + +namespace Aidge { + +inline void calculateFanInFanOut(std::shared_ptr<Tensor> tensor, + unsigned int& fanIn, unsigned int& fanOut) { + AIDGE_ASSERT( + tensor->nbDims() == 4, + "Tensor need to have 4 dimensions to compute FanIn and FanOut."); + // Warning: This function assumes NCXX data layout. + // Aidge currently only supports NCHW but this may not be true in the + // future. + DimSize_t batchSize = tensor->dims()[0]; + DimSize_t channelSize = tensor->dims()[1]; + AIDGE_ASSERT(batchSize != 0, + "Cannot calculate FanIn if tensor batch size is 0."); + AIDGE_ASSERT(channelSize != 0, + "Cannot calculate FanOut if tensor channel size is 0."); + fanIn = static_cast<unsigned int>(tensor->size() / batchSize); + fanOut = static_cast<unsigned int>(tensor->size() / channelSize); +} +enum VarianceNorm { FanIn, Average, FanOut }; + +template <typename T> +void constantFiller(std::shared_ptr<Tensor> tensor, T constantValue); + +template <typename T> +void normalFiller(std::shared_ptr<Tensor> tensor, double mean = 0.0, + double stdDev = 1.0); + +template <typename T> +void uniformFiller(std::shared_ptr<Tensor> tensor, T min, T max); + +template <typename T> +void xavierUniformFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0, + VarianceNorm varianceNorm = FanIn); +template <typename T> +void xavierNormalFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0, + VarianceNorm varianceNorm = FanIn); + +template <typename T> +void heFiller(std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm = FanIn, + T meanNorm = 0.0, T scaling = 1.0); +} // namespace Aidge + +#endif /* AIDGE_CORE_FILLER_H_ */ diff --git a/include/aidge/graph/GraphView.hpp
b/include/aidge/graph/GraphView.hpp index 3311797d858cf4899a6cfed7a18fb9840afb514e..9ea1ab19bd325c51cf4c41cd48b0210b9ee8709a 100644 --- a/include/aidge/graph/GraphView.hpp +++ b/include/aidge/graph/GraphView.hpp @@ -62,11 +62,7 @@ public: return mNodes == gv.mNodes; } - NodePtr operator[](const std::string& name) - { - assert(mNodeRegistry.find(name) != mNodeRegistry.end() && "Could not find Node in the GraphView."); - return mNodeRegistry.at(name); - } + const NodePtr operator[](const std::string& nodeName) const; /////////////////////////////////////////////////////// // FUNCTIONAL DESCRIPTION @@ -82,14 +78,14 @@ public: * @brief Name of the node. * @return std::string */ - std::string name() const; + inline std::string name() const noexcept { return mName; } /** * @brief Set the node name. * @warning Undefined behaviour when several Nodes have the same name. * @param name New name for the node. */ - void setName(const std::string &name); + inline void setName(const std::string &name) { mName = name; } /** * @brief Save the GraphView as a Mermaid graph in a .md file at the @@ -98,16 +94,16 @@ public: */ void save(const std::string& path, bool verbose = false, bool showProducers = true) const; + void logOutputs(const std::string& dirName) const; + /** * Check that a node is in the current GraphView. * @param nodePtr Node to check * @return bool True is nodePtr belongs to the GraphView. */ - inline bool inView(NodePtr nodePtr) const { - return mNodes.find(nodePtr) != mNodes.end(); - } + bool inView(const NodePtr& nodePtr) const; - NodePtr getRootNode() { + inline NodePtr rootNode() const noexcept { return mRootNode; } @@ -118,41 +114,32 @@ public: /////////////////////////////////////////////////////// public: /** @brief Get reference to the set of input Nodes. 
*/ - inline std::set<NodePtr> inputNodes() const noexcept { - std::set<NodePtr> nodes; - for (auto node : mInputNodes) { - if (node.first != nullptr) { - nodes.insert(node.first); - } - } - return nodes; - } + std::set<NodePtr> inputNodes() const; + /** @brief Get reference to the set of output Nodes. */ - inline std::set<NodePtr> outputNodes() const noexcept { - std::set<NodePtr> nodes; - for (auto node : mOutputNodes) { - if (node.first != nullptr) { - nodes.insert(node.first); - } - } - return nodes; - } + std::set<NodePtr> outputNodes() const; + /** @brief Assess if the given Node is an input Node of the GraphView object. */ - inline bool isInputNode(NodePtr nodePtr) const { - const auto nodes = inputNodes(); - return (nodes.find(nodePtr) != nodes.end()) ? true : false; - } + bool isInputNode(const NodePtr& nodePtr) const; + /** @brief Assess if the given Node is an output Node of the GraphView object. */ - inline bool isOutputNode(NodePtr nodePtr) const { - const auto nodes = outputNodes(); - return (nodes.find(nodePtr) != nodes.end()) ? true : false; - } + bool isOutputNode(const NodePtr& nodePtr) const; void setOrderedInputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& inputs); void setOrderedOutputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& outputs); - inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedInputs() const { return mInputNodes; }; - inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedOutputs() const { return mOutputNodes; }; + /** + * @brief Get inputs of the current GraphView with their associated id. + * The rank of the nodes are their rank in the vector. + * @return const std::vector<std::pair<NodePtr, IOIndex_t>>& + */ + inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedInputs() const noexcept { return mInputNodes; }; + /** + * @brief Get outputs of the current GraphView with their associated id. + * The rank of the nodes are their rank in the vector. 
+ * @return const std::vector<std::pair<NodePtr, IOIndex_t>>& + */ + inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedOutputs() const noexcept { return mOutputNodes; }; /** * @brief List outside data input connections of the GraphView. @@ -214,7 +201,7 @@ public: * If not, add a Transpose Operator. * 4 - Propagate Tensor dimensions through the consecutive Operators. */ - void compile(const std::string& backend, const Aidge::DataType datatype, DeviceIdx_t device = 0); + void compile(const std::string& backend = "cpu", const Aidge::DataType datatype = DataType::Float32, DeviceIdx_t device = 0); /** * @brief Compute dimensions of input/output Tensors for each Operator of the @@ -223,9 +210,9 @@ public: void forwardDims(const std::vector<std::vector<DimSize_t>> dims = {}); /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */ - void setBackend(const std::string &backend, DeviceIdx_t device = 0); + void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const; /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */ - void setDataType(const DataType &datatype); + void setDataType(const DataType& datatype) const; /////////////////////////////////////////////////////// // TOPOLOGY @@ -283,7 +270,7 @@ public: * added to the list, and so on. * - Any remaining nodes have no path to the root node and are added in * arbitrary order. In this case, the ranking is not garanteed to be unique. - * + * * If the ranking cannot be garanteed to be unique, the second item indicates * the rank from which unicity cannot be garanteed. * @return std::pair<std::vector<NodePtr>, size_t> Pair with the list of ranked @@ -379,11 +366,10 @@ public: * @param toTensor Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning * first available data input for the Node. 
*/ - inline void addChild(NodePtr toOtherNode, std::string fromOutNodeName, + inline void addChild(NodePtr toOtherNode, const std::string& fromOutNodeName, const IOIndex_t fromTensor = IOIndex_t(0), IOIndex_t toTensor = gk_IODefaultIndex) { - assert(mNodeRegistry.find(fromOutNodeName) != mNodeRegistry.end() && - "No Node with this name found in the GraphView."); + AIDGE_ASSERT(mNodeRegistry.find(fromOutNodeName) != mNodeRegistry.end(), "No node named {} in graph {}.", fromOutNodeName, name()); addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor); } @@ -524,7 +510,6 @@ private: // TOPOLOGY /////////////////////////////////////////////////////// - void _forwardDims(std::set<NodePtr> listNodes); }; /** diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp index 97a4ef69bd371e80c4e63303feac5e64197670b3..93cfb44514e39a489ccb75d86fd6e114da5c6162 100644 --- a/include/aidge/operator/Add.hpp +++ b/include/aidge/operator/Add.hpp @@ -12,15 +12,11 @@ #ifndef AIDGE_CORE_OPERATOR_ADD_H_ #define AIDGE_CORE_OPERATOR_ADD_H_ -#include <numeric> -#include <vector> -#include <cmath> #include <memory> +#include <string> #include <vector> -#include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" @@ -28,7 +24,7 @@ namespace Aidge { class Add_Op : public OperatorTensor, - public Registrable<Add_Op, std::string, std::unique_ptr<OperatorImpl>(const Add_Op&)> { + public Registrable<Add_Op, std::string, std::shared_ptr<OperatorImpl>(const Add_Op&)> { public: static const std::string Type; @@ -44,11 +40,7 @@ public: * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @param op Operator to copy. */ - Add_Op(const Add_Op& op) - : OperatorTensor(op) - { - mImpl = op.mImpl ? 
Registrar<Add_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; - } + Add_Op(const Add_Op& op); /** * @brief Clone the operator using its copy-constructor. @@ -70,10 +62,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Add_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName() { return {"data_input_0", "data_input_n"}; diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp index 5066cb78f86bfc87d33fce4ecd8f302c40cb14d2..031046500e0c50443a0a1f4e98a6471625f25eb4 100644 --- a/include/aidge/operator/AvgPooling.hpp +++ b/include/aidge/operator/AvgPooling.hpp @@ -13,14 +13,18 @@ #define AIDGE_CORE_OPERATOR_AVGPOOLING_H_ #include <array> -#include <numeric> +#include <cmath> // std::floor +#include <cstddef> // std::size_t +#include <string> +#include <utility> // std::pair #include <vector> -#include <cmath> #include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" +#include "aidge/utils/ArrayHelpers.hpp" +#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" @@ -30,7 +34,7 @@ enum class AvgPoolingAttr { StrideDims, KernelDims }; template <DimIdx_t DIM> class AvgPooling_Op : public OperatorTensor, - public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>, + public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>, public StaticAttributes<AvgPoolingAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>> { @@ -60,7 +64,11 @@ public: : OperatorTensor(op), Attributes_(op) { - 
mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl) { + SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -97,8 +105,7 @@ public: std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, - const IOIndex_t outputIdx = 0) const override final - { + const IOIndex_t outputIdx = 0) const override final { if (outputIdx != 0) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor."); } @@ -137,7 +144,7 @@ public: void setBackend(const std::string &name, DeviceIdx_t device = 0) override { - mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this); + SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, name); mOutputs[0]->setBackend(name, device); } @@ -149,8 +156,8 @@ public: } }; -template <DimIdx_t DIM> -const std::string AvgPooling_Op<DIM>::Type = "AvgPooling"; +template <Aidge::DimIdx_t DIM> +const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling"; template <std::array<DimSize_t, 1>::size_type DIM> inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims, @@ -177,4 +184,4 @@ const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims", "KernelDims"}; } -#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */ \ No newline at end of file +#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */ diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp index 4a0f40c034c7738a33eb8a9569fac4aa2fff465d..51673dd3c8b41c657c1df6e951a2cb3a842308b5 100644 --- a/include/aidge/operator/BatchNorm.hpp +++ b/include/aidge/operator/BatchNorm.hpp @@ -30,7 +30,7 @@ enum class BatchNormAttr { Epsilon, Momentum }; template <DimIdx_t DIM> class BatchNorm_Op : public OperatorTensor, - public Registrable<BatchNorm_Op<DIM>, std::string, 
std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>, + public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>, public StaticAttributes<BatchNormAttr, float, float> { public: static const std::string Type; @@ -54,7 +54,11 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend()); + }else{ + mImpl = nullptr; + } } /** @@ -95,7 +99,7 @@ public: } void setBackend(const std::string &name, DeviceIdx_t device = 0) override { - mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this); + SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, name); mOutputs[0]->setBackend(name, device); // By default, automatically set backend for scale, shift, mean and variance @@ -136,4 +140,4 @@ template <> const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" }; } -#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_ \ No newline at end of file +#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_ diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp index 7cc3985674219daf087381049d3a845299b3e250..bbc776a1175a1fc29d08c3872649a6b7aac2f04f 100644 --- a/include/aidge/operator/Cast.hpp +++ b/include/aidge/operator/Cast.hpp @@ -39,7 +39,11 @@ public: Cast_Op(const Cast_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? 
Registrar<Cast_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl) { + SET_IMPL_MACRO(Cast_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -50,12 +54,7 @@ public: return std::make_shared<Cast_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - if (Registrar<Cast_Op>::exists({name})) { - mImpl = Registrar<Cast_Op>::create({name})(*this); - } - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; void forward() override; diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp index 62a9540105d77866167d87b9733ed473e03f0151..611ff6bd53b1f16f87f73dd951d0645b9765262e 100644 --- a/include/aidge/operator/Concat.hpp +++ b/include/aidge/operator/Concat.hpp @@ -12,16 +12,16 @@ #ifndef AIDGE_CORE_OPERATOR_CONCAT_H_ #define AIDGE_CORE_OPERATOR_CONCAT_H_ -#include <numeric> -#include <vector> -#include <cmath> #include <memory> +#include <stdexcept> +#include <string> #include <vector> #include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Types.h" @@ -29,7 +29,7 @@ namespace Aidge { enum class ConcatAttr { Axis }; class Concat_Op : public OperatorTensor, - public Registrable<Concat_Op, std::string, std::unique_ptr<OperatorImpl>(const Concat_Op&)>, + public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)>, public StaticAttributes<ConcatAttr, DimSize_t> { public: static const std::string Type; @@ -55,7 +55,11 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? 
Registrar<Concat_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Concat_Op, *this, op.backend()); + }else{ + mImpl = nullptr; + } } /** @@ -66,51 +70,9 @@ public: return std::make_shared<Concat_Op>(*this); } - // Data operator[](const char* inputName) override final { - // std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] : - // (strcmp(inputName, "weight") ? mInputs[1] : - // (strcmp(inputName, "bias") ? mInputs[2] : - // nullptr)); - // assert((in!=nullptr) && "No such parameter"); - // return *in; - // } - - - void computeOutputDims() override final { - // Every input is non-empty with the same number of dimensions - bool associated = (getInput(0) != nullptr); - associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input - auto outputDims = getInput(0)->dims(); - const auto firstInputNbDims = getInput(0) -> nbDims(); - for (IOIndex_t i = 1; i < nbInputs(); ++i) { - if (!getInput(i)) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i); - } + void computeOutputDims() override final; - if (getInput(i)->nbDims() == firstInputNbDims) { - for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) { - if (dim == getAttr<ConcatAttr::Axis>()) { - outputDims[dim] += getInput(i)->dims()[dim]; - } - else { - associated &= (getInput(i)->dims()[dim] == outputDims[dim]); - } - } - } - else { - associated = false; - break; - } - } - if (associated) { - getOutput(0)->resize(outputDims); - } - } - - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Concat_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input_0", "data_input_n"}; diff --git 
a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp index 8290fb3d0d978e9af3291809c5057406424096d5..c93a098106be76f30c1150ea64c464492429feb9 100644 --- a/include/aidge/operator/Conv.hpp +++ b/include/aidge/operator/Conv.hpp @@ -13,35 +13,48 @@ #define AIDGE_CORE_OPERATOR_CONV_H_ #include <array> -#include <cmath> -#include <cstddef> -#include <numeric> +#include <cmath> // std::floor +#include <cstddef> // std::size_t +#include <string> +#include <utility> // std::pair #include <vector> #include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" +#include "aidge/utils/ArrayHelpers.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO #include "aidge/utils/StaticAttributes.hpp" -#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { -enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims }; +enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, NoBias }; template <DimIdx_t DIM> class Conv_Op : public OperatorTensor, - public Registrable<Conv_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>, - public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t, - DimSize_t, std::array<DimSize_t, DIM>> { + public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>, + public StaticAttributes<ConvAttr, + std::array<DimSize_t, DIM>, + std::array<DimSize_t, DIM>, + DimSize_t, + DimSize_t, + std::array<DimSize_t, DIM>, + bool> { public: static const std::string Type; Conv_Op() = delete; - using Attributes_ = StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, - DimSize_t, DimSize_t, std::array<DimSize_t, DIM>>; + using Attributes_ = StaticAttributes<ConvAttr, + std::array<DimSize_t, DIM>, + 
std::array<DimSize_t, DIM>, + DimSize_t, + DimSize_t, + std::array<DimSize_t, DIM>, + bool>; template <ConvAttr e> using attr = typename Attributes_::template attr<e>; @@ -49,13 +62,15 @@ public: DimSize_t outChannels, const std::array<DimSize_t, DIM> &kernelDims, const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1), - const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) + const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1), + bool noBias = false) : OperatorTensor(Type, 1, 2, 1), Attributes_(attr<ConvAttr::StrideDims>(strideDims), attr<ConvAttr::DilationDims>(dilationDims), attr<ConvAttr::InChannels>(inChannels), attr<ConvAttr::OutChannels>(outChannels), - attr<ConvAttr::KernelDims>(kernelDims)) {} + attr<ConvAttr::KernelDims>(kernelDims), + attr<ConvAttr::NoBias>(noBias)) {} /** * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). @@ -65,7 +80,11 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? 
Registrar<Conv_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl) { + SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -118,8 +137,10 @@ public: } } - -std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override { + std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> + computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, + const std::vector<DimSize_t>& outputDims, + const IOIndex_t outputIdx = 0) const override { if (outputIdx != 0) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor."); } @@ -159,22 +180,25 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> co std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0); weightIdxDims[0] = firstEltDims[1]; - // Bias - const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel - const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]}; - // Result std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res; res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims)); res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims)); - res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims)); + + // Bias + if (! 
this->template getAttr<ConvAttr::NoBias>()){ + const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel + const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]}; + res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims)); + } return res; } AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); } + void setBackend(const std::string &name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this); + SET_IMPL_MACRO(Conv_Op<DIM>, *this, name); mOutputs[0]->setBackend(name, device); // By default, automatically set backend for weight and bias inputs @@ -211,12 +235,14 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels, const std::array<DimSize_t, DIM> &kernelDims, const std::string& name = "", const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1), - const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) { + const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1), + bool noBias = false) { // FIXME: properly handle default w&b initialization in every cases static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported"); - auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims), name); + auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims, noBias), name); addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w"); - addProducer(conv, 2, {outChannels}, "b"); + addProducer(conv, 2, {(noBias ? 
0 : outChannels)}, "b"); // already sets bias dims + return conv; } @@ -228,9 +254,10 @@ inline std::shared_ptr<Node> Conv( DimSize_t const (&kernelDims)[DIM], const std::string& name = "", const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1), - const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) { + const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1), + bool noBias = false) { static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported"); - return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims); + return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias); } } // namespace Aidge @@ -241,8 +268,9 @@ const char *const EnumStrings<Aidge::ConvAttr>::data[] = { "DilationDims", "InChannels", "OutChannels", - "KernelDims" + "KernelDims", + "NoBias" }; } -#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */ \ No newline at end of file +#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */ diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp index a3b537ba60d03209e078dc94348f001603d2f3f5..559c0fc7a97a3a882f6720a91d02dee1af70abd8 100644 --- a/include/aidge/operator/ConvDepthWise.hpp +++ b/include/aidge/operator/ConvDepthWise.hpp @@ -13,29 +13,33 @@ #define AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ #include <array> -#include <cmath> -#include <numeric> +#include <cmath> // std::floor +#include <cstddef> // std::size_t +#include <string> +#include <utility> // std::pair #include <vector> #include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" +#include "aidge/utils/ArrayHelpers.hpp" #include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { -enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims }; 
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims, NoBias }; template <DimIdx_t DIM> class ConvDepthWise_Op : public OperatorTensor, - public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>, + public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>, public StaticAttributes<ConvDepthWiseAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t, - std::array<DimSize_t, DIM>> { + std::array<DimSize_t, DIM>, + bool> { public: static const std::string Type; @@ -45,19 +49,22 @@ public: std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t, - std::array<DimSize_t, DIM>>; + std::array<DimSize_t, DIM>, + bool>; template <ConvDepthWiseAttr e> using attr = typename Attributes_::template attr<e>; constexpr ConvDepthWise_Op(const DimSize_t nbChannels, const std::array<DimSize_t, DIM> &kernel_dims, const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), - const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) + const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1), + bool no_bias=false) : OperatorTensor(Type, 1, 2, 1), Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims), attr<ConvDepthWiseAttr::DilationDims>(dilation_dims), attr<ConvDepthWiseAttr::Channels>(nbChannels), - attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {} + attr<ConvDepthWiseAttr::KernelDims>(kernel_dims), + attr<ConvDepthWiseAttr::NoBias>(no_bias)) {} /** * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). @@ -67,7 +74,11 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? 
Registrar<ConvDepthWise_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend()); + }else{ + mImpl = nullptr; + } } /** @@ -153,22 +164,24 @@ public: std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0); weightIdxDims[0] = firstEltDims[1]; - // Bias - const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel - const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]}; // Result std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res; res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims)); res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims)); - res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims)); + // Bias + if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){ + const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel + const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]}; + res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims)); + } return res; } AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); } void setBackend(const std::string &name, DeviceIdx_t device = 0) override { - mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this); + SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name); mOutputs[0]->setBackend(name, device); // By default, automatically set backend for weight and bias inputs @@ -192,12 +205,13 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels, const std::array<DimSize_t, DIM> &kernelDims, const std::string& name = "", const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1), - const std::array<DimSize_t, DIM> &dilationDims = 
create_array<DimSize_t,DIM>(1)) { + const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1), + bool noBias=false) { // FIXME: properly handle default w&b initialization in every cases static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported"); - auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims), name); + auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims, noBias), name); addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w"); - addProducer(convDW, 2, {nbChannels}, "b"); + addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b"); return convDW; } @@ -208,16 +222,17 @@ inline std::shared_ptr<Node> ConvDepthWise( DimSize_t const (&kernelDims)[DIM], const std::string& name = "", const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1), - const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) { + const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1), + bool noBias=false) { static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported"); - return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims); + return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias); } } // namespace Aidge namespace { template <> const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims", "Channels", - "KernelDims"}; + "KernelDims", "NoBias"}; } #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */ diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp index a033c6920a374003ad869bddbf5641c48fc5f6e2..49410db044518dc3ca2cc33285d570197d83b10a 100644 --- a/include/aidge/operator/Div.hpp +++ 
b/include/aidge/operator/Div.hpp @@ -12,21 +12,20 @@ #ifndef AIDGE_CORE_OPERATOR_DIV_H_ #define AIDGE_CORE_OPERATOR_DIV_H_ -#include <cassert> #include <memory> +#include <string> #include <vector> #include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/utils/Types.h" namespace Aidge { class Div_Op : public OperatorTensor, - public Registrable<Div_Op, std::string, std::unique_ptr<OperatorImpl>(const Div_Op&)> { + public Registrable<Div_Op, std::string, std::shared_ptr<OperatorImpl>(const Div_Op&)> { public: static const std::string Type; @@ -40,7 +39,11 @@ public: Div_Op(const Div_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? Registrar<Div_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl) { + SET_IMPL_MACRO(Div_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -53,11 +56,7 @@ public: void computeOutputDims() override final; - - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Div_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input_1", "data_input_2"}; diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp index 6995cea5e4af9a17cf3d24516d9840850e701669..5ec10522e889bb1188b2304940fd892c0928b414 100644 --- a/include/aidge/operator/Erf.hpp +++ b/include/aidge/operator/Erf.hpp @@ -12,22 +12,20 @@ #ifndef AIDGE_CORE_OPERATOR_ERF_H_ #define AIDGE_CORE_OPERATOR_ERF_H_ -#include <cassert> #include <memory> +#include <string> #include <vector> -#include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" 
#include "aidge/graph/Node.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { class Erf_Op : public OperatorTensor, - public Registrable<Erf_Op, std::string, std::unique_ptr<OperatorImpl>(const Erf_Op&)> { + public Registrable<Erf_Op, std::string, std::shared_ptr<OperatorImpl>(const Erf_Op&)> { public: static const std::string Type; @@ -40,7 +38,11 @@ public: Erf_Op(const Erf_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? Registrar<Erf_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl) { + SET_IMPL_MACRO(Erf_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -51,10 +53,7 @@ public: return std::make_shared<Erf_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Erf_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp index f6d81b5781dd25c990f496fa9f592502c9705eba..39b28c125c917f07c2cf238988e68075adeceb8e 100644 --- a/include/aidge/operator/FC.hpp +++ b/include/aidge/operator/FC.hpp @@ -13,13 +13,10 @@ #define AIDGE_CORE_OPERATOR_FC_H_ #include <array> -#include <cmath> -#include <numeric> #include <memory> #include <vector> #include "aidge/utils/Types.h" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" @@ -32,7 +29,7 @@ enum class FCAttr { OutChannels, NoBias }; class FC_Op : public OperatorTensor, public Registrable<FC_Op, std::string, - std::unique_ptr<OperatorImpl>(const FC_Op &)>, + std::shared_ptr<OperatorImpl>(const FC_Op &)>, public StaticAttributes<FCAttr, DimSize_t, bool> { public: static const std::string Type; @@ -57,53 +54,26 @@ public: : OperatorTensor(op), 
Attributes_(op) { - mImpl = op.mImpl ? Registrar<FC_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(FC_Op, *this, op.backend()); + }else{ + mImpl = nullptr; + } } /** * @brief Clone the operator using its copy-constructor. * @see Operator::FC_Op */ - std::shared_ptr<Operator> clone() const override { + std::shared_ptr<Operator> clone() const override final { return std::make_shared<FC_Op>(*this); } - void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final { - assert(inputIdx < 3 && "operators supports only 3 inputs"); - assert(data->type() == Tensor::Type && "input data must be of Tensor type"); - // TODO: FIXME: check this, because data dims may not be initialized at this point... - //if (inputIdx == 2) { - // assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template getAttr<FCAttr::NoBias>()) == false ? static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>()) : 0)); - // assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1); - //} - mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data); - if (inputIdx == 0 && getInput(0)->nbDims() == 1) - mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()}); - } - - void computeOutputDims() override final { - bool associated = true; - for (IOIndex_t i = 0; i < nbInputs(); ++i) { - if (!getInput(i)) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i); - } - associated &= !(getInput(i)->empty()); - } - if (associated) { - // <batch, OutChannels> - mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()}); - } - } - + void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<FC_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); + void 
computeOutputDims() override final; - // By default, automatically set backend for weight and bias inputs - getInput(1)->setBackend(name, device); - getInput(2)->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input", "weight", "bias"}; @@ -128,4 +98,4 @@ const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels", "NoBias"}; } -#endif /* AIDGE_CORE_OPERATOR_FC_H_ */ \ No newline at end of file +#endif /* AIDGE_CORE_OPERATOR_FC_H_ */ diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp index f6647f99151304d0cf083aed109cc642c9f1ecc2..b7d18e6443404730bbcb73cf7e6da97b8b3e6a7c 100644 --- a/include/aidge/operator/Gather.hpp +++ b/include/aidge/operator/Gather.hpp @@ -12,16 +12,14 @@ #ifndef AIDGE_CORE_OPERATOR_GATHER_H_ #define AIDGE_CORE_OPERATOR_GATHER_H_ -#include <cassert> +#include <cstdint> // std::int64_t #include <memory> +#include <string> #include <vector> #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" -#include "aidge/operator/Producer.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Types.h" @@ -32,7 +30,7 @@ enum class GatherAttr { Indices, GatheredShape, Axis }; class Gather_Op : public OperatorTensor, public Registrable<Gather_Op, std::string, - std::unique_ptr<OperatorImpl>(const Gather_Op&)>, + std::shared_ptr<OperatorImpl>(const Gather_Op&)>, public StaticAttributes<GatherAttr, std::vector<std::int64_t>, std::vector<DimSize_t>, std::int64_t> { public: @@ -58,7 +56,11 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? 
Registrar<Gather_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Gather_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -71,10 +73,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Gather_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp index c315e671c2f084af869e3b21107066137496366b..e7d60285b4d45826f1d73635d54f4532b4fb1598 100644 --- a/include/aidge/operator/GenericOperator.hpp +++ b/include/aidge/operator/GenericOperator.hpp @@ -15,8 +15,6 @@ #include <memory> #include <vector> #include <string> -#include <cassert> -#include <cstring> #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -38,8 +36,8 @@ private: public: GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut) : OperatorTensor(type, nbData, nbParam, nbOut) - { - mImpl = std::make_shared<OperatorImpl>(*this); + { + mImpl = std::make_shared<OperatorImpl>(*this, ""); } /** @@ -49,9 +47,11 @@ public: GenericOperator_Op(const GenericOperator_Op& op) : OperatorTensor(op) { - mImpl = std::make_shared<OperatorImpl>(*this); + mImpl = std::make_shared<OperatorImpl>(*this, op.backend()); } + ~GenericOperator_Op() = default; + /** * @brief Clone the operator using its copy-constructor. 
* @see Operator::GenericOperator_Op @@ -60,58 +60,28 @@ public: return std::make_shared<GenericOperator_Op>(*this); } +public: + void computeOutputDims() override final; + + bool outputDimsForwarded() const override final; + + void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); } + void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); } + // Helper functions that can be used with setComputeOutputDims(): static const ComputeDimsFunc Identity; static const ComputeDimsFunc InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs); - inline void setComputeOutputDims(ComputeDimsFunc func) { mComputeOutputDims = func; } - - - void computeOutputDims() override final { - if (mComputeOutputDims) { - std::vector<std::vector<size_t>> inputsDims(nbInputs(), std::vector<size_t>()); - for (std::size_t i = 0; i < nbInputs(); ++i) { - if (getInput(i)) { - inputsDims[i] = getInput(i)->dims(); - } - } - - const auto& outputsDims = mComputeOutputDims(inputsDims); - assert(outputsDims.size() == nbOutputs() && "The provided ComputeDimsFunc function returns the wrong number of outputs"); - for (std::size_t i = 0; i < nbOutputs(); ++i) { - mOutputs[i]->resize(outputsDims[i]); - } - } - else { - assert(false && "Cannot compute output dim of a GenericOperator"); - } - } - - bool outputDimsForwarded() const override final { - if (mComputeOutputDims) { - return !(mOutputs[0]->empty()); - } - else { - assert(false && "GenericOperator cannot forward dims"); - return false; - } - } - - - ~GenericOperator_Op() = default; - - void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); } - void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); } }; /** * @brief Fictive custom operator not associated with any implementation. 
* Allows to import unknown operators and simulate new ones. * @param type Type of the fictive operator. - * @param nbDataIn Number of input data. - * @param nbIn Number input data + number of learnt parameters. + * @param nbData Number of input data. + * @param nbParam Number of parameters. * @param nbOut Number of output data. * @param name (optional) name of the Operator. * @return std::shared_ptr<Node> Node associated with the Generic Operator. diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp index c2e6eaff77971c3dcf350a02bc5089d08b5c8488..27432bc5bb251003e9e93261593e12c2fa704f3d 100644 --- a/include/aidge/operator/Identity.hpp +++ b/include/aidge/operator/Identity.hpp @@ -40,9 +40,9 @@ public: static const std::string Type; Identity_Op() - : OperatorTensor(Type, 1, 0, 1) + : OperatorTensor(Type, 1, 0, 1) { - mImpl = std::make_shared<OperatorImpl>(*this); + mImpl = std::make_shared<OperatorImpl>(*this, ""); } /** @@ -52,7 +52,7 @@ public: Identity_Op(const Identity_Op& op) : OperatorTensor(op) { - mImpl = std::make_shared<OperatorImpl>(*this); + mImpl = std::make_shared<OperatorImpl>(*this, op.backend()); } /** @@ -65,11 +65,16 @@ public: void computeOutputDims() override final {} // Do nothing + /** + * @brief Check if output dimensions have been computed. + * @note Since Indentity has no output Tensor, this function checks if its + * only input's dimensions have been computed. + * + * @return true Input has dimensions. + * @return false Input has no dimensions or is a nullptr. + */ bool outputDimsForwarded() const override final { - if (mInputs[0]) - return !mInputs[0]->empty(); - else - return false; + return mInputs[0] ? 
!mInputs[0]->empty() : false; } diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp index 5976f1d88d70ae7fb716f4038e57da95242c3551..83a7c30fce7e0f68576f367d4b0bfe48edf4b3b6 100644 --- a/include/aidge/operator/LeakyReLU.hpp +++ b/include/aidge/operator/LeakyReLU.hpp @@ -30,7 +30,7 @@ enum class LeakyReLUAttr { }; class LeakyReLU_Op : public OperatorTensor, - public Registrable<LeakyReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const LeakyReLU_Op&)>, + public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>, public StaticAttributes<LeakyReLUAttr, float> { public: static const std::string Type; @@ -54,7 +54,11 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -68,7 +72,7 @@ public: void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<LeakyReLU_Op>::create(name)(*this); + SET_IMPL_MACRO(LeakyReLU_Op, *this, name); mOutputs[0]->setBackend(name, device); } diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp index a011c8666bba55eb7254a8efcd432a3f680cd461..43bd8b1654206df15cd869cf2d37a216fcc4a733 100644 --- a/include/aidge/operator/MatMul.hpp +++ b/include/aidge/operator/MatMul.hpp @@ -17,7 +17,6 @@ #include <vector> #include "aidge/utils/Types.h" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Registrar.hpp" @@ -27,7 +26,7 @@ namespace Aidge { class MatMul_Op : public OperatorTensor, public Registrable<MatMul_Op, std::string, - std::unique_ptr<OperatorImpl>(const MatMul_Op &)> { + std::shared_ptr<OperatorImpl>(const MatMul_Op &)> { public: static const std::string Type; @@ -39,7 +38,11 @@ public: */ 
MatMul_Op(const MatMul_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? Registrar<MatMul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(MatMul_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -64,10 +67,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override final { - mImpl = Registrar<MatMul_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName() { return {"data_input1", "data_input2"}; @@ -82,4 +82,4 @@ inline std::shared_ptr<Node> MatMul(const std::string& name = "") { } } // namespace Aidge -#endif /* AIDGE_CORE_OPERATOR__MATMUL_H_ */ +#endif /* AIDGE_CORE_OPERATOR_MATMUL_H_ */ diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp index b07fa38a41c664c4fcbf90227914264ec68390a0..5b09aa02cd0665172a9ae69549d8d9311e10d024 100644 --- a/include/aidge/operator/MaxPooling.hpp +++ b/include/aidge/operator/MaxPooling.hpp @@ -13,16 +13,20 @@ #define AIDGE_CORE_OPERATOR_MAXPOOLING_H_ #include <array> -#include <numeric> +#include <cmath> // std::ceil, std::floor +#include <cstddef> // std::size_t +#include <functional> +#include <memory> +#include <stdexcept> // std::runtime_error #include <vector> -#include <cmath> #include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" -#include "aidge/operator/Producer.hpp" -#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/ArrayHelpers.hpp" +#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/Registrar.hpp" +#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -30,7 +34,7 @@ enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode }; template <DimIdx_t DIM> class 
MaxPooling_Op : public OperatorTensor, - public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>, + public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>, public StaticAttributes<MaxPoolingAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, @@ -64,7 +68,11 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl) { + SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -105,7 +113,7 @@ public: void setBackend(const std::string &name, DeviceIdx_t device = 0) override { - mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this); + SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, name); mOutputs[0]->setBackend(name, device); } diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp index 8991ccb44eb4926f375ff102858f4683e1bea4d8..7de34563adcaabd63ab036232d4d7b6539fd11eb 100644 --- a/include/aidge/operator/Memorize.hpp +++ b/include/aidge/operator/Memorize.hpp @@ -12,17 +12,17 @@ #ifndef AIDGE_CORE_OPERATOR_MEMORIZE_H_ #define AIDGE_CORE_OPERATOR_MEMORIZE_H_ -#include <cassert> #include <memory> +#include <string> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" #include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" -#include "aidge/utils/Types.h" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" namespace Aidge { enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep }; @@ -47,14 +47,19 @@ public: } /** - * @brief Copy-constructor. 
Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). + * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), + * but not its input tensors (the new operator has no input associated). * @param op Operator to copy. */ Memorize_Op(const Memorize_Op& op) : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? Registrar<Memorize_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl) { + SET_IMPL_MACRO(Memorize_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } mOutputs[1] = mOutputs[0]; } @@ -66,10 +71,7 @@ public: return std::make_shared<Memorize_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Memorize_Op>::create({name})(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; void computeOutputDims() override; bool outputDimsForwarded() const override; @@ -98,4 +100,4 @@ const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = { }; } -#endif /* AIDGE_CORE_OPERATOR_MEMORIZE_H_ */ \ No newline at end of file +#endif /* AIDGE_CORE_OPERATOR_MEMORIZE_H_ */ diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp index 7f36eca2c4586f61f72e0d842d2d576450cd1596..4d719b6cb755bb2ddff96905f2e5b6bc24844e37 100644 --- a/include/aidge/operator/MetaOperator.hpp +++ b/include/aidge/operator/MetaOperator.hpp @@ -12,10 +12,18 @@ #ifndef AIDGE_CORE_OPERATOR_METAOPERATOR_H_ #define AIDGE_CORE_OPERATOR_METAOPERATOR_H_ -#include "aidge/operator/OperatorTensor.hpp" +#include <array> +#include <memory> +#include <string> + +#include "aidge/data/Data.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/graph/GraphView.hpp" #include "aidge/graph/OpArgs.hpp" +#include "aidge/operator/OperatorTensor.hpp" #include "aidge/scheduler/Scheduler.hpp" +#include 
"aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" namespace Aidge { class MetaOperator_Op : public OperatorTensor, @@ -28,7 +36,7 @@ public: std::weak_ptr<Node> mUpperNode; public: - MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph); + MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph); /** * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp index 8f1de7c0e92558a4b47962c3a375764e1bd1c2ee..fb3aa6384fc703d758cb8753dcf54c4694f96bd4 100644 --- a/include/aidge/operator/MetaOperatorDefs.hpp +++ b/include/aidge/operator/MetaOperatorDefs.hpp @@ -35,11 +35,12 @@ inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels, const std::string& name = "", const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0), - const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) + const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1), + bool no_bias = false) { // Construct micro-graph auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0); - auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : ""); + auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? 
name + "_conv" : ""); auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name); addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w"); @@ -56,9 +57,10 @@ inline std::shared_ptr<Node> PaddedConv( const std::string& name = "", const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0), - const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) + const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1), + bool no_bias = false) { - return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims); + return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias); } template <std::array<DimSize_t, 1>::size_type DIM> @@ -67,11 +69,12 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels, const std::string& name = "", const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0), - const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) + const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1), + bool no_bias = false) { // Construct micro-graph auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0); - auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : ""); + auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? 
name + "_conv" : ""); auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name); addProducer(metaOp, 1, append(nb_channels, append(DimSize_t(1), kernel_dims)), "w"); @@ -87,9 +90,10 @@ inline std::shared_ptr<Node> PaddedConvDepthWise( const std::string& name = "", const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0), - const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) + const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1), + bool no_bias = false) { - return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims); + return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias); } template <std::array<DimSize_t, 1>::size_type DIM> diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp index 62fb9897384673c695895b54557b4cf637aa2447..3652cf9697c6bcfea4befe4cdcdf5b9efff8b70c 100644 --- a/include/aidge/operator/Move.hpp +++ b/include/aidge/operator/Move.hpp @@ -72,4 +72,4 @@ inline std::shared_ptr<Node> Move(const std::string& name = "") { } } -#endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */ \ No newline at end of file +#endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */ diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp index 8758021a9c3de1707a96bbfafc21686ded8b7e40..cc9fba59431356a132330e453288f2f6e7141178 100644 --- a/include/aidge/operator/Mul.hpp +++ b/include/aidge/operator/Mul.hpp @@ -19,7 +19,6 @@ #include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/utils/Types.h" @@ -29,7 +28,7 @@ namespace Aidge { * @brief Tensor element-wise multiplication. 
*/ class Mul_Op : public OperatorTensor, - public Registrable<Mul_Op, std::string, std::unique_ptr<OperatorImpl>(const Mul_Op&)> { + public Registrable<Mul_Op, std::string, std::shared_ptr<OperatorImpl>(const Mul_Op&)> { public: static const std::string Type; @@ -43,7 +42,11 @@ public: Mul_Op(const Mul_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? Registrar<Mul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl) { + SET_IMPL_MACRO(Mul_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -56,10 +59,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Mul_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input_1", "data_input_2"}; @@ -74,4 +74,4 @@ inline std::shared_ptr<Node> Mul(const std::string& name = "") { } } // namespace Aidge -#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */ \ No newline at end of file +#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */ diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp index a0d2292b7860baa60fe537698784d4d250c81f42..17c8204c1fec4a54e8194bf2db1dc6e5a616fd23 100644 --- a/include/aidge/operator/Operator.hpp +++ b/include/aidge/operator/Operator.hpp @@ -81,7 +81,7 @@ public: virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0; /** - * @brief Set the specified input by performing a deep copy of the given data. + * @brief Set the specified input value by performing a deep copy of the given data. * The pointer itself is not changed, thus keeping the current connections. * @param inputIdx Index of the input to set. * @param data Data to copy. 
@@ -90,7 +90,7 @@ public: virtual void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) = 0; virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const = 0; /** - * @brief Set the specified output by performing a deep copy of the given data. + * @brief Set the specified output value by performing a deep copy of the given data. * The pointer itself is not changed, thus keeping the current connections. * @param inputIdx Index of the input to set. */ @@ -110,20 +110,29 @@ public: /////////////////////////////////////////////////////// // IMPLEMENTATION /////////////////////////////////////////////////////// + std::string backend() const noexcept { + return mImpl ? mImpl->backend() : ""; + } virtual void setBackend(const std::string& name, DeviceIdx_t device = 0) = 0; virtual void setDataType(const DataType& dataType) const = 0; /** - * @brief Set the a new OperatorImpl to the Operator + * @brief Set a new OperatorImpl to the Operator * */ inline void setImpl(std::shared_ptr<OperatorImpl> impl) { mImpl = impl; } /** - * @brief Minimum amount of data from a specific input required by the - * implementation to be run. + * @brief Get the OperatorImpl of the Operator * + */ + inline std::shared_ptr<OperatorImpl> getImpl() const noexcept { + return mImpl; + } + + /** + * @brief Minimum amount of data from a specific input for one computation pass. * @param inputIdx Index of the input analysed. 
* @return NbElts_t */ diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp index 504a416488651d43126a60981cd8afe0f95821f2..adf45c2d8311112fa145097ee98f46d120bd41ff 100644 --- a/include/aidge/operator/OperatorTensor.hpp +++ b/include/aidge/operator/OperatorTensor.hpp @@ -17,12 +17,12 @@ #include <vector> #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/operator/Operator.hpp" #include "aidge/utils/Types.h" namespace Aidge { +class Tensor; class OperatorTensor : public Operator { /* TODO: Add an attribute specifying the type of Data used by the Operator. * The same way ``Type`` attribute specifies the type of Operator. Hence this @@ -41,26 +41,9 @@ public: OperatorTensor() = delete; OperatorTensor(const std::string& type, const IOIndex_t nbData, const IOIndex_t nbParam, - const IOIndex_t nbOut) - : Operator(type, nbData, nbParam, nbOut, OperatorType::Tensor), - mInputs(std::vector<std::shared_ptr<Tensor>>(nbData + nbParam, nullptr)), - mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) { - for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) { - mOutputs[i] = std::make_shared<Tensor>(); - mOutputs[i]->setDataType(DataType::Float32); - } - } + const IOIndex_t nbOut); - OperatorTensor(const OperatorTensor& other) - : Operator(other), - mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)), - mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) { - for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) { - mOutputs[i] = std::make_shared<Tensor>(); - // mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i))); - // datatype already copied - } - } + OperatorTensor(const OperatorTensor& other); ~OperatorTensor(); @@ -76,17 +59,13 @@ public: void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final; void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final; 
const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const; - inline std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final { - return std::static_pointer_cast<Data>(getInput(inputIdx)); - } + std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final; // output management void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override; void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) override; virtual const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const; - inline std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final { - return std::static_pointer_cast<Data>(getOutput(outputIdx)); - } + std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final; /////////////////////////////////////////////////// /////////////////////////////////////////////////// diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp index bb961295bfaad2999af01460c49833085ff50a92..dce2a6e9e5ea9e0c5fe9a841c587c1f7bbe36fc7 100644 --- a/include/aidge/operator/Pad.hpp +++ b/include/aidge/operator/Pad.hpp @@ -31,7 +31,7 @@ enum class PadBorderType { Constant, Edge, Reflect, Wrap }; template <DimIdx_t DIM> class Pad_Op : public OperatorTensor, - public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>, + public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>, public StaticAttributes<PadAttr, std::array<DimSize_t, 2*DIM>, PadBorderType, @@ -98,7 +98,7 @@ public: } void setBackend(const std::string &name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this); + SET_IMPL_MACRO(Pad_Op<DIM>, *this, name); mOutputs[0]->setBackend(name, device); } diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp index 
cb4ba871a55b9dfd1c835c05949c3c18966b7f5a..9109ccaeb8bc648fe74510216fad93299740b9bf 100644 --- a/include/aidge/operator/Pop.hpp +++ b/include/aidge/operator/Pop.hpp @@ -12,17 +12,16 @@ #ifndef AIDGE_CORE_OPERATOR_POP_H_ #define AIDGE_CORE_OPERATOR_POP_H_ -#include <cassert> #include <memory> +#include <string> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" -#include "aidge/utils/Types.h" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" namespace Aidge { enum class PopAttr { ForwardStep }; @@ -40,9 +39,7 @@ public: Pop_Op() : OperatorTensor(Type, 1, 0, 1), Attributes_(attr<PopAttr::ForwardStep>(0)) - { - - } + {} /** * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). @@ -52,7 +49,11 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? 
Registrar<Pop_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Pop_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -63,10 +64,7 @@ public: return std::make_shared<Pop_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Pop_Op>::create({name})(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; void computeOutputDims() override final; void updateConsummerProducer() override; diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp index ba8d3d05877f9aa543518fff1d88f4e8a436b712..f2becdc60ceb44c19e341496f71e09f061cea55f 100644 --- a/include/aidge/operator/Pow.hpp +++ b/include/aidge/operator/Pow.hpp @@ -12,22 +12,20 @@ #ifndef AIDGE_CORE_OPERATOR_POW_H_ #define AIDGE_CORE_OPERATOR_POW_H_ -#include <cassert> #include <memory> +#include <string> #include <vector> #include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" #include "aidge/utils/Types.h" namespace Aidge { class Pow_Op : public OperatorTensor, - public Registrable<Pow_Op, std::string, std::unique_ptr<OperatorImpl>(const Pow_Op&)> { + public Registrable<Pow_Op, std::string, std::shared_ptr<OperatorImpl>(const Pow_Op&)> { public: static const std::string Type; @@ -40,7 +38,11 @@ public: Pow_Op(const Pow_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? 
Registrar<Pow_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Pow_Op, *this, op.backend()); + }else{ + mImpl = nullptr; + } } /** @@ -54,15 +56,12 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Pow_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; - static const std::vector<std::string> getInputsName(){ + static const std::vector<std::string> getInputsName() { return {"data_input_1", "data_input_2"}; } - static const std::vector<std::string> getOutputsName(){ + static const std::vector<std::string> getOutputsName() { return {"data_output"}; } }; @@ -72,4 +71,4 @@ inline std::shared_ptr<Node> Pow(const std::string& name = "") { } } // namespace Aidge -#endif /* AIDGE_CORE_OPERATOR_POW_H_ */ \ No newline at end of file +#endif /* AIDGE_CORE_OPERATOR_POW_H_ */ diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp index 0731498dd3e06541ed82a86a98c2ae0bb355f413..66c66d90b4ed465d31ed20dd41245fed7a71d58e 100644 --- a/include/aidge/operator/Producer.hpp +++ b/include/aidge/operator/Producer.hpp @@ -12,7 +12,9 @@ #ifndef AIDGE_CORE_OPERATOR_PRODUCER_H_ #define AIDGE_CORE_OPERATOR_PRODUCER_H_ +#include <cstddef> #include <array> +#include <memory> #include <vector> #include "aidge/utils/Types.h" @@ -28,7 +30,7 @@ enum class ProdAttr { Constant }; class Producer_Op : public OperatorTensor, - public Registrable<Producer_Op, std::string, std::unique_ptr<OperatorImpl>( + public Registrable<Producer_Op, std::string, std::shared_ptr<OperatorImpl>( const Producer_Op &)>, public StaticAttributes<ProdAttr, bool> { public: @@ -42,39 +44,40 @@ public: Producer_Op(const std::array<DimSize_t, DIM>& dims, bool constant = false) : OperatorTensor(Type, 0, 0, 1), - 
Attributes_(attr<ProdAttr::Constant>(constant)) + Attributes_(attr<ProdAttr::Constant>(constant)) { mOutputs[0]->resize(dims); - mImpl = std::make_shared<OperatorImpl>(*this); + // mImpl = std::make_shared<OperatorImpl>(*this, ""); + mImpl = nullptr; } - Producer_Op(const std::shared_ptr<Tensor> tensor, bool constant = false) - : OperatorTensor(Type, 0, 0, 1), - Attributes_(attr<ProdAttr::Constant>(constant)) - { - mOutputs[0] = tensor; // copy the pointer of the Tensor - mImpl = std::make_shared<OperatorImpl>(*this); - } + /** + * @brief Construct a new Producer_Op object from a Tensor. + * + * @param tensor Tensor to set in the Producer. + * @param constant Whether the Producer should be considered constant. + */ + Producer_Op(const std::shared_ptr<Tensor> tensor, bool constant = false); /** - * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). + * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), + * but not its input tensors (the new operator has no input associated). * @param op OperatorTensor to copy. */ - Producer_Op(const Producer_Op& op) - : OperatorTensor(op), - Attributes_(op) - { - for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) { - mOutputs[i] = std::make_shared<Tensor>(*(op.getOutput(i))); - } - mImpl = (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})) - ? Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) - : std::make_shared<OperatorImpl>(*this); - } + Producer_Op(const Producer_Op& op); +public: + /** + * @brief Conversion operator from Producer to Tensor. + * + * @return std::shared_ptr<Tensor> + */ + operator std::shared_ptr<Tensor>() const { return mOutputs[0]; } + +public: /** * @brief Clone the operator using its copy-constructor.
- * @see Operator::Producer_Op + * @see Operator::Producer_Op(const Producer_Op&) */ std::shared_ptr<Operator> clone() const override { return std::make_shared<Producer_Op>(*this); @@ -84,19 +87,14 @@ public: AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input."); } - void computeOutputDims() override final {} + void computeOutputDims() noexcept override final {} - bool outputDimsForwarded() const override final {return true;} + inline bool outputDimsForwarded() const noexcept override final { return true; } inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - if (Registrar<Producer_Op>::exists({name})) { - mImpl = Registrar<Producer_Op>::create({name})(*this); - } - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {}; @@ -105,7 +103,6 @@ public: return {"data_output"}; } -public: void forward() override final { fmt::print("Basic Producer forward() function.\n"); } diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp index 0bb7cdffe421b973ae7c86b4569e7464b3cf6da4..963de31c49f48784e92434b2b563d6c008e2d4fd 100644 --- a/include/aidge/operator/ReLU.hpp +++ b/include/aidge/operator/ReLU.hpp @@ -16,17 +16,17 @@ #include <memory> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { class ReLU_Op : public OperatorTensor, - public Registrable<ReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const ReLU_Op&)> { + public Registrable<ReLU_Op, std::string, 
std::shared_ptr<OperatorImpl>(const ReLU_Op&)> { public: static const std::string Type; @@ -39,7 +39,11 @@ public: ReLU_Op(const ReLU_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? Registrar<ReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(ReLU_Op, *this, op.backend()); + }else{ + mImpl = nullptr; + } } /** @@ -51,10 +55,7 @@ public: } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<ReLU_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; @@ -69,4 +70,4 @@ inline std::shared_ptr<Node> ReLU(const std::string& name = "") { } } -#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */ \ No newline at end of file +#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */ diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp index 5f07cddfa667e7e494defe38a5667332744c3e20..ab27e4e0233052f7cc155ed0375175a27d3edcf5 100644 --- a/include/aidge/operator/ReduceMean.hpp +++ b/include/aidge/operator/ReduceMean.hpp @@ -12,17 +12,15 @@ #ifndef AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ #define AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ -#include <algorithm> // std::for_each -#include <array> -#include <cmath> #include <cstdint> // std::int32_t -#include <numeric> +#include <memory> +#include <string> #include <vector> -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" +#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" @@ -30,21 +28,20 @@ namespace Aidge { enum class ReduceMeanAttr { Axes, KeepDims }; -template <DimIdx_t DIM> class ReduceMean_Op : public OperatorTensor, - public Registrable<ReduceMean_Op<DIM>, 
std::string, std::unique_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>, - public StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t> { + public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>, + public StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t> { public: static const std::string Type; ReduceMean_Op() = delete; - using Attributes_ = StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t>; + using Attributes_ = StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t>; template <ReduceMeanAttr e> using attr = typename Attributes_::template attr<e>; - constexpr ReduceMean_Op(const std::array<std::int32_t, DIM> &axes, DimSize_t keep_dims) + ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims) : OperatorTensor(Type, 1, 0, 1), Attributes_(attr<ReduceMeanAttr::Axes>(axes), attr<ReduceMeanAttr::KeepDims>(keep_dims)) {} @@ -53,11 +50,15 @@ class ReduceMean_Op : public OperatorTensor, * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @param op Operator to copy. */ - ReduceMean_Op(const ReduceMean_Op<DIM>& op) + ReduceMean_Op(const ReduceMean_Op& op) : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? 
Registrar<ReduceMean_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -65,74 +66,51 @@ class ReduceMean_Op : public OperatorTensor, * @see Operator::ReduceMean_Op */ std::shared_ptr<Operator> clone() const override { - return std::make_shared<ReduceMean_Op<DIM>>(*this); + return std::make_shared<ReduceMean_Op>(*this); } - void computeOutputDims() override final { - if (!getInput(0)) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor"); - } - if (!getInput(0)->empty()) { - // make Axes attribute positive - std::array<std::int32_t, DIM>& axes = this->template getAttr<ReduceMeanAttr::Axes>(); - std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) { - if (val < 0) - val+=static_cast<std::int32_t>(getInput(0)->nbDims()); - }); - std::sort(axes.begin(), axes.end()); - - // build output dimensions - std::vector<DimSize_t> outDims = getInput(0)->dims(); - if (this->template getAttr<ReduceMeanAttr::KeepDims>()) { - std::for_each(axes.begin(), axes.end(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; }); - } - else { - for (auto it = axes.crbegin(); it != axes.crend(); ++it) - outDims.erase(outDims.begin() + static_cast<std::size_t>(*it)); - } - - if(outDims.size()>0) - mOutputs[0]->resize(outDims); - else - mOutputs[0]->resize({1}); - } - } + void computeOutputDims() override final; - void setBackend(const std::string &name, DeviceIdx_t device = 0) override { - mImpl = Registrar<ReduceMean_Op<DIM>>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string &name, DeviceIdx_t device = 0) override final; - static const std::vector<std::string> getInputsName(){ + static const std::vector<std::string> getInputsName() { return {"data_input"}; } - static const std::vector<std::string> getOutputsName(){ + static const std::vector<std::string> 
getOutputsName() { return {"data_output"}; } }; -template <std::array<DimSize_t, 1>::size_type DIM> -inline std::shared_ptr<Node> ReduceMean(const std::array<std::int32_t, DIM> &axes, +/** + * @brief Compute the mean value of a Tensor over the provided axes. Dimensions + * may be reduced by erasing the provided axes or not. + * + * @param axes Dimensions over which data mean should be computed. + * @param keep_dims Whether or not reduced dimensions are to be erased. + * @param name Name of the Operator. + * @return std::shared_ptr<Node> Node containing the Operator. + */ +inline std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes, DimSize_t keep_dims=1, const std::string& name = "") { // FIXME: properly handle default w&b initialization in every cases - static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported"); - return std::make_shared<Node>(std::make_shared<ReduceMean_Op<static_cast<DimIdx_t>(DIM)>>(axes, keep_dims), name); + AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported"); + return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims), name); } // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction -template <DimSize_t DIM> -inline std::shared_ptr<Node> ReduceMean( - std::int32_t const (&axes)[DIM], - DimSize_t keep_dims = 1, - const std::string& name = "") { - static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported"); - return ReduceMean(to_array(axes), keep_dims, name); -} - -template <DimIdx_t DIM> -const std::string ReduceMean_Op<DIM>::Type = "ReduceMean"; +// template <DimSize_t DIM> +// inline std::shared_ptr<Node> ReduceMean( +// std::int32_t const (&axes)[DIM], +// DimSize_t keep_dims = 1, +// const std::string& name = "") { +// static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported"); +// return 
ReduceMean(to_array(axes), keep_dims, name); +// } + +// template <DimIdx_t DIM> +// const std::string ReduceMean_Op::Type = "ReduceMean"; } // namespace Aidge diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp index 32d71d5adc3cfd92c9840dcb5bc61bfb6399c6db..060029bb87ea142728056b3817b8162d566cb458 100644 --- a/include/aidge/operator/Reshape.hpp +++ b/include/aidge/operator/Reshape.hpp @@ -12,7 +12,6 @@ #ifndef AIDGE_CORE_OPERATOR_RESHAPE_H_ #define AIDGE_CORE_OPERATOR_RESHAPE_H_ -#include <cassert> #include <memory> #include <vector> @@ -28,7 +27,7 @@ namespace Aidge { enum class ReshapeAttr { Shape }; class Reshape_Op : public OperatorTensor, - public Registrable<Reshape_Op, std::string, std::unique_ptr<OperatorImpl>(const Reshape_Op&)>, + public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)>, public StaticAttributes<ReshapeAttr, std::vector<std::int64_t>> { public: @@ -53,7 +52,11 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? 
Registrar<Reshape_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Reshape_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -66,10 +69,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Reshape_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp index 54f1d98d2f61d18dd821c9f0a6b574bb52b0c9f0..8f54ab217631ac69a4e16555f8e58f550ab0156c 100644 --- a/include/aidge/operator/Scaling.hpp +++ b/include/aidge/operator/Scaling.hpp @@ -9,18 +9,17 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_Scaling_H__ -#define __AIDGE_CORE_OPERATOR_Scaling_H__ +#ifndef AIDGE_CORE_OPERATOR_SCALING_H_ +#define AIDGE_CORE_OPERATOR_SCALING_H_ #include <vector> #include <memory> -#include "aidge/utils/StaticAttributes.hpp" -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -55,7 +54,11 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? 
Registrar<Scaling_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Scaling_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -66,10 +69,7 @@ public: return std::make_shared<Scaling_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Scaling_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName() { return {"data_input"}; @@ -95,4 +95,4 @@ const char* const EnumStrings<Aidge::ScalingAttr>::data[] = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"}; } -#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */ \ No newline at end of file +#endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */ diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp index ab97bf3211edb53d65a90d16dba5d0c66dfa33da..bea9fc45eaa7f17f71963106b5bd3e1340a48a92 100644 --- a/include/aidge/operator/Sigmoid.hpp +++ b/include/aidge/operator/Sigmoid.hpp @@ -39,7 +39,11 @@ public: Sigmoid_Op(const Sigmoid_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? 
Registrar<Sigmoid_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Sigmoid_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -51,10 +55,7 @@ public: } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Sigmoid_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp index 4a073bc525640846c28d718d09741a67d499830e..f68aa17f480038d8ff7850577c438cfdc6704d59 100644 --- a/include/aidge/operator/Slice.hpp +++ b/include/aidge/operator/Slice.hpp @@ -28,7 +28,7 @@ enum class SliceAttr { Starts, Ends, Axes }; class Slice_Op : public OperatorTensor, - public Registrable<Slice_Op, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op &)>, + public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)>, public StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int64_t>> { public: static const std::string Type; @@ -55,8 +55,11 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? 
Registrar<Slice_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) - : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Slice_Op, *this, op.backend()); + }else{ + mImpl = nullptr; + } } public: @@ -69,7 +72,7 @@ public: void computeOutputDims() override final; void setBackend(const std::string &name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Slice_Op>::create(name)(*this); + SET_IMPL_MACRO(Slice_Op, *this, name); mOutputs[0]->setBackend(name, device); } diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp index ed6689dc97ef17276df260cd90649f2a75b10007..d48dbc2b60e46eb5c074b8adae065383e29b1769 100644 --- a/include/aidge/operator/Softmax.hpp +++ b/include/aidge/operator/Softmax.hpp @@ -12,14 +12,10 @@ #ifndef AIDGE_CORE_OPERATOR_SOFTMAX_H_ #define AIDGE_CORE_OPERATOR_SOFTMAX_H_ -#include <cassert> #include <memory> #include <vector> - #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" @@ -33,7 +29,7 @@ enum class SoftmaxAttr { AxisIdx }; class Softmax_Op : public OperatorTensor, public Registrable<Softmax_Op, std::string, - std::unique_ptr<OperatorImpl>(const Softmax_Op&)>, + std::shared_ptr<OperatorImpl>(const Softmax_Op&)>, public StaticAttributes<SoftmaxAttr, int> { public: @@ -55,7 +51,11 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? 
Registrar<Softmax_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Softmax_Op, *this, op.backend()); + }else{ + mImpl = nullptr; + } } /** @@ -66,10 +66,7 @@ public: return std::make_shared<Softmax_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Softmax_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp index 32adfdb93db1e9da857f4147efdcfe64bbb34475..f5ffa431192d73a703c1ce973cb485dadb31420d 100644 --- a/include/aidge/operator/Sqrt.hpp +++ b/include/aidge/operator/Sqrt.hpp @@ -12,22 +12,19 @@ #ifndef AIDGE_CORE_OPERATOR_SQRT_H_ #define AIDGE_CORE_OPERATOR_SQRT_H_ -#include <cassert> #include <memory> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { class Sqrt_Op : public OperatorTensor, - public Registrable<Sqrt_Op, std::string, std::unique_ptr<OperatorImpl>(const Sqrt_Op&)> { + public Registrable<Sqrt_Op, std::string, std::shared_ptr<OperatorImpl>(const Sqrt_Op&)> { public: // FIXME: change accessibility std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>(); @@ -45,7 +42,11 @@ public: Sqrt_Op(const Sqrt_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? 
Registrar<Sqrt_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Sqrt_Op, *this, op.backend()); + }else{ + mImpl = nullptr; + } } /** @@ -56,10 +57,7 @@ public: return std::make_shared<Sqrt_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Sqrt_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp index 7d346457ead71724ba05da70b5bdf7ad145cbe0c..fbcebcc9f62c23e9c60b5dff6f0d41c10d8b8717 100644 --- a/include/aidge/operator/Sub.hpp +++ b/include/aidge/operator/Sub.hpp @@ -12,22 +12,19 @@ #ifndef AIDGE_CORE_OPERATOR_SUB_H_ #define AIDGE_CORE_OPERATOR_SUB_H_ -#include <cassert> #include <memory> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { class Sub_Op : public OperatorTensor, - public Registrable<Sub_Op, std::string, std::unique_ptr<OperatorImpl>(const Sub_Op&)> { + public Registrable<Sub_Op, std::string, std::shared_ptr<OperatorImpl>(const Sub_Op&)> { public: // FIXME: change accessibility std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()}; @@ -45,7 +42,11 @@ public: Sub_Op(const Sub_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? 
Registrar<Sub_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Sub_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -59,10 +60,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Sub_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input_1", "data_input_2"}; @@ -77,4 +75,4 @@ inline std::shared_ptr<Node> Sub(const std::string& name = "") { } } // namespace Aidge -#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */ \ No newline at end of file +#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */ diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp index ce0dc12a06d242d215c07dc6593bb7e2cb2c3c8a..3fd5377d30cfff864743dcab2da9e690e26e5263 100644 --- a/include/aidge/operator/Tanh.hpp +++ b/include/aidge/operator/Tanh.hpp @@ -12,15 +12,13 @@ #ifndef AIDGE_CORE_OPERATOR_TANH_H_ #define AIDGE_CORE_OPERATOR_TANH_H_ -#include <cassert> #include <memory> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -39,7 +37,11 @@ public: Tanh_Op(const Tanh_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? 
Registrar<Tanh_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Tanh_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -51,10 +53,7 @@ public: } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Tanh_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp index 2262bec14bd2f00cda643ade0709f7f9d509fa22..1beb5781b9262669cd2acb6ce4ef3aae85843573 100644 --- a/include/aidge/operator/Transpose.hpp +++ b/include/aidge/operator/Transpose.hpp @@ -30,7 +30,7 @@ enum class TransposeAttr { OutputDimsOrder }; template <DimIdx_t DIM> class Transpose_Op : public OperatorTensor, - public Registrable<Transpose_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>, + public Registrable<Transpose_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>, public StaticAttributes<TransposeAttr, std::array<DimSize_t, DIM>> { @@ -56,7 +56,11 @@ class Transpose_Op : public OperatorTensor, : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? 
Registrar<Transpose_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Transpose_Op<DIM>, *this, op.backend()); + }else{ + mImpl = nullptr; + } } /** @@ -80,7 +84,7 @@ class Transpose_Op : public OperatorTensor, } void setBackend(const std::string &name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Transpose_Op<DIM>>::create(name)(*this); + SET_IMPL_MACRO(Transpose_Op<DIM>, *this, name); mOutputs[0]->setBackend(name, device); } diff --git a/include/aidge/recipes/GraphViewHelper.hpp b/include/aidge/recipes/GraphViewHelper.hpp index c6204cdffa5e580190b8cd3f1817788a12e00bc3..a2c571bf4ed164729f7c3416c814b913b4d07e6f 100644 --- a/include/aidge/recipes/GraphViewHelper.hpp +++ b/include/aidge/recipes/GraphViewHelper.hpp @@ -9,14 +9,14 @@ * ********************************************************************************/ -#ifndef AIDGE_CORE_UTILS_RECIPES_H_ -#define AIDGE_CORE_UTILS_RECIPES_H_ +#ifndef AIDGE_CORE_UTILS_GRAPHVIEWHELPER_H_ +#define AIDGE_CORE_UTILS_GRAPHVIEWHELPER_H_ #include <memory> #include <set> -#include "aidge/graph/Node.hpp" #include "aidge/graph/GraphView.hpp" +#include "aidge/data/Tensor.hpp" namespace Aidge { @@ -26,15 +26,21 @@ namespace Aidge { * @param graphview GraphView instance where Producers should be searched. 
* @return std::set<std::shared_ptr<Node>> */ -std::set<std::shared_ptr<Aidge::Node>> producers(std::shared_ptr<Aidge::GraphView> graphview) { - std::set<std::shared_ptr<Node>> res; - const std::set<std::shared_ptr<Node>> nodes = graphview->getNodes(); - - std::copy_if(nodes.cbegin(), - nodes.cend(), - std::inserter(res, res.begin()), - [](std::shared_ptr<Node> n){ return n->type() == "Producer"; }); - - return res; -} -} // namespace Aidge \ No newline at end of file +std::set<std::shared_ptr<Tensor>> producers(std::shared_ptr<GraphView> graphview); + + +// TODO: change for every Tensor of Operator Producer not constant +/** + * @brief Getter for every ``Tensor`` owned by an ``Operator`` inside the provided ``GraphView``. + * @note An ``Operator`` owns its output ``Tensor``s. + * + * @param graphview Pointer to the ``GraphView`` from which ``Tensor``s should be extracted. + * @return std::set<std::shared_ptr<Tensor>> Set of pointers to the ``Tensor``s. + */ +std::set<std::shared_ptr<Tensor>> parameters(std::shared_ptr<GraphView> graphview); + +void compile_gradient(std::shared_ptr<Aidge::GraphView> gv); + +} // namespace Aidge + +#endif /* AIDGE_CORE_UTILS_GRAPHVIEWHELPER_H_ */ diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp index c737680bf3d9227161eed250c2cb52a443c37ab3..b25ebd3c8de3830174c11d93d6eb60c8703c6a0d 100644 --- a/include/aidge/scheduler/Scheduler.hpp +++ b/include/aidge/scheduler/Scheduler.hpp @@ -69,7 +69,7 @@ public: /** * @brief Place the data tensors inside in the data input tensor of the graphView. In case of multiple data input tensors, they are mapped to producers in the order given by the graph. 
- * + * * @param data data input tensors */ void connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data); @@ -79,6 +79,11 @@ public: */ void forward(bool forwardDims = true, bool verbose = false, std::vector<std::shared_ptr<Aidge::Tensor>> data = {}); + /** + * @brief Run the provided Computational Graph with a batch of data + */ + void backward(std::vector<std::shared_ptr<Aidge::Tensor>> data, bool instantiateGrad = true, bool verbose = false); + /** * @brief Save in a Markdown file the order of layers execution. * @param fileName Name of the generated file. diff --git a/include/aidge/utils/Directories.hpp b/include/aidge/utils/Directories.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3bc07b9dd58e472096102c1b0c66971164d632a3 --- /dev/null +++ b/include/aidge/utils/Directories.hpp @@ -0,0 +1,83 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + + +#ifndef AIDGE_DIRECTORIES_H_ +#define AIDGE_DIRECTORIES_H_ + + +#include <string> // std::string +#include <sstream> // std::stringstream +#include <iostream> +#include <sys/stat.h> +#include <errno.h> + +#ifdef WIN32 +#include <direct.h> +#else +#include <sys/types.h> +#include <unistd.h> +#endif + +namespace Aidge { + + bool isNotValidFilePath(int c) { + return (iscntrl(c) + || c == '<' + || c == '>' + || c == ':' + || c == '"' + || c == '|' + || c == '?' 
+ || c == '*'); + } + + std::string filePath(const std::string& str) { + std::string filePath(str); + std::replace_if(filePath.begin(), filePath.end(), + isNotValidFilePath, '_'); + return filePath; + } + + + bool createDirectories(const std::string& dirName) + { + std::stringstream path(dirName); + std::string dir; + std::string pathToDir(""); + int status = 0; + + while (std::getline(path, dir, '/') && status == 0) { + pathToDir += dir + '/'; + struct stat fileStat; + if (stat(pathToDir.c_str(), &fileStat) != 0) { + // Directory does not exist + #ifdef WIN32 + status = _mkdir(pathToDir.c_str()); + #else + #if defined(S_IRWXU) + status = mkdir(pathToDir.c_str(), S_IRWXU | S_IRWXG | S_IRWXO); + #else + status = mkdir(pathToDir.c_str()); + #endif + #endif + } else if (!S_ISDIR(fileStat.st_mode)) { + status = -1; + } + } + return (status == 0 || errno == EEXIST); + } + + +} + +#endif //AIDGE_DIRECTORIES_H_ + diff --git a/include/aidge/utils/ErrorHandling.hpp b/include/aidge/utils/ErrorHandling.hpp index 653a774b92e26513c9ac555e0aec1daed793e208..d4235d2db9b06597df80966e67306d84ac814a3c 100644 --- a/include/aidge/utils/ErrorHandling.hpp +++ b/include/aidge/utils/ErrorHandling.hpp @@ -18,13 +18,15 @@ #include <fmt/format.h> #include <fmt/ranges.h> +#include "aidge/utils/Log.hpp" + #ifdef NO_EXCEPTION #define AIDGE_THROW_OR_ABORT(ex, ...) \ -do { fmt::print(__VA_ARGS__); std::abort(); } while (false) +do { Aidge::Log::fatal(__VA_ARGS__); std::abort(); } while (false) #else #include <stdexcept> #define AIDGE_THROW_OR_ABORT(ex, ...) \ -throw ex(fmt::format(__VA_ARGS__)) +do { Aidge::Log::fatal(__VA_ARGS__); throw ex(fmt::format(__VA_ARGS__)); } while (false) #endif /** @@ -33,7 +35,7 @@ throw ex(fmt::format(__VA_ARGS__)) * If it asserts, it means an user error. */ #define AIDGE_ASSERT(stm, ...) 
\ -if (!(stm)) { fmt::print("Assertion failed: " #stm " in {}:{}", __FILE__, __LINE__); \ +if (!(stm)) { Aidge::Log::error("Assertion failed: " #stm " in {}:{}", __FILE__, __LINE__); \ AIDGE_THROW_OR_ABORT(std::runtime_error, __VA_ARGS__); } /** diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp new file mode 100644 index 0000000000000000000000000000000000000000..8a18bbab34d3c1c86252833852abc5faca41dd96 --- /dev/null +++ b/include/aidge/utils/Log.hpp @@ -0,0 +1,148 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + + +#ifndef AIDGE_LOG_H_ +#define AIDGE_LOG_H_ + +#include <memory> + +#include <fmt/format.h> +#include <fmt/ranges.h> + +namespace Aidge { +/** + * Aidge logging class, for displaying and file logging of events. +*/ +class Log { +public: + enum Level { + Debug = 0, + Info, + Notice, + Warn, + Error, + Fatal + }; + + /** + * Detailed messages for debugging purposes, providing information helpful + * for developers to trace and identify issues. + * Detailed insights of what is appening in an operation, not useful for the + * end-user. The operation is performed nominally. + * @note This level is disabled at compile time for Release, therefore + * inducing no runtime overhead for Release. + */ + template <typename... Args> + constexpr static void debug(Args&&... args) { +#ifndef NDEBUG + // only when compiled in Debug + log(Debug, fmt::format(std::forward<Args>(args)...)); +#endif + } + + /** + * Messages that provide a record of the normal operation, about + * the application's state, progress, or important events. 
+ * Reports normal start, end and key steps in an operation. The operation is + * performed nominally. + */ + template <typename... Args> + constexpr static void info(Args&&... args) { + log(Info, fmt::format(std::forward<Args>(args)...)); + } + + /** + * Applies to normal but significant conditions that may require monitoring, + * like unusual or normal fallback events. + * Reports specific paths in an operation. The operation can still be + * performed normally. + */ + template <typename... Args> + constexpr static void notice(Args&&... args) { + log(Notice, fmt::format(std::forward<Args>(args)...)); + } + + /** + * Indicates potential issues or situations that may lead to errors but do + * not necessarily cause immediate problems. + * Some specific steps of the operation could not be performed, but it can + * still provide an exploitable result. + */ + template <typename... Args> + constexpr static void warn(Args&&... args) { + log(Warn, fmt::format(std::forward<Args>(args)...)); + } + + /** + * Signifies a problem or unexpected condition that the application can + * recover from, but attention is needed to prevent further issues. + * The operation could not be performed, but it does not prevent potential + * further operations. + */ + template <typename... Args> + constexpr static void error(Args&&... args) { + log(Error, fmt::format(std::forward<Args>(args)...)); + } + + /** + * Represents a critical error or condition that leads to the termination of + * the application, indicating a severe and unrecoverable problem. + * The operation could not be performed and any further operation is + * impossible. + */ + template <typename... Args> + constexpr static void fatal(Args&&... args) { + log(Fatal, fmt::format(std::forward<Args>(args)...)); + } + + /** + * Set the minimum log level displayed in the console. + */ + constexpr static void setConsoleLevel(Level level) { + mConsoleLevel = level; + } + + /** + * Set the minimum log level saved in the log file. 
+ */ + constexpr static void setFileLevel(Level level) { + mFileLevel = level; + } + + /** + * Set the log file name. + * Close the current log file and open the one with the new file name. + * If empty, stop logging into a file. + */ + static void setFileName(const std::string& fileName) { + if (fileName != mFileName) { + mFileName = fileName; + mFile.release(); + + if (!fileName.empty()) { + initFile(fileName); + } + } + } + +private: + static void log(Level level, const std::string& msg); + static void initFile(const std::string& fileName); + + static Level mConsoleLevel; + static Level mFileLevel; + static std::string mFileName; + static std::unique_ptr<FILE, decltype(&std::fclose)> mFile; +}; +} + +#endif //AIDGE_LOG_H_ diff --git a/include/aidge/utils/Random.hpp b/include/aidge/utils/Random.hpp index 704609c0c778c7065a580b86fc67aea7e9d3525d..73cbd1453b3d840d6da2c58eadd5c5f47e9e9070 100644 --- a/include/aidge/utils/Random.hpp +++ b/include/aidge/utils/Random.hpp @@ -9,23 +9,53 @@ * ********************************************************************************/ - #ifndef AIDGE_RANDOM_H_ #define AIDGE_RANDOM_H_ - #include <algorithm> -#include <vector> #include <random> +#include <vector> +namespace Aidge { namespace Random { - void randShuffle(std::vector<unsigned int>& vec) { - std::random_device rd; - std::mt19937 g(rd()); - std::shuffle(vec.begin(), vec.end(), g); - } - +/** + * @brief Generator is a class created to handle only one Mersenne Twister + * pseudo-random number generator for the whole Aidge framework. + * + * All of its method are static. You can set a random seed and access the + * generator. + * By default, the random seed is set to 0 but selected randomly. + * + */ +class Generator { + public: + /** + * @brief Set a seed to the pseudo-random number generator. 
+ * + * @return std::mt19937& + */ + static void setSeed(unsigned int seed); + static unsigned int getSeed() { return seed; }; + /** + * @brief Return a Mersenne Twister pseudo-random number generator. + * You can set the seed of this generator using ``setSeed`` method. + * + * @return std::mt19937& + */ + static std::mt19937& get() { return generator; }; + + private: + // Mersenne Twister pseudo-random number generator + static std::mt19937 generator; + static unsigned int seed; +}; + +inline void randShuffle(std::vector<unsigned int>& vec) { + std::shuffle(vec.begin(), vec.end(), Aidge::Random::Generator::get()); } -#endif //AIDGE_RANDOM_H_ \ No newline at end of file +} // namespace Random +} // namespace Aidge + +#endif // AIDGE_RANDOM_H_ diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp index 4d604d520d3d8af532e196c7785896ddc1c242d0..e116fa91cac4d3828e998c6a06825afb118ac52c 100644 --- a/include/aidge/utils/Registrar.hpp +++ b/include/aidge/utils/Registrar.hpp @@ -14,19 +14,25 @@ #ifdef PYBIND #include <pybind11/pybind11.h> +#include <pybind11/stl.h> // declare_registrable key can recquire stl +#include <pybind11/functional.h>// declare_registrable allow binding of lambda fn + #endif #include "aidge/utils/ErrorHandling.hpp" #include <functional> #include <map> -#include <cassert> +#include <vector> namespace Aidge { #ifdef PYBIND namespace py = pybind11; #endif +// Abstract class used to test if a class is Registrable. 
+class AbstractRegistrable {}; + template <class DerivedClass, class Key, class Func> // curiously rucurring template pattern class Registrable { public: @@ -58,29 +64,88 @@ struct Registrar { Registrar(const registrar_key& key, registrar_type func) { //fmt::print("REGISTRAR: {}\n", key); - bool newInsert; - std::tie(std::ignore, newInsert) = C::registry().insert(std::make_pair(key, func)); + // bool newInsert; + // std::tie(std::ignore, newInsert) = C::registry().insert(std::make_pair(key, func)); + C::registry().erase(key); + C::registry().insert(std::make_pair(key, func)); //assert(newInsert && "registrar already exists"); } static bool exists(const registrar_key& key) { - const auto it = C::registry().find(key); - return (it != C::registry().end()); + return (C::registry().find(key) != C::registry().cend()); } static auto create(const registrar_key& key){ const auto it = C::registry().find(key); - AIDGE_ASSERT(it != C::registry().end(), "missing or invalid registrar key: {}\nDid you include/import the corresponding module?", key); + AIDGE_ASSERT(it != C::registry().cend(), "missing or invalid registrar key: {}\nDid you include/import the corresponding module?", key); return (*it).second; } static std::vector<registrar_key> getKeys(){ std::vector<registrar_key> keys; - for(auto keyValue : C::registry()) + for(const auto& keyValue : C::registry()) keys.push_back(keyValue.first); return keys; } }; + +#ifdef PYBIND +/** + * @brief Function to define register function for a registrable class + * Defined here to have access to this function in every module who wants + * to create a new registrable class. 
+ * + * @tparam C registrable class + * @param m pybind module + * @param class_name python name of the class + */ +template <class C> +void declare_registrable(py::module& m, const std::string& class_name){ + typedef typename C::registrar_key registrar_key; + typedef typename C::registrar_type registrar_type; + m.def(("register_"+ class_name).c_str(), [](registrar_key& key, registrar_type function){ + Registrar<C>(key, function); + }) + .def(("get_keys_"+ class_name).c_str(), [](){ + return Registrar<C>::getKeys(); + }); +} +#endif + +/* +* This macro allow to set an implementation to an operator +* This macro is mandatory for using implementation registered in python +* PyBind when calling create method will do a call to the copy ctor if +* op is not visible to the python world (if the create method return a python function) +* See this issue for more information https://github.com/pybind/pybind11/issues/4417 +* Note: using a method to do this is not possible has any call to a function will call +* the cpy ctor. This is why I used a macro +* Note: I duplicated +* (op).setImpl(Registrar<T_Op>::create(backend_name)(op)); \ +* This is because the py::cast need to be done in the same scope. +* I know this only empyrically not sure what happens under the hood... 
+* +* If someone wants to find an alternative to this Macro, you can contact me: +* cyril.moineau@cea.fr +*/ +#ifdef PYBIND +#define SET_IMPL_MACRO(T_Op, op, backend_name) \ + \ + if (Registrar<T_Op>::exists(backend_name)) { \ + if(Py_IsInitialized()) { \ + auto obj = py::cast(&(op)); \ + (op).setImpl(Registrar<T_Op>::create(backend_name)(op)); \ + } else { \ + (op).setImpl(Registrar<T_Op>::create(backend_name)(op)); \ + } \ + } +#else +#define SET_IMPL_MACRO(T_Op, op, backend_name) \ + if (Registrar<T_Op>::exists(backend_name)) { \ + (op).setImpl(Registrar<T_Op>::create(backend_name)(op)); \ + } +#endif + } #endif //AIDGE_CORE_UTILS_REGISTRAR_H_ diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp index a2a5e6b8bb2d0f2413ef94c360b383608c5b41b5..97cf817176c733000eda8da6c6a213ccc22f1dc4 100644 --- a/python_binding/backend/pybind_OperatorImpl.cpp +++ b/python_binding/backend/pybind_OperatorImpl.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> #include <pybind11/stl.h> +#include <string> #include "aidge/operator/Operator.hpp" #include "aidge/backend/OperatorImpl.hpp" @@ -116,7 +117,7 @@ public: void init_OperatorImpl(py::module& m){ py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr()) - .def(py::init<const Operator&>()) + .def(py::init<const Operator&, const std::string&>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>()) .def("forward", &OperatorImpl::forward) .def("backward", &OperatorImpl::backward) .def("get_nb_required_data", &OperatorImpl::getNbRequiredData) diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp index df3792fd784a2ef2b9418628959629ac59c04094..bca246c94434b280a12d070526ad4ffb2c7fbe7b 100644 --- a/python_binding/data/pybind_Data.cpp +++ b/python_binding/data/pybind_Data.cpp @@ -26,12 +26,11 @@ void init_Data(py::module& m){ .value("Int64", DataType::Int64) .value("UInt8", 
DataType::UInt8) .value("UInt32", DataType::UInt32) - .value("UInt64", DataType::UInt64) + .value("UInt64", DataType::UInt64) ; - py::class_<Data, std::shared_ptr<Data>>(m,"Data") - .def(py::init<const std::string&>()); + py::class_<Data, std::shared_ptr<Data>>(m,"Data"); + - } } diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp index 93389edf663a6154daf0b9ef2a7cc4095abc4d0f..b97af94ad583cf42e25fa3afc0697021f6dcadcc 100644 --- a/python_binding/data/pybind_Tensor.cpp +++ b/python_binding/data/pybind_Tensor.cpp @@ -76,6 +76,7 @@ void init_Tensor(py::module& m){ .def("set_datatype", &Tensor::setDataType, py::arg("datatype"), py::arg("copyCast") = true) .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true) .def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims) + .def("grad", &Tensor::grad) .def("dtype", &Tensor::dataType) .def("size", &Tensor::size) .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize) diff --git a/python_binding/filler/pybind_Filler.cpp b/python_binding/filler/pybind_Filler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a85c0d6cd6fa0367dfc26328d214c99a4288a3be --- /dev/null +++ b/python_binding/filler/pybind_Filler.cpp @@ -0,0 +1,147 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <pybind11/pybind11.h> + +#include "aidge/data/Tensor.hpp" +#include "aidge/filler/Filler.hpp" + +namespace py = pybind11; + +namespace Aidge { + +void init_Filler(py::module &m) { + py::enum_<enum VarianceNorm>(m, "VarianceNorm") + .value("FanIn", VarianceNorm::FanIn) + .value("Average", VarianceNorm::Average) + .value("FanOut", VarianceNorm::FanOut) + .export_values(); + + m.def( + "constant_filler", + [](std::shared_ptr<Tensor> tensor, py::object value) -> void { + switch (tensor->dataType()) { + case DataType::Float64: + constantFiller<double>(tensor, value.cast<double>()); + break; + case DataType::Float32: + constantFiller<float>(tensor, value.cast<float>()); + break; + default: + AIDGE_THROW_OR_ABORT( + py::value_error, + "Data type is not supported for Constant filler."); + } + }, + py::arg("tensor"), py::arg("value")) + .def( + "normal_filler", + [](std::shared_ptr<Tensor> tensor, double mean, + double stdDev) -> void { + switch (tensor->dataType()) { + case DataType::Float64: + normalFiller<double>(tensor, mean, stdDev); + break; + case DataType::Float32: + normalFiller<float>(tensor, mean, stdDev); + break; + default: + AIDGE_THROW_OR_ABORT( + py::value_error, + "Data type is not supported for Normal filler."); + } + }, + py::arg("tensor"), py::arg("mean") = 0.0, py::arg("stdDev") = 1.0) + .def( + "uniform_filler", + [](std::shared_ptr<Tensor> tensor, double min, double max) -> void { + switch (tensor->dataType()) { + case DataType::Float64: + uniformFiller<double>(tensor, min, max); + break; + case DataType::Float32: + uniformFiller<float>(tensor, min, max); + break; + default: + AIDGE_THROW_OR_ABORT( + py::value_error, + "Data type is not supported for Uniform filler."); + } + }, + py::arg("tensor"), py::arg("min"), py::arg("max")) + .def( + "xavier_uniform_filler", + [](std::shared_ptr<Tensor> tensor, py::object 
scaling, + VarianceNorm varianceNorm) -> void { + switch (tensor->dataType()) { + case DataType::Float64: + xavierUniformFiller<double>( + tensor, scaling.cast<double>(), varianceNorm); + break; + case DataType::Float32: + xavierUniformFiller<float>( + tensor, scaling.cast<float>(), varianceNorm); + break; + default: + AIDGE_THROW_OR_ABORT( + py::value_error, + "Data type is not supported for Uniform filler."); + } + }, + py::arg("tensor"), py::arg("scaling") = 1.0, + py::arg("varianceNorm") = VarianceNorm::FanIn) + .def( + "xavier_normal_filler", + [](std::shared_ptr<Tensor> tensor, py::object scaling, + VarianceNorm varianceNorm) -> void { + switch (tensor->dataType()) { + case DataType::Float64: + xavierNormalFiller<double>( + tensor, scaling.cast<double>(), varianceNorm); + break; + case DataType::Float32: + xavierNormalFiller<float>(tensor, scaling.cast<float>(), + varianceNorm); + break; + default: + AIDGE_THROW_OR_ABORT( + py::value_error, + "Data type is not supported for Uniform filler."); + } + }, + py::arg("tensor"), py::arg("scaling") = 1.0, + py::arg("varianceNorm") = VarianceNorm::FanIn) + .def( + "he_filler", + [](std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm, + py::object meanNorm, py::object scaling) -> void { + switch (tensor->dataType()) { + case DataType::Float64: + heFiller<double>(tensor, varianceNorm, + meanNorm.cast<double>(), + scaling.cast<double>()); + break; + case DataType::Float32: + heFiller<float>(tensor, varianceNorm, + meanNorm.cast<float>(), + scaling.cast<float>()); + break; + default: + AIDGE_THROW_OR_ABORT( + py::value_error, + "Data type is not supported for Uniform filler."); + } + }, + py::arg("tensor"), py::arg("varianceNorm") = VarianceNorm::FanIn, + py::arg("meanNorm") = 0.0, py::arg("scaling") = 1.0) + ; +} +} // namespace Aidge diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp index 
a41d0d92835be2b5ef07d30c4a5233da1e3906b7..f06a70f32999d942f6d060ba9b6df6360438c60d 100644 --- a/python_binding/graph/pybind_GraphView.cpp +++ b/python_binding/graph/pybind_GraphView.cpp @@ -30,7 +30,9 @@ void init_GraphView(py::module& m) { :param path: save location :type path: str )mydelimiter") - + .def("log_outputs", &GraphView::logOutputs, py::arg("path")) + .def("get_ordered_inputs", &GraphView::getOrderedInputs) + .def("get_ordered_outputs", &GraphView::getOrderedOutputs) .def("get_output_nodes", &GraphView::outputNodes, R"mydelimiter( Get set of output Nodes. diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp index 74ec11c28e746856fe767f16a4380651271d8fe4..c3eeb192a88163be96f973a55e6ef7cc60ec48af 100644 --- a/python_binding/operator/pybind_Add.cpp +++ b/python_binding/operator/pybind_Add.cpp @@ -12,6 +12,7 @@ #include <pybind11/pybind11.h> #include "aidge/operator/Add.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" @@ -23,7 +24,7 @@ void declare_Add(py::module &m) { py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance()) .def("get_inputs_name", &Add_Op::getInputsName) .def("get_outputs_name", &Add_Op::getOutputsName); - + declare_registrable<Add_Op>(m, "AddOp"); m.def("Add", &Add, py::arg("nbIn"), py::arg("name") = ""); } diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp index 0ca01c07535f65ac1161603d32d191881eb28746..ab52472b4576d4ab4adf05d3fed139ae40c75919 100644 --- a/python_binding/operator/pybind_AvgPooling.cpp +++ b/python_binding/operator/pybind_AvgPooling.cpp @@ -17,6 +17,7 @@ #include <array> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/AvgPooling.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" @@ -26,8 +27,9 @@ 
namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) { + const std::string pyClassName("AvgPoolingOp" + std::to_string(DIM) + "D"); py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>( - m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(), + m, pyClassName.c_str(), py::multiple_inheritance()) .def(py::init<const std::array<DimSize_t, DIM> &, const std::array<DimSize_t, DIM> &>(), @@ -36,7 +38,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) { .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName) .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName) .def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName); - + declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName); m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, const std::string& name, const std::vector<DimSize_t> &stride_dims) { diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp index e11fc288fb9eb837c0a7b36c0a1c4024ab6c8633..9640141e03bcd811f5ce24c544c5cdbc9fe6b2f3 100644 --- a/python_binding/operator/pybind_BatchNorm.cpp +++ b/python_binding/operator/pybind_BatchNorm.cpp @@ -12,6 +12,7 @@ #include <pybind11/pybind11.h> #include <string> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/BatchNorm.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" @@ -21,13 +22,15 @@ namespace Aidge { template <DimSize_t DIM> void declare_BatchNormOp(py::module& m) { - py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance()) + const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D"); + py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, pyClassName.c_str(), 
py::multiple_inheritance()) .def(py::init<float, float>(), - py::arg("epsilon"), - py::arg("momentum")) + py::arg("epsilon"), + py::arg("momentum")) .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName) .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName) .def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName); + declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName); m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nbFeatures"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = ""); } diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp index 8cdd138b8cde2a582e9f569a17ae33811637092c..756686c209c33fe03f7bda4bbb53d8c3c71e8b4c 100644 --- a/python_binding/operator/pybind_Concat.cpp +++ b/python_binding/operator/pybind_Concat.cpp @@ -12,6 +12,7 @@ #include <pybind11/pybind11.h> #include <string> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Concat.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -24,6 +25,7 @@ void init_Concat(py::module& m) { .def("get_outputs_name", &Concat_Op::getOutputsName) .def("attributes_name", &Concat_Op::staticGetAttrsName); + declare_registrable<Concat_Op>(m, "ConcatOp"); m.def("Concat", &Concat, py::arg("nbIn"), py::arg("axis"), py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp index 346acc5d9d05c24e9538c3b8c5edf1f7e37d6ba8..adb0e108c409032c7e132016f5b92ed9f9233491 100644 --- a/python_binding/operator/pybind_Conv.cpp +++ b/python_binding/operator/pybind_Conv.cpp @@ -16,49 +16,58 @@ #include <array> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Conv.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" +#include "aidge/utils/Registrar.hpp" // declare_registrable namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> 
void declare_ConvOp(py::module &m) { + const std::string pyClassName("ConvOp" + std::to_string(DIM) + "D"); py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Attributes, OperatorTensor>( - m, ("ConvOp" + std::to_string(DIM) + "D").c_str(), + m, pyClassName.c_str(), py::multiple_inheritance()) .def(py::init<DimSize_t, DimSize_t, const std::array<DimSize_t, DIM> &, const std::array<DimSize_t, DIM> &, - const std::array<DimSize_t, DIM> &>(), + const std::array<DimSize_t, DIM> &, + bool>(), py::arg("in_channels"), py::arg("out_channels"), py::arg("kernel_dims"), py::arg("stride_dims"), - py::arg("dilation_dims")) + py::arg("dilation_dims"), + py::arg("no_bias")) .def("get_inputs_name", &Conv_Op<DIM>::getInputsName) .def("get_outputs_name", &Conv_Op<DIM>::getOutputsName) .def("attributes_name", &Conv_Op<DIM>::staticGetAttrsName) ; + declare_registrable<Conv_Op<DIM>>(m, pyClassName); + m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels, DimSize_t out_channels, const std::vector<DimSize_t>& kernel_dims, const std::string& name, const std::vector<DimSize_t> &stride_dims, - const std::vector<DimSize_t> &dilation_dims) { + const std::vector<DimSize_t> &dilation_dims, + bool noBias) { AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM); AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM); AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM); - return Conv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin())); + return Conv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), noBias); }, py::arg("in_channels"), py::arg("out_channels"), py::arg("kernel_dims"), 
py::arg("name") = "", py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1), - py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1)); + py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1), + py::arg("no_bias") = false); } @@ -66,9 +75,5 @@ void init_Conv(py::module &m) { declare_ConvOp<1>(m); declare_ConvOp<2>(m); declare_ConvOp<3>(m); - - // FIXME: - // m.def("Conv1D", static_cast<NodeAPI(*)(const char*, int, int, int const - // (&)[1])>(&Conv)); } } // namespace Aidge diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp index e25024e09cdd4fe234416a9aa8f0fef91a3c27fe..19b3332a84037185afdc87fd90cb9c8fea2e64f8 100644 --- a/python_binding/operator/pybind_ConvDepthWise.cpp +++ b/python_binding/operator/pybind_ConvDepthWise.cpp @@ -17,6 +17,7 @@ #include <array> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/ConvDepthWise.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" @@ -26,36 +27,41 @@ namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) { + const std::string pyClassName("ConvDepthWiseOp" + std::to_string(DIM) + "D"); py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>( - m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(), + m, pyClassName.c_str(), py::multiple_inheritance()) .def(py::init<const DimSize_t, const std::array<DimSize_t, DIM> &, const std::array<DimSize_t, DIM> &, - const std::array<DimSize_t, DIM> &>(), + const std::array<DimSize_t, DIM> &, + bool>(), py::arg("nb_channels"), py::arg("kernel_dims"), py::arg("stride_dims"), - py::arg("dilation_dims")) + py::arg("dilation_dims"), + py::arg("no_bias")) .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName) .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName) .def("attributes_name", 
&ConvDepthWise_Op<DIM>::staticGetAttrsName); - + declare_registrable<ConvDepthWise_Op<DIM>>(m, pyClassName); m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels, const std::vector<DimSize_t>& kernel_dims, const std::string& name, const std::vector<DimSize_t> &stride_dims, - const std::vector<DimSize_t> &dilation_dims) { + const std::vector<DimSize_t> &dilation_dims, + bool no_bias) { AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM); AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM); AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM); - return ConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin())); + return ConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias); }, py::arg("nb_channenls"), py::arg("kernel_dims"), py::arg("name") = "", py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1), - py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1)); + py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1), + py::arg("no_bias")= false); } diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp index 6d14510f34349c001289096a7fc9b08681a25bc8..e9bf26b629aa05090c9601103676cbc12ff4c88d 100644 --- a/python_binding/operator/pybind_Div.cpp +++ b/python_binding/operator/pybind_Div.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Div.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -21,7 +22,7 @@ void init_Div(py::module& m) { py::class_<Div_Op, std::shared_ptr<Div_Op>, OperatorTensor>(m, "DivOp", py::multiple_inheritance()) .def("get_inputs_name", 
&Div_Op::getInputsName) .def("get_outputs_name", &Div_Op::getOutputsName); - + declare_registrable<Div_Op>(m, "DivOp"); m.def("Div", &Div, py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp index 806867f61c3580543c184d529edc2856ee8d7a6c..c5fd53f2a665b5b816a3778e6f874cd04956e99e 100644 --- a/python_binding/operator/pybind_Erf.cpp +++ b/python_binding/operator/pybind_Erf.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Erf.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -21,7 +22,7 @@ void init_Erf(py::module& m) { py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(m, "ErfOp", py::multiple_inheritance()) .def("get_inputs_name", &Erf_Op::getInputsName) .def("get_outputs_name", &Erf_Op::getOutputsName); - + declare_registrable<Erf_Op>(m, "ErfOp"); m.def("Erf", &Erf, py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp index ad589d73d0aea94d96e62e8065b70bd517633f88..ab1ed9ce20bec01e205cd6478c6a93df9f91a2fb 100644 --- a/python_binding/operator/pybind_FC.cpp +++ b/python_binding/operator/pybind_FC.cpp @@ -11,8 +11,9 @@ #include <pybind11/pybind11.h> -#include "aidge/operator/FC.hpp" #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/FC.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" @@ -24,7 +25,7 @@ void declare_FC(py::module &m) { .def("get_inputs_name", &FC_Op::getInputsName) .def("get_outputs_name", &FC_Op::getOutputsName) .def("attributes_name", &FC_Op::staticGetAttrsName); - + declare_registrable<FC_Op>(m, "FCOp"); m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = ""); } diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp index 
f0d55e2f40bd89269c96564cea6b5a002b477b8b..8c32acfe2bd7e0118c186be8fa1297ee16fe6f6c 100644 --- a/python_binding/operator/pybind_Gather.cpp +++ b/python_binding/operator/pybind_Gather.cpp @@ -12,6 +12,7 @@ #include <pybind11/pybind11.h> #include <string> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Gather.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -23,7 +24,7 @@ void init_Gather(py::module& m) { .def("get_inputs_name", &Gather_Op::getInputsName) .def("get_outputs_name", &Gather_Op::getOutputsName) .def("attributes_name", &Gather_Op::staticGetAttrsName); - - m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis"), py::arg("name") = ""); + declare_registrable<Gather_Op>(m, "GatherOp"); + m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis")= 0, py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp index a5435a3ce67ffe0f75b8bbda19e3d552baeef5ef..31ee946fc99df40133ff04965c762f9ddae0d131 100644 --- a/python_binding/operator/pybind_GenericOperator.cpp +++ b/python_binding/operator/pybind_GenericOperator.cpp @@ -15,6 +15,7 @@ #include <stdio.h> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/GenericOperator.hpp" #include "aidge/operator/OperatorTensor.hpp" namespace py = pybind11; diff --git a/python_binding/operator/pybind_Identity.cpp b/python_binding/operator/pybind_Identity.cpp index b1b1e8888976c578ff490f35776c890ba59911dc..4538b72fcb012a35ca0ebf3a15449a4b5cfff7a8 100644 --- a/python_binding/operator/pybind_Identity.cpp +++ b/python_binding/operator/pybind_Identity.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Identity.hpp" #include "aidge/operator/Operator.hpp" diff --git a/python_binding/operator/pybind_LeakyReLU.cpp 
b/python_binding/operator/pybind_LeakyReLU.cpp index 3e9acb831eb3334bd126d3b360f3b5aa39d83731..9ad47e7a391698ae9b30d35d94f05e8b80138590 100644 --- a/python_binding/operator/pybind_LeakyReLU.cpp +++ b/python_binding/operator/pybind_LeakyReLU.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/LeakyReLU.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -22,7 +23,7 @@ void init_LeakyReLU(py::module& m) { .def("get_inputs_name", &LeakyReLU_Op::getInputsName) .def("get_outputs_name", &LeakyReLU_Op::getOutputsName) .def("attributes_name", &LeakyReLU_Op::staticGetAttrsName); - + declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp"); m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp index d0d7f28d52a9a9899b08d37a0c1a4a8720f2ae20..73bfac04a78ec9b972ec984466dbae582b2c03dc 100644 --- a/python_binding/operator/pybind_Matmul.cpp +++ b/python_binding/operator/pybind_Matmul.cpp @@ -11,8 +11,9 @@ #include <pybind11/pybind11.h> -#include "aidge/operator/MatMul.hpp" #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/MatMul.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" @@ -23,7 +24,7 @@ void init_MatMul(py::module &m) { py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance()) .def("get_inputs_name", &MatMul_Op::getInputsName) .def("get_outputs_name", &MatMul_Op::getOutputsName); - + declare_registrable<MatMul_Op>(m, "MatMulOp"); m.def("MatMul", &MatMul, py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp index 9c83a67e81120e2cc2674e3ceb4c8871dd6fd393..91fa0489d8bedd16dd33424e33d7e15eea3e3ecb 100644 --- 
a/python_binding/operator/pybind_MaxPooling.cpp +++ b/python_binding/operator/pybind_MaxPooling.cpp @@ -17,15 +17,16 @@ #include <array> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/MaxPooling.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" -#include "aidge/data/Tensor.hpp" namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) { + const std::string pyClassName("MaxPoolingOp" + std::to_string(DIM) + "D"); py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>( m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance()) @@ -38,7 +39,7 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) { .def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName) .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName) .def("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName); - + declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName); m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, const std::string& name, const std::vector<DimSize_t> &stride_dims, diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp index 20a620cee737db5380ee7641b161cf6296ef7e5b..20cd3f156996c98bb64502a90ab98535f87cc2a3 100644 --- a/python_binding/operator/pybind_MetaOperatorDefs.cpp +++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp @@ -30,21 +30,23 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) { const std::string& name, const std::vector<DimSize_t> &stride_dims, const std::vector<DimSize_t> &padding_dims, - const std::vector<DimSize_t> &dilation_dims) + const std::vector<DimSize_t> &dilation_dims, + bool no_bias) { AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM); 
AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM); AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM); AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM); - return PaddedConv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin())); + return PaddedConv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias); }, py::arg("in_channels"), py::arg("out_channels"), py::arg("kernel_dims"), py::arg("name") = "", py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1), py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0), - py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1)); + py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1), + py::arg("no_bias")= false); } template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) { @@ -53,20 +55,22 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) { const std::string& name, const std::vector<DimSize_t> &stride_dims, const std::vector<DimSize_t> &padding_dims, - const std::vector<DimSize_t> &dilation_dims) + const std::vector<DimSize_t> &dilation_dims, + bool no_bias) { AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM); AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM); AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM); AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", 
dilation_dims.size(), DIM); - return PaddedConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin())); + return PaddedConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias); }, py::arg("nb_channels"), py::arg("kernel_dims"), py::arg("name") = "", py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1), py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0), - py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1)); + py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1), + py::arg("no_bias") = false); } diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp index 21f510d98728fbe5401288a366294241b5f10a3f..47c84c0e52f605a5466a63a5a5d0851fecedd2f8 100644 --- a/python_binding/operator/pybind_Mul.cpp +++ b/python_binding/operator/pybind_Mul.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Mul.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -21,7 +22,7 @@ void init_Mul(py::module& m) { py::class_<Mul_Op, std::shared_ptr<Mul_Op>, OperatorTensor>(m, "MulOp", py::multiple_inheritance()) .def("get_inputs_name", &Mul_Op::getInputsName) .def("get_outputs_name", &Mul_Op::getOutputsName); - + declare_registrable<Mul_Op>(m, "MulOp"); m.def("Mul", &Mul, py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp index 79a85cb92cf27c7edb745c36eefe61ae86c66786..589bad0be4ebfac10b476990e4501d6c219abbb1 100644 --- a/python_binding/operator/pybind_Operator.cpp +++ b/python_binding/operator/pybind_Operator.cpp @@ -1,3 +1,4 @@ + /******************************************************************************** * Copyright (c) 
2023 CEA-List * @@ -10,10 +11,12 @@ ********************************************************************************/ #include <pybind11/pybind11.h> +#include <pybind11/stl.h> + #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Operator.hpp" #include "aidge/utils/Types.h" -#include <pybind11/stl.h> namespace py = pybind11; namespace Aidge { @@ -32,10 +35,11 @@ void init_Operator(py::module& m){ .def("set_datatype", &Operator::setDataType, py::arg("dataType")) .def("set_backend", &Operator::setBackend, py::arg("name"), py::arg("device") = 0) .def("forward", &Operator::forward) - // py::keep_alive forbide Python to garbage collect implementation will the Operator is not garbade collected ! + // py::keep_alive forbids Python from garbage-collecting the implementation as long as the Operator is alive .def("set_impl", &Operator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>()) + .def("get_impl", &Operator::getImpl) .def("get_hook", &Operator::getHook) .def("add_hook", &Operator::addHook) ; } -} \ No newline at end of file +} diff --git a/python_binding/operator/pybind_OperatorTensor.cpp b/python_binding/operator/pybind_OperatorTensor.cpp index d0a4d024384ca158c1c9b009f5267aedcb9b8470..4cd7306494730036f90dd6311bc80d821ebe8f4d 100644 --- a/python_binding/operator/pybind_OperatorTensor.cpp +++ b/python_binding/operator/pybind_OperatorTensor.cpp @@ -10,7 +10,9 @@ ********************************************************************************/ #include <pybind11/pybind11.h> + #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Operator.hpp" #include <pybind11/stl.h> diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp index 69d63fe7b8d31a6fa9747df2ce4a93ec4a0f4cac..1cd9f074fe5241be11da0ea7d0d1ed5a1c5869c2 100644 --- a/python_binding/operator/pybind_Pad.cpp +++ 
b/python_binding/operator/pybind_Pad.cpp @@ -9,14 +9,14 @@ * ********************************************************************************/ +#include <array> #include <pybind11/pybind11.h> #include <pybind11/stl.h> -#include <iostream> #include <string> #include <vector> -#include <array> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Pad.hpp" #include "aidge/operator/Operator.hpp" #include "aidge/utils/Types.h" @@ -25,8 +25,9 @@ namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> void declare_PadOp(py::module &m) { + const std::string pyClassName("PadOp" + std::to_string(DIM) + "D"); py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>( - m, ("PadOp" + std::to_string(DIM) + "D").c_str(), + m, pyClassName.c_str(), py::multiple_inheritance()) .def(py::init<const std::array<DimSize_t, 2*DIM> &, const PadBorderType &, @@ -38,7 +39,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) { .def("get_outputs_name", &Pad_Op<DIM>::getOutputsName) .def("attributes_name", &Pad_Op<DIM>::staticGetAttrsName) ; - + declare_registrable<Pad_Op<DIM>>(m, pyClassName); m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples, const std::string& name, const PadBorderType &borderType = PadBorderType::Constant, diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp index 91726fc1d4721df1be712a26721d09b1a98fd9a2..baae552270a4776d292047140e213dbe1566d35e 100644 --- a/python_binding/operator/pybind_Pop.cpp +++ b/python_binding/operator/pybind_Pop.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Pop.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp index 09d1e4ad2ad6413901c28bc9d9fe16995483da05..9e9ef772cadddb1c7928060b503c388b094ed9f4 100644 --- 
a/python_binding/operator/pybind_Pow.cpp +++ b/python_binding/operator/pybind_Pow.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Pow.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -21,6 +22,7 @@ void init_Pow(py::module& m) { py::class_<Pow_Op, std::shared_ptr<Pow_Op>, OperatorTensor>(m, "PowOp", py::multiple_inheritance()) .def("get_inputs_name", &Pow_Op::getInputsName) .def("get_outputs_name", &Pow_Op::getOutputsName); + declare_registrable<Pow_Op>(m, "PowOp"); m.def("Pow", &Pow, py::arg("name") = ""); } diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp index 3caa438d18b3919dbedcf66e4ba53b92b84a50b5..eb74515915c252d50a2522cae6d6f4c6832ab3ef 100644 --- a/python_binding/operator/pybind_Producer.cpp +++ b/python_binding/operator/pybind_Producer.cpp @@ -12,11 +12,11 @@ #include <pybind11/pybind11.h> #include <pybind11/stl.h> -#include "aidge/utils/Types.h" // #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" -#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Types.h" namespace py = pybind11; namespace Aidge { @@ -26,6 +26,7 @@ void declare_Producer(py::module &m) { // m.def(("Producer_" + std::to_string(DIM)+"D").c_str(), py::overload_cast<shared_ptr<Node>&>(&Producer<DIM>), py::arg("dims"), py::arg("name")); m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const std::string&, bool)>(&Producer), py::arg("dims"), py::arg("name") = "", py::arg("constant") = false); + } @@ -39,7 +40,7 @@ void init_Producer(py::module &m) { .def("get_outputs_name", &Producer_Op::getOutputsName) .def("attributes_name", &Producer_Op::staticGetAttrsName); m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&, bool)>(&Producer), py::arg("tensor"), py::arg("name") = 
"", py::arg("constant") = false); - + declare_registrable<Producer_Op>(m, "ProducerOp"); declare_Producer<1>(m); declare_Producer<2>(m); declare_Producer<3>(m); diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp index 24ae96649a87ff9acc996715d3cd00a97c393578..57601e25607a40c44c400fe75965d83050a146ed 100644 --- a/python_binding/operator/pybind_ReLU.cpp +++ b/python_binding/operator/pybind_ReLU.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/ReLU.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -21,6 +22,7 @@ void init_ReLU(py::module& m) { py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, OperatorTensor>(m, "ReLUOp", py::multiple_inheritance()) .def("get_inputs_name", &ReLU_Op::getInputsName) .def("get_outputs_name", &ReLU_Op::getOutputsName); + declare_registrable<ReLU_Op>(m, "ReLUOp"); m.def("ReLU", &ReLU, py::arg("name") = ""); } diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp index 11e979736dcab211aa11758cb3138f9d6827cc4e..599a648a3f2733acd49bbbc293cd30734e8ea2ff 100644 --- a/python_binding/operator/pybind_ReduceMean.cpp +++ b/python_binding/operator/pybind_ReduceMean.cpp @@ -9,13 +9,14 @@ * ********************************************************************************/ +#include <array> #include <pybind11/pybind11.h> #include <pybind11/stl.h> #include <string> #include <vector> -#include <array> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/ReduceMean.hpp" #include "aidge/utils/Types.h" @@ -23,20 +24,22 @@ namespace py = pybind11; namespace Aidge { -template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) { - py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, Attributes, OperatorTensor>( - m, ("ReduceMeanOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance()) 
- .def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName) - .def("get_outputs_name", &ReduceMean_Op<DIM>::getOutputsName) - .def("attributes_name", &ReduceMean_Op<DIM>::staticGetAttrsName) +void declare_ReduceMeanOp(py::module &m) { + const std::string pyClassName("ReduceMeanOp"); + py::class_<ReduceMean_Op, std::shared_ptr<ReduceMean_Op>, Attributes, OperatorTensor>( + m, pyClassName.c_str(), py::multiple_inheritance()) + .def("get_inputs_name", &ReduceMean_Op::getInputsName) + .def("get_outputs_name", &ReduceMean_Op::getOutputsName) + .def("attributes_name", &ReduceMean_Op::staticGetAttrsName) ; + declare_registrable<ReduceMean_Op>(m, pyClassName); - m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), [](const std::vector<int>& axes, + m.def("ReduceMean", [](const std::vector<int>& axes, DimSize_t keepDims, const std::string& name) { - AIDGE_ASSERT(axes.size() == DIM, "axes size [{}] does not match DIM [{}]", axes.size(), DIM); + // AIDGE_ASSERT(axes.size() == DIM, "axes size [{}] does not match DIM [{}]", axes.size(), DIM); - return ReduceMean<DIM>(to_array<DIM>(axes.begin()), keepDims, name); + return ReduceMean(axes, keepDims, name); }, py::arg("axes"), py::arg("keep_dims") = 1, py::arg("name") = ""); @@ -44,9 +47,9 @@ template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) { void init_ReduceMean(py::module &m) { - declare_ReduceMeanOp<1>(m); - declare_ReduceMeanOp<2>(m); - declare_ReduceMeanOp<3>(m); + declare_ReduceMeanOp(m); +// declare_ReduceMeanOp<2>(m); +// declare_ReduceMeanOp<3>(m); // FIXME: // m.def("ReduceMean1D", static_cast<NodeAPI(*)(const char*, int, int, int const diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp index b3e9850a54a36e440876dace2b635a122c63b4af..0e336db28ddba4629e61d30e026befe4240c40b6 100644 --- a/python_binding/operator/pybind_Reshape.cpp +++ b/python_binding/operator/pybind_Reshape.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include 
"aidge/data/Tensor.hpp" #include "aidge/operator/Reshape.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -21,7 +22,7 @@ void init_Reshape(py::module& m) { py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, Attributes, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance()) .def("get_inputs_name", &Reshape_Op::getInputsName) .def("get_outputs_name", &Reshape_Op::getOutputsName); - + declare_registrable<Reshape_Op>(m, "ReshapeOp"); m.def("Reshape", &Reshape, py::arg("shape"), py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp index 2393e56c10ef37e4eee078fe6f8bee4abd77ac39..8ffa8581593af9dc994baa566475317bcd96d475 100644 --- a/python_binding/operator/pybind_Sigmoid.cpp +++ b/python_binding/operator/pybind_Sigmoid.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Sigmoid.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp index 7bfd1b4f00579ed29658db73b71f2c596048fe75..558fc98c172ea1a264ee8ac3ebbc70e09eba826d 100644 --- a/python_binding/operator/pybind_Slice.cpp +++ b/python_binding/operator/pybind_Slice.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Slice.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -21,7 +22,7 @@ void init_Slice(py::module& m) { py::class_<Slice_Op, std::shared_ptr<Slice_Op>, OperatorTensor>(m, "SliceOp", py::multiple_inheritance()) .def("get_inputs_name", &Slice_Op::getInputsName) .def("get_outputs_name", &Slice_Op::getOutputsName); - + declare_registrable<Slice_Op>(m, "SliceOp"); m.def("Slice", &Slice, py::arg("starts"), py::arg("ends"), py::arg("axes"), py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp index 
780cffdef695b71dbc2781ba30936b3b45657cbb..837f3ed2b92aeab5739d07a04b071040806d8a1f 100644 --- a/python_binding/operator/pybind_Softmax.cpp +++ b/python_binding/operator/pybind_Softmax.cpp @@ -12,6 +12,7 @@ #include <pybind11/pybind11.h> #include <string> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Softmax.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -23,7 +24,7 @@ void init_Softmax(py::module& m) { .def("get_inputs_name", &Softmax_Op::getInputsName) .def("get_outputs_name", &Softmax_Op::getOutputsName) .def("attributes_name", &Softmax_Op::staticGetAttrsName); - + declare_registrable<Softmax_Op>(m, "SoftmaxOp"); m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp index 98d65242e8ff199992bbfc740192ae25e6d7b738..7065b828eb18d77edce49726dd903045c7952977 100644 --- a/python_binding/operator/pybind_Sqrt.cpp +++ b/python_binding/operator/pybind_Sqrt.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Sqrt.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -21,7 +22,7 @@ void init_Sqrt(py::module& m) { py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, OperatorTensor>(m, "SqrtOp", py::multiple_inheritance()) .def("get_inputs_name", &Sqrt_Op::getInputsName) .def("get_outputs_name", &Sqrt_Op::getOutputsName); - + declare_registrable<Sqrt_Op>(m, "SqrtOp"); m.def("Sqrt", &Sqrt, py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp index dce1ab6cb27cc7da02e6c817a6bc49ec64bcf364..e031040dfe8373c07d1524cbe4f75f3744e2f312 100644 --- a/python_binding/operator/pybind_Sub.cpp +++ b/python_binding/operator/pybind_Sub.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Sub.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -21,7 
+22,7 @@ void init_Sub(py::module& m) { py::class_<Sub_Op, std::shared_ptr<Sub_Op>, OperatorTensor>(m, "SubOp", py::multiple_inheritance()) .def("get_inputs_name", &Sub_Op::getInputsName) .def("get_outputs_name", &Sub_Op::getOutputsName); - + declare_registrable<Sub_Op>(m, "SubOp"); m.def("Sub", &Sub, py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_Tanh.cpp b/python_binding/operator/pybind_Tanh.cpp index 2f3140039b030505af860352372c865c1aab05e3..a5c2f9dd5f2eab17e296f82788726210f976bd0d 100644 --- a/python_binding/operator/pybind_Tanh.cpp +++ b/python_binding/operator/pybind_Tanh.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Tanh.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp index f5fbaf0e75ddd81265fd17e0aeb18b54f3908627..f6e2f2225e4858d3385c5d0140a863e7e7705652 100644 --- a/python_binding/operator/pybind_Transpose.cpp +++ b/python_binding/operator/pybind_Transpose.cpp @@ -17,22 +17,25 @@ #include <array> #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/operator/Transpose.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/OperatorTensor.hpp" +#include "aidge/operator/Transpose.hpp" #include "aidge/utils/Types.h" -#include "aidge/data/Tensor.hpp" namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> void declare_Transpose(py::module &m) { + const std::string pyClassName("TransposeOp" + std::to_string(DIM) + "D"); py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Attributes, OperatorTensor>( m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance()) .def("get_inputs_name", &Transpose_Op<DIM>::getInputsName) .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName) .def("attributes_name", &Transpose_Op<DIM>::staticGetAttrsName); + declare_registrable<Transpose_Op<DIM>>(m, 
pyClassName); + m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order, const std::string& name) { AIDGE_ASSERT(output_dims_order.size() == DIM, "output_dims_order size [{}] does not match DIM [{}]", output_dims_order.size(), DIM); diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp index 6c4dd29dfbb158774ea86b181503e7e7e718bda4..8f3e5880cd2fb8e8ecbee9c185ab6b476eb1f0e6 100644 --- a/python_binding/pybind_core.cpp +++ b/python_binding/pybind_core.cpp @@ -11,18 +11,19 @@ #include <pybind11/pybind11.h> -#include "aidge/backend/cpu/data/TensorImpl.hpp" // This include add Tensor - +#include "aidge/backend/cpu/data/TensorImpl.hpp" // This include add Tensor namespace py = pybind11; namespace Aidge { +void init_Random(py::module&); void init_Data(py::module&); void init_Database(py::module&); void init_DataProvider(py::module&); void init_Tensor(py::module&); void init_OperatorImpl(py::module&); void init_Attributes(py::module&); +void init_Log(py::module&); void init_Operator(py::module&); void init_OperatorTensor(py::module&); @@ -67,12 +68,15 @@ void init_GraphRegex(py::module&); void init_MatchSolution(py::module&); void init_Recipes(py::module&); +void init_GraphViewHelper(py::module&); void init_Scheduler(py::module&); void init_TensorUtils(py::module&); +void init_Filler(py::module&); +void init_Aidge(py::module& m) { + init_Random(m); -void init_Aidge(py::module& m){ init_Data(m); init_Database(m); init_DataProvider(m); @@ -85,6 +89,7 @@ void init_Aidge(py::module& m){ init_OperatorImpl(m); init_Attributes(m); + init_Log(m); init_Operator(m); init_OperatorTensor(m); init_Add(m); @@ -125,11 +130,11 @@ void init_Aidge(py::module& m){ init_MatchSolution(m); init_Recipes(m); + init_GraphViewHelper(m); init_Scheduler(m); init_TensorUtils(m); + init_Filler(m); } -PYBIND11_MODULE(aidge_core, m) { - init_Aidge(m); -} -} +PYBIND11_MODULE(aidge_core, m) { init_Aidge(m); } +} // namespace Aidge 
diff --git a/python_binding/recipes/pybind_GraphViewHelper.cpp b/python_binding/recipes/pybind_GraphViewHelper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ac56fb4b43eb5b0a737157ec9e64c6771a692816 --- /dev/null +++ b/python_binding/recipes/pybind_GraphViewHelper.cpp @@ -0,0 +1,28 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <pybind11/pybind11.h> +#include <pybind11/stl.h> + +#include <memory> +#include <set> + +#include "aidge/graph/GraphView.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/recipes/GraphViewHelper.hpp" + +namespace py = pybind11; + +namespace Aidge { +void init_GraphViewHelper(py::module &m) { + m.def("producers", &producers, py::arg("graphview")); +} +} // namespace Aidge diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp index 170aa6c271a4f08ff5ad2801b754b647fee56df6..1b541b60672cc28cfe318b7bcc029627d6491818 100644 --- a/python_binding/scheduler/pybind_Scheduler.cpp +++ b/python_binding/scheduler/pybind_Scheduler.cpp @@ -21,6 +21,7 @@ void init_Scheduler(py::module& m){ py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>>(m, "SequentialScheduler") .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view")) .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false, py::arg("data")=std::vector<Tensor>()) + .def("backward", &SequentialScheduler::backward, py::arg("data"), py::arg("instanciate_grad")=true, py::arg("verbose")=false) .def("save_scheduling_diagram", 
&SequentialScheduler::saveSchedulingDiagram, py::arg("file_name")) .def("resetScheduling", &SequentialScheduler::resetScheduling) .def("generate_scheduling", &SequentialScheduler::generateScheduling, py::arg("verbose")=false) diff --git a/python_binding/utils/pybind_Log.cpp b/python_binding/utils/pybind_Log.cpp new file mode 100644 index 0000000000000000000000000000000000000000..10a02dcafefe089c8836ee7d4e3a9783a2aa96a6 --- /dev/null +++ b/python_binding/utils/pybind_Log.cpp @@ -0,0 +1,103 @@ +#include <pybind11/pybind11.h> +#include "aidge/utils/Log.hpp" + +namespace py = pybind11; +namespace Aidge { +void init_Log(py::module& m){ + py::enum_<Log::Level>(m, "Level") + .value("Debug", Log::Debug) + .value("Info", Log::Info) + .value("Notice", Log::Notice) + .value("Warn", Log::Warn) + .value("Error", Log::Error) + .value("Fatal", Log::Fatal); + + py::class_<Log>(m, "Log") + .def_static("debug", [](const std::string& msg) { Log::debug(msg); }, py::arg("msg"), + R"mydelimiter( + Detailed messages for debugging purposes, providing information helpful + for developers to trace and identify issues. + Detailed insights of what is happening in an operation, not useful for the + end-user. The operation is performed nominally. + Note: This level is disabled at compile time for Release, therefore + inducing no runtime overhead for Release. + + :param msg: Debug message. + :type msg: str + )mydelimiter") + .def_static("info", [](const std::string& msg) { Log::info(msg); }, py::arg("msg"), + R"mydelimiter( + Messages that provide a record of the normal operation, about + the application's state, progress, or important events. + Reports normal start, end and key steps in an operation. The operation is + performed nominally. + + :param msg: Info message.
+ :type msg: str + )mydelimiter") + .def_static("notice", [](const std::string& msg) { Log::notice(msg); }, py::arg("msg"), + R"mydelimiter( + Applies to normal but significant conditions that may require monitoring, + like unusual or normal fallback events. + Reports specific paths in an operation. The operation can still be + performed normally. + + :param msg: Notice message. + :type msg: str + )mydelimiter") + .def_static("warn", [](const std::string& msg) { Log::warn(msg); }, py::arg("msg"), + R"mydelimiter( + Indicates potential issues or situations that may lead to errors but do + not necessarily cause immediate problems. + Some specific steps of the operation could not be performed, but it can + still provide an exploitable result. + + :param msg: Warning message. + :type msg: str + )mydelimiter") + .def_static("error",[](const std::string& msg) { Log::error(msg); }, py::arg("msg"), + R"mydelimiter( + Signifies a problem or unexpected condition that the application can + recover from, but attention is needed to prevent further issues. + The operation could not be performed, but it does not prevent potential + further operations. + + :param msg: Error message. + :type msg: str + )mydelimiter") + .def_static("fatal", [](const std::string& msg) { Log::fatal(msg); }, py::arg("msg"), + R"mydelimiter( + Represents a critical error or condition that leads to the termination of + the application, indicating a severe and unrecoverable problem. + The operation could not be performed and any further operation is + impossible. + + :param msg: Fatal message. + :type msg: str + )mydelimiter") + .def_static("setConsoleLevel", &Log::setConsoleLevel, py::arg("level"), + R"mydelimiter( + Set the minimum log level displayed in the console. + + :param level: Log level. + :type level: Level + )mydelimiter") + .def_static("setFileLevel", &Log::setFileLevel, py::arg("level"), + R"mydelimiter( + Set the minimum log level saved in the log file. + + :param level: Log level. 
+ :type level: Level + )mydelimiter") + .def_static("setFileName", &Log::setFileName, py::arg("fileName"), + R"mydelimiter( + Set the log file name. + Close the current log file and open the one with the new file name. + If empty, stop logging into a file. + + :param fileName: Log file name. + :type fileName: str + )mydelimiter"); +} + +} diff --git a/python_binding/utils/pybind_Random.cpp b/python_binding/utils/pybind_Random.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a1956d2d1e398cdb81673e7760a92bcde46e2de6 --- /dev/null +++ b/python_binding/utils/pybind_Random.cpp @@ -0,0 +1,24 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <pybind11/pybind11.h> +#include "aidge/utils/Random.hpp" + +namespace py = pybind11; + +namespace Aidge { + +void init_Random(py::module &m) { + auto mRand = m.def_submodule("random", "Random module."); + py::class_<Random::Generator>(mRand, "Generator") + .def_static("set_seed", Random::Generator::setSeed); +} +} // namespace Aidge diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp index 1911da228c83d66117a2591adf47dc07cd8dc674..48d615a2b0a5ccb5a51a3edb28ac68dbd7d67501 100644 --- a/src/backend/OperatorImpl.cpp +++ b/src/backend/OperatorImpl.cpp @@ -10,14 +10,16 @@ ********************************************************************************/ #include <cassert> +#include <string> #include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/Operator.hpp" #include "aidge/data/Tensor.hpp" #include "aidge/utils/ErrorHandling.hpp" -Aidge::OperatorImpl::OperatorImpl(const Operator& op): 
+Aidge::OperatorImpl::OperatorImpl(const Operator& op, const std::string& backend): mOp(op), + mBackend(backend), mNbConsumedData(mOp.nbInputs(), 0), mNbProducedData(mOp.nbOutputs(), 0) { @@ -25,14 +27,18 @@ Aidge::OperatorImpl::OperatorImpl(const Operator& op): } Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const { - assert(mOp.getRawInput(inputIdx) && "requires valid input"); + AIDGE_ASSERT(mOp.getRawInput(inputIdx), + "a valid input is required at index {} for operator type {}", + inputIdx, mOp.type()); // Requires the whole tensor by default return std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->size(); } Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const { - assert(mOp.getRawInput(inputIdx) && "requires valid input"); + AIDGE_ASSERT(mOp.getRawInput(inputIdx), + "a valid input is required at index {} for operator type {}", + inputIdx, mOp.type()); // Protect the whole tensor by default return std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->size(); @@ -40,19 +46,25 @@ Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) Aidge::NbElts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const { - assert(mOp.getRawOutput(outputIdx) && "requires valid output"); + AIDGE_ASSERT(mOp.getRawOutput(outputIdx), + "a valid output is required at index {} for operator type {}", + outputIdx, mOp.type()); // Requires the whole tensor by default, regardless of available data on inputs return std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx))->size(); } Aidge::NbElts_t Aidge::OperatorImpl::getNbConsumedData(Aidge::IOIndex_t inputIdx) const { - assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size()); + AIDGE_ASSERT(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(), + "input index ({}) is out of bound ({}) for operator type {}", + inputIdx, 
mNbConsumedData.size(), mOp.type()); return mNbConsumedData[static_cast<std::size_t>(inputIdx)]; } Aidge::NbElts_t Aidge::OperatorImpl::getNbProducedData(Aidge::IOIndex_t outputIdx) const { - assert(static_cast<std::size_t>(outputIdx) < mNbProducedData.size()); + AIDGE_ASSERT(static_cast<std::size_t>(outputIdx) < mNbProducedData.size(), + "output index ({}) is out of bound ({}) for operator type {}", + outputIdx, mNbProducedData.size(), mOp.type()); return mNbProducedData[static_cast<std::size_t>(outputIdx)]; } diff --git a/src/backend/cpu/data/TensorImpl.cpp b/src/backend/cpu/data/TensorImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..da90197e912fbeabc2f28bd3bedd91cc6f29e466 --- /dev/null +++ b/src/backend/cpu/data/TensorImpl.cpp @@ -0,0 +1,107 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include "aidge/backend/cpu/data/TensorImpl.hpp" + +#include <algorithm> // std::copy +#include <cstddef> // std::size_t +#include <cstdint> // std::uint8_t, std::int8_t, std::uint16_t, std::int16_t, + // std::uint32_t, std::int32_t, std::uint64_t, std::int64_t +#include <string> + +#include "aidge/data/half.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Types.h" + + +template <typename T> +bool Aidge::TensorImpl_cpu<T>::operator==(const Aidge::TensorImpl &other) const { + const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T>&>(other); + AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts); + + std::size_t i = 0; + for (; + i < mNbElts && + *static_cast<const T*>(rawPtr(i)) == *static_cast<const T*>(typedOtherImpl.rawPtr(i)); + ++i) + {} + return i == mNbElts; +} + +template <typename T> +void Aidge::TensorImpl_cpu<T>::zeros() { + if (mData.empty()) { + lazyInit(); + } + for (std::size_t i = 0; i < mData.size(); ++i) { + *(mData.data() + i) = T(0); + } +} + +template <typename T> +void Aidge::TensorImpl_cpu<T>::copyCast(const void *src, const Aidge::DataType srcDt, Aidge::NbElts_t length, Aidge::NbElts_t offset) { + if (length == 0) { + return; + } + + T* dstT = static_cast<T *>(rawPtr(offset)); + AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity"); + switch (srcDt) + { + case DataType::Float64: + std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length, + dstT); + break; + case DataType::Float32: + std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length, + dstT); + break; + case DataType::Float16: + std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length, + dstT); + break; + case DataType::Int64: + std::copy(static_cast<const int64_t*>(src), 
static_cast<const int64_t*>(src) + length, + dstT); + break; + case DataType::UInt64: + std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length, + dstT); + break; + case DataType::Int32: + std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length, + dstT); + break; + case DataType::UInt32: + std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length, + dstT); + break; + case DataType::Int16: + std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length, + dstT); + break; + case DataType::UInt16: + std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length, + dstT); + break; + case DataType::Int8: + std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length, + dstT); + break; + case DataType::UInt8: + std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length, + dstT); + break; + default: + AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type."); + break; + } +} \ No newline at end of file diff --git a/src/data/DataProvider.cpp b/src/data/DataProvider.cpp index 7783ed86cf4ae1d8672cc6a35a97ca9a996457b6..5c3d1d7ef3b3dd8c779cf9cda737f1a2b2f6e01f 100644 --- a/src/data/DataProvider.cpp +++ b/src/data/DataProvider.cpp @@ -41,8 +41,8 @@ Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::si } // Compute the number of bacthes depending on mDropLast boolean - mNbBatch = (mDropLast) ? - static_cast<std::size_t>(std::floor(mNbItems / mBatchSize)) : + mNbBatch = (mDropLast) ? 
+ static_cast<std::size_t>(std::floor(mNbItems / mBatchSize)) : static_cast<std::size_t>(std::ceil(mNbItems / mBatchSize)); } @@ -98,7 +98,7 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch() con void Aidge::DataProvider::setBatches(){ - + mBatches.clear(); mBatches.resize(mNbItems); std::iota(mBatches.begin(), @@ -106,7 +106,7 @@ void Aidge::DataProvider::setBatches(){ 0U); if (mShuffle){ - Random::randShuffle(mBatches); + Aidge::Random::randShuffle(mBatches); } if (mNbItems % mBatchSize !=0){ // The last batch is not full diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp index 4d8e0dcd7d29b47b7a3591652c6d3002698ab29c..b350c5bf0fa2b1af6f102c3a74486c159a7505b4 100644 --- a/src/data/Tensor.cpp +++ b/src/data/Tensor.cpp @@ -9,14 +9,50 @@ * ********************************************************************************/ -#include <vector> +#include "aidge/data/Tensor.hpp" + #include <cstddef> +#include <vector> -#include "aidge/data/Tensor.hpp" -#include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) { + if (this == &other) { + return *this; + } + resize(other.dims(), other.strides()); + setDataType(other.dataType(), false); // do not convert existing data + if (other.hasImpl()) { + if (hasImpl()) { + copyFrom(other); + } + else { + // Perform a shallow copy only + setImpl(other.mImpl, other.mImplOffset); + } + } + else { + setImpl(nullptr); + } + return *this; +} void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) { + // TODO: scalar Tensor not handled + if (dims.empty()) { // scalar + mDims = std::vector<DimSize_t>(0); + mStrides = std::vector<DimSize_t>({1}); + mContiguous = true; + + computeSize(); + if (mImpl) { + mImpl->resize(mDims); + } + return; + } + bool checkContiguous = true; if (strides.empty()) { 
strides.resize(dims.size()); @@ -31,7 +67,7 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vecto AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims"); } - if (mImpl.use_count() > 1) { + if (mImpl && mImpl.use_count() > 1) { // Here we could also create a new storage for this tensor in this case // But, is it more likely that the user really wants this, or that he did a mistake? AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage"); @@ -43,6 +79,11 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vecto mContiguous = true; if (checkContiguous) { std::size_t expectedStride = 1; + // std::size_t i = dims.size(); + // while ((i-- > 0) && (strides[i] == expectedStride)) { + // mContiguous&= (strides[i] == expectedStride); + // expectedStride*= dims[i]; + // } for (std::size_t i = dims.size()-1; i > 0; --i) { if (strides[i] != expectedStride) { mContiguous = false; @@ -148,26 +189,26 @@ std::string Aidge::Tensor::toString() const { return res; } -Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx) const { +Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& fixedCoord) const { AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous"); - AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions"); + AIDGE_ASSERT(fixedCoord.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions"); Tensor subTensor(mDataType); - subTensor.resize(std::vector<size_t>(mDims.begin() + coordIdx.size(), mDims.end()), - std::vector<size_t>(mStrides.begin() + coordIdx.size(), mStrides.end())); + subTensor.resize(std::vector<size_t>(mDims.cbegin() + fixedCoord.size(), mDims.cend()), + std::vector<size_t>(mStrides.cbegin() + fixedCoord.size(), mStrides.cend())); subTensor.setBackend(mImpl->backend(), mImpl->device().second); - 
subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx)); + subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(fixedCoord)); return subTensor; } -Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx, const std::vector<std::size_t>& dims) const { +Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& startCoord, const std::vector<std::size_t>& dims) const { AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous"); - AIDGE_ASSERT(coordIdx.size() == mDims.size(), "Coordinates does not match number of dimensions"); + AIDGE_ASSERT(startCoord.size() == mDims.size(), "Coordinates does not match number of dimensions"); Tensor subTensor(mDataType); subTensor.resize(dims, mStrides); subTensor.setBackend(mImpl->backend(), mImpl->device().second); - subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx)); + subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(startCoord)); return subTensor; } @@ -181,12 +222,12 @@ void Aidge::Tensor::makeContiguous() { // Create a new storage that will be contiguous std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims); // Copy elements from old to new storage - size_t idx = 0; + std::size_t idx = 0; while (idx < mSize) { - const size_t storageIdx = getStorageIdx(getCoord(idx)); + const std::size_t storageIdx = getStorageIdx(getCoord(idx)); // Determine the size of the contiguous chunk - size_t copySize = 1; + std::size_t copySize = 1; while (idx + copySize < mSize && getStorageIdx(getCoord(idx + copySize)) == storageIdx + copySize) { @@ -215,7 +256,7 @@ void Aidge::Tensor::copyCast(const Tensor& src) { AIDGE_ASSERT(src.isContiguous(), "cannot copy-cast non-contiguous tensor"); // Current Tensor has necessarily a data type, but may not have backend - if (!getImpl()) { + if (!hasImpl()) { // If no backend was set for the current tensor, use the same as src const auto deviceSrc = src.getImpl()->device(); 
setBackend(deviceSrc.first, deviceSrc.second); @@ -234,7 +275,7 @@ void Aidge::Tensor::copyFrom(const Tensor& src) { AIDGE_ASSERT(src.isContiguous(), "cannot copy from non-contiguous tensor"); // Current Tensor has necessarily a data type, but may not have backend - if (!getImpl()) { + if (!hasImpl()) { // If no backend was set for the current tensor, use the same as src const auto deviceSrc = src.getImpl()->device(); setBackend(deviceSrc.first, deviceSrc.second); @@ -391,3 +432,10 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const return *fallback; } } + +std::set<std::string> Aidge::Tensor::getAvailableBackends() { + std::set<std::string> backendsList; + for(const auto& tupleKey : Registrar<Tensor>::getKeys()) + backendsList.insert(std::get<0>(tupleKey)); + return backendsList; +} diff --git a/src/filler/ConstantFiller.cpp b/src/filler/ConstantFiller.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e7db5e4d02b2031e7f5cf6a0203e3c7acbd3b93e --- /dev/null +++ b/src/filler/ConstantFiller.cpp @@ -0,0 +1,40 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ +#include <memory> +#include <random> // normal_distribution, uniform_real_distribution + +#include "aidge/filler/Filler.hpp" +#include "aidge/data/Tensor.hpp" + + +template<typename T> +void Aidge::constantFiller(std::shared_ptr<Aidge::Tensor> tensor, T constantValue){ + AIDGE_ASSERT(tensor->getImpl(), + "Tensor got no implementation, cannot fill it."); + AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type"); + + std::shared_ptr<Aidge::Tensor> cpyTensor; + // Create cpy only if tensor not on CPU + Aidge::Tensor& tensorWithValues = + tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu"); + + // Setting values + for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) { + tensorWithValues.set<T>(idx, constantValue); + } + + // Copy values back to the original tensors (actual copy only if needed) + tensor->copyCastFrom(tensorWithValues); +} + + +template void Aidge::constantFiller<float>(std::shared_ptr<Aidge::Tensor>, float); +template void Aidge::constantFiller<double>(std::shared_ptr<Aidge::Tensor>, double); diff --git a/src/filler/HeFiller.cpp b/src/filler/HeFiller.cpp new file mode 100644 index 0000000000000000000000000000000000000000..74d681f1a05c15045d27a0fe678aa676d16af077 --- /dev/null +++ b/src/filler/HeFiller.cpp @@ -0,0 +1,59 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ +#include <memory> +#include <random> // normal_distribution, uniform_real_distribution + +#include "aidge/data/Tensor.hpp" +#include "aidge/filler/Filler.hpp" +#include "aidge/utils/Random.hpp" + +template <typename T> +void Aidge::heFiller(std::shared_ptr<Aidge::Tensor> tensor, + Aidge::VarianceNorm varianceNorm, T meanNorm, T scaling) { + AIDGE_ASSERT(tensor->getImpl(), + "Tensor got no implementation, cannot fill it."); + AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type"); + + unsigned int fanIn, fanOut = 0; + Aidge::calculateFanInFanOut(tensor, fanIn, fanOut); + + const T n((varianceNorm == Aidge::VarianceNorm::FanIn) ? fanIn + : (varianceNorm == Aidge::VarianceNorm::Average) + ? (fanIn + fanOut) / 2.0 + : fanOut); + + const T stdDev(std::sqrt(2.0 / n)); + + const T mean(varianceNorm == Aidge::VarianceNorm::FanIn ? meanNorm / fanIn + : (varianceNorm == Aidge::VarianceNorm::Average) + ? 
meanNorm / ((fanIn + fanOut) / 2.0) + : meanNorm / fanOut); + + std::normal_distribution<T> normalDist(mean, stdDev); + + std::shared_ptr<Tensor> cpyTensor; + // Create cpy only if tensor not on CPU + Tensor& tensorWithValues = + tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu"); + + // Setting values + for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) { + tensorWithValues.set<T>(idx, scaling*normalDist(Aidge::Random::Generator::get())); + } + + // Copy values back to the original tensors (actual copy only if needed) + tensor->copyCastFrom(tensorWithValues); +} + +template void Aidge::heFiller<float>(std::shared_ptr<Aidge::Tensor>, + Aidge::VarianceNorm, float, float); +template void Aidge::heFiller<double>(std::shared_ptr<Aidge::Tensor>, + Aidge::VarianceNorm, double, double); diff --git a/src/filler/NormalFiller.cpp b/src/filler/NormalFiller.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f30b32431cf466b10c1b10df8e0e5ccec9f483b6 --- /dev/null +++ b/src/filler/NormalFiller.cpp @@ -0,0 +1,44 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ +#include <memory> +#include <random> // normal_distribution, uniform_real_distribution + +#include "aidge/data/Tensor.hpp" +#include "aidge/filler/Filler.hpp" +#include "aidge/utils/Random.hpp" + +template <typename T> +void Aidge::normalFiller(std::shared_ptr<Aidge::Tensor> tensor, double mean, + double stdDev) { + AIDGE_ASSERT(tensor->getImpl(), + "Tensor got no implementation, cannot fill it."); + AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type"); + + std::normal_distribution<T> normalDist(mean, stdDev); + + std::shared_ptr<Tensor> cpyTensor; + // Create cpy only if tensor not on CPU + Tensor& tensorWithValues = + tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu"); + + // Setting values + for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) { + tensorWithValues.set<T>(idx, normalDist(Aidge::Random::Generator::get())); + } + + // Copy values back to the original tensors (actual copy only if needed) + tensor->copyCastFrom(tensorWithValues); +} + +template void Aidge::normalFiller<float>(std::shared_ptr<Aidge::Tensor>, double, + double); +template void Aidge::normalFiller<double>(std::shared_ptr<Aidge::Tensor>, + double, double); diff --git a/src/filler/UniformFiller.cpp b/src/filler/UniformFiller.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a942f59d717fd8d7b541ee28868a7fb9f2e7cd95 --- /dev/null +++ b/src/filler/UniformFiller.cpp @@ -0,0 +1,44 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ +#include <memory> +#include <random> // normal_distribution, uniform_real_distribution + +#include "aidge/data/Tensor.hpp" +#include "aidge/filler/Filler.hpp" +#include "aidge/utils/Random.hpp" + +template <typename T> +void Aidge::uniformFiller(std::shared_ptr<Aidge::Tensor> tensor, T min, T max) { + AIDGE_ASSERT(tensor->getImpl(), + "Tensor got no implementation, cannot fill it."); + AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type"); + + + std::uniform_real_distribution<T> uniformDist(min, max); + + std::shared_ptr<Aidge::Tensor> cpyTensor; + // Create cpy only if tensor not on CPU + Aidge::Tensor& tensorWithValues = + tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu"); + + // Setting values + for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) { + tensorWithValues.set<T>(idx, uniformDist(Aidge::Random::Generator::get())); + } + + // Copy values back to the original tensors (actual copy only if needed) + tensor->copyCastFrom(tensorWithValues); +} + +template void Aidge::uniformFiller<float>(std::shared_ptr<Aidge::Tensor>, float, + float); +template void Aidge::uniformFiller<double>(std::shared_ptr<Aidge::Tensor>, + double, double); diff --git a/src/filler/XavierFiller.cpp b/src/filler/XavierFiller.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a1de15971ca8063e504e270fa6d2275d93270460 --- /dev/null +++ b/src/filler/XavierFiller.cpp @@ -0,0 +1,90 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ +#include <memory> +#include <random> // normal_distribution, uniform_real_distribution + +#include "aidge/data/Tensor.hpp" +#include "aidge/filler/Filler.hpp" +#include "aidge/utils/Random.hpp" + +template <typename T> +void Aidge::xavierUniformFiller(std::shared_ptr<Aidge::Tensor> tensor, + T scaling, Aidge::VarianceNorm varianceNorm) { + AIDGE_ASSERT(tensor->getImpl(), + "Tensor got no implementation, cannot fill it."); + AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type"); + + unsigned int fanIn, fanOut = 0; + Aidge::calculateFanInFanOut(tensor, fanIn, fanOut); + + const T n((varianceNorm == Aidge::VarianceNorm::FanIn) ? fanIn + : (varianceNorm == Aidge::VarianceNorm::Average) + ? (fanIn + fanOut) / 2.0 + : fanOut); + const T scale(std::sqrt(3.0 / n)); + + std::uniform_real_distribution<T> uniformDist(-scale, scale); + + std::shared_ptr<Aidge::Tensor> cpyTensor; + // Create cpy only if tensor not on CPU + Aidge::Tensor& tensorWithValues = + tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu"); + // Setting values + for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) { + tensorWithValues.set<T>( + idx, scaling * uniformDist(Aidge::Random::Generator::get())); + } + + // Copy values back to the original tensors (actual copy only if needed) + tensor->copyCastFrom(tensorWithValues); +} +template <typename T> +void Aidge::xavierNormalFiller(std::shared_ptr<Aidge::Tensor> tensor, T scaling, + Aidge::VarianceNorm varianceNorm) { + AIDGE_ASSERT(tensor->getImpl(), + "Tensor got no implementation, cannot fill it."); + AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type"); + + unsigned int fanIn, fanOut = 0; + Aidge::calculateFanInFanOut(tensor, fanIn, fanOut); + + const T n((varianceNorm == Aidge::VarianceNorm::FanIn) ? fanIn + : (varianceNorm == Aidge::VarianceNorm::Average) + ? 
(fanIn + fanOut) / 2.0 + : fanOut); + const double stdDev(std::sqrt(1.0 / n)); + + std::normal_distribution<T> normalDist(0.0, stdDev); + + std::shared_ptr<Aidge::Tensor> cpyTensor; + // Create cpy only if tensor not on CPU + Aidge::Tensor& tensorWithValues = + tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu"); + + // Setting values + for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) { + tensorWithValues.set<T>( + idx, scaling * normalDist(Aidge::Random::Generator::get())); + } + + // Copy values back to the original tensors (actual copy only if needed) + tensor->copyCastFrom(tensorWithValues); +} + +template void Aidge::xavierUniformFiller<float>(std::shared_ptr<Aidge::Tensor>, + float, Aidge::VarianceNorm); +template void Aidge::xavierUniformFiller<double>(std::shared_ptr<Aidge::Tensor>, + double, Aidge::VarianceNorm); + +template void Aidge::xavierNormalFiller<float>(std::shared_ptr<Aidge::Tensor>, + float, Aidge::VarianceNorm); +template void Aidge::xavierNormalFiller<double>(std::shared_ptr<Aidge::Tensor>, + double, Aidge::VarianceNorm); diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp index 3681ac533cab36d68e5243fe0486b7d0febca694..e6dd128af2548fea10a6ee8b95b3bddde2b27b2c 100644 --- a/src/graph/GraphView.cpp +++ b/src/graph/GraphView.cpp @@ -9,23 +9,36 @@ * ********************************************************************************/ -#include <algorithm> -#include <cassert> -#include <iterator> -#include <utility> -#include <numeric> +#include "aidge/graph/GraphView.hpp" +#include <algorithm> // std::find, std::set_intersection, std::transform +#include <cassert> +#include <stdexcept> // std::runtime_error +#include <cstddef> // std::size_t +#include <cstdio> // std::fclose, std::fopen #include <fmt/format.h> -#include <fmt/ranges.h> +#include <iterator> // std::back_inserter, std::distance, std::inserter, + // std::next +#include <map> +#include <memory> // std::dynamic_pointer_cast, std::static_pointer_cast +#include 
<set> +#include <string> // std::to_string +#include <utility> // std::make_pair, std::pair +#include <vector> -#include "aidge/utils/Types.h" -#include "aidge/graph/GraphView.hpp" #include "aidge/data/Tensor.hpp" -#include "aidge/operator/OperatorTensor.hpp" -#include "aidge/operator/Producer.hpp" #include "aidge/operator/GenericOperator.hpp" #include "aidge/operator/MetaOperator.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/operator/Producer.hpp" +#include "aidge/utils/Directories.hpp" #include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Types.h" + + +const std::shared_ptr<Aidge::Node> Aidge::GraphView::operator[](const std::string& nodeName) const { + return (mNodeRegistry.find(nodeName) != mNodeRegistry.cend()) ? mNodeRegistry.at(nodeName) : nullptr; +} /////////////////////////////////////////////////////// // FUNCTIONAL DESCRIPTION @@ -56,9 +69,10 @@ Aidge::Connector Aidge::GraphView::operator()( // INNER /////////////////////////////////////////////////////// -std::string Aidge::GraphView::name() const { return mName; } +bool Aidge::GraphView::inView(const std::shared_ptr<Aidge::Node>& nodePtr) const { + return mNodes.find(nodePtr) != mNodes.cend(); +} -void Aidge::GraphView::setName(const std::string &name) { mName = name; } void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProducers) const { auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((path + ".mmd").c_str(), "w"), &std::fclose); @@ -117,8 +131,8 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd continue; } IOIndex_t outputIdx = 0; - for (auto childs : node_ptr->getOrderedChildren()) { - for (auto child : childs) { + for (const auto& childs : node_ptr->getOrderedChildren()) { + for (const auto& child : childs) { if (child != nullptr) { IOIndex_t inputIdx = 0; for (auto parent : child->inputs()) { @@ -193,6 +207,29 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool 
showProd fmt::print(fp.get(), "\n"); } +void Aidge::GraphView::logOutputs(const std::string& dirName) const { + if (!Aidge::createDirectories(dirName)){ + AIDGE_THROW_OR_ABORT(std::runtime_error, "Failed to create directory: {}.", dirName); + } + for (std::shared_ptr<Node> nodePtr : getNodes()) { + + const std::string& nodePath = dirName + "/" + Aidge::filePath(nodePtr->name()) +"/"; + if (!Aidge::createDirectories(nodePath)){ + AIDGE_THROW_OR_ABORT(std::runtime_error, "Failed to create directory: {}.", nodePath); + } + + for (IOIndex_t outIdx = 0; outIdx < nodePtr->nbOutputs(); ++outIdx) { + const std::string& inputPath = nodePath +"output_" + std::to_string(outIdx) + ".log"; + auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen(inputPath.c_str(), "w"), &std::fclose); + if (!fp) { + AIDGE_THROW_OR_ABORT(std::runtime_error, + "Could not create graph view log file: {}", inputPath); + } + fmt::print(fp.get(), "{}\n", nodePtr->getOperator()->getRawOutput(outIdx)->toString().c_str()); + } + } +} + void Aidge::GraphView::setRootNode(NodePtr node) { AIDGE_ASSERT(mNodes.find(node) != mNodes.end(), "Root node is not in the GraphView!"); mRootNode = node; @@ -202,6 +239,33 @@ void Aidge::GraphView::setRootNode(NodePtr node) { // TENSOR MANAGEMENT /////////////////////////////////////////////////////// +std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::inputNodes() const { + std::set<std::shared_ptr<Aidge::Node>> nodes; + for (const auto& node : mInputNodes) { + nodes.insert(node.first); + } + return nodes; +} + +std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::outputNodes() const { + std::set<std::shared_ptr<Aidge::Node>> nodes; + for (const auto& node : mOutputNodes) { + nodes.insert(node.first); + } + return nodes; +} + +bool Aidge::GraphView::isInputNode(const std::shared_ptr<Aidge::Node>& nodePtr) const { + const auto nodes = inputNodes(); + return (nodes.find(nodePtr) != nodes.cend()); +} + +bool Aidge::GraphView::isOutputNode(const 
std::shared_ptr<Aidge::Node>& nodePtr) const { + const auto nodes = outputNodes(); + return (nodes.find(nodePtr) != nodes.cend()); +} + + void Aidge::GraphView::setOrderedInputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& inputs) { size_t nbInputs = 0; std::vector<std::pair<NodePtr, IOIndex_t>> ignoredInputs(mInputNodes); @@ -328,19 +392,18 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType } void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims) { - std::set<NodePtr> startNodes = inputNodes(); - // setInputs // Link every tensor to the right pointer // following parent - children informations if (!dims.empty()){ - AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of dimensions and graph inputs"); + AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of given dimensions ({}) and graph inputs ({})", dims.size(), mInputNodes.size()); for (std::size_t i = 0; i < dims.size(); ++i){ auto tensor = std::make_shared<Tensor>(dims[i]); mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor); } } - + + // Ensure every node in the graph is correctly connected for (std::shared_ptr<Node> nodePtr : getNodes()) { for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) { // assess if the input was not already set and is a Tensor then link it to parent output @@ -352,74 +415,57 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_ nodePtr->getOperator()->associateInput(i, inputI.first->getOperator()->getRawOutput(inputI.second)); } else { - AIDGE_ASSERT(false, "Non-tensor entries not handled yet.\n"); + AIDGE_ASSERT(false, "Non-tensor entries not handled yet, for node {} (of type {}).", nodePtr->name(), nodePtr->type()); } } } else { AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i) - && 
!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(), + && !std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(), "Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type()); } } - - if (nodePtr->type() == Producer_Op::Type) { - startNodes.insert(nodePtr); - } } - // Compute dimensions of every node - _forwardDims(startNodes); - -} -void Aidge::GraphView::_forwardDims(std::set<std::shared_ptr<Node>> listNodes) { - // TODO: support multi-inputs/outputs - std::set<std::shared_ptr<Node>> nextList = std::set<std::shared_ptr<Node>>(); - for (std::shared_ptr<Node> nodePtr : listNodes) { - if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) { - const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator()); - if (!op->outputDimsForwarded()) { - op->computeOutputDims(); - } - if (!op->outputDimsForwarded()) { // try to compute output dimensions again later - nextList.insert(nodePtr); - } else { // compute output dimensions of children - std::set<std::shared_ptr<Node>> children = nodePtr->getChildren(); - for (auto child : children) { - const auto childOp = std::static_pointer_cast<OperatorTensor>(child->getOperator()); - if (!childOp->outputDimsForwarded()) { - nextList.insert(child); - } - } - } - } - } - if (nextList.empty()) { - for (std::shared_ptr<Node> nodePtr : getNodes()) { + // Compute dimensions of every node + std::set<std::shared_ptr<Node>> listNodes = getNodes(); + do { + std::set<std::shared_ptr<Node>> nextList; + for (std::shared_ptr<Node> nodePtr : listNodes) { if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) { - if (!std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator())->outputDimsForwarded()) { - nextList.insert(nodePtr); - } + const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator()); + // Recompute everytime, even if it was already computed in a + // previous call of forwardDims(), as the graph 
may have changed! + op->computeOutputDims(); + if (!op->outputDimsForwarded()) { + nextList.insert(nodePtr); + } } } - } - // Internal check to make sure we won't enter in an infinite loop! - AIDGE_ASSERT(nextList != listNodes, "Unable to forward dimensions (circular dependency and/or wrong dimensions?)"); + // Internal check to make sure we won't enter in an infinite loop! + if (nextList == listNodes) { + // We are stuck! + std::vector<std::string> nodesName; + std::transform(nextList.begin(), nextList.end(), + std::back_inserter(nodesName), + [](auto val){ return val->name() + " (" + val->type() + ")"; }); + AIDGE_THROW_OR_ABORT(std::runtime_error, "Unable to forward dimensions (circular dependency and/or wrong dimensions?). Unable to compute output dims for nodes {}.", nodesName); + } - if (!nextList.empty()) { - _forwardDims(nextList); + listNodes.swap(nextList); } + while (!listNodes.empty()); } -void Aidge::GraphView::setBackend(const std::string &backend, DeviceIdx_t device) { - for (auto node : getNodes()) { +void Aidge::GraphView::setBackend(const std::string &backend, const DeviceIdx_t device) const { + for (const auto& node : getNodes()) { node->getOperator()->setBackend(backend, device); } } -void Aidge::GraphView::setDataType(const Aidge::DataType &datatype) { - for (auto node : getNodes()) { +void Aidge::GraphView::setDataType(const Aidge::DataType &datatype) const { + for (const auto& node : getNodes()) { node->getOperator()->setDataType(datatype); } } @@ -458,7 +504,7 @@ Aidge::GraphView::outputs(const std::string& nodeName) const { void Aidge::GraphView::setInputId(Aidge::IOIndex_t /*inID*/, Aidge::IOIndex_t /*newNodeOutID*/) { - fmt::print("Not implemented yet.\n"); + AIDGE_THROW_OR_ABORT(std::runtime_error, "Not implemented yet."); } void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnableParam) { @@ -653,11 +699,9 @@ bool Aidge::GraphView::add(std::pair<NodePtr, std::set<NodePtr>> nodes, bool inc } bool 
Aidge::GraphView::add(std::shared_ptr<GraphView> graph) { - if (mRootNode == nullptr) { - mRootNode = graph->getRootNode(); - } - - return add(graph->getNodes(), false); + // set the rootNode to the other graphView rootNode if no rootNode yet + mRootNode = mRootNode ? mRootNode : graph->rootNode(); + return add(graph->getNodes(), false); } void Aidge::GraphView::addChild(std::shared_ptr<Node> toOtherNode, @@ -714,10 +758,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getParents() const { std::vector<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getParents(const std::string nodeName) const { std::map<std::string, std::shared_ptr<Node>>::const_iterator it = mNodeRegistry.find(nodeName); - if (it == mNodeRegistry.end()) { - fmt::print("No such node a {} in {} graph.\n", nodeName, name()); - exit(-1); - } + AIDGE_ASSERT(it != mNodeRegistry.end(), "No node named {} in graph {}.", nodeName, name()); return (it->second)->getParents(); } @@ -743,20 +784,15 @@ std::vector<std::vector<std::shared_ptr<Aidge::Node>>> Aidge::GraphView::getChildren(const std::string nodeName) const { std::map<std::string, std::shared_ptr<Node>>::const_iterator it = mNodeRegistry.find(nodeName); - if (it == mNodeRegistry.end()) { - fmt::print("No such node a {} in {} graph.\n", nodeName, name()); - exit(-1); - } + AIDGE_ASSERT(it != mNodeRegistry.end(), "No node named {} in graph {}.", nodeName, name()); return (it->second)->getOrderedChildren(); } std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getChildren(const std::shared_ptr<Node> otherNode) const { std::set<std::shared_ptr<Node>>::const_iterator it = mNodes.find(otherNode); - if (it == mNodes.end()) { - fmt::print("No such node in graph.\n"); - exit(-1); - } + AIDGE_ASSERT(it != mNodes.end(), "The node {} (of type {}) is not in graph {}.", + (otherNode) ? otherNode->name() : "#nullptr", (otherNode) ? 
otherNode->type() : "", name()); return (*it)->getChildren(); } @@ -768,7 +804,7 @@ Aidge::GraphView::getNode(const std::string& nodeName) const { if (it != mNodeRegistry.cend()) { return it->second; } else { - fmt::print("No Node named {} in the current GraphView.\n", nodeName); + Log::warn("No Node named {} in the current GraphView {}.", nodeName, name()); return nullptr; } } diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp index 7da7032012f79719f7cf64abba9a98cb84f8018a..fb252c349ca1cb966dee3e4aa72872a3d358f0a0 100644 --- a/src/graph/Node.cpp +++ b/src/graph/Node.cpp @@ -169,7 +169,9 @@ Aidge::IOIndex_t Aidge::Node::nbValidOutputs() const { } void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeoutId) { - assert(inId != gk_IODefaultIndex && (inId < nbInputs()) && "Must be a valid index"); + AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(), + "Input index ({}) is out of bound ({}) for node {} (of type {})", + inId, nbInputs(), name(), type()); if (mIdOutParents[inId] != gk_IODefaultIndex) { auto originalParent = input(inId); // remove original parent reference to child @@ -193,7 +195,7 @@ void Aidge::Node::addChildOp(std::shared_ptr<Node> otherNode, const IOIndex_t ou "Output index (#{}) of the node {} (of type {}) is out of bound (it has {} outputs), when trying to add the child node {} (of type {})", outId, name(), type(), nbOutputs(), otherNode->name(), otherNode->type()); if (otherNode->input(otherInId).second != gk_IODefaultIndex) { - fmt::print("Warning, the {}-th Parent of the child node already existed.\n", otherInId); + Log::notice("Notice: the {}-th Parent of the child node {} (of type {}) already existed", otherInId, otherNode->name(), otherNode->type()); } // manage tensors and potential previous parent otherNode->setInputId(otherInId, outId); @@ -238,23 +240,29 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const 
IOIndex_t inId) { if (getParent(inId) != nullptr) { - fmt::print("Warning, you're replacing a Parent.\n"); + Log::notice("Notice: you are replacing an existing parent for node {} (of type {})", name(), type()); } - assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Input index out of bound."); + AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(), + "Input index ({}) is out of bound ({}) for node {} (of type {})", + inId, nbInputs(), name(), type()); mParents[inId] = other_node; } std::vector<std::shared_ptr<Aidge::Node>> Aidge::Node::getParents() const { return mParents; } std::shared_ptr<Aidge::Node> Aidge::Node::popParent(const IOIndex_t inId) { - assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Input index out of bound."); + AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(), + "Input index ({}) is out of bound ({}) for node {} (of type {})", + inId, nbInputs(), name(), type()); std::shared_ptr<Node> val = mParents[inId]; removeParent(inId); return val; } bool Aidge::Node::removeParent(const IOIndex_t inId) { - assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Parent index out of bound."); + AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(), + "Input index ({}) is out of bound ({}) for node {} (of type {})", + inId, nbInputs(), name(), type()); if (mParents[inId]) { mParents[inId] = nullptr; mIdOutParents[inId] = gk_IODefaultIndex; diff --git a/src/graphRegex/GraphRegex.cpp b/src/graphRegex/GraphRegex.cpp index 00a031e3fa9b03ff1870446b9ae58e8d3eb65bf7..ca15ff8dec5ff5ebd4ea69141c6e286849162bb5 100644 --- a/src/graphRegex/GraphRegex.cpp +++ b/src/graphRegex/GraphRegex.cpp @@ -117,6 +117,8 @@ std::set<std::shared_ptr<MatchSolution>> GraphRegex::match(std::shared_ptr<Graph std::vector<std::shared_ptr<MatchSolution>> solution = fsm->test(combination); solutions.insert(solutions.end(), solution.begin(), solution.end()); } + + } return _findLargestCompatibleSet(solutions); } @@ -142,7 +144,10 @@ void 
GraphRegex::setNodeKey(const std::string key,std::function<bool(NodePtr)> f throw std::runtime_error(key + " is define"); } mAllLambda[key] = f; + _majConditionalInterpreterLambda(); + //we add the lambda as key by default + setNodeKey(key, key + "($)==true"); } void GraphRegex::_majConditionalInterpreterLambda(){ diff --git a/src/nodeTester/ConditionalLexer.cpp b/src/nodeTester/ConditionalLexer.cpp index 9379bd8409f8f7ec4bae3e0122f88de79718e9dd..e70772fc1a5d6136fb56f5981d73bf6cb0622991 100644 --- a/src/nodeTester/ConditionalLexer.cpp +++ b/src/nodeTester/ConditionalLexer.cpp @@ -120,7 +120,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo } - if (std::regex_match(currentChars,std::regex("(true|false)"))){ + if (std::regex_match(currentChars,std::regex("(true|false|True|False)"))){ return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::BOOL,currentChars); } else if (isLambda){ diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp index a54302d06059d43336800d81e4d18744b6243785..85bc4b7aef53e8064a8f31815a42689013880812 100644 --- a/src/operator/Add.cpp +++ b/src/operator/Add.cpp @@ -14,12 +14,24 @@ #include <string> #include <vector> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Add.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" const std::string Aidge::Add_Op::Type = "Add"; +Aidge::Add_Op::Add_Op(const Add_Op& op) + : OperatorTensor(op) +{ + if (op.mImpl) { + SET_IMPL_MACRO(Add_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } +} + void Aidge::Add_Op::computeOutputDims() { // check inputs have been associated bool associated = (nbInputs() > 0); // do not compute anything if no input @@ -59,3 +71,8 @@ void Aidge::Add_Op::computeOutputDims() { mOutputs[0]->resize(outDims); } } + +void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) { + SET_IMPL_MACRO(Add_Op, *this, name); + 
mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp index f09d8eb83c6a6dae6416ffebcc01b22fb479a862..3e594b49404999fee10eed3a22a7c0a78f765df0 100644 --- a/src/operator/Cast.cpp +++ b/src/operator/Cast.cpp @@ -9,9 +9,17 @@ * ********************************************************************************/ -#include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/Cast.hpp" +#include <memory> +#include <string> +#include <vector> + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + const std::string Aidge::Cast_Op::Type = "Cast"; void Aidge::Cast_Op::forward() { @@ -24,3 +32,8 @@ void Aidge::Cast_Op::forward() { runHooks(); } + +void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Cast_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp index eafcd126480df6da2c0127bdbb896d3ce98d0e0a..7df5b6dbf6122da44aed280da0d717232ba42fef 100644 --- a/src/operator/Concat.cpp +++ b/src/operator/Concat.cpp @@ -9,8 +9,49 @@ * ********************************************************************************/ +#include "aidge/operator/Concat.hpp" + #include <string> +#include <vector> -#include "aidge/operator/Concat.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Concat_Op::Type = "Concat"; + +void Aidge::Concat_Op::computeOutputDims() { + // Every input is non-empty with the same number of dimensions + bool associated = (getInput(0) != nullptr); + associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input + auto outputDims = getInput(0)->dims(); + const auto firstInputNbDims = getInput(0) -> nbDims(); + for 
(IOIndex_t i = 1; i < nbInputs(); ++i) { + if (!getInput(i)) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i); + } + + if (getInput(i)->nbDims() == firstInputNbDims) { + for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) { + if (dim == getAttr<ConcatAttr::Axis>()) { + outputDims[dim] += getInput(i)->dims()[dim]; + } + else { + associated &= (getInput(i)->dims()[dim] == outputDims[dim]); + } + } + } + else { + associated = false; + break; + } + } + if (associated) { + getOutput(0)->resize(outputDims); + } +} -const std::string Aidge::Concat_Op::Type = "Concat"; \ No newline at end of file +void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) { + SET_IMPL_MACRO(Concat_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp index 6b55338f4ab7ac9131231fcced21869274c1bd47..5ffe5f08dbcbfe42c406846990c432a7fbd325e0 100644 --- a/src/operator/Div.cpp +++ b/src/operator/Div.cpp @@ -14,6 +14,7 @@ #include <string> #include <vector> +#include "aidge/data/Tensor.hpp" #include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/Div.hpp" #include "aidge/utils/Types.h" @@ -50,4 +51,10 @@ void Aidge::Div_Op::computeOutputDims() { } mOutputs[0]->resize(outDims); } -} \ No newline at end of file +} + + +void Aidge::Div_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Div_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/Erf.cpp b/src/operator/Erf.cpp index 387af4edf417f8c7ac6ee9b8b2b7069179ad59cb..81c87f10b10210c2af203a05df53e3330bb33b72 100644 --- a/src/operator/Erf.cpp +++ b/src/operator/Erf.cpp @@ -9,8 +9,17 @@ * ********************************************************************************/ +#include "aidge/operator/Erf.hpp" + #include <string> -#include "aidge/operator/Erf.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" 
+#include "aidge/utils/Types.h" + +const std::string Aidge::Erf_Op::Type = "Erf"; -const std::string Aidge::Erf_Op::Type = "Erf"; \ No newline at end of file +void Aidge::Erf_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Erf_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp index 32114f5bf9e0d160db9fdc2d1971481be0b4e703..9865d64f6a0b87be96244bc4b39c91b605f02b6f 100644 --- a/src/operator/FC.cpp +++ b/src/operator/FC.cpp @@ -9,8 +9,52 @@ * ********************************************************************************/ +#include "aidge/operator/FC.hpp" + +#include <memory> #include <string> +#include <vector> -#include "aidge/operator/FC.hpp" +#include "aidge/data/Data.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::FC_Op::Type = "FC"; + +void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) { + AIDGE_ASSERT(inputIdx < 3, "Operators {} supports only {} inputs", type(), nbInputs()); + AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type"); + // TODO: FIXME: check this, because data dims may not be initialized at this point... + //if (inputIdx == 2) { + // assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template getAttr<FCAttr::NoBias>()) == false ? 
static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>()) : 0)); + // assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1); + //} + mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data); + if (inputIdx == 0 && getInput(0)->nbDims() == 1) + mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()}); +} + +void Aidge::FC_Op::computeOutputDims() { + bool associated = true; + for (IOIndex_t i = 0; i < nbInputs(); ++i) { + if (!getInput(i)) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i); + } + associated &= !(getInput(i)->empty()); + } + if (associated) { + // <batch, OutChannels> + mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()}); + } +} + +void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(FC_Op, *this, name); + mOutputs[0]->setBackend(name, device); -const std::string Aidge::FC_Op::Type = "FC"; \ No newline at end of file + // By default, automatically set backend for weight and bias inputs + getInput(1)->setBackend(name, device); + getInput(2)->setBackend(name, device); +} diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp index b5f9d738a0280b3bacdb2ce201c8303b2b4d0a1f..259e6513994970eb7e677f44c981888388825fae 100644 --- a/src/operator/Gather.cpp +++ b/src/operator/Gather.cpp @@ -9,15 +9,18 @@ * ********************************************************************************/ -#include <cstddef> -#include <cstdint> +#include "aidge/operator/Gather.hpp" + +#include <cstddef> // std::size_t +#include <cstdint> // std::int64_t #include <string> #include <vector> -#include "aidge/operator/Gather.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" + const std::string Aidge::Gather_Op::Type = "Gather"; void Aidge::Gather_Op::computeOutputDims() { @@ -44,4 +47,9 @@ void Aidge::Gather_Op::computeOutputDims() { 
mOutputs[0]->resize(outDims); } -} \ No newline at end of file +} + +void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Gather_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp index 5556f4ff5c87d1adc23f5bff1aaf90c230de06cc..3eae49b69ce639529d49dd1c0d241f12ece5d98b 100644 --- a/src/operator/GenericOperator.cpp +++ b/src/operator/GenericOperator.cpp @@ -9,13 +9,48 @@ * ********************************************************************************/ +#include "aidge/operator/GenericOperator.hpp" + +#include <cstddef> // std::size_t #include <vector> -#include "aidge/operator/GenericOperator.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Types.h" +#include "aidge/utils/ErrorHandling.hpp" const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Identity - = [](const std::vector<std::vector<size_t>>& inputsDims) { return inputsDims; }; + = [](const std::vector<std::vector<std::size_t>>& inputsDims) { return inputsDims; }; const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs) { - return [nbOutputs, inputIdx](const std::vector<std::vector<size_t>>& inputsDims) { return std::vector<std::vector<size_t>>(nbOutputs, inputsDims[inputIdx]); }; + return [nbOutputs, inputIdx](const std::vector<std::vector<std::size_t>>& inputsDims) { return std::vector<std::vector<std::size_t>>(nbOutputs, inputsDims[inputIdx]); }; } + +void Aidge::GenericOperator_Op::computeOutputDims() { + if (mComputeOutputDims) { + std::vector<std::vector<std::size_t>> inputsDims(nbInputs(), std::vector<std::size_t>()); + for (std::size_t i = 0; i < nbInputs(); ++i) { + if (getInput(i)) { + inputsDims[i] = getInput(i)->dims(); + } + } + + const auto& outputsDims = mComputeOutputDims(inputsDims); + AIDGE_ASSERT((outputsDims.size() == nbOutputs()), "The 
provided ComputeDimsFunc function returns the wrong number of outputs"); + for (std::size_t i = 0; i < nbOutputs(); ++i) { + mOutputs[i]->resize(outputsDims[i]); + } + } + else { + AIDGE_ASSERT(false, "Cannot compute output dim of a GenericOperator"); + } +} + +bool Aidge::GenericOperator_Op::outputDimsForwarded() const { + if (mComputeOutputDims) { + return !(mOutputs[0]->empty()); + } + else { + AIDGE_ASSERT(false, "GenericOperator cannot forward dims"); + return false; + } +} \ No newline at end of file diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp index f48c7ca81d6abd1d5150f54eb7d98bf109307d33..56899875338d487294163aa018e0d98b5f7a5269 100644 --- a/src/operator/MatMul.cpp +++ b/src/operator/MatMul.cpp @@ -13,6 +13,7 @@ #include <string> #include <vector> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/MatMul.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" @@ -70,3 +71,8 @@ void Aidge::MatMul_Op::computeOutputDims() { mOutputs[0]->resize(outDims); } } + +void Aidge::MatMul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(MatMul_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp index 6e34c1a2005f551c255e9b7441e853015354337f..6e54a234d2fc78c8e8e9a43a7528709c8e51adc4 100644 --- a/src/operator/Memorize.cpp +++ b/src/operator/Memorize.cpp @@ -9,9 +9,17 @@ * ********************************************************************************/ -#include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/Memorize.hpp" +#include <memory> +#include <string> +#include <vector> + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Types.h" + const std::string Aidge::Memorize_Op::Type = "Memorize"; void Aidge::Memorize_Op::computeOutputDims() { @@ -33,6 +41,11 @@ void Aidge::Memorize_Op::computeOutputDims() { } } 
+void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Memorize_Op>::create({name})(*this); + mOutputs[0]->setBackend(name, device); +} + bool Aidge::Memorize_Op::outputDimsForwarded() const { // Only check the output dims bool forwarded = true; diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp index 883185021b395b42e5c47ef0461ebc0614f14456..45e7556265d1af4e95e50be4cf60e8067ded332f 100644 --- a/src/operator/MetaOperator.cpp +++ b/src/operator/MetaOperator.cpp @@ -10,9 +10,16 @@ ********************************************************************************/ #include "aidge/operator/MetaOperator.hpp" + +#include <cstddef> // std::size_t +#include <memory> +#include <string> + +#include "aidge/data/Tensor.hpp" +#include "aidge/graph/GraphView.hpp" #include "aidge/utils/ErrorHandling.hpp" -Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph) +Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph) : OperatorTensor(type, graph->dataInputs().size(), (graph->getOrderedInputs().size() - graph->dataInputs().size()), graph->getOrderedOutputs().size()), mGraph(graph) { diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp index d4a594e95b2695b496fc28b8e8a7fcf3442e9253..89bef9e0edcf6731dfbaf9ebf48ebddf5b71e815 100644 --- a/src/operator/Mul.cpp +++ b/src/operator/Mul.cpp @@ -10,14 +10,16 @@ ********************************************************************************/ #include <cstddef> // std::size_t +#include <memory> #include <stdexcept> // std::runtime_error #include <string> #include <vector> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Mul.hpp" -#include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Types.h" const std::string Aidge::Mul_Op::Type = "Mul"; @@ -53,4 +55,9 @@ void 
Aidge::Mul_Op::computeOutputDims() { else if (!getInput(0)->empty() && !getInput(1)->empty()) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible input dimensions for Operator Mul: {} and {}", getInput(0)->dims(), getInput(1)->dims()); } -} \ No newline at end of file +} + +void Aidge::Mul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Mul_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp index 289b2be90735d848e5083090d2ae4319a7490fde..e4213cad80ebdc177649b0c25e4fc49222993211 100644 --- a/src/operator/Operator.cpp +++ b/src/operator/Operator.cpp @@ -75,4 +75,7 @@ void Aidge::Operator::forward() { runHooks(); } -void Aidge::Operator::backward() { mImpl->backward(); } +void Aidge::Operator::backward() { + AIDGE_ASSERT(mImpl != nullptr, "backward(): an implementation is required for {}!", type()); + mImpl->backward(); +} diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp index c0ada265410f9bc46aab3b43fae270f1e74dd5eb..b85c18040ad84a1e9b1ea1f8b475c32260b6587a 100644 --- a/src/operator/OperatorTensor.cpp +++ b/src/operator/OperatorTensor.cpp @@ -19,6 +19,32 @@ #include "aidge/utils/ErrorHandling.hpp" +Aidge::OperatorTensor::OperatorTensor(const std::string& type, + const IOIndex_t nbData, + const IOIndex_t nbParam, + const IOIndex_t nbOut) +: Operator(type, nbData, nbParam, nbOut, OperatorType::Tensor), + mInputs(std::vector<std::shared_ptr<Tensor>>(nbData + nbParam, nullptr)), + mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) { + for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) { + mOutputs[i] = std::make_shared<Tensor>(); + mOutputs[i]->setDataType(DataType::Float32); + } +} + + +Aidge::OperatorTensor::OperatorTensor(const OperatorTensor& other) + : Operator(other), + mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)), + 
mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) { + for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) { + mOutputs[i] = std::make_shared<Tensor>(); + // mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i))); + // datatype already copied + } +} + + void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) { AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs()); AIDGE_ASSERT(data->type() == Tensor::Type, "Input data must be of Tensor type"); @@ -45,6 +71,9 @@ void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, std::share } } +std::shared_ptr<Aidge::Data> Aidge::OperatorTensor::getRawInput(const Aidge::IOIndex_t inputIdx) const { + return std::static_pointer_cast<Data>(getInput(inputIdx)); +} const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getInput(const Aidge::IOIndex_t inputIdx) const { AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs()); return mInputs[inputIdx]; @@ -53,13 +82,23 @@ const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getInput(const Aidg void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) { AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type()); AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs()); - *mOutputs[outputIdx] = *std::dynamic_pointer_cast<Tensor>(data); + const auto& data_tensor = std::dynamic_pointer_cast<Tensor>(data); + // if (mImpl) + // AIDGE_ASSERT(data_tensor->getImpl()->backend() == backend(), "Data parameter and Operator have different backends: {} and {}", data_tensor->getImpl()->backend(), backend()); + *mOutputs[outputIdx] = *data_tensor; } void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) { AIDGE_ASSERT(data->type() == 
Tensor::Type, "{} Operator only accepts Tensors as inputs", type()); AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs()); - *mOutputs[outputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data)); + auto&& data_tensor = std::dynamic_pointer_cast<Tensor>(data); + // if (mImpl) + // AIDGE_ASSERT(data_tensor->getImpl()->backend() == backend(), "Data parameter and Operator have different backends: {} and {}", data_tensor->getImpl()->backend(), backend()); + *mOutputs[outputIdx] = std::move(*data_tensor); +} + +std::shared_ptr<Aidge::Data> Aidge::OperatorTensor::getRawOutput(const Aidge::IOIndex_t outputIdx) const { + return std::static_pointer_cast<Data>(getOutput(outputIdx)); } const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aidge::IOIndex_t outputIdx) const { diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp index 3dd65eb4d34266f6e419bdc86362b8da4a55fdf0..06999e301ce0968b2d9979e47f412c02e59de3ad 100644 --- a/src/operator/Pop.cpp +++ b/src/operator/Pop.cpp @@ -9,9 +9,17 @@ * ********************************************************************************/ +#include "aidge/operator/Pop.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Pop.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" + const std::string Aidge::Pop_Op::Type = "Pop"; @@ -36,3 +44,8 @@ void Aidge::Pop_Op::forward() { Operator::forward(); ++this->template getAttr<PopAttr::ForwardStep>(); } + +void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Pop_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp index 5e29eae0c0f42e7d566a933e9409766026369dad..72a04de04fda8a432309de8b4a69b1dfb6af1370 100644 --- a/src/operator/Pow.cpp +++ b/src/operator/Pow.cpp @@ 
-15,6 +15,7 @@ #include <vector> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Pow.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" @@ -50,4 +51,9 @@ void Aidge::Pow_Op::computeOutputDims() { } mOutputs[0]->resize(outDims); } +} + +void Aidge::Pow_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Pow_Op, *this, name); + mOutputs[0]->setBackend(name, device); } \ No newline at end of file diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp index 7bccbe763b90f2697997a889b30b610e4b531334..43e991288c483f07138a2b236a2c4925ea0a3754 100644 --- a/src/operator/Producer.cpp +++ b/src/operator/Producer.cpp @@ -9,8 +9,114 @@ * ********************************************************************************/ +#include "aidge/operator/Producer.hpp" + +#include <cstddef> +#include <array> +#include <memory> #include <string> -#include "aidge/operator/Producer.hpp" +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" + const std::string Aidge::Producer_Op::Type = "Producer"; + + +Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, bool constant) + : OperatorTensor(Type, 0, 0, 1), + Attributes_(attr<ProdAttr::Constant>(constant)) +{ + mOutputs[0] = tensor; // copy the pointer of the Tensor +#ifdef PYBIND + if(Py_IsInitialized()) { + auto obj = py::cast(&(*this)); + setImpl((mOutputs[0]->hasImpl()) ? + (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ? + Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : + std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) : + nullptr); + } else { + setImpl((mOutputs[0]->hasImpl()) ? 
+ (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ? + Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : + std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) : + nullptr); + } +#else + setImpl((mOutputs[0]->hasImpl()) ? + (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ? + Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : + std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) : + nullptr); +#endif +} + +/** + * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), + * but not its input tensors (the new operator has no input associated). + * @param op OperatorTensor to copy. + */ +Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op) + : OperatorTensor(op), + Attributes_(op) +{ + mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0))); +#ifdef PYBIND + if(Py_IsInitialized()) { + auto obj = py::cast(&(*this)); + setImpl((mOutputs[0]->hasImpl()) ? + (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ? + Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : + std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) : + nullptr); + } else { + setImpl((mOutputs[0]->hasImpl()) ? + (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ? + Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : + std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) : + nullptr); + } +#else + setImpl((mOutputs[0]->hasImpl()) ? + (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ? 
+ Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : + std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) : + nullptr); +#endif + // if (mOutputs[0]->hasImpl()) { + // if (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){ + // setImpl(Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this)); + // } + // else { + // mImpl = std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend()); + // } + + // } else { + // mImpl = nullptr; + // } +} + +void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { +#ifdef PYBIND + if(Py_IsInitialized()) { + auto obj = py::cast(&(*this)); + setImpl((Registrar<Producer_Op>::exists({name})) ? + Registrar<Producer_Op>::create(name)(*this) : + std::make_shared<OperatorImpl>(*this, name)); + } else { + setImpl((Registrar<Producer_Op>::exists({name})) ? + Registrar<Producer_Op>::create(name)(*this) : + std::make_shared<OperatorImpl>(*this, name)); + } +#else + setImpl((Registrar<Producer_Op>::exists({name})) ? 
+ Registrar<Producer_Op>::create(name)(*this) : + std::make_shared<OperatorImpl>(*this, name)); +#endif + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/ReLU.cpp b/src/operator/ReLU.cpp index 0f7874acfe7d865ea8c56d4bca02b51864480df6..7b945a7d62ab0ef7f73a25f6f74430e725d17b48 100644 --- a/src/operator/ReLU.cpp +++ b/src/operator/ReLU.cpp @@ -9,8 +9,17 @@ * ********************************************************************************/ +#include "aidge/operator/ReLU.hpp" + +#include <memory> #include <string> -#include "aidge/operator/ReLU.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::ReLU_Op::Type = "ReLU"; -const std::string Aidge::ReLU_Op::Type = "ReLU"; \ No newline at end of file +void Aidge::ReLU_Op::setBackend(const std::string& name, DeviceIdx_t device) { + SET_IMPL_MACRO(ReLU_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0de676e22ec668a9b41d7d61f184465d431715a2 --- /dev/null +++ b/src/operator/ReduceMean.cpp @@ -0,0 +1,61 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include "aidge/operator/ReduceMean.hpp" + +#include <algorithm> // std::for_each, std::sort +#include <cstddef> // std::size_t +#include <cstdint> // std::int32_t +#include <memory> +#include <stdexcept> // std::runtime_error +#include <string> +#include <vector> + +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::ReduceMean_Op::Type = "ReduceMean"; + +void Aidge::ReduceMean_Op::computeOutputDims() { + if (!getInput(0)) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor"); + } + if (!getInput(0)->empty()) { + // make Axes attribute positive + std::vector<std::int32_t>& axes = this->template getAttr<ReduceMeanAttr::Axes>(); + std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) { + if (val < 0) + val+=static_cast<std::int32_t>(getInput(0)->nbDims()); + }); + std::sort(axes.begin(), axes.end()); + + // build output dimensions + std::vector<DimSize_t> outDims = getInput(0)->dims(); + if (this->template getAttr<ReduceMeanAttr::KeepDims>()) { + std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; }); + } + else { + for (auto it = axes.crbegin(); it != axes.crend(); ++it) + outDims.erase(outDims.begin() + static_cast<std::size_t>(*it)); + } + + // TODO: change {1} for {} when scalar Tensors are better handled. + mOutputs[0]->resize((outDims.size()>0) ? 
outDims : std::vector<DimSize_t>({1})); + + } + } + +void Aidge::ReduceMean_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(ReduceMean_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp index 30b060cd2a58d7995a7447bd9b85b9bc0026a7f7..79cfc0659849248bac791ba5b1db25096824e928 100644 --- a/src/operator/Reshape.cpp +++ b/src/operator/Reshape.cpp @@ -9,14 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Reshape.hpp" + #include <cstddef> // std::size_t #include <cstdint> // std::int64_t +#include <memory> #include <stdexcept> // std::runtime_error #include <string> #include <vector> -#include "aidge/operator/Reshape.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" const std::string Aidge::Reshape_Op::Type = "Reshape"; @@ -55,4 +59,9 @@ void Aidge::Reshape_Op::computeOutputDims() { mOutputs[0]->resize(outDims); } +} + +void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Reshape_Op, *this, name); + mOutputs[0]->setBackend(name, device); } \ No newline at end of file diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp index 4c121e1268c1e1a62f793f38c6d816e7c6b48c25..8b0d6f9db698e36d232dec38fd8cdd0fad5f8c59 100644 --- a/src/operator/Scaling.cpp +++ b/src/operator/Scaling.cpp @@ -9,8 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Scaling.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Scaling.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Scaling_Op::Type = "Scaling"; -const std::string Aidge::Scaling_Op::Type = "Scaling"; \ 
No newline at end of file +void Aidge::Scaling_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Scaling_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Sigmoid.cpp b/src/operator/Sigmoid.cpp index 48ed5f8286712c94bcf87f3234e70080652ab141..a6edcf823695f95253d6c56e45975480909679d3 100644 --- a/src/operator/Sigmoid.cpp +++ b/src/operator/Sigmoid.cpp @@ -9,8 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Sigmoid.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Sigmoid.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Sigmoid_Op::Type = "Sigmoid"; -const std::string Aidge::Sigmoid_Op::Type = "Sigmoid"; \ No newline at end of file +void Aidge::Sigmoid_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Sigmoid_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp index e88ff4bb4ec6e2cb1357d578c2d07cc4edcb59f7..612c61b0f66b97eb4630214538a22154a67b80d8 100644 --- a/src/operator/Softmax.cpp +++ b/src/operator/Softmax.cpp @@ -9,8 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Softmax.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Softmax.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Softmax_Op::Type = "Softmax"; -const std::string Aidge::Softmax_Op::Type = "Softmax"; \ No newline at end of file +void Aidge::Softmax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Softmax_Op>::create(name)(*this); + 
mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Sqrt.cpp b/src/operator/Sqrt.cpp index dbcaba42619762f8fd00bb2f6e0aa0de11d92960..d8ac8b8b0bf28110bd52493d7833f64e9e80fc6a 100644 --- a/src/operator/Sqrt.cpp +++ b/src/operator/Sqrt.cpp @@ -9,8 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Sqrt.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Sqrt.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Sqrt_Op::Type = "Sqrt"; -const std::string Aidge::Sqrt_Op::Type = "Sqrt"; \ No newline at end of file +void Aidge::Sqrt_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Sqrt_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp index 9d933bf6c97348842fae8f405d3e709e68d56916..0c12e6a1fdb7f3b1056e19bf694996d0061b5b04 100644 --- a/src/operator/Sub.cpp +++ b/src/operator/Sub.cpp @@ -9,15 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Sub.hpp" + #include <cstddef> // std::size_t #include <stdexcept> // std::runtime_error #include <string> #include <vector> #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/operator/Sub.hpp" -#include "aidge/utils/Types.h" +#include "aidge/data/Tensor.hpp" #include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" const std::string Aidge::Sub_Op::Type = "Sub"; @@ -50,4 +53,9 @@ void Aidge::Sub_Op::computeOutputDims() { } mOutputs[0]->resize(outDims); } -} \ No newline at end of file +} + +void Aidge::Sub_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Sub_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} 
diff --git a/src/operator/Tanh.cpp b/src/operator/Tanh.cpp index de55a6d6c69df5706b945ef9f56027f7a09ce8d7..c113ee6f2da52f40a66a8df04ca33ec4b85f3387 100644 --- a/src/operator/Tanh.cpp +++ b/src/operator/Tanh.cpp @@ -9,8 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Tanh.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Tanh.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Tanh_Op::Type = "Tanh"; -const std::string Aidge::Tanh_Op::Type = "Tanh"; \ No newline at end of file +void Aidge::Tanh_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Tanh_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/recipes/FuseMulAdd.cpp b/src/recipes/FuseMulAdd.cpp index f408959a13d007853c24e30c1ef683648cf9c200..b57c1c3fc5e4b12dbd0004472a864ddaa864116e 100644 --- a/src/recipes/FuseMulAdd.cpp +++ b/src/recipes/FuseMulAdd.cpp @@ -64,7 +64,7 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr< { // If both inputs are producers, there is an ambiguity, but both options // result in a correct solution. 
- fmt::print("Warning: both MatMul inputs are Producers, assume data at input#0 and weights at input#1.\n"); + Log::notice("Notice: both MatMul inputs are Producers, assume data at input#0 and weights at input#1."); weight = matmulNode->getParent(1)->cloneSharedOperators(); } AIDGE_ASSERT(weight != nullptr, "Could not deduce weight input for MatMul operator."); diff --git a/src/recipes/GraphViewHelper.cpp b/src/recipes/GraphViewHelper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3b42db7fe18d2269b95cf35fd92851d1e3684bad --- /dev/null +++ b/src/recipes/GraphViewHelper.cpp @@ -0,0 +1,57 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <memory> +#include <set> + +#include "aidge/data/Tensor.hpp" +#include "aidge/graph/Node.hpp" +#include "aidge/graph/GraphView.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/recipes/GraphViewHelper.hpp" + + +std::set<std::shared_ptr<Aidge::Tensor>> Aidge::producers(std::shared_ptr<Aidge::GraphView> graphview) { + std::set<std::shared_ptr<Tensor>> res; + const auto& nodes = graphview->getNodes(); + for (const auto& node : nodes) { + if (node->type() == "Producer") { + const auto& param = std::static_pointer_cast<OperatorTensor>(node->getOperator()); + res.insert(param->getOutput(0)); + } + } + return res; +} + + +std::set<std::shared_ptr<Aidge::Tensor>> Aidge::parameters(std::shared_ptr<Aidge::GraphView> graphview) { + std::set<std::shared_ptr<Tensor>> res; + const auto& nodes = graphview->getNodes(); + for (const auto& node : nodes) { + const auto& param = 
std::static_pointer_cast<OperatorTensor>(node->getOperator()); + for (std::size_t o = 0; o < param->nbOutputs(); ++o) { + res.insert(param->getOutput(o)); + } + } + return res; +} + +void Aidge::compile_gradient(std::shared_ptr<Aidge::GraphView> gv) { + for (const auto& node : gv->getNodes()) { + // TODO: check that each node is an OperatorTensor + AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Cannot instanciate gradient of an Operator ({}) that doesn't use Tensor.", node->getOperator()->type()); + const std::shared_ptr<OperatorTensor> op = std::dynamic_pointer_cast<OperatorTensor>(node -> getOperator()); + for (std::size_t o = 0; o < node -> nbOutputs(); ++o) { + op->getOutput(o)->initGradient(); + } + } +} \ No newline at end of file diff --git a/src/recipes/RemoveDropout.cpp b/src/recipes/RemoveDropout.cpp index d141f5d3a74e42f8f0fc5465fda043f91f37d5bc..4f8805845bd1f46fd187cba3564b031c55c4655a 100644 --- a/src/recipes/RemoveDropout.cpp +++ b/src/recipes/RemoveDropout.cpp @@ -10,7 +10,6 @@ ********************************************************************************/ #include <memory> -#include <iostream> #include "aidge/graph/Node.hpp" #include "aidge/graph/GraphView.hpp" diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp index f6ef29698e34cf03125e23ac925f8a8d93321ff9..94baf6a3e7b6e2e86de4e2d72ed19bfd9338392e 100644 --- a/src/scheduler/Scheduler.cpp +++ b/src/scheduler/Scheduler.cpp @@ -21,8 +21,9 @@ #include "aidge/graph/GraphView.hpp" #include "aidge/graph/Node.hpp" -#include "aidge/utils/Types.h" #include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Types.h" +#include "aidge/recipes/GraphViewHelper.hpp" #include "aidge/operator/Producer.hpp" #include "aidge/operator/Memorize.hpp" #include "aidge/operator/MetaOperator.hpp" @@ -71,14 +72,14 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { do { // 2) From the current consumers list, check if any prior consumer node - // is 
needed. A prior will generally be required for any node consuming + // is needed. A prior will generally be required for any node consuming // parameters (weights and bias) that is not an input node. // If for a given node, only parent producers (at any depth) are needed // to satisfy its required data, it becomes a prior. // If the prior node is a producer, it is added to the list of required // producers. // If the prior node is of another type, it replaces the initial consumer - // in the new priorConsumers list. The initial consumer will become + // in the new priorConsumers list. The initial consumer will become // again a consumer later, by construction. if (verbose) fmt::print("List of consumers with their priors:\n"); std::set<std::shared_ptr<Node>> requiredProducers; @@ -130,7 +131,7 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { } // 5) Find runnable consumers. - // A consumer is runnable if the required data is available for all of + // A consumer is runnable if the required data is available for all of // its inputs. At this point, not all consumers are necessarily // runnable because some may depend on the execution of others (when // there is multiple successive priors for example). @@ -154,7 +155,7 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { fmt::print("{}", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1)); fmt::print("\n"); } - + bool isRunnable = true; for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbInputs(); ++inputIdx) { if (/*consumer->getOperator()->getNbRequiredData(inputIdx) > 0 @@ -190,7 +191,7 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { // 6) Push runnable consumers in the list of nodes to run and update the // consumer producer system. - // At this point, simultaneously runnable consumers have no data + // At this point, simultaneously runnable consumers have no data // dependency and could be run in parallel! 
for (const auto& runnable : runnableConsumers) { if (verbose) fmt::print("Runnable: {}\n", namePtrTable[runnable]); @@ -324,7 +325,7 @@ Aidge::MemoryManager Aidge::SequentialScheduler::generateMemory(bool incProducer memManager.releaseDependencies(node); continue; } - + const auto childs = node->getChildren(); AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type."); const auto op = std::static_pointer_cast<OperatorTensor>(node->getOperator()); @@ -348,7 +349,7 @@ Aidge::MemoryManager Aidge::SequentialScheduler::generateMemory(bool incProducer length = op->getOutput(outputIdx)->dims().end()[-1]; count = op->getOutput(outputIdx)->dims().end()[-2]; } - + // Check if wrap around buffer is possible for this node // (re-using previous node outputs memory for this node outputs). // => only if this node is the only child of its parent(s) @@ -356,7 +357,7 @@ Aidge::MemoryManager Aidge::SequentialScheduler::generateMemory(bool incProducer size_t wrapAroundExtra = 0; wrapAroundMemPlane.push_back(nullptr); - // Select the best parent among all allocable nodes for + // Select the best parent among all allocable nodes for // reallocation, which is the one with most memory (in order // to minimize the reallocation size). 
IOIndex_t inputIdx = 0; @@ -427,7 +428,7 @@ void Aidge::SequentialScheduler::connectInputs(std::vector<std::shared_ptr<Aidge // Assert that the number of input data producers corresponds to the number of data input assert(data.size() == inputNodes.size() && "Scheduler connectInput error - Inconsistent number of graph inputs and inputs passed to the graph"); - + for (std::size_t i = 0; i < data.size(); ++i){ // TODO : maybe shallow copy instead of deepcopy inputNodes[i].first->getOperator()->setInput(inputNodes[i].second, data[i]); @@ -436,7 +437,7 @@ void Aidge::SequentialScheduler::connectInputs(std::vector<std::shared_ptr<Aidge void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::vector<std::shared_ptr<Aidge::Tensor>> data) { - + // Collect all data input of the graph (that are producers) if (!data.empty()){ connectInputs(data); @@ -452,16 +453,15 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::ve this->generateScheduling(verbose); } - std::map<std::shared_ptr<Node>, std::string> namePtrTable; - if (verbose) namePtrTable = mGraphView->getRankedNodesName("{0} ({1}#{3})"); + const auto namePtrTable = mGraphView->getRankedNodesName("{0} ({1}#{3})"); size_t cpt = 0; for (const auto& runnable : mStaticSchedule.at(mStaticScheduleStep)) { if (verbose) - fmt::print("run: {}\n", namePtrTable[runnable]); + fmt::print("run: {}\n", namePtrTable.at(runnable)); else drawProgressBar(static_cast<float>(cpt) / static_cast<float>(mStaticSchedule.size()), 50, - (std::string("running ") + namePtrTable[runnable])); + (std::string("running ") + namePtrTable.at(runnable))); const auto tStart = std::chrono::high_resolution_clock::now(); runnable->forward(); const auto tEnd = std::chrono::high_resolution_clock::now(); @@ -477,6 +477,59 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::ve } } +void Aidge::SequentialScheduler::backward(std::vector<std::shared_ptr<Aidge::Tensor>> data, bool 
instanciateGrad, bool verbose) { + // create and set Grad values + if (instanciateGrad) { compile_gradient(mGraphView); } + + const auto& ordered_outputs = mGraphView->getOrderedOutputs(); + AIDGE_ASSERT(ordered_outputs.size() == data.size(), "You must provide the \ + right number of data objects to run the backward function. \ + {} outputs detected for the current GraphView when {} were \ + provided.", ordered_outputs.size(), data.size()); + for (std::size_t i = 0; i < ordered_outputs.size(); ++i) { + const std::shared_ptr<OperatorTensor> op_ = std::dynamic_pointer_cast<OperatorTensor>(ordered_outputs[i].first->getOperator()); + const std::shared_ptr<Tensor> t_grad = op_->getOutput(ordered_outputs[i].second)->grad(); + AIDGE_ASSERT(data[i]->dims() == t_grad->dims(), "Wrong gradient size."); + *t_grad = data[i]->clone(); + } + // Generate scheduling *only if empty* + // If scheduling was already generated (in one or several steps, i.e. one or + // several successive calls to generateScheduling()), do not generate it twice + if (mStaticSchedule.empty()) { + this->generateScheduling(); + } + + // map of node <-> info to display with verbose + const auto namePtrTable = mGraphView->getRankedNodesName("{0} ({1}#{3})"); + + // Clear previous scheduling results + mScheduling.clear(); + + std::size_t cpt = 0; + // run scheduled operators in reverse order + const auto& runnableList = mStaticSchedule.at(mStaticScheduleStep); + for (auto runnable = runnableList.crbegin(); runnable != runnableList.crend(); ++runnable) { + if (verbose) + fmt::print("run: {}\n", namePtrTable.at(*runnable)); + else + drawProgressBar(static_cast<float>(cpt) / static_cast<float>(mStaticSchedule.size()), 50, + (std::string("running ") + namePtrTable.at(*runnable))); + const auto tStart = std::chrono::high_resolution_clock::now(); + (*runnable)->backward(); + const auto tEnd = std::chrono::high_resolution_clock::now(); + mScheduling.push_back(SchedulingElement(*runnable, tStart, tEnd)); + cpt++; + }
if (!verbose) drawProgressBar(1.0, 50, " "); + fmt::print("\n"); + + ++mStaticScheduleStep; + if (mStaticScheduleStep == mStaticSchedule.size()) { + mStaticScheduleStep = 0; + } +} + + void Aidge::SequentialScheduler::saveSchedulingDiagram(const std::string& fileName) const { auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((fileName + ".mmd").c_str(), "w"), &std::fclose); @@ -542,7 +595,7 @@ Aidge::NbElts_t Aidge::SequentialScheduler::getNbAvailableData(const std::shared const auto upperInput = upperNode->inputs()[nodeInputIdx]; if (upperInput.first) { return upperInput.first->getOperator()->getNbProducedData(upperInput.second); - } + } } ++nodeInputIdx; } diff --git a/src/utils/Log.cpp b/src/utils/Log.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7649809339f4ebf716a7287f5744fb94a5b67ce2 --- /dev/null +++ b/src/utils/Log.cpp @@ -0,0 +1,59 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include "aidge/utils/Log.hpp" +#include "aidge/utils/ErrorHandling.hpp" + +#include <fmt/color.h> +#include <fmt/chrono.h> + +Aidge::Log::Level Aidge::Log::mConsoleLevel = Info; +Aidge::Log::Level Aidge::Log::mFileLevel = Debug; +std::string Aidge::Log::mFileName = "aidge.log"; +std::unique_ptr<FILE, decltype(&std::fclose)> Aidge::Log::mFile {nullptr, nullptr}; + +void Aidge::Log::log(Level level, const std::string& msg) { + if (level >= mConsoleLevel) { + // Apply log level style only for console. + // Styles that were already applied to msg with fmt are kept also in + // the log file. + const auto modifier + = (level == Debug) ? 
fmt::fg(fmt::color::gray) + : (level == Notice) ? fmt::fg(fmt::color::light_yellow) + : (level == Warn) ? fmt::fg(fmt::color::orange) + : (level == Error) ? fmt::fg(fmt::color::red) + : (level == Fatal) ? fmt::bg(fmt::color::red) + : fmt::text_style(); + + fmt::println("{}", fmt::styled(msg, modifier)); + } + + if (level >= mFileLevel && !mFileName.empty()) { + if (!mFile) { + initFile(mFileName); + } + + fmt::println(mFile.get(), msg); + } +} + +void Aidge::Log::initFile(const std::string& fileName) { + mFile = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen(fileName.c_str(), "a"), &std::fclose); + + if (!mFile) { + mFileName.clear(); // prevents AIDGE_THROW_OR_ABORT() to try to log into file + AIDGE_THROW_OR_ABORT(std::runtime_error, + "Could not create log file: {}", fileName); + } + + const std::time_t t = std::time(nullptr); + fmt::println(mFile.get(), "###### {:%Y-%m-%d %H:%M:%S} ######", fmt::localtime(t)); +} diff --git a/src/utils/Random.cpp b/src/utils/Random.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0c3dc61df54e16d129638c66b4c245d6141e819c --- /dev/null +++ b/src/utils/Random.cpp @@ -0,0 +1,22 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include "aidge/utils/Random.hpp" + +#include <random> // normal_distribution, uniform_real_distribution + +std::mt19937 Aidge::Random::Generator::generator{std::random_device{}()}; +unsigned int Aidge::Random::Generator::seed = 0; + +void Aidge::Random::Generator::setSeed(unsigned int new_seed) { + seed = new_seed; + generator.seed(seed); +} diff --git a/unit_tests/backend/Test_TensorImpl.cpp b/unit_tests/backend/Test_TensorImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..43e25092a0f502698bbff7b0142969154f2cb0b0 --- /dev/null +++ b/unit_tests/backend/Test_TensorImpl.cpp @@ -0,0 +1,61 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <array> +#include <cstddef> +#include <cstdint> //std::uint16_t +#include <random> +#include <vector> + +#include <catch2/catch_test_macros.hpp> + +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/TensorUtils.hpp" +#include "aidge/backend/cpu/data/TensorImpl.hpp" + +using namespace Aidge; + +TEST_CASE("[backend/cpu/data] Tensor", "[TensorImpl]") { + Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; + + SECTION("Access to array") { + x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; + REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1); + REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8); + } +} + +TEST_CASE("Tensor fill", "[TensorImpl][fill]") { + SECTION("Instantiate batches independantly") { + // initialization with 0s + std::shared_ptr<Tensor> concatenatedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{}); + //concatenatedTensor->print(); + + std::shared_ptr<Tensor> myTensor1 = std::make_shared<Tensor>(Array1D<int, 5>{{1,2,3,4,5}}); + std::shared_ptr<Tensor> myTensor2 = std::make_shared<Tensor>(Array1D<int, 5>{{6,7,8,9,10}}); + std::shared_ptr<Tensor> myTensor3 = std::make_shared<Tensor>(Array1D<int, 5>{{11,12,13,14,15}}); + + // use copy function from implementation + concatenatedTensor->getImpl()->copy(myTensor1->getImpl()->rawPtr(), 5, 0); + concatenatedTensor->getImpl()->copy(myTensor2->getImpl()->rawPtr(), 5, 5); + concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 10); + // concatenatedTensor->print(); + + std::shared_ptr<Tensor> expectedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{ + {{1,2,3,4,5}, + {6,7,8,9,10}, + {11,12,13,14,15}} + }); + // expectedTensor->print(); + + REQUIRE(*concatenatedTensor == *expectedTensor); + } +} diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp new file mode 100644 
index 0000000000000000000000000000000000000000..655fd725e9d7d913d24c6552571ae3b91e3605b4 --- /dev/null +++ b/unit_tests/data/Test_Tensor.cpp @@ -0,0 +1,412 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <array> +#include <cstddef> // std::size_t +#include <cstdint> // std::uint8_t, std::uint16_t, std::int32_t +#include <numeric> // std::accumulate, std::inner_product +#include <functional> // std::multiplies +#include <random> // std::random_device, std::mt19937, + // std::uniform_int_distribution, std::uniform_real_distribution +#include <set> +#include <string> +#include <vector> + +#include <catch2/catch_test_macros.hpp> + +#include "aidge/backend/cpu/data/TensorImpl.hpp" +#include "aidge/data/Data.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/ArrayHelpers.hpp" +#include "aidge/utils/TensorUtils.hpp" +#include "aidge/utils/Types.h" + +namespace Aidge { + +TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") { + SECTION("Default constructor") { + Tensor T_default{}; + REQUIRE(( + (T_default.dataType() == DataType::Float32) && + (T_default.size() == 1) && + (T_default.dims() == std::vector<DimSize_t>({})) && + (T_default.strides() == std::vector<DimSize_t>({1})) && + (T_default.getImpl() == nullptr) && + (T_default.grad() == nullptr) && + (T_default.isContiguous() == true) + )); + } + SECTION("scalar constructor") { + Tensor T; + REQUIRE_NOTHROW(T = Tensor(std::int32_t(20))); + REQUIRE(( + (T.dataType() == DataType::Int32) && + (T.size() == 1) && + (T.dims() == std::vector<DimSize_t>({})) && + (T.strides() == std::vector<DimSize_t>({1})) 
&& + (T.getImpl() != nullptr) && + (T.grad() == nullptr) && + (T.isContiguous() == true) + )); + } + SECTION("dim constructor") { + const std::vector<DimSize_t> Tdims = {1,2,3,4,5,6,7}; + Tensor T; + REQUIRE_NOTHROW(T = Tensor(Tdims)); + REQUIRE(( + (T.dataType() == DataType::Float32) && + (T.size() == std::accumulate(Tdims.cbegin(), Tdims.cend(), DimSize_t(1), std::multiplies<DimSize_t>())) && + (T.dims() == Tdims) && + (T.strides() == std::vector<DimSize_t>({5040,2520,840,210,42,7,1})) && + (T.getImpl() == nullptr) && + (T.grad() == nullptr) && + (T.isContiguous() == true) + )); + } + SECTION("TensorUtils, constructor from const arrays") { + Tensor T; + // Construction from different types and sizes + + // Set an already constructed Tensor + REQUIRE_NOTHROW(T = Array1D<int, 2>{{1, 2}}); + REQUIRE(( + (T.dataType() == DataType::Int32) && + (T.size() == 2) && + (T.dims() == std::vector<DimSize_t>({2})) && + (T.strides() == std::vector<DimSize_t>({1})) && + (T.getImpl() != nullptr) && + (T.grad() == nullptr) && + (T.isContiguous() == true) + )); + + // Change dims + REQUIRE_NOTHROW(T = Array2D<int, 2, 2>{{{1, 2}, {3, 4}}}); + // Change data types + REQUIRE_NOTHROW(T = Array3D<std::uint8_t, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}); + REQUIRE(( + (T.dataType() == DataType::UInt8) && + (T.size() == 8) && + (T.dims() == std::vector<DimSize_t>({2,2,2})) && + (T.strides() == std::vector<DimSize_t>({4,2,1})) && + (T.getImpl() != nullptr) && + (T.grad() == nullptr) && + (T.isContiguous() == true) + )); + REQUIRE_NOTHROW(T = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}); + REQUIRE_NOTHROW(T = Array3D<float, 2, 2, 2>{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}}); + REQUIRE_NOTHROW(T = Array3D<double, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}}); + + // Change dims + REQUIRE_NOTHROW(T = Array4D<int, 2, 2, 2, 2>{{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}, + {{{9,10}, {11,12}}, {{13,14},{15,16}}}}}); + REQUIRE(( + (T.dataType() == 
DataType::Int32) && + (T.size() == 16) && + (T.dims() == std::vector<DimSize_t>({2,2,2,2})) && + (T.strides() == std::vector<DimSize_t>({8,4,2,1})) && + (T.getImpl() != nullptr) && + (T.grad() == nullptr) && + (T.isContiguous() == true) + )); + } + SECTION("copy constructor / copy assignment operator") { + + } + SECTION("move constructor / move assignment operator") { + + } + SECTION("prototype") { + constexpr std::uint16_t NBTRIALS = 10; + + // Create random number generators + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<std::size_t> dimsDist(1, 10); + std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5); + std::uniform_real_distribution<float> valueDist(0.001f, 1.0f); + + for (std::size_t trial = 0; trial < NBTRIALS; ++trial) { + std::vector<std::size_t> Tdims; + const std::size_t Tsize = nbDimsDist(gen); + for (std::size_t i = 0; i < Tsize; ++i) { + Tdims.push_back(dimsDist(gen)); + } + Tensor T(Tdims); + + // file the tensor + std::unique_ptr<float[]> array0(new float[T.size()]); + for (std::size_t i = 0; i < T.size(); ++i) { + array0[i] = valueDist(gen); + } + T.setBackend("cpu"); + T.getImpl() -> setRawPtr(array0.get(), T.size()); + + Tensor Tclone; + REQUIRE_NOTHROW(Tclone = T.clone()); + REQUIRE(( + (T.dataType() == Tclone.dataType()) && + (T.size() == Tclone.size()) && + (T.dims() == Tclone.dims()) && + (T.strides() == Tclone.strides()) && + (T.getImpl() != Tclone.getImpl()) && + (Tclone.grad() == nullptr) && + (Tclone.isContiguous() == true) + )); + REQUIRE(Tclone == T); + } + } +} + +TEST_CASE("[core/data] Tensor(getter/setter)", "[Tensor][Getter][Setter]") { + constexpr std::uint16_t NBTRIALS = 10; + + // Create random number generators + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<std::size_t> dimsDist(1, 10); + std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5); + std::uniform_real_distribution<float> valueDist(0.001f, 1.0f); + + for (std::size_t trial = 0; trial 
< NBTRIALS; ++trial) { + std::vector<std::size_t> Tdims; + const std::size_t Tsize = nbDimsDist(gen); + for (std::size_t i = 0; i < Tsize; ++i) { + Tdims.push_back(dimsDist(gen)); + } + + // create Tensor + Tensor T(Tdims); + // compute stride + std::vector<std::size_t> Tstrides(Tdims.size(), 1); + std::size_t i = Tdims.size() - 1; + while (i-- > 0) { + Tstrides[i] = Tstrides[i+1]*Tdims[i+1]; + } + + ///////////////// + // dimensions + // nbDims(), dims(), size() + REQUIRE(T.nbDims() == Tdims.size()); + + REQUIRE(T.dims() == Tdims); + + std::size_t trueSize = std::accumulate(Tdims.cbegin(), Tdims.cend(), 1, std::multiplies<std::size_t>()); + REQUIRE(T.size() == trueSize); + + ///////////////// + // implementation + // getImpl(), setImpl(), hasImpl() + REQUIRE(T.hasImpl() == false); + std::shared_ptr<TensorImpl_cpu<float>> tensorImpl = std::make_shared<TensorImpl_cpu<float>>(0, Tdims); + + T.setImpl(tensorImpl); + REQUIRE(T.getImpl() == tensorImpl); + REQUIRE(T.hasImpl() == true); + + // isContiguous(), stride(), + REQUIRE(T.isContiguous()); + REQUIRE(T.strides() == Tstrides); + + // file the tensor + std::unique_ptr<float[]> array0(new float[T.size()]); + for (std::size_t i = 0; i < T.size(); ++i) { + array0[i] = valueDist(gen); + } + tensorImpl -> setRawPtr(array0.get(), T.size()); + + // getCoord(), getIdx(), getStorageIdx() + std::vector<DimSize_t> Tdims_copy = Tdims; + for (auto& val : Tdims_copy) { + val = std::min(DimSize_t(2), std::max(DimSize_t(0), val - 1)); + } + DimSize_t true_flatid = std::inner_product(Tdims_copy.cbegin(), Tdims_copy.cend(), Tstrides.cbegin(), DimSize_t(0)); + + REQUIRE(T.getCoord(true_flatid) == Tdims_copy); + REQUIRE(T.getIdx(Tdims_copy) == true_flatid); + REQUIRE(T.getStorageIdx(Tdims_copy) == true_flatid); // Tensor is not a view + + // set(vector), set(size_t), get(vector), get(size_t), getImplOffset() + REQUIRE_NOTHROW(T.set<float>(Tdims_copy, 50.0f)); + REQUIRE(T.get<float>(Tdims_copy) == 50.0f); + + 
REQUIRE_NOTHROW(T.set<float>(true_flatid, 40.0f)); + REQUIRE(T.get<float>(true_flatid) == 40.0f); + REQUIRE(T.getImplOffset() == 0); + + + ////////////// + // backend + // getAvailableBackends() + REQUIRE(Tensor::getAvailableBackends() == std::set<std::string>({"cpu"})); + + // setBackend() + REQUIRE_NOTHROW(T.setBackend("cpu", 0)); + + // setDataType(), dataType() + REQUIRE_NOTHROW(T.setDataType(DataType::Int16)); + REQUIRE(T.dataType() == DataType::Int16); + } +} +TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") { + // extract, makeContiguous + // empty + constexpr std::uint16_t NBTRIALS = 10; + + // Create random number generators + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<std::size_t> dimsDist(1, 10); + std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5); + std::uniform_real_distribution<float> valueDist(0.001f, 1.0f); + // zeros, resize + SECTION("zeros") { + Tensor T; + for (std::size_t trial = 0; trial < NBTRIALS; ++trial) { + std::vector<std::size_t> Tdims; + const std::size_t Tsize = nbDimsDist(gen); + for (std::size_t i = 0; i < Tsize; ++i) { + Tdims.push_back(dimsDist(gen)); + } + T.resize(Tdims); + + // file the tensor + std::unique_ptr<float[]> array0(new float[T.size()]); + for (std::size_t i = 0; i < T.size(); ++i) { + array0[i] = valueDist(gen); + } + T.setBackend("cpu"); + T.getImpl() -> setRawPtr(array0.get(), T.size()); + float* res = static_cast<float*>(T.getImpl()->hostPtr()); + for (std::size_t i = 0; i < T.size(); ++i) { + REQUIRE(res[i] == array0[i]); + } + + T.zeros(); + res = static_cast<float*>(T.getImpl()->hostPtr()); + for (std::size_t i = 0; i < T.size(); ++i) { + REQUIRE(res[i] == 0.0f); + } + } + } + + SECTION("Tensor extract") { + bool equal; + + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // create Tensor + const std::size_t nb_dims = 3; + const std::size_t dim0 = dimsDist(gen) + 1; // dim0 >= 2 + const std::size_t dim1 = dimsDist(gen) + 1; + 
const std::size_t dim2 = dimsDist(gen) + 1; + std::vector<std::size_t> dims = {dim0, dim1, dim2}; + std::unique_ptr<int[]> array0(new int[dim0*dim1*dim2]); + for (std::size_t i = 0; i < dim0; ++i) { + for (std::size_t j = 0; j < dim1; ++j) { + for (std::size_t k = 0; k < dim2; ++k) { + array0[((i * dim1) + j)*dim2 + k] = valueDist(gen); + } + } + } + Tensor x{dims}; + x.setDataType(DataType::Int32); + x.setBackend("cpu"); + Tensor y; + Tensor y0; + Tensor y1; + Tensor y2; + Tensor y3; + x.getImpl()->setRawPtr(array0.get(), dim0*dim1*dim2); + REQUIRE(x.isContiguous()); + + //////////////// + // extract contiguous Tensor slice given start coordinates + // the whole Tensor + REQUIRE_NOTHROW(y0 = x.extract({})); + REQUIRE(y0 == x); + int* y0_res = static_cast<int*>(y0.getImpl()->hostPtr()); + equal = true; + for (std::size_t i = 0; i < dim0*dim1*dim2; ++i) { + equal &= (y0_res[i] == array0[i]); + } + REQUIRE(equal); + REQUIRE(y0.getImpl() == x.getImpl()); + REQUIRE(y0.isContiguous()); + + // Tensor - 1-D + REQUIRE_NOTHROW(y1 = x.extract({dim0 - 2})); + int* y1_res = static_cast<int*>(y1.getImpl()->hostPtr()); + equal = true; + for (std::size_t i = 0; i < dim1*dim2; ++i) { + equal &= (y1_res[i] == array0[(dim0-2)*dim1*dim2 + i]); + } + REQUIRE(equal); + REQUIRE(y1.getImpl() == x.getImpl()); + REQUIRE(y1.isContiguous()); + + // Tensor - 2-D + REQUIRE_NOTHROW(y2 = x.extract({dim0 - 2, dim1 - 2})); + int* y2_res = static_cast<int*>(y2.getImpl()->hostPtr()); + equal = true; + for (std::size_t i = 0; i < dim2; ++i) { + equal &= (y2_res[i] == array0[(((dim0 - 2) * dim1) + (dim1 - 2))*dim2 + i]); + } + REQUIRE(equal); + REQUIRE(y2.getImpl() == x.getImpl()); + REQUIRE(y2.isContiguous()); + + // Tensor - 3-D => scalar + REQUIRE_NOTHROW(y3 = x.extract({dim0 - 2, dim1 - 2, dim2 - 2})); + int* y3_res = static_cast<int*>(y3.getImpl()->hostPtr()); + REQUIRE(y3_res[0] == array0[(((dim0 - 2) * dim1) + (dim1 - 2))*dim2 + dim2 - 2]); + REQUIRE(y3.getImpl() == x.getImpl()); + 
REQUIRE(y3.isContiguous()); + + // throw an error + REQUIRE_THROWS(y = x.extract({0, dim1, 0})); + + ///////////////// + // extract Tensor slice given start coordinates and dimension + REQUIRE_NOTHROW(y = x.extract({0, 0, 1}, {dim0-1, 1, dim2-1})); + REQUIRE(y.getImpl() == x.getImpl()); // shared implem + REQUIRE(!y.isContiguous()); + + Tensor yClone = y.clone(); // when copying data, they are contiguous in memory + REQUIRE(yClone.isContiguous()); + // int yTruth[2][1][1] = + REQUIRE(approxEq<int>(yClone, y, 0.0f, 0.0f)); + } + } + + // print, toString, + SECTION("Pretty printing for debug") { + Tensor x{}; + // Empty Tensor + REQUIRE_THROWS(x.print()); + // scalar + x = Tensor(42); + REQUIRE_NOTHROW(x.print()); + // 1-D Tensors + x = Array1D<int, 1>{{1}}; + REQUIRE_NOTHROW(x.print()); + x = Array1D<int, 6>{{1,2,3,4,5,6}}; + REQUIRE_NOTHROW(x.print()); + // 2-D Tensors + x = Array2D<int, 3, 2>{{{1, 2}, {3, 4}, {5, 6}}}; + REQUIRE_NOTHROW(x.print()); + // +2-D Tensors + x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; + REQUIRE_NOTHROW(x.print()); + x = Array4D<int, 2, 2, 2, 2>{{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},{{{11, 12}, {13, 14}}, {{15, 16}, {17, 18}}}}}; + REQUIRE_NOTHROW(x.print()); + } +} + +} // namespace Aidge diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp deleted file mode 100644 index e734fcd7770483dbcd9f594847ffd4297c071e68..0000000000000000000000000000000000000000 --- a/unit_tests/data/Test_TensorImpl.cpp +++ /dev/null @@ -1,127 +0,0 @@ -/******************************************************************************** - * Copyright (c) 2023 CEA-List - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License 2.0 which is available at - * http://www.eclipse.org/legal/epl-2.0. 
- * - * SPDX-License-Identifier: EPL-2.0 - * - ********************************************************************************/ - -#include <array> - -#include <catch2/catch_test_macros.hpp> - -#include "aidge/data/Tensor.hpp" -#include "aidge/utils/TensorUtils.hpp" -#include "aidge/backend/cpu/data/TensorImpl.hpp" - -using namespace Aidge; - -TEST_CASE("[core/data] Tensor creation") { - SECTION("from const array") { - Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; - - Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; - - Tensor xFloat = - Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}}; - - SECTION("Tensor features") { - REQUIRE(x.nbDims() == 3); - REQUIRE(x.dims()[0] == 2); - REQUIRE(x.dims()[1] == 2); - REQUIRE(x.dims()[2] == 2); - REQUIRE(x.size() == 8); - } - - SECTION("Access to array") { - REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1); - REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8); - } - - SECTION("get function") { - REQUIRE(x.get<int>({0, 0, 0}) == 1); - REQUIRE(x.get<int>({0, 0, 1}) == 2); - REQUIRE(x.get<int>({0, 1, 1}) == 4); - REQUIRE(x.get<int>({1, 1, 0}) == 7); - x.set<int>({1, 1, 1}, 36); - REQUIRE(x.get<int>({1, 1, 1}) == 36); - } - - SECTION("Pretty printing for debug") { REQUIRE_NOTHROW(x.print()); } - - SECTION("Tensor (in)equality") { - REQUIRE(x == xCopy); - REQUIRE_FALSE(x == xFloat); - } - } -} - -TEST_CASE("Tensor fill") { - SECTION("Instantiate batches independantly") { - // initialization with 0s - std::shared_ptr<Tensor> concatenatedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{}); - //concatenatedTensor->print(); - - std::shared_ptr<Tensor> myTensor1 = std::make_shared<Tensor>(Array1D<int, 5>{{1,2,3,4,5}}); - std::shared_ptr<Tensor> myTensor2 = std::make_shared<Tensor>(Array1D<int, 5>{{6,7,8,9,10}}); - std::shared_ptr<Tensor> myTensor3 = std::make_shared<Tensor>(Array1D<int, 5>{{11,12,13,14,15}}); - - // use copy function from 
implementation - concatenatedTensor->getImpl()->copy(myTensor1->getImpl()->rawPtr(), 5, 0); - concatenatedTensor->getImpl()->copy(myTensor2->getImpl()->rawPtr(), 5, 5); - concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 10); - // concatenatedTensor->print(); - - std::shared_ptr<Tensor> expectedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{ - {{1,2,3,4,5}, - {6,7,8,9,10}, - {11,12,13,14,15}} - }); - // expectedTensor->print(); - - REQUIRE(*concatenatedTensor == *expectedTensor); - } -} - -TEST_CASE("[core/data] Tensor methods","[Tensor]") { - Tensor x = Array3D<int, 2, 2, 2>{{ - {{1, 2}, - {3, 4}}, - {{5, 6}, - {7, 8}} - }}; - - Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; - - Tensor xFloat = - Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}}; - - SECTION("Tensor sharing") { - Tensor xCopyCtor(x); - REQUIRE(xCopyCtor.getImpl() == x.getImpl()); - - Tensor xEqOp = x; - REQUIRE(xEqOp.getImpl() == x.getImpl()); - - Tensor xCloned = x.clone(); - REQUIRE(xCloned.getImpl() != x.getImpl()); - REQUIRE(xCloned == x); - } - - SECTION("Tensor extract") { - Tensor y = x.extract({0, 1}); - REQUIRE(y.getImpl() == x.getImpl()); - REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}})); - REQUIRE(y.isContiguous()); - - Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1}); - REQUIRE(y2.getImpl() == x.getImpl()); - REQUIRE(!y2.isContiguous()); - Tensor y3 = y2.clone(); - REQUIRE(y3.isContiguous()); - REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}})); - } -} diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp index bcd6d0f4cd9ba32ee4318188343b7e6360670d3b..a62b9a8602b494f26fb47061b899eaba41129a1f 100644 --- a/unit_tests/graphRegex/Test_GraphRegex.cpp +++ b/unit_tests/graphRegex/Test_GraphRegex.cpp @@ -18,6 +18,32 @@ using namespace Aidge; TEST_CASE("GraphRegexUser") { + + SECTION("Match using custom lambda") { + + std::shared_ptr<GraphView> g1 = 
std::make_shared<GraphView>("TestGraph"); + std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c"); + std::shared_ptr<Node> fc = GenericOperator("FC", 1, 0, 1, "c1"); + std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2"); + std::shared_ptr<Node> fc2 = GenericOperator("FC", 1, 0, 1, "c3"); + + g1->add(conv); + g1->addChild(fc, "c"); + g1->addChild(conv2, "c1"); + g1->addChild(fc2, "c2"); + + /// + std::shared_ptr<GraphRegex> sut = std::make_shared<GraphRegex>(); + sut->setNodeKey("C",+[](NodePtr NodeOp){return NodeOp->type() == "FC";}); + + sut->setNodeKey("A","C($)==True"); + sut->addQuery("A"); + auto match = sut->match(g1); + REQUIRE(match.size() == 2); + + } + + SECTION("INIT") { const std::string query = "Conv->FC"; diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp index 3ff2a3c6c7422c1ead53a629670975a25e54f7d7..cd42791e0db1d95469bdd414cab94f1c6e8fea17 100644 --- a/unit_tests/operator/Test_MetaOperator.cpp +++ b/unit_tests/operator/Test_MetaOperator.cpp @@ -21,7 +21,7 @@ using namespace Aidge; -TEST_CASE("[core/operators] MetaOperator", "[Operator]") { +TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") { SECTION("PaddedConv") { auto op = PaddedConv(1, 3, {3, 3}, "padded_conv", {1, 1}, {1, 1, 1, 1}); @@ -108,21 +108,21 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator]") { // Weights X myLSTM->input(1).first->getOperator()->setOutput(0, myInitW); - myLSTM->input(2).first->getOperator()->setOutput(0, myInitW); - myLSTM->input(3).first->getOperator()->setOutput(0, myInitW); - myLSTM->input(4).first->getOperator()->setOutput(0, myInitW); + op->setInput(2, myInitW); + op->setInput(3, myInitW); + op->setInput(4, myInitW); // Weights H - myLSTM->input(5).first->getOperator()->setOutput(0, myInitR); - myLSTM->input(6).first->getOperator()->setOutput(0, myInitR); - myLSTM->input(7).first->getOperator()->setOutput(0, myInitR); - 
myLSTM->input(8).first->getOperator()->setOutput(0, myInitR); + op->setInput(5, myInitR); + op->setInput(6, myInitR); + op->setInput(7, myInitR); + op->setInput(8, myInitR); auto g = getConnectedGraphView(myLSTM); g->save("lstm_before_expand", true, true); expandMetaOps(g); g->setRootNode(pop); - REQUIRE(g->getRootNode() == pop); + REQUIRE(g->rootNode() == pop); g->save("lstm_expanded", true, true); REQUIRE(g->getNodes().size() == 41); diff --git a/unit_tests/utils/Test_Log.cpp b/unit_tests/utils/Test_Log.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3d8e672b84f5055a12185c3684c34bd888f0545b --- /dev/null +++ b/unit_tests/utils/Test_Log.cpp @@ -0,0 +1,31 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> + +#include "aidge/utils/Log.hpp" + +#include <fmt/color.h> + +using namespace Aidge; + +TEST_CASE("[core/log] Log") { + SECTION("TestLog") { + Log::setConsoleLevel(Log::Debug); + Log::debug("debug"); + Log::debug("{}", fmt::styled("green debug", fmt::fg(fmt::color::green))); + Log::info("info"); + Log::notice("notice"); + Log::warn("warn"); + Log::error("error"); + Log::fatal("fatal"); + } +} diff --git a/version.txt b/version.txt index 17e51c385ea382d4f2ef124b7032c1604845622d..0ea3a944b399d25f7e1b8fe684d754eb8da9fe7f 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.1.1 +0.2.0