From 102ba14ff6792fabd56b1ea2c2ce3c73432351cc Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Thu, 29 Feb 2024 15:40:51 +0000
Subject: [PATCH] Update every bound operator to be registrable.

---
 include/aidge/operator/Add.hpp                   | 4 ++--
 include/aidge/operator/AvgPooling.hpp            | 6 +++---
 include/aidge/operator/BatchNorm.hpp             | 6 +++---
 include/aidge/operator/Concat.hpp                | 4 ++--
 include/aidge/operator/ConvDepthWise.hpp         | 4 ++--
 include/aidge/operator/Div.hpp                   | 4 ++--
 include/aidge/operator/Erf.hpp                   | 4 ++--
 include/aidge/operator/FC.hpp                    | 6 +++---
 include/aidge/operator/Gather.hpp                | 4 ++--
 include/aidge/operator/LeakyReLU.hpp             | 3 ++-
 include/aidge/operator/MatMul.hpp                | 4 ++--
 include/aidge/operator/MaxPooling.hpp            | 4 ++--
 include/aidge/operator/Mul.hpp                   | 6 +++---
 include/aidge/operator/Pad.hpp                   | 4 ++--
 include/aidge/operator/Pow.hpp                   | 6 +++---
 include/aidge/operator/ReLU.hpp                  | 6 +++---
 include/aidge/operator/ReduceMean.hpp            | 4 ++--
 include/aidge/operator/Reshape.hpp               | 4 ++--
 include/aidge/operator/Slice.hpp                 | 4 ++--
 include/aidge/operator/Softmax.hpp               | 4 ++--
 include/aidge/operator/Sqrt.hpp                  | 4 ++--
 include/aidge/operator/Sub.hpp                   | 6 +++---
 include/aidge/operator/Transpose.hpp             | 4 ++--
 python_binding/operator/pybind_Add.cpp           | 2 +-
 python_binding/operator/pybind_AvgPooling.cpp    | 5 +++--
 python_binding/operator/pybind_BatchNorm.cpp     | 7 +++----
 python_binding/operator/pybind_Concat.cpp        | 1 +
 python_binding/operator/pybind_ConvDepthWise.cpp | 5 +++--
 python_binding/operator/pybind_Div.cpp           | 2 +-
 python_binding/operator/pybind_Erf.cpp           | 2 +-
 python_binding/operator/pybind_FC.cpp            | 2 +-
 python_binding/operator/pybind_Gather.cpp        | 2 +-
 python_binding/operator/pybind_LeakyReLU.cpp     | 2 +-
 python_binding/operator/pybind_Matmul.cpp        | 2 +-
 python_binding/operator/pybind_MaxPooling.cpp    | 3 ++-
 python_binding/operator/pybind_Mul.cpp           | 2 +-
 python_binding/operator/pybind_Pad.cpp           | 5 +++--
 python_binding/operator/pybind_Pow.cpp           | 1 +
 python_binding/operator/pybind_ReLU.cpp          | 1 +
 python_binding/operator/pybind_ReduceMean.cpp    | 4 +++-
 python_binding/operator/pybind_Reshape.cpp       | 2 +-
 python_binding/operator/pybind_Slice.cpp         | 2 +-
 python_binding/operator/pybind_Softmax.cpp       | 2 +-
 python_binding/operator/pybind_Sqrt.cpp          | 2 +-
 python_binding/operator/pybind_Sub.cpp           | 2 +-
 python_binding/operator/pybind_Transpose.cpp     | 3 +++
 46 files changed, 89 insertions(+), 77 deletions(-)

diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 97a4ef69b..58ff87cf7 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
 class Add_Op : public OperatorTensor,
-    public Registrable<Add_Op, std::string, std::unique_ptr<OperatorImpl>(const Add_Op&)> {
+    public Registrable<Add_Op, std::string, std::shared_ptr<OperatorImpl>(const Add_Op&)> {
 public:
     static const std::string Type;
@@ -71,7 +71,7 @@ public:
     void computeOutputDims() override final;
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Add_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Add_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 5066cb78f..3b8467068 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -30,7 +30,7 @@ enum class AvgPoolingAttr { StrideDims, KernelDims };
 template <DimIdx_t DIM>
 class AvgPooling_Op : public OperatorTensor,
-    public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
+    public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
     public StaticAttributes<AvgPoolingAttr,
                             std::array<DimSize_t, DIM>,
                             std::array<DimSize_t, DIM>> {
@@ -137,7 +137,7 @@ public:
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
@@ -177,4 +177,4 @@ const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
                                                                 "KernelDims"};
 }
-#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 4a0f40c03..7d57a9033 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -30,7 +30,7 @@ enum class BatchNormAttr { Epsilon, Momentum };
 template <DimIdx_t DIM>
 class BatchNorm_Op : public OperatorTensor,
-    public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
+    public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
     public StaticAttributes<BatchNormAttr, float, float> {
 public:
     static const std::string Type;
@@ -95,7 +95,7 @@ public:
     }
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
         // By default, automatically set backend for scale, shift, mean and variance
@@ -136,4 +136,4 @@ template <>
 const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
 }
-#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
+#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 62a954010..89f113f68 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -29,7 +29,7 @@ namespace Aidge {
 enum class ConcatAttr { Axis };
 class Concat_Op : public OperatorTensor,
-    public Registrable<Concat_Op, std::string, std::unique_ptr<OperatorImpl>(const Concat_Op&)>,
+    public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)>,
     public StaticAttributes<ConcatAttr, DimSize_t> {
 public:
     static const std::string Type;
@@ -108,7 +108,7 @@ public:
     }
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Concat_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Concat_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index a3b537ba6..b9dabd7e2 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -30,7 +30,7 @@ enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
-    public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
+    public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
     public StaticAttributes<ConvDepthWiseAttr,
                             std::array<DimSize_t, DIM>,
                             std::array<DimSize_t, DIM>,
@@ -168,7 +168,7 @@ public:
     }
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
         // By default, automatically set backend for weight and bias inputs
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index a033c6920..b998e9ee2 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -26,7 +26,7 @@ namespace Aidge {
 class Div_Op : public OperatorTensor,
-    public Registrable<Div_Op, std::string, std::unique_ptr<OperatorImpl>(const Div_Op&)> {
+    public Registrable<Div_Op, std::string, std::shared_ptr<OperatorImpl>(const Div_Op&)> {
 public:
     static const std::string Type;
@@ -55,7 +55,7 @@ public:
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Div_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Div_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index 6995cea5e..895d58a87 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -27,7 +27,7 @@ namespace Aidge {
 class Erf_Op : public OperatorTensor,
-    public Registrable<Erf_Op, std::string, std::unique_ptr<OperatorImpl>(const Erf_Op&)> {
+    public Registrable<Erf_Op, std::string, std::shared_ptr<OperatorImpl>(const Erf_Op&)> {
 public:
     static const std::string Type;
@@ -52,7 +52,7 @@ public:
     }
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Erf_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Erf_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index f6d81b578..3e4ffabc2 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -32,7 +32,7 @@ enum class FCAttr { OutChannels, NoBias };
 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op, std::string,
-                                 std::unique_ptr<OperatorImpl>(const FC_Op &)>,
+                                 std::shared_ptr<OperatorImpl>(const FC_Op &)>,
               public StaticAttributes<FCAttr, DimSize_t, bool> {
 public:
     static const std::string Type;
@@ -97,7 +97,7 @@ public:
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<FC_Op>::create(name)(*this);
+        SET_IMPL_MACRO(FC_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
         // By default, automatically set backend for weight and bias inputs
@@ -128,4 +128,4 @@ const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
                                                         "NoBias"};
 }
-#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index f6647f991..1e5957e83 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -32,7 +32,7 @@ enum class GatherAttr { Indices, GatheredShape, Axis };
 class Gather_Op : public OperatorTensor,
                   public Registrable<Gather_Op, std::string,
-                                     std::unique_ptr<OperatorImpl>(const Gather_Op&)>,
+                                     std::shared_ptr<OperatorImpl>(const Gather_Op&)>,
                   public StaticAttributes<GatherAttr, std::vector<std::int64_t>, std::vector<DimSize_t>, std::int64_t> {
 public:
@@ -72,7 +72,7 @@ public:
     void computeOutputDims() override final;
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Gather_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Gather_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 5976f1d88..d0fd2733a 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -30,7 +30,7 @@ enum class LeakyReLUAttr {
 };
 class LeakyReLU_Op : public OperatorTensor,
-    public Registrable<LeakyReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
+    public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
     public StaticAttributes<LeakyReLUAttr, float> {
 public:
     static const std::string Type;
@@ -69,6 +69,7 @@ public:
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
+        SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index a011c8666..596aa6346 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -27,7 +27,7 @@ namespace Aidge {
 class MatMul_Op : public OperatorTensor,
                   public Registrable<MatMul_Op, std::string,
-                                     std::unique_ptr<OperatorImpl>(const MatMul_Op &)> {
+                                     std::shared_ptr<OperatorImpl>(const MatMul_Op &)> {
 public:
     static const std::string Type;
@@ -65,7 +65,7 @@ public:
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final {
-        mImpl = Registrar<MatMul_Op>::create(name)(*this);
+        SET_IMPL_MACRO(MatMul_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index b07fa38a4..a256758f9 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -30,7 +30,7 @@ enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 template <DimIdx_t DIM>
 class MaxPooling_Op : public OperatorTensor,
-    public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
+    public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
     public StaticAttributes<MaxPoolingAttr,
                             std::array<DimSize_t, DIM>,
                             std::array<DimSize_t, DIM>,
@@ -105,7 +105,7 @@ public:
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 8758021a9..753040788 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -29,7 +29,7 @@ namespace Aidge {
  * @brief Tensor element-wise multiplication.
  */
 class Mul_Op : public OperatorTensor,
-    public Registrable<Mul_Op, std::string, std::unique_ptr<OperatorImpl>(const Mul_Op&)> {
+    public Registrable<Mul_Op, std::string, std::shared_ptr<OperatorImpl>(const Mul_Op&)> {
 public:
     static const std::string Type;
@@ -57,7 +57,7 @@ public:
     void computeOutputDims() override final;
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Mul_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Mul_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
@@ -74,4 +74,4 @@ inline std::shared_ptr<Node> Mul(const std::string& name = "") {
 }
 } // namespace Aidge
-#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index bb961295b..dce2a6e9e 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -31,7 +31,7 @@ enum class PadBorderType { Constant, Edge, Reflect, Wrap };
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
-    public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
+    public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
     public StaticAttributes<PadAttr,
                             std::array<DimSize_t, 2*DIM>,
                             PadBorderType,
@@ -98,7 +98,7 @@ public:
     }
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(Pad_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index ba8d3d058..e8894d1a2 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -27,7 +27,7 @@ namespace Aidge {
 class Pow_Op : public OperatorTensor,
-    public Registrable<Pow_Op, std::string, std::unique_ptr<OperatorImpl>(const Pow_Op&)> {
+    public Registrable<Pow_Op, std::string, std::shared_ptr<OperatorImpl>(const Pow_Op&)> {
 public:
     static const std::string Type;
@@ -55,7 +55,7 @@ public:
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Pow_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Pow_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
@@ -72,4 +72,4 @@ inline std::shared_ptr<Node> Pow(const std::string& name = "") {
 }
 } // namespace Aidge
-#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 0bb7cdffe..7a5144f48 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -26,7 +26,7 @@ namespace Aidge {
 class ReLU_Op : public OperatorTensor,
-    public Registrable<ReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const ReLU_Op&)> {
+    public Registrable<ReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const ReLU_Op&)> {
 public:
     static const std::string Type;
@@ -52,7 +52,7 @@ public:
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<ReLU_Op>::create(name)(*this);
+        SET_IMPL_MACRO(ReLU_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
@@ -69,4 +69,4 @@ inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
 }
 }
-#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 5f07cddfa..70fe03635 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -32,7 +32,7 @@ enum class ReduceMeanAttr { Axes, KeepDims };
 template <DimIdx_t DIM>
 class ReduceMean_Op : public OperatorTensor,
-    public Registrable<ReduceMean_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
+    public Registrable<ReduceMean_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
     public StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t> {
 public:
@@ -99,7 +99,7 @@ class ReduceMean_Op : public OperatorTensor,
     }
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<ReduceMean_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 32d71d5ad..410f55e5b 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
 enum class ReshapeAttr { Shape };
 class Reshape_Op : public OperatorTensor,
-    public Registrable<Reshape_Op, std::string, std::unique_ptr<OperatorImpl>(const Reshape_Op&)>,
+    public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)>,
     public StaticAttributes<ReshapeAttr, std::vector<std::int64_t>> {
 public:
@@ -67,7 +67,7 @@ public:
     void computeOutputDims() override final;
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Reshape_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Reshape_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 4a073bc52..3635eb32c 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -28,7 +28,7 @@ enum class SliceAttr { Starts, Ends, Axes };
 class Slice_Op : public OperatorTensor,
-    public Registrable<Slice_Op, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op &)>,
+    public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)>,
     public StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int64_t>> {
 public:
     static const std::string Type;
@@ -69,7 +69,7 @@ public:
     void computeOutputDims() override final;
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Slice_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Slice_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index ed6689dc9..a63827a85 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -33,7 +33,7 @@ enum class SoftmaxAttr { AxisIdx };
 class Softmax_Op : public OperatorTensor,
                    public Registrable<Softmax_Op, std::string,
-                                      std::unique_ptr<OperatorImpl>(const Softmax_Op&)>,
+                                      std::shared_ptr<OperatorImpl>(const Softmax_Op&)>,
                    public StaticAttributes<SoftmaxAttr, int> {
 public:
@@ -67,7 +67,7 @@ public:
     }
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Softmax_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Softmax_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index 32adfdb93..69a1ffba9 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -27,7 +27,7 @@ namespace Aidge {
 class Sqrt_Op : public OperatorTensor,
-    public Registrable<Sqrt_Op, std::string, std::unique_ptr<OperatorImpl>(const Sqrt_Op&)> {
+    public Registrable<Sqrt_Op, std::string, std::shared_ptr<OperatorImpl>(const Sqrt_Op&)> {
 public:
     // FIXME: change accessibility
     std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -57,7 +57,7 @@ public:
     }
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Sqrt_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Sqrt_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index 7d346457e..721b68a44 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -27,7 +27,7 @@ namespace Aidge {
 class Sub_Op : public OperatorTensor,
-    public Registrable<Sub_Op, std::string, std::unique_ptr<OperatorImpl>(const Sub_Op&)> {
+    public Registrable<Sub_Op, std::string, std::shared_ptr<OperatorImpl>(const Sub_Op&)> {
 public:
     // FIXME: change accessibility
     std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
@@ -60,7 +60,7 @@ public:
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Sub_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Sub_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
@@ -77,4 +77,4 @@ inline std::shared_ptr<Node> Sub(const std::string& name = "") {
 }
 } // namespace Aidge
-#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 2262bec14..f081f830a 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -30,7 +30,7 @@ enum class TransposeAttr { OutputDimsOrder };
 template <DimIdx_t DIM>
 class Transpose_Op : public OperatorTensor,
-    public Registrable<Transpose_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
+    public Registrable<Transpose_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
     public StaticAttributes<TransposeAttr,
                             std::array<DimSize_t, DIM>> {
@@ -80,7 +80,7 @@ class Transpose_Op : public OperatorTensor,
     }
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Transpose_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(Transpose_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 74ec11c28..661c96bb8 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -23,7 +23,7 @@ void declare_Add(py::module &m) {
     py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
     .def("get_inputs_name", &Add_Op::getInputsName)
     .def("get_outputs_name", &Add_Op::getOutputsName);
-
+    declare_registrable<Add_Op>(m, "AddOp");
     m.def("Add", &Add, py::arg("nbIn"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 0ca01c075..c44c7b49a 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -26,8 +26,9 @@ namespace py = pybind11;
 namespace Aidge {
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
+    const std::string pyClassName("AvgPoolingOp" + std::to_string(DIM) + "D");
     py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>(
-        m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
+        m, pyClassName.c_str(),
         py::multiple_inheritance())
     .def(py::init<const std::array<DimSize_t, DIM> &,
                   const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
         py::arg("stride_dims"))
@@ -36,7 +37,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
     .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
     .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
     .def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName);
-
+    declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
     m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                  const std::string& name,
                                                                  const std::vector<DimSize_t> &stride_dims) {
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index e11fc288f..7020c35f6 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -21,13 +21,12 @@ namespace Aidge {
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
-    .def(py::init<float, float>(),
-        py::arg("epsilon"),
-        py::arg("momentum"))
+    const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D");
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, pyClassName.c_str(), py::multiple_inheritance())
    .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
    .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
    .def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
+    declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
     m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nbFeatures"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 8cdd138b8..38d8a20cb 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -24,6 +24,7 @@ void init_Concat(py::module& m) {
     .def("get_outputs_name", &Concat_Op::getOutputsName)
     .def("attributes_name", &Concat_Op::staticGetAttrsName);
+    declare_registrable<Concat_Op>(m, "ConcatOp");
     m.def("Concat", &Concat, py::arg("nbIn"), py::arg("axis"), py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index e25024e09..83eac8742 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -26,8 +26,9 @@ namespace py = pybind11;
 namespace Aidge {
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
+    const std::string pyClassName("ConvDepthWiseOp" + std::to_string(DIM) + "D");
     py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
-        m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
+        m, pyClassName.c_str(),
         py::multiple_inheritance())
     .def(py::init<const DimSize_t,
                   const std::array<DimSize_t, DIM> &,
@@ -40,7 +41,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
     .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
     .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
     .def("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName);
-
+    declare_registrable<ConvDepthWise_Op<DIM>>(m, pyClassName);
     m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
                                                                     const std::vector<DimSize_t>& kernel_dims,
                                                                     const std::string& name,
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
index 6d14510f3..2996e0bca 100644
--- a/python_binding/operator/pybind_Div.cpp
+++ b/python_binding/operator/pybind_Div.cpp
@@ -21,7 +21,7 @@ void init_Div(py::module& m) {
     py::class_<Div_Op, std::shared_ptr<Div_Op>, OperatorTensor>(m, "DivOp", py::multiple_inheritance())
     .def("get_inputs_name", &Div_Op::getInputsName)
     .def("get_outputs_name", &Div_Op::getOutputsName);
-
+    declare_registrable<Div_Op>(m, "DivOp");
     m.def("Div", &Div, py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp
index 806867f61..e1aef08ad 100644
--- a/python_binding/operator/pybind_Erf.cpp
+++ b/python_binding/operator/pybind_Erf.cpp
@@ -21,7 +21,7 @@ void init_Erf(py::module& m) {
     py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(m, "ErfOp", py::multiple_inheritance())
     .def("get_inputs_name", &Erf_Op::getInputsName)
     .def("get_outputs_name", &Erf_Op::getOutputsName);
-
+    declare_registrable<Erf_Op>(m, "ErfOp");
     m.def("Erf", &Erf, py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index ad589d73d..0b13643cb 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -24,7 +24,7 @@ void declare_FC(py::module &m) {
     .def("get_inputs_name", &FC_Op::getInputsName)
     .def("get_outputs_name", &FC_Op::getOutputsName)
     .def("attributes_name", &FC_Op::staticGetAttrsName);
-
+    declare_registrable<FC_Op>(m, "FCOp");
     m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index f0d55e2f4..a67dd6c13 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -23,7 +23,7 @@ void init_Gather(py::module& m) {
     .def("get_inputs_name", &Gather_Op::getInputsName)
     .def("get_outputs_name", &Gather_Op::getOutputsName)
     .def("attributes_name", &Gather_Op::staticGetAttrsName);
-
+    declare_registrable<Gather_Op>(m, "GatherOp");
     m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis"), py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 3e9acb831..66b2c34a9 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -22,7 +22,7 @@ void init_LeakyReLU(py::module& m) {
     .def("get_inputs_name", &LeakyReLU_Op::getInputsName)
     .def("get_outputs_name", &LeakyReLU_Op::getOutputsName)
     .def("attributes_name", &LeakyReLU_Op::staticGetAttrsName);
-
+    declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index d0d7f28d5..383bad54b 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -23,7 +23,7 @@ void init_MatMul(py::module &m) {
     py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
     .def("get_inputs_name", &MatMul_Op::getInputsName)
     .def("get_outputs_name", &MatMul_Op::getOutputsName);
-
+    declare_registrable<MatMul_Op>(m, "MatMulOp");
     m.def("MatMul", &MatMul, py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 9c83a67e8..8a5e3db9d 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -26,6 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
+    const std::string pyClassName("MaxPoolingOp" + std::to_string(DIM) + "D");
     py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>(
         m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
         py::multiple_inheritance())
     .def(py::init<const std::array<DimSize_t, DIM> &,
@@ -38,7 +39,7 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
     .def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
     .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
     .def("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName);
-
+    declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName);
     m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                  const std::string& name,
                                                                  const std::vector<DimSize_t> &stride_dims,
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
index 21f510d98..5354f01ca 100644
--- a/python_binding/operator/pybind_Mul.cpp
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -21,7 +21,7 @@ void init_Mul(py::module& m) {
     py::class_<Mul_Op, std::shared_ptr<Mul_Op>, OperatorTensor>(m, "MulOp", py::multiple_inheritance())
     .def("get_inputs_name", &Mul_Op::getInputsName)
     .def("get_outputs_name", &Mul_Op::getOutputsName);
-
+    declare_registrable<Mul_Op>(m, "MulOp");
     m.def("Mul", &Mul, py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 69d63fe7b..d784a0d6a 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -25,8 +25,9 @@ namespace py = pybind11;
 namespace Aidge {
 template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
+    const std::string pyClassName("PadOp" + std::to_string(DIM) + "D");
     py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>(
-        m, ("PadOp" + std::to_string(DIM) + "D").c_str(),
+        m, pyClassName.c_str(),
         py::multiple_inheritance())
     .def(py::init<const std::array<DimSize_t, 2*DIM> &,
                   const PadBorderType &,
@@ -38,7 +39,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
     .def("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
     .def("attributes_name", &Pad_Op<DIM>::staticGetAttrsName)
     ;
-
+    declare_registrable<Pad_Op<DIM>>(m, pyClassName);
     m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
                                                           const std::string& name,
                                                           const PadBorderType &borderType = PadBorderType::Constant,
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
index 09d1e4ad2..03e822adb 100644
--- a/python_binding/operator/pybind_Pow.cpp
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -21,6 +21,7 @@ void init_Pow(py::module& m) {
     py::class_<Pow_Op, std::shared_ptr<Pow_Op>, OperatorTensor>(m, "PowOp", py::multiple_inheritance())
     .def("get_inputs_name", &Pow_Op::getInputsName)
     .def("get_outputs_name", &Pow_Op::getOutputsName);
+    declare_registrable<Pow_Op>(m, "PowOp");
     m.def("Pow", &Pow, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index 24ae96649..f08c67cb9 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -21,6 +21,7 @@ void init_ReLU(py::module& m) {
     py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, OperatorTensor>(m, "ReLUOp", py::multiple_inheritance())
     .def("get_inputs_name", &ReLU_Op::getInputsName)
     .def("get_outputs_name", &ReLU_Op::getOutputsName);
+    declare_registrable<ReLU_Op>(m, "ReLUOp");
     m.def("ReLU", &ReLU, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 11e979736..fbec68640 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -24,12 +24,14 @@ namespace py = pybind11;
 namespace Aidge {
 template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) {
+    const std::string pyClassName("ReduceMeanOp" + std::to_string(DIM) + "D");
     py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, Attributes, OperatorTensor>(
-        m, ("ReduceMeanOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
+        m, pyClassName.c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName)
     .def("get_outputs_name", &ReduceMean_Op<DIM>::getOutputsName)
     .def("attributes_name", &ReduceMean_Op<DIM>::staticGetAttrsName)
     ;
+    declare_registrable<ReduceMean_Op<DIM>>(m, pyClassName);
     m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), [](const std::vector<int>& axes,
                                                                  DimSize_t keepDims,
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index b3e9850a5..dc6a9b4ec 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -21,7 +21,7 @@ void init_Reshape(py::module& m) {
     py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, Attributes, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
     .def("get_inputs_name", &Reshape_Op::getInputsName)
     .def("get_outputs_name", &Reshape_Op::getOutputsName);
-
+    declare_registrable<Reshape_Op>(m, "ReshapeOp");
     m.def("Reshape", &Reshape, py::arg("shape"), py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index 7bfd1b4f0..3bb1b082c 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -21,7 +21,7 @@ void init_Slice(py::module& m) {
     py::class_<Slice_Op, std::shared_ptr<Slice_Op>, OperatorTensor>(m, "SliceOp", py::multiple_inheritance())
     .def("get_inputs_name", &Slice_Op::getInputsName)
     .def("get_outputs_name", &Slice_Op::getOutputsName);
-
+    declare_registrable<Slice_Op>(m, "SliceOp");
     m.def("Slice", &Slice, py::arg("starts"), py::arg("ends"), py::arg("axes"), py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 780cffdef..bac553387 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -23,7 +23,7 @@ void init_Softmax(py::module& m) {
     .def("get_inputs_name", &Softmax_Op::getInputsName)
     .def("get_outputs_name", &Softmax_Op::getOutputsName)
     .def("attributes_name", &Softmax_Op::staticGetAttrsName);
-
+    declare_registrable<Softmax_Op>(m, "SoftmaxOp");
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index 98d65242e..33d46e02c 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -21,7 +21,7 @@ void init_Sqrt(py::module& m) {
     py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, OperatorTensor>(m, "SqrtOp", py::multiple_inheritance())
     .def("get_inputs_name", &Sqrt_Op::getInputsName)
     .def("get_outputs_name", &Sqrt_Op::getOutputsName);
-
+    declare_registrable<Sqrt_Op>(m, "SqrtOp");
     m.def("Sqrt", &Sqrt, py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
index dce1ab6cb..1b858d152 100644
--- a/python_binding/operator/pybind_Sub.cpp
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -21,7 +21,7 @@ void init_Sub(py::module& m) {
     py::class_<Sub_Op, std::shared_ptr<Sub_Op>, OperatorTensor>(m, "SubOp", py::multiple_inheritance())
     .def("get_inputs_name", &Sub_Op::getInputsName)
     .def("get_outputs_name", &Sub_Op::getOutputsName);
-
+    declare_registrable<Sub_Op>(m, "SubOp");
     m.def("Sub", &Sub, py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index f5fbaf0e7..59482cf48 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -27,12 +27,15 @@ namespace Aidge {
 template <DimIdx_t DIM> void declare_Transpose(py::module &m) {
+    const std::string pyClassName("TransposeOp" + std::to_string(DIM) + "D");
     py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Attributes, OperatorTensor>(
         m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
     .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName)
     .def("attributes_name", &Transpose_Op<DIM>::staticGetAttrsName);
+    declare_registrable<Transpose_Op<DIM>>(m, pyClassName);
+
     m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order,
                                                                 const std::string& name) {
         AIDGE_ASSERT(output_dims_order.size() == DIM, "output_dims_order size [{}] does not match DIM [{}]", output_dims_order.size(), DIM);
-- 
GitLab
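
Note on the pattern the patch converges on: every operator is looked up by backend name through its Registrable/Registrar base, the factory now returns a std::shared_ptr<OperatorImpl> instead of a std::unique_ptr, and setBackend() resolves and stores that implementation (via SET_IMPL_MACRO). The snippet below is a minimal, self-contained sketch of that flow, not the real Aidge classes: the simplified Registrar/OperatorImpl/Add_Op types, the AddImpl_cpu class, and the "cpu" key are assumptions made for illustration only.

// Minimal sketch of a name -> implementation registry, mirroring the pattern above.
// All types here are simplified stand-ins, NOT the actual Aidge API.
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct OperatorImpl { virtual ~OperatorImpl() = default; };

// Registry keyed by backend name; creators return shared_ptr (the change made by this patch).
template <class Op>
struct Registrar {
    using Creator = std::function<std::shared_ptr<OperatorImpl>(const Op&)>;
    static std::map<std::string, Creator>& registry() {
        static std::map<std::string, Creator> r;
        return r;
    }
    // Constructing a Registrar with a key and a creator adds an entry to the registry.
    Registrar(const std::string& key, Creator c) { registry()[key] = std::move(c); }
    static Creator create(const std::string& key) { return registry().at(key); }
};

struct Add_Op {
    std::shared_ptr<OperatorImpl> mImpl;
    // Roughly what the rewritten setBackend() bodies do: look up the creator by name
    // and keep the resulting implementation.
    void setBackend(const std::string& name) { mImpl = Registrar<Add_Op>::create(name)(*this); }
};

// Hypothetical backend implementation registering itself under the key "cpu".
struct AddImpl_cpu : OperatorImpl { explicit AddImpl_cpu(const Add_Op&) {} };
static Registrar<Add_Op> registerAddCpu("cpu", [](const Add_Op& op) {
    return std::make_shared<AddImpl_cpu>(op);  // shared_ptr matches the new Registrable signature
});

int main() {
    Add_Op add;
    add.setBackend("cpu");
    std::cout << std::boolalpha << (add.mImpl != nullptr) << '\n';  // prints: true
}

Returning a shared_ptr rather than a unique_ptr presumably lets the operator and other holders (for example, bindings exposed through declare_registrable) share the same implementation object without transferring ownership.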