Commit 108727b7 authored by Cyril Moineau, committed by Maxence Naud

Update every bound operator to be registrable.

parent 299e7a40
2 merge requests: !105 "version 0.2.0", !85 "Initial working python registrar"
Showing changed files with 35 additions and 27 deletions
@@ -27,7 +27,7 @@
 namespace Aidge {
 class Sqrt_Op : public OperatorTensor,
-    public Registrable<Sqrt_Op, std::string, std::unique_ptr<OperatorImpl>(const Sqrt_Op&)> {
+    public Registrable<Sqrt_Op, std::string, std::shared_ptr<OperatorImpl>(const Sqrt_Op&)> {
 public:
     // FIXME: change accessibility
     std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -57,7 +57,7 @@ public:
     }
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Sqrt_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Sqrt_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
......
@@ -27,7 +27,7 @@
 namespace Aidge {
 class Sub_Op : public OperatorTensor,
-    public Registrable<Sub_Op, std::string, std::unique_ptr<OperatorImpl>(const Sub_Op&)> {
+    public Registrable<Sub_Op, std::string, std::shared_ptr<OperatorImpl>(const Sub_Op&)> {
 public:
     // FIXME: change accessibility
     std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
@@ -60,7 +60,7 @@ public:
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Sub_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Sub_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
@@ -77,4 +77,4 @@ inline std::shared_ptr<Node> Sub(const std::string& name = "") {
 }
 } // namespace Aidge
-#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
@@ -30,7 +30,7 @@ enum class TransposeAttr { OutputDimsOrder };
 template <DimIdx_t DIM>
 class Transpose_Op : public OperatorTensor,
-    public Registrable<Transpose_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
+    public Registrable<Transpose_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
     public StaticAttributes<TransposeAttr,
                             std::array<DimSize_t, DIM>> {
@@ -80,7 +80,7 @@ class Transpose_Op : public OperatorTensor,
     }
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Transpose_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(Transpose_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
......
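In every header touched by this commit the change is the same: the Registrable functor type switches from std::unique_ptr<OperatorImpl> to std::shared_ptr<OperatorImpl>, and the hand-written registrar lookup in setBackend() is replaced by SET_IMPL_MACRO. The macro's definition is not part of this diff; below is a minimal sketch of what it might expand to, assuming Registrar<T> exposes an exists() check alongside create() — a hypothetical reading, the real macro in aidge_core may differ.

// Hypothetical sketch only, not the verbatim aidge_core definition.
#define SET_IMPL_MACRO(T_Op, op, backend_name)                    \
    if (Registrar<T_Op>::exists(backend_name)) {                  \
        (op).mImpl = Registrar<T_Op>::create(backend_name)(op);   \
    }

Guarding the lookup behind exists() would let a graph be built even when no implementation is registered yet for the requested backend, which matters once implementations can be registered late from Python.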
@@ -23,7 +23,7 @@ void declare_Add(py::module &m) {
     py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
     .def("get_inputs_name", &Add_Op::getInputsName)
     .def("get_outputs_name", &Add_Op::getOutputsName);
+    declare_registrable<Add_Op>(m, "AddOp");
     m.def("Add", &Add, py::arg("nbIn"), py::arg("name") = "");
 }
......
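On the binding side, every operator now calls declare_registrable<Op>(m, "OpName") right after its py::class_ definition. The helper itself is outside this diff; the following is a plausible sketch of what it does, assuming the Registrar constructor installs a creation function under a backend-name key. The register_* entry point and its signature are illustrative assumptions, not the verbatim aidge_core helper.

#include <functional>
#include <memory>
#include <string>
#include <pybind11/pybind11.h>
#include <pybind11/functional.h>  // required to accept Python callables as std::function
namespace py = pybind11;

// Hypothetical sketch; assumes the Aidge Registrar/OperatorImpl headers
// are available in the including translation unit.
template <class T_Op>
void declare_registrable(py::module& m, const std::string& class_name) {
    // Expose a registration entry point so a backend implemented in Python
    // can install its OperatorImpl factory for T_Op.
    m.def(("register_" + class_name).c_str(),
          [](const std::string& backend_name,
             std::function<std::shared_ptr<Aidge::OperatorImpl>(const T_Op&)> creator) {
              Aidge::Registrar<T_Op>(backend_name, creator);
          },
          py::arg("backend_name"), py::arg("creator"));
}

A helper along these lines would also explain the std::unique_ptr to std::shared_ptr switch in the headers: pybind11 cannot hand ownership of a Python-created object back to C++ through a std::unique_ptr return, while std::shared_ptr crosses the language boundary cleanly.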
@@ -26,8 +26,9 @@ namespace py = pybind11;
 namespace Aidge {
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
+    const std::string pyClassName("AvgPoolingOp" + std::to_string(DIM) + "D");
     py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>(
-        m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
+        m, pyClassName.c_str(),
         py::multiple_inheritance())
     .def(py::init<const std::array<DimSize_t, DIM> &,
                   const std::array<DimSize_t, DIM> &>(),
@@ -36,7 +37,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
     .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
     .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
     .def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName);
+    declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
     m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                  const std::string& name,
                                                                  const std::vector<DimSize_t> &stride_dims) {
......
@@ -21,13 +21,12 @@ namespace Aidge {
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
-    .def(py::init<float, float>(),
-        py::arg("epsilon"),
-        py::arg("momentum"))
+    const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D");
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, pyClassName.c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
     .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
     .def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
+    declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
     m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nbFeatures"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
......
@@ -24,6 +24,7 @@ void init_Concat(py::module& m) {
     .def("get_outputs_name", &Concat_Op::getOutputsName)
     .def("attributes_name", &Concat_Op::staticGetAttrsName);
+    declare_registrable<Concat_Op>(m, "ConcatOp");
     m.def("Concat", &Concat, py::arg("nbIn"), py::arg("axis"), py::arg("name") = "");
 }
 } // namespace Aidge
@@ -26,8 +26,9 @@ namespace py = pybind11;
 namespace Aidge {
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
+    const std::string pyClassName("ConvDepthWiseOp" + std::to_string(DIM) + "D");
     py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
-        m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
+        m, pyClassName.c_str(),
         py::multiple_inheritance())
     .def(py::init<const DimSize_t,
                   const std::array<DimSize_t, DIM> &,
@@ -40,7 +41,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
     .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
     .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
     .def("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName);
+    declare_registrable<ConvDepthWise_Op<DIM>>(m, pyClassName);
     m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
                                                                     const std::vector<DimSize_t>& kernel_dims,
                                                                     const std::string& name,
......
@@ -21,7 +21,7 @@ void init_Div(py::module& m) {
     py::class_<Div_Op, std::shared_ptr<Div_Op>, OperatorTensor>(m, "DivOp", py::multiple_inheritance())
     .def("get_inputs_name", &Div_Op::getInputsName)
     .def("get_outputs_name", &Div_Op::getOutputsName);
+    declare_registrable<Div_Op>(m, "DivOp");
     m.def("Div", &Div, py::arg("name") = "");
 }
 } // namespace Aidge
@@ -21,7 +21,7 @@ void init_Erf(py::module& m) {
     py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(m, "ErfOp", py::multiple_inheritance())
     .def("get_inputs_name", &Erf_Op::getInputsName)
     .def("get_outputs_name", &Erf_Op::getOutputsName);
+    declare_registrable<Erf_Op>(m, "ErfOp");
     m.def("Erf", &Erf, py::arg("name") = "");
 }
 } // namespace Aidge
@@ -24,7 +24,7 @@ void declare_FC(py::module &m) {
     .def("get_inputs_name", &FC_Op::getInputsName)
     .def("get_outputs_name", &FC_Op::getOutputsName)
     .def("attributes_name", &FC_Op::staticGetAttrsName);
+    declare_registrable<FC_Op>(m, "FCOp");
     m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
......
@@ -23,7 +23,7 @@ void init_Gather(py::module& m) {
     .def("get_inputs_name", &Gather_Op::getInputsName)
     .def("get_outputs_name", &Gather_Op::getOutputsName)
     .def("attributes_name", &Gather_Op::staticGetAttrsName);
+    declare_registrable<Gather_Op>(m, "GatherOp");
     m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis"), py::arg("name") = "");
 }
 } // namespace Aidge
@@ -22,7 +22,7 @@ void init_LeakyReLU(py::module& m) {
     .def("get_inputs_name", &LeakyReLU_Op::getInputsName)
     .def("get_outputs_name", &LeakyReLU_Op::getOutputsName)
     .def("attributes_name", &LeakyReLU_Op::staticGetAttrsName);
+    declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
 } // namespace Aidge
@@ -23,7 +23,7 @@ void init_MatMul(py::module &m) {
     py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
     .def("get_inputs_name", &MatMul_Op::getInputsName)
     .def("get_outputs_name", &MatMul_Op::getOutputsName);
+    declare_registrable<MatMul_Op>(m, "MatMulOp");
     m.def("MatMul", &MatMul, py::arg("name") = "");
 }
 } // namespace Aidge
@@ -26,6 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
+    const std::string pyClassName("MaxPoolingOp" + std::to_string(DIM) + "D");
     py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>(
         m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
         py::multiple_inheritance())
@@ -38,7 +39,7 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
     .def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
     .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
     .def("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName);
+    declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName);
     m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                  const std::string& name,
                                                                  const std::vector<DimSize_t> &stride_dims,
......
@@ -21,7 +21,7 @@ void init_Mul(py::module& m) {
     py::class_<Mul_Op, std::shared_ptr<Mul_Op>, OperatorTensor>(m, "MulOp", py::multiple_inheritance())
     .def("get_inputs_name", &Mul_Op::getInputsName)
     .def("get_outputs_name", &Mul_Op::getOutputsName);
+    declare_registrable<Mul_Op>(m, "MulOp");
     m.def("Mul", &Mul, py::arg("name") = "");
 }
 } // namespace Aidge
@@ -25,8 +25,9 @@ namespace py = pybind11;
 namespace Aidge {
 template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
+    const std::string pyClassName("PadOp" + std::to_string(DIM) + "D");
     py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>(
-        m, ("PadOp" + std::to_string(DIM) + "D").c_str(),
+        m, pyClassName.c_str(),
         py::multiple_inheritance())
     .def(py::init<const std::array<DimSize_t, 2*DIM> &,
                   const PadBorderType &,
@@ -38,7 +39,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
     .def("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
     .def("attributes_name", &Pad_Op<DIM>::staticGetAttrsName)
     ;
+    declare_registrable<Pad_Op<DIM>>(m, pyClassName);
     m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
                                                           const std::string& name,
                                                           const PadBorderType &borderType = PadBorderType::Constant,
......
@@ -21,6 +21,7 @@ void init_Pow(py::module& m) {
     py::class_<Pow_Op, std::shared_ptr<Pow_Op>, OperatorTensor>(m, "PowOp", py::multiple_inheritance())
     .def("get_inputs_name", &Pow_Op::getInputsName)
     .def("get_outputs_name", &Pow_Op::getOutputsName);
+    declare_registrable<Pow_Op>(m, "PowOp");
     m.def("Pow", &Pow, py::arg("name") = "");
 }
......
@@ -21,6 +21,7 @@ void init_ReLU(py::module& m) {
     py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, OperatorTensor>(m, "ReLUOp", py::multiple_inheritance())
     .def("get_inputs_name", &ReLU_Op::getInputsName)
     .def("get_outputs_name", &ReLU_Op::getOutputsName);
+    declare_registrable<ReLU_Op>(m, "ReLUOp");
     m.def("ReLU", &ReLU, py::arg("name") = "");
 }
......
@@ -24,12 +24,14 @@ namespace py = pybind11;
 namespace Aidge {
 template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) {
+    const std::string pyClassName("ReduceMeanOp" + std::to_string(DIM) + "D");
     py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, Attributes, OperatorTensor>(
-        m, ("ReduceMeanOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
+        m, pyClassName.c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName)
     .def("get_outputs_name", &ReduceMean_Op<DIM>::getOutputsName)
     .def("attributes_name", &ReduceMean_Op<DIM>::staticGetAttrsName)
     ;
+    declare_registrable<ReduceMean_Op<DIM>>(m, pyClassName);
     m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), [](const std::vector<int>& axes,
                                                                  DimSize_t keepDims,
......