diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index c2b0431182e07c1e90f8108b8f8aad9a5266b24a..e03456aaaf8ad277d679d156d818fc1a20cfdc1f 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -49,13 +49,14 @@ public:
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;

-    constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+    constexpr ConvDepthWise_Op(const DimSize_t nbChannels,
+                               const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
                       attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-                      attr<ConvDepthWiseAttr::Channels>(0),
+                      attr<ConvDepthWiseAttr::Channels>(nbChannels),
                       attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {}

     /**
@@ -101,7 +102,6 @@ public:
                             floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
                             static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
             }
-            this->template getAttr<ConvDepthWiseAttr::Channels>() = inputDims[1];
             // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>()));
             // if (mInputs[1]->empty()) {
             //     mInputs[1]->resize(weightDims);
@@ -177,7 +177,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
     auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims), name);
-    addProducer(convDW, 1, append(nbChannels, append(1, kernelDims)), "w");
+    addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
     addProducer(convDW, 2, std::array<DimSize_t, 1>({nbChannels}), "b");
     return convDW;
 }
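
With this change the channel count is supplied when the ConvDepthWise_Op is constructed, instead of being written back into the Channels attribute from the input dimensions during computeOutputDims(). A minimal usage sketch of the updated C++ factory, assuming the trailing parameters keep the (kernelDims, name, strideDims, dilationDims) order of the existing helper; the 64-channel input, 3x3 kernel and the node name "dw1" are illustrative only:

    #include <array>
    #include <memory>

    #include "aidge/operator/ConvDepthWise.hpp"

    using namespace Aidge;

    int main() {
        // The channel count (64 here) is now an explicit factory argument;
        // it is no longer deduced from the input tensor at dims-inference time.
        const std::array<DimSize_t, 2> kernel{3, 3};
        std::shared_ptr<Node> dw = ConvDepthWise<2>(64, kernel, "dw1");
        return 0;
    }

With DIM = 2, the addProducer calls above then give the weight producer shape {64, 1, 3, 3} and the bias producer shape {64}.
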
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 73feb134837787ae8d0d280dd723182c9d21438b..9ec6cdb928cdfa433b04ea23c69344133a3c7064 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -56,7 +56,8 @@ inline std::shared_ptr<Node> PaddedConv(
 }

 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
+inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
+                                                 const std::array<DimSize_t, DIM> &kernel_dims,
                                                  const std::string& name = "",
                                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
@@ -64,7 +65,7 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");

     // Need to specify the ordered list of input operators
     const std::vector<NodePtr> orderedInputNodes = {pad, conv};
@@ -77,13 +78,14 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> PaddedConvDepthWise(
+    const DimSize_t nb_channels,
     DimSize_t const (&kernel_dims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
 {
-    return PaddedConvDepthWise(to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
 }

 template <std::array<DimSize_t, 1>::size_type DIM>
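
The PaddedConvDepthWise helpers gain the same leading channel-count argument, forwarded to the inner ConvDepthWise_Op of the pad + conv micro-graph. A sketch of a call through the C-style-array overload, relying on its automatic DIM deduction from the braced kernel list; the 32-channel input, 3x3 kernel and node name are made up for illustration, and stride, padding and dilation keep their defaults:

    #include <memory>

    #include "aidge/operator/MetaOperatorDefs.hpp"

    using namespace Aidge;

    std::shared_ptr<Node> makeDepthwiseBlock() {
        // Pad + depthwise conv micro-graph over a 32-channel input.
        return PaddedConvDepthWise(32, {3, 3}, "dw1_pad");
    }

Both overloads forward nb_channels, so the micro-graph's ConvDepthWise_Op no longer has to recover the channel count at dimension-inference time.
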
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 4745ef345264763f1a890d566235be072c8e50d8..15f2c1c8acb4a1b59cfb0f35ebb78cb611647d3b 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -18,7 +18,7 @@

 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"

@@ -26,19 +26,22 @@ namespace py = pybind11;
 namespace Aidge {

 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
-  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Operator, Attributes>(
+  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor, Attributes>(
     m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
-  .def(py::init<const std::array<DimSize_t, DIM> &,
+  .def(py::init<const DimSize_t,
+                const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &>(),
+        py::arg("nb_channels"),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
         py::arg("dilation_dims"))
   .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName);

-  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
+                                                                  const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
                                                                   const std::vector<DimSize_t> &dilation_dims) {
@@ -46,8 +49,9 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);

-        return ConvDepthWise<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
-    }, py::arg("kernel_dims"),
+        return ConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+    }, py::arg("nb_channels"),
+       py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index c6a1c70000e3e6d604a6652716667efa1c18e956..606b9ae948847f98d5a1129c08db21e073311879 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -13,18 +13,18 @@

 #include "aidge/operator/FC.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"

 namespace py = pybind11;
 namespace Aidge {

 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, Attributes>(m, "FCOp", py::multiple_inheritance())
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor, Attributes>(m, "FCOp", py::multiple_inheritance())
   .def("get_inputs_name", &FC_Op::getInputsName)
   .def("get_outputs_name", &FC_Op::getOutputsName);

-  m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
+  m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }

 void init_FC(py::module &m) {
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index aa9f3c50e6b8c6ab9e7be46776d5fba30d775be2..aa87ce28c17d9ba272ef8501a510014f391b381c 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -18,7 +18,6 @@

 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/MetaOperatorDefs.hpp"
-#include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"

 namespace py = pybind11;
@@ -49,7 +48,8 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
 }

 template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
-  m.def(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+  m.def(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
+                                                                        const std::vector<DimSize_t>& kernel_dims,
                                                                         const std::string& name,
                                                                         const std::vector<DimSize_t> &stride_dims,
                                                                         const std::vector<DimSize_t> &padding_dims,
@@ -60,8 +60,9 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [%ld] does not match DIM [%d]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);

-        return PaddedConvDepthWise<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
-    }, py::arg("kernel_dims"),
+        return PaddedConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+    }, py::arg("nb_channels"),
+       py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),