diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 5bb184b808e0a9d685879e53554ff3be500f5717..1214f918624fe02940ba6c6476a1940d09556510 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -200,6 +200,48 @@ PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
 
 ////////////////////////////////////////////////////////////////////////////////
 
+template <std::array<DimSize_t, 1>::size_type DIM>
+extern std::shared_ptr<Node> PaddedConvTranspose(
+    const DimSize_t &inChannels,
+    const DimSize_t &outChannels,
+    const std::array<DimSize_t, DIM> &kernelDims,
+    const std::array<DimSize_t, DIM> &strideDims =
+        create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilationDims =
+        create_array<DimSize_t,DIM>(1),
+    const bool noBias = false,
+    const std::array<DimSize_t, 2*DIM> &paddingDims =
+        create_array<DimSize_t,2*DIM>(0),
+    const std::string& name = "");
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+extern std::shared_ptr<Node> PaddedConvTranspose(const DimSize_t &inChannels,
+    const DimSize_t &outChannels,
+    DimSize_t const (&kernel_dims)[DIM],
+    const std::array<DimSize_t, DIM> &strideDims =
+        create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilationDims =
+        create_array<DimSize_t,DIM>(1),
+    const bool noBias = false,
+    const std::array<DimSize_t, 2*DIM> &paddingDims =
+        create_array<DimSize_t,2*DIM>(0),
+    const std::string& name = "");
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+extern std::shared_ptr<MetaOperator_Op> PaddedConvTranspose_Op(const DimSize_t &inChannels,
+    const DimSize_t &outChannels,
+    const std::array<DimSize_t, DIM> &kernelDims,
+    const std::array<DimSize_t, DIM> &strideDims =
+        create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilationDims =
+        create_array<DimSize_t,DIM>(1),
+    const bool noBias = false,
+    const std::array<DimSize_t, 2*DIM> &paddingDims =
+        create_array<DimSize_t,2*DIM>(0),
+    const std::string& name = "");
+
+////////////////////////////////////////////////////////////////////////////////
+
 /**
  * @brief Creates a padded max pooling operation.
  *
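For orientation only (this is not part of the patch): a minimal sketch of how the first factory declared above could be called, assuming the usual Aidge headers and placeholder values. The explicit <2> selects the std::array overload; any omitted arguments fall back to the defaults shown in the declaration.

// Illustrative sketch only -- values, names and the enclosing function are
// placeholders, not part of this patch.
#include <array>
#include <memory>

#include "aidge/operator/MetaOperatorDefs.hpp"

std::shared_ptr<Aidge::Node> makeDeconv2D() {
    return Aidge::PaddedConvTranspose<2>(
        /*inChannels=*/16,
        /*outChannels=*/8,
        std::array<Aidge::DimSize_t, 2>{3, 3},   // kernelDims
        /*strideDims=*/{2, 2},
        /*dilationDims=*/{1, 1},
        /*noBias=*/false,
        /*paddingDims=*/{1, 1, 1, 1},            // size 2*DIM
        "deconv2d");
}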
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index b2811fbaab2b6cd33dc2b105f0044cd8a5edbbc7..9ad80dd6d2b2b8396796833f978a066c6b1503c8 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -14,9 +14,7 @@
 
 #include <string>
 #include <vector>
-#include <array>
-
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/MetaOperatorDefs.hpp"
 #include "aidge/utils/Types.h"
 
@@ -106,6 +104,71 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
 
 }
 
+template <DimIdx_t DIM> void declare_PaddedConvTransposeOp(py::module &m) {
+  m.def(("PaddedConvTranspose" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t &in_channels,
+                  const DimSize_t &out_channels,
+                  const std::vector<DimSize_t>& kernel_dims,
+                  const std::vector<DimSize_t> &stride_dims,
+                  const std::vector<DimSize_t> &dilation_dims,
+                  const bool no_bias,
+                  const std::vector<DimSize_t> &padding_dims,
+                  const std::string& name)
+    {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
+
+        return PaddedConvTranspose<DIM>(in_channels,
+                                        out_channels,
+                                        to_array<DIM>(kernel_dims.begin()),
+                                        to_array<DIM>(stride_dims.begin()),
+                                        to_array<DIM>(dilation_dims.begin()),
+                                        no_bias,
+                                        to_array<2*DIM>(padding_dims.begin()),
+                                        name);
+    }, py::arg("in_channels"),
+       py::arg("out_channels"),
+       py::arg("kernel_dims"),
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("no_bias") = false,
+       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
+       py::arg("name") = "");
+  m.def(("PaddedConvTranspose" + std::to_string(DIM) + "DOp").c_str(), [](
+                  const DimSize_t &inChannels,
+                  const DimSize_t &outChannels,
+                  const std::vector<DimSize_t>& kernel_dims,
+                  const std::vector<DimSize_t> &stride_dims,
+                  const std::vector<DimSize_t> &dilation_dims,
+                  const bool no_bias,
+                  const std::vector<DimSize_t> &padding_dims,
+                  const std::string &name)
+    {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
+
+        return PaddedConvTranspose_Op<DIM>(inChannels,
+                                           outChannels,
+                                           to_array<DIM>(kernel_dims.begin()),
+                                           to_array<DIM>(stride_dims.begin()),
+                                           to_array<DIM>(dilation_dims.begin()),
+                                           no_bias,
+                                           to_array<2*DIM>(padding_dims.begin()),
+                                           name);
+    }, py::arg("in_channels"),
+       py::arg("out_channels"),
+       py::arg("kernel_dims"),
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("no_bias") = false,
+       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
+       py::arg("name") = "");
+}
+
+
 template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
   m.def(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                      const std::string& name,
@@ -194,6 +257,8 @@ void init_MetaOperatorDefs(py::module &m) {
   declare_PaddedConvDepthWiseOp<1>(m);
   declare_PaddedConvDepthWiseOp<2>(m);
 //  declare_PaddedConvDepthWiseOp<3>(m);
+  declare_PaddedConvTransposeOp<1>(m);
+  declare_PaddedConvTransposeOp<2>(m);
 //  declare_PaddedAvgPoolingOp<1>(m);
   declare_PaddedAvgPoolingOp<2>(m);
 //  declare_PaddedAvgPoolingOp<3>(m);
diff --git a/src/operator/MetaOperatorDefs/PaddedConvTranspose.cpp b/src/operator/MetaOperatorDefs/PaddedConvTranspose.cpp
index 7a9142c82ff28f075c80bc16afc4cdbd99526ac8..1d43e891ac73f53f2bad565584b805711b4d80d1 100644
--- a/src/operator/MetaOperatorDefs/PaddedConvTranspose.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedConvTranspose.cpp
@@ -25,30 +25,29 @@
 namespace Aidge {
 
-template <std::array<DimSize_t, 1>::size_type DIM>
+//////////////////////////////////
+// Node functions
+//////////////////////////////////
+
+template <std::array<DimIdx_t, 1>::size_type DIM>
 std::shared_ptr<Node>
-PaddedConvTranspose(DimSize_t inChannels,
-                    DimSize_t outChannels,
+PaddedConvTranspose(const DimSize_t &inChannels,
+                    const DimSize_t &outChannels,
                     const std::array<DimSize_t, DIM> &kernelDims,
-                    const std::string &name,
                     const std::array<DimSize_t, DIM> &strideDims,
-                    const std::array<DimSize_t, 2 * DIM> &paddingDims,
                     const std::array<DimSize_t, DIM> &dilationDims,
-                    bool noBias) {
-    // auto metaOp = PaddedConvTranspose_Op<DIM>(kernel_dims, stride_dims,
-    // padding_dims, dilation_dims); if (!name.empty()) {
-    //     metaOp->getMicroGraph()->setName(name);
-    //     metaOp->getMicroGraph()->setNodesName();
-    // }
-    // auto metaOpNode = std::make_shared<Node>(metaOp, name);
+                    const bool noBias,
+                    const std::array<DimSize_t, 2 * DIM> &paddingDims,
+                    const std::string &name) {
     auto graph = Sequential(
         {Pad<DIM>(paddingDims, (!name.empty()) ? name + "_pad" : ""),
-         std::make_shared<Node>(
-             std::make_shared<ConvTranspose_Op<static_cast<DimIdx_t>(DIM)>>(
-                 kernelDims,
-                 strideDims,
-                 dilationDims),
-             (!name.empty()) ? name + "_conv" : "")});
+         ConvTranspose(inChannels,
+                       outChannels,
+                       kernelDims,
+                       strideDims,
+                       dilationDims,
+                       noBias,
+                       !name.empty() ? name + "_convTranspose" : "")});
 
     auto metaOpNode = MetaOperator(
         ("PaddedConvTranspose" + std::to_string(DIM) + "D").c_str(),
         graph,
@@ -63,95 +62,116 @@ PaddedConvTranspose(DimSize_t inChannels,
     }
     return metaOpNode;
 }
+
 template std::shared_ptr<Node>
-PaddedConvTranspose<1>(const DimSize_t,
-                       const DimSize_t,
+PaddedConvTranspose<1>(const DimSize_t &,
+                       const DimSize_t &,
                        const std::array<DimSize_t, 1> &,
-                       const std::string &,
                        const std::array<DimSize_t, 1> &,
-                       const std::array<DimSize_t, 2> &,
                        const std::array<DimSize_t, 1> &,
-                       bool);
+                       const bool,
+                       const std::array<DimSize_t, 2> &,
+                       const std::string &);
 template std::shared_ptr<Node>
-PaddedConvTranspose<2>(const DimSize_t,
-                       const DimSize_t,
+PaddedConvTranspose<2>(const DimSize_t &,
+                       const DimSize_t &,
                        const std::array<DimSize_t, 2> &,
-                       const std::string &,
                        const std::array<DimSize_t, 2> &,
+                       const std::array<DimSize_t, 2> &,
+                       const bool,
                        const std::array<DimSize_t, 4> &,
+                       const std::string &);
+
+template <std::array<DimIdx_t, 1>::size_type DIM>
+extern std::shared_ptr<Node>
+PaddedConvTranspose(const DimSize_t &inChannels,
+                    const DimSize_t &outChannels,
+                    DimSize_t const (&kernelDims)[DIM],
+                    const std::array<DimSize_t, DIM> &strideDims,
+                    const std::array<DimSize_t, DIM> &dilationDims,
+                    const bool noBias,
+                    const std::array<DimSize_t, 2 * DIM> &paddingDims,
+                    const std::string &name) {
+    return PaddedConvTranspose<DIM>(inChannels,
+                                    outChannels,
+                                    to_array(kernelDims),
+                                    strideDims,
+                                    dilationDims,
+                                    noBias,
+                                    paddingDims,
+                                    name);
+}
+
+template std::shared_ptr<Node>
+PaddedConvTranspose<1>(const DimSize_t &,
+                       const DimSize_t &,
+                       DimSize_t const (&)[1],
+                       const std::array<DimSize_t, 1> &,
+                       const std::array<DimSize_t, 1> &,
+                       const bool,
+                       const std::array<DimSize_t, 2> &,
+                       const std::string &);
+template std::shared_ptr<Node>
+PaddedConvTranspose<2>(const DimSize_t &,
+                       const DimSize_t &,
+                       const DimSize_t (&)[2],
+                       const std::array<DimSize_t, 2> &,
                        const std::array<DimSize_t, 2> &,
-                       bool);
+                       const bool,
+                       const std::array<DimSize_t, 4> &,
+                       const std::string &);
+
+//////////////////////////////////
+// Operator functions
+//////////////////////////////////
 
-template <std::array<DimSize_t, 1>::size_type DIM>
+template <std::array<DimIdx_t, 1>::size_type DIM>
 std::shared_ptr<MetaOperator_Op>
-PaddedConvTranspose_Op(const std::array<DimSize_t, DIM> &kernelDims,
+PaddedConvTranspose_Op(const DimSize_t &inChannels,
+                       const DimSize_t &outChannels,
+                       const std::array<DimSize_t, DIM> &kernelDims,
                        const std::array<DimSize_t, DIM> &strideDims,
+                       const std::array<DimSize_t, DIM> &dilationDims,
+                       const bool noBias,
                        const std::array<DimSize_t, 2 * DIM> &paddingDims,
-                       const std::array<DimSize_t, DIM> &dilationDims) {
-    auto pad = Pad<DIM>(paddingDims, "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(
-        std::make_shared<ConvTranspose_Op<static_cast<DimIdx_t>(DIM)>>(
-            kernelDims,
-            strideDims,
-            dilationDims),
-        "");
+                       const std::string &name) {
+    auto pad = Pad<DIM>(paddingDims,
+                        !name.empty() ? name + "_pad" : "pad",
+                        PadBorderType::Constant,
+                        0.0);
+
+    auto convTranspose = ConvTranspose(
+        inChannels,
+        outChannels,
+        kernelDims,
+        strideDims,
+        dilationDims,
+        noBias,
+        !name.empty() ? name + "_convtranspose" : "convTranspose");
 
     return std::make_shared<MetaOperator_Op>(
         ("PaddedConvTranspose" + std::to_string(DIM) + "D").c_str(),
-        Sequential({pad, conv}));
+        Sequential({pad, convTranspose}));
 }
 
 template std::shared_ptr<MetaOperator_Op>
-PaddedConvTranspose_Op<1>(const std::array<DimSize_t, 1> &,
+PaddedConvTranspose_Op<1>(const DimSize_t &,
+                          const DimSize_t &,
                           const std::array<DimSize_t, 1> &,
+                          const std::array<DimSize_t, 1> &,
+                          const std::array<DimSize_t, 1> &,
+                          const bool,
                           const std::array<DimSize_t, 2> &,
-                          const std::array<DimSize_t, 1> &);
+                          const std::string &);
+
 template std::shared_ptr<MetaOperator_Op>
-PaddedConvTranspose_Op<2>(const std::array<DimSize_t, 2> &,
+PaddedConvTranspose_Op<2>(const DimSize_t &,
+                          const DimSize_t &,
                           const std::array<DimSize_t, 2> &,
+                          const std::array<DimSize_t, 2> &,
+                          const std::array<DimSize_t, 2> &,
+                          const bool,
                           const std::array<DimSize_t, 4> &,
-                          const std::array<DimSize_t, 2> &);
-
-// helper with C-style array instead of std::array for kernel_dims to allow
-// automatic template DIM deduction
-template <DimSize_t DIM>
-std::shared_ptr<Node>
-PaddedConvTranspose(DimSize_t inChannels,
-                    DimSize_t outChannels,
-                    DimSize_t const (&kernelDims)[DIM],
-                    const std::string &name,
-                    const std::array<DimSize_t, DIM> &strideDims,
-                    const std::array<DimSize_t, 2 * DIM> &paddingDims,
-                    const std::array<DimSize_t, DIM> &dilationDims,
-                    bool noBias) {
-    return PaddedConvTranspose(inChannels,
-                               outChannels,
-                               to_array(kernelDims),
-                               name,
-                               strideDims,
-                               paddingDims,
-                               dilationDims,
-                               noBias);
-}
-
-template std::shared_ptr<Node>
-PaddedConvTranspose<1>(const DimSize_t,
-                       const DimSize_t,
-                       const DimSize_t (&)[1],
-                       const std::string &,
-                       const std::array<DimSize_t, 1> &,
-                       const std::array<DimSize_t, 2> &,
-                       const std::array<DimSize_t, 1> &,
-                       bool);
-
-template std::shared_ptr<Node>
-PaddedConvTranspose<2>(const DimSize_t,
-                       const DimSize_t,
-                       const DimSize_t (&)[2],
-                       const std::string &,
-                       const std::array<DimSize_t, 2> &,
-                       const std::array<DimSize_t, 4> &,
-                       const std::array<DimSize_t, 2> &,
-                       bool);
+                          const std::string &);
 
 } // namespace Aidge