From 55ed24d9f74a796f6a810617bb1ff9924098b32d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gr=C3=A9goire=20KUBLER?= <gregoire.kubler@proton.me>
Date: Thu, 27 Feb 2025 16:06:52 +0000
Subject: [PATCH 01/10] feat : [ADD] convtranspose operator

---
 include/aidge/operator/ConvTranspose.hpp      | 208 +++++++++++
 .../operator/pybind_ConvTranspose.cpp         | 111 ++++++
 python_binding/pybind_core.cpp                |   2 +
 src/operator/ConvTranspose.cpp                | 322 ++++++++++++++++++
 unit_tests/operator/Test_ConvTranspose.cpp    | 241 +++++++++++++
 5 files changed, 884 insertions(+)
 create mode 100644 include/aidge/operator/ConvTranspose.hpp
 create mode 100644 python_binding/operator/pybind_ConvTranspose.cpp
 create mode 100644 src/operator/ConvTranspose.cpp
 create mode 100644 unit_tests/operator/Test_ConvTranspose.cpp

diff --git a/include/aidge/operator/ConvTranspose.hpp b/include/aidge/operator/ConvTranspose.hpp
new file mode 100644
index 000000000..e573a1a02
--- /dev/null
+++ b/include/aidge/operator/ConvTranspose.hpp
@@ -0,0 +1,208 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_CONVTRANSPOSE_H_
+#define AIDGE_CORE_OPERATOR_CONVTRANSPOSE_H_
+
+#include <array>
+#include <cmath>   // std::floor
+#include <string>
+#include <utility> // std::pair
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class ConvTransposeAttr { StrideDims, DilationDims, KernelDims };
+
+template <DimIdx_t DIM>
+class ConvTranspose_Op
+    : public OperatorTensor,
+      public Registrable<ConvTranspose_Op<DIM>,
+                         std::string,
+                         std::function<std::shared_ptr<OperatorImpl>(
+                             const ConvTranspose_Op<DIM> &)>> {
+
+  public:
+    static const std::string Type;
+
+  private:
+    using Attributes_ = StaticAttributes<ConvTransposeAttr,
+                                         std::array<DimSize_t, DIM>,
+                                         std::array<DimSize_t, DIM>,
+                                         std::array<DimSize_t, DIM>>;
+    template <ConvTransposeAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+  public:
+    ConvTranspose_Op() = delete;
+
+    constexpr explicit ConvTranspose_Op(
+        const std::array<DimSize_t, DIM> &kernelDims,
+        const std::array<DimSize_t, DIM> &strideDims =
+            create_array<DimSize_t, DIM>(1),
+        const std::array<DimSize_t, DIM> &dilationDims =
+            create_array<DimSize_t, DIM>(1))
+    : OperatorTensor(Type,
+                     {InputCategory::Data,
+                      InputCategory::Param,
+                      InputCategory::OptionalParam},
+                     1),
+      mAttributes(std::make_shared<Attributes_>(
+          attr<ConvTransposeAttr::StrideDims>(strideDims),
+          attr<ConvTransposeAttr::DilationDims>(dilationDims),
+          attr<ConvTransposeAttr::KernelDims>(kernelDims))) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output
+     * tensor(s), but not its input tensors (the new operator has no input
+     * associated).
+     * @param op Operator to copy.
+     */
+    ConvTranspose_Op(const ConvTranspose_Op<DIM> &op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ConvTranspose_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ConvTranspose_Op<DIM>>(*this);
+    }
+
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
+
+    std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
+    computeReceptiveField(const std::vector<DimSize_t> &firstEltDims,
+                          const std::vector<DimSize_t> &outputDims,
+                          const IOIndex_t outputIdx = 0) const override;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    DimSize_t inChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(
+                std::runtime_error,
+                "{}: operator has no weight Tensor associated so no "
+                "specific number of input channel imposed.",
+                Type);
+        }
+        return getInput(1)->template dims<DIM + 2>()[0];
+    }
+
+    DimSize_t outChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(
+                std::runtime_error,
+                "{}: operator has no weight Tensor associated so no "
+                "specific number of output channel imposed.",
+                Type);
+        }
+        return getInput(1)->template dims<DIM + 2>()[1];
+    }
+
+    inline std::shared_ptr<Attributes> attributes() const override {
+        return mAttributes;
+    }
+    inline std::array<DimSize_t, DIM> &strideDims() const {
+        return mAttributes->template getAttr<ConvTransposeAttr::StrideDims>();
+    }
+    inline std::array<DimSize_t, DIM> &dilationDims() const {
+        return mAttributes
+            ->template getAttr<ConvTransposeAttr::DilationDims>();
+    }
+    inline std::array<DimSize_t, DIM> &kernelDims() const {
+        return mAttributes->template getAttr<ConvTransposeAttr::KernelDims>();
+    }
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input", "weight", "bias"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+/**
+ * @brief Perform a convTranspose(/deconvolution) on the input Tensor.
+ *
+ * @tparam DIM Number of dimensions for the feature map.
+ * @param inChannels Number of input channels.
+ * @param outChannels Number of output channels.
+ * @param kernelDims Dimensions of the kernel. Must be the same number of
+ * dimensions as the feature map.
+ * @param name Name of the operator.
+ * @param strideDims Dimensions of the stride attribute. Must be the same
+ * number of dimensions as the feature map.
+ * @param dilationDims Dimensions of the dilation attribute. Must be the same
+ * number of dimensions as the feature map.
+ * @return std::shared_ptr<Node> A Node containing the operator.
+ */
+template <std::array<DimIdx_t, 1>::size_type DIM>
+std::shared_ptr<Node>
+ConvTranspose(const DimSize_t &inChannels,
+              const DimSize_t &outChannels,
+              const std::array<DimSize_t, DIM> &kernelDims,
+              const std::array<DimSize_t, DIM> &strideDims =
+                  create_array<DimSize_t, DIM>(1),
+              const std::array<DimSize_t, DIM> &dilationDims =
+                  create_array<DimSize_t, DIM>(1),
+              const bool noBias = false,
+              const std::string &name = "");
+
+// helper with C-style array instead of std::array for kernel_dims to allow
+// automatic template DIM deduction
+/**
+ * @brief Conv Transpose node constructor
+ * @param[in] inChannels number of input channels of the conv transpose
+ * operator
+ * @param[in] outChannels number of output channels of the convTranspose
+ * operator
+ * @param[in] kernelDims array of size DIM describing the dimensions of the
+ * kernel
+ * @param[in] name name of the node
+ * @param[in] strideDims stride along each dimension of the operator
+ * @param[in] dilationDims dilation along each dimension of the operator
+ * @param[in] noBias describes if the operator has biases or just weights
+ */
+template <DimIdx_t DIM>
+inline std::shared_ptr<Node>
+ConvTranspose(const DimSize_t &inChannels,
+              const DimSize_t &outChannels,
+              DimSize_t const (&kernelDims)[DIM],
+              const std::array<DimSize_t, DIM> &strideDims =
+                  create_array<DimSize_t, DIM>(1),
+              const std::array<DimSize_t, DIM> &dilationDims =
+                  create_array<DimSize_t, DIM>(1),
+              const bool noBias = false,
+              const std::string &name = "");
+} // namespace Aidge
+
+extern template class Aidge::ConvTranspose_Op<1>;
+extern template class Aidge::ConvTranspose_Op<2>;
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ConvTransposeAttr>::data[] = {
+    "stride_dims",
+    "dilation_dims",
+    "kernel_dims"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_CONVTRANSPOSE_H_ */
diff --git a/python_binding/operator/pybind_ConvTranspose.cpp b/python_binding/operator/pybind_ConvTranspose.cpp
new file mode 100644
index 000000000..0f759e3db
--- /dev/null
+++ b/python_binding/operator/pybind_ConvTranspose.cpp
@@ -0,0 +1,111 @@
+
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <array>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/ConvTranspose.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp" // declare_registrable
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+template <DimIdx_t DIM> void declare_ConvTransposeOp(py::module &m) {
+    const std::string pyClassName("ConvTranspose" + std::to_string(DIM) +
+                                  "DOp");
+    py::class_<ConvTranspose_Op<DIM>,
+               std::shared_ptr<ConvTranspose_Op<DIM>>,
+               OperatorTensor>(m,
+                               pyClassName.c_str(),
+                               py::multiple_inheritance())
+        .def(py::init([](const std::vector<DimSize_t> &kernel_dims,
+                         const std::vector<DimSize_t> &stride_dims,
+                         const std::vector<DimSize_t> &dilation_dims) {
+                 AIDGE_ASSERT(kernel_dims.size() == DIM,
+                              "kernel_dims size [{}] does not match DIM [{}]",
+                              kernel_dims.size(),
+                              DIM);
+                 AIDGE_ASSERT(stride_dims.size() == DIM,
+                              "stride_dims size [{}] does not match DIM [{}]",
+                              stride_dims.size(),
+                              DIM);
+                 AIDGE_ASSERT(
+                     dilation_dims.size() == DIM,
+                     "dilation_dims size [{}] does not match DIM [{}]",
+                     dilation_dims.size(),
+                     DIM);
+
+                 return new ConvTranspose_Op<DIM>(
+                     to_array<DIM>(kernel_dims.begin()),
+                     to_array<DIM>(stride_dims.begin()),
+                     to_array<DIM>(dilation_dims.begin()));
+             }),
+             py::arg("kernel_dims"),
+             py::arg("stride_dims") = std::vector<DimSize_t>(DIM, 1),
+             py::arg("dilation_dims") = std::vector<DimSize_t>(DIM, 1))
+        .def_static("get_inputs_name", &ConvTranspose_Op<DIM>::getInputsName)
+        .def_static("get_outputs_name", &ConvTranspose_Op<DIM>::getOutputsName)
+        .def("in_channels", &ConvTranspose_Op<DIM>::inChannels)
+        .def("out_channels", &ConvTranspose_Op<DIM>::outChannels)
+        .def_readonly_static("Type", &ConvTranspose_Op<DIM>::Type);
+
+    declare_registrable<ConvTranspose_Op<DIM>>(m, pyClassName);
+
+    m.def(("ConvTranspose" + std::to_string(DIM) + "D").c_str(),
+          [](const DimSize_t &in_channels,
+             const DimSize_t &out_channels,
+             const std::vector<DimSize_t> &kernel_dims,
+             const std::vector<DimSize_t> &stride_dims,
+             const std::vector<DimSize_t> &dilation_dims,
+             bool noBias,
+             const std::string &name){
+              AIDGE_ASSERT(kernel_dims.size() == DIM,
+                           "kernel_dims size [{}] does not match DIM [{}]",
+                           kernel_dims.size(),
+                           DIM);
+              AIDGE_ASSERT(stride_dims.size() == DIM,
+                           "stride_dims size [{}] does not match DIM [{}]",
+                           stride_dims.size(),
+                           DIM);
+              AIDGE_ASSERT(dilation_dims.size() == DIM,
+                           "dilation_dims size [{}] does not match DIM [{}]",
+                           dilation_dims.size(),
+                           DIM);
+
+              return ConvTranspose<DIM>(in_channels,
+                               out_channels,
+                               to_array<DIM>(kernel_dims.begin()),
+                               to_array<DIM>(stride_dims.begin()),
+                               to_array<DIM>(dilation_dims.begin()),
+                               noBias,
+                               name);
+          },
+          py::arg("in_channels"),
+          py::arg("out_channels"),
+          py::arg("kernel_dims"),
+          py::arg("stride_dims") = std::vector<DimSize_t>(DIM, 1),
+          py::arg("dilation_dims") = std::vector<DimSize_t>(DIM, 1),
+          py::arg("no_bias") = false,
+          py::arg("name") = "");
+}
+
+void init_ConvTranspose(py::module &m) {
+    declare_ConvTransposeOp<1>(m);
+    declare_ConvTransposeOp<2>(m);
+}
+
+} // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index cc3f3abef..7fef82847 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -51,6 +51,7 @@ void init_Clip(py::module&);
 void init_Concat(py::module&);
 void init_ConstantOfShape(py::module&);
 void init_Conv(py::module&);
+void init_ConvTranspose(py::module&);
 void init_ConvDepthWise(py::module&);
 void init_CryptoHash(py::module&);
 void init_DepthToSpace(py::module&);
@@ -157,6 +158,7 @@ void init_Aidge(py::module& m) {
     init_Clip(m);
     init_Concat(m);
     init_Conv(m);
+    init_ConvTranspose(m);
     init_ConvDepthWise(m);
     init_ConstantOfShape(m);
     init_CryptoHash(m);
diff --git a/src/operator/ConvTranspose.cpp b/src/operator/ConvTranspose.cpp
new file mode 100644
index 000000000..8571518d7
--- /dev/null
+++ b/src/operator/ConvTranspose.cpp
@@ -0,0 +1,322 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ConvTranspose.hpp"
+
+#include <cmath>     // std::floor
+#include <cstddef>   // std::size_t
+#include <cstdint>
+#include <stdexcept> // std::runtime_error
+#include <string>
+#include <utility>   // std::pair
+#include <vector>
+
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+template <DimIdx_t DIM>
+const std::string ConvTranspose_Op<DIM>::Type =
+    "ConvTranspose" + std::to_string(DIM) + "D";
+
+template <DimIdx_t DIM>
+ConvTranspose_Op<DIM>::ConvTranspose_Op(const ConvTranspose_Op<DIM> &op)
+    : OperatorTensor(op), mAttributes(op.mAttributes) {
+    if (op.mImpl) {
+        SET_IMPL_MACRO(ConvTranspose_Op<DIM>, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+template <DimIdx_t DIM>
+bool ConvTranspose_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    if (!inputsAssociated()) {
+        return false;
+    }
+    constexpr std::int8_t batchIdx = 0;
+    constexpr std::int8_t channelIdx = 1;
+    // DIM only defines the dimensions of the input, this defines channel &
+    // batch idx offset
+    constexpr std::int8_t NCIdx = 2;
+
+    // first check weight since it defines inChannels and outChannels
+    AIDGE_ASSERT((getInput(1)->nbDims() == (DIM + NCIdx)),
+                 "{}: Wrong weight Tensor dimension: {}. "
+                 "Expected number of dimensions is {}.",
+                 type(),
+                 getInput(1)->nbDims(),
+                 DIM + NCIdx);
+    // check data
+    AIDGE_ASSERT(
+        getInput(0)->template dims<DIM + NCIdx>()[channelIdx] == inChannels(),
+        "{}: Wrong input size ({}). Expected dims are [x, {}, {}] as weights "
+        "dim size "
+        "on 1st axis describes number of input channel.",
+        type(),
+        getInput(0)->dims(),
+        inChannels(),
+        fmt::join(std::vector<std::string>(DIM, "x"), ", "));
+    // check optional bias
+    if (getInput(2)) {
+        AIDGE_ASSERT((getInput(2)->nbDims() == 1) &&
+                         (getInput(2)->template dims<1>()[0] == outChannels()),
+                     "{}: Wrong bias size ({}). Expected dims are [{}].",
+                     type(),
+                     getInput(2)->dims(),
+                     outChannels());
+    }
+    std::array<DimSize_t, DIM + NCIdx> outputDims{};
+    const std::array<DimSize_t, DIM + NCIdx> inputDims(
+        getInput(0)->template dims<DIM + NCIdx>());
+
+    outputDims[channelIdx] = outChannels();
+    outputDims[batchIdx] = inputDims[batchIdx];
+
+    for (std::size_t dim = 0; dim < DIM; ++dim) {
+        const DimSize_t kernelExtent =
+            dilationDims()[dim] * (kernelDims()[dim] - 1) + 1;
+        outputDims[dim + NCIdx] =
+            ((inputDims[dim + NCIdx] - 1) * strideDims()[dim]) + kernelExtent;
+    }
+
+    mOutputs[0]->resize(outputDims);
+    return true;
+}
+
+template <DimIdx_t DIM>
+std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
+ConvTranspose_Op<DIM>::computeReceptiveField(
+    const std::vector<DimSize_t> &firstEltDims,
+    const std::vector<DimSize_t> &outputDims,
+    const IOIndex_t outputIdx) const {
+
+    constexpr std::int8_t inBatchIdx = 0;
+    constexpr std::int8_t inChannelIdx = 1;
+    // DIM only defines the dimensions of the input, this defines channel &
+    // batch idx offset
+    constexpr std::int8_t NCChannels = 2;
+
+    if (outputIdx != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+                             "{}: Operator has got only one output Tensor.",
+                             type());
+    }
+    if (firstEltDims.size() != outputDims.size()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+                             "{}: outputDims and firstEltDims should have the "
+                             "size of the output Tensor dimensions.",
+                             type());
+    }
+    if ((outputDims.size() != (DIM + NCChannels)) || !dimsForwarded()) {
+        AIDGE_THROW_OR_ABORT(
+            std::runtime_error,
+            "Given outputDim out of range or output dim not forwarded yet.");
+    }
+
+    // Offset
+    auto inputIdxDims = firstEltDims; // batch idx is the same
+    // each channel is used so start with the first one
+    inputIdxDims[inChannelIdx] = 0;
+
+    // Error checking : parameters will not create an out of bound error.
+    for (DimIdx_t i = 0; i < (DIM + NCChannels); ++i) {
+        AIDGE_ASSERT(
+            ((outputDims[i] + firstEltDims[i]) <=
+             mOutputs[outputIdx]->template dims<DIM + NCChannels>()[i]) &&
+                outputDims[i] != 0,
+            "{}: Given outputDim out of range for dimension {} ({} + {})",
+            type(),
+            static_cast<std::size_t>(i),
+            firstEltDims[i],
+            outputDims[i]);
+    }
+
+    ////////////////////////
+    // Input
+    std::vector<DimSize_t> inputDims{outputDims[inBatchIdx],
+                                     getInput(0)->dims()[inChannelIdx]};
+    for (DimIdx_t i = 0; i < DIM; ++i) {
+        const DimSize_t kernelExtent =
+            dilationDims()[i] * (kernelDims()[i] - 1) + 1;
+
+        inputDims.push_back(
+            1 +
+            static_cast<DimSize_t>(floor(
+                static_cast<float>(outputDims[i + NCChannels] - kernelExtent) /
+                static_cast<float>(strideDims()[i]))));
+        inputIdxDims[NCChannels + i] *= strideDims()[i];
+    }
+
+    ////////////////////////
+    // Weight
+    // same output value, every input channel is used
+    std::vector<DimSize_t> weightDims{outputDims[inChannelIdx],
+                                      getInput(0)->dims()[inChannelIdx]};
+    for (std::size_t i = 0; i < DIM; ++i) {
+        weightDims.push_back(kernelDims()[i]);
+    }
+    std::vector<DimSize_t> weightIdxDims =
+        std::vector<DimSize_t>(DIM + NCChannels, 0);
+    weightIdxDims[0] = firstEltDims[inChannelIdx];
+
+    ////////////////////////
+    // Result
+    std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>> res;
+    res.push_back(
+        std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims,
+                                                                  inputDims));
+    res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(
+        weightIdxDims,
+        weightDims));
+
+    ////////////////////////
+    // Bias
+    if (getInput(2)) {
+        const std::vector<DimSize_t> biasDims{
+            outputDims[1]}; // the number of output channel
+        const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
+        res.push_back(
+            std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(
+                biasIdxDims,
+                biasDims));
+    }
+    return res;
+}
+
+template <DimIdx_t DIM>
+void ConvTranspose_Op<DIM>::setBackend(const std::string &name,
+                                       DeviceIdx_t device) {
+    SET_IMPL_MACRO(ConvTranspose_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+
+    // By default, automatically set backend for weight and bias inputs
+    if (getInput(1)) {
+        getInput(1)->setBackend(name, device);
+    } else {
+        Log::notice("ConvTranspose_Op::setBackend(): could not set backend "
+                    "for weight input, because input is not connected");
+    }
+
+    if (getInput(2)) {
+        // Bias is optional
+        getInput(2)->setBackend(name, device);
+    }
+}
+
+template <DimIdx_t DIM>
+std::set<std::string> ConvTranspose_Op<DIM>::getAvailableBackends() const {
+    return Registrar<ConvTranspose_Op<DIM>>::getKeys();
+}
+
+template class ConvTranspose_Op<1>;
+template class ConvTranspose_Op<2>;
+
+/////////////////////////////////////////////////////////////
+
+template <std::array<DimIdx_t, 1>::size_type DIM>
+std::shared_ptr<Node>
+ConvTranspose(const DimSize_t &inChannels,
+              const DimSize_t &outChannels,
+              const std::array<DimSize_t, DIM> &kernelDims,
+              const std::array<DimSize_t, DIM> &strideDims,
+              const std::array<DimSize_t, DIM> &dilationDims,
+              const bool noBias,
+              const std::string &name) {
+    AIDGE_ASSERT(DIM <= MaxDim,
+                 "Too many kernel dimensions required by ConvTranspose, not supported");
+    AIDGE_ASSERT(
+        !std::any_of(dilationDims.cbegin(),
+                     dilationDims.cend(),
+                     [](DimSize_t val) { return val <= 0; }),
+        "ConvTranspose : at least one of the dilation dimensions is <= 0, expecting "
+        "strictly positive values. Got {}",
+        dilationDims);
+    AIDGE_ASSERT(!std::any_of(strideDims.cbegin(),
+                              strideDims.cend(),
+                              [](DimSize_t val) { return val <= 0; }),
+                 "ConvTranspose : at least one of the stride dimensions is <= 0, expecting "
+                 "strictly positive values. Got {}",
+                 strideDims);
+    auto conv = std::make_shared<Node>(
+        std::make_shared<ConvTranspose_Op<static_cast<DimIdx_t>(DIM)>>(
+            kernelDims,
+            strideDims,
+            dilationDims),
+        name);
+    addProducer(conv,
+                1,
+                append(inChannels, append(outChannels, kernelDims)),
+                "w");
+    if (!noBias) {
+        addProducer(conv, 2, {outChannels}, "b"); // already sets bias dims
+    }
+    return conv;
+}
+
+template std::shared_ptr<Node>
+ConvTranspose<1>(const DimSize_t &,
+                 const DimSize_t &,
+                 const std::array<DimSize_t, 1> &,
+                 const std::array<DimSize_t, 1> &,
+                 const std::array<DimSize_t, 1> &,
+                 const bool,
+                 const std::string &);
+
+template std::shared_ptr<Node>
+ConvTranspose<2>(const DimSize_t &,
+                 const DimSize_t &,
+                 const std::array<DimSize_t, 2> &,
+                 const std::array<DimSize_t, 2> &,
+                 const std::array<DimSize_t, 2> &,
+                 const bool,
+                 const std::string &);
+
+template <DimIdx_t DIM>
+inline std::shared_ptr<Node>
+ConvTranspose(const DimSize_t &inChannels,
+              const DimSize_t &outChannels,
+              DimSize_t const (&kernelDims)[DIM],
+              const std::array<DimSize_t, DIM> &strideDims,
+              const std::array<DimSize_t, DIM> &dilationDims,
+              const bool noBias,
+              const std::string &name) {
+    return ConvTranspose<DIM>(inChannels,
+                              outChannels,
+                              to_array(kernelDims),
+                              strideDims,
+                              dilationDims,
+                              noBias,
+                              name);
+}
+
+template std::shared_ptr<Node>
+ConvTranspose<1>(const DimSize_t &,
+                 const DimSize_t &,
+                 DimSize_t const (&)[1],
+                 const std::array<DimSize_t, 1> &,
+                 const std::array<DimSize_t, 1> &,
+                 const bool,
+                 const std::string &);
+
+template std::shared_ptr<Node>
+ConvTranspose<2>(const DimSize_t &,
+                 const DimSize_t &,
+                 DimSize_t const (&)[2],
+                 const std::array<DimSize_t, 2> &,
+                 const std::array<DimSize_t, 2> &,
+                 const bool,
+                 const std::string &);
+
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_ConvTranspose.cpp b/unit_tests/operator/Test_ConvTranspose.cpp
new file mode 100644
index 000000000..f284be237
--- /dev/null
+++ b/unit_tests/operator/Test_ConvTranspose.cpp
@@ -0,0 +1,241 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/ConvTranspose.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+template <DimSize_t DIM>
+static std::shared_ptr<OperatorTensor> setupTestConvTransposeForwardDims(
+    const DimSize_t batchSize,
+    const DimSize_t inChannels,
+    const DimSize_t outChannels,
+    const DimSize_t kernelDims,
+    const DimSize_t inDataSize,
+    const std::array<DimSize_t, DIM> strideDims,
+    const std::array<DimSize_t, DIM> dilationDims) {
+
+    auto convTrans = ConvTranspose(inChannels,
+                                   outChannels,
+                                   {kernelDims},
+                                   strideDims,
+                                   dilationDims,
+                                   false,
+                                   "convtranspose_test");
+
+    auto op =
+        std::dynamic_pointer_cast<OperatorTensor>(convTrans->getOperator());
+
+    auto input = std::make_shared<Tensor>(
+        std::vector<DimSize_t>({batchSize, inChannels, inDataSize}));
+
+    op->associateInput(0, input);
+    return op;
+}
+
+/***********************************************************
+ * This test is based on the assumption that conv and
+ * ConvTranspose are the exact opposite operations
+ * Hence :
+ * Conv::computeReceptiveField() <=> ConvTranspose::forwardDims()
+ * Conv::forwardDims() <=> ConvTranspose::computeReceptiveField()
+ *
+ * This means that this test relies on Conv Operator's tests
+ * properties.
+ ***********************************************************/
+TEST_CASE("[core/operator] ConvTranspose_Op(forwardDims)",
+          "[Operator][forwardDims][ConvTranspose]") {
+
+    SECTION("a (conv()=>convTranspose()) graph has inputs & outputs of "
+            "identical dimensions") {
+        auto prod = Producer({16, 3, 224, 224}, "dataProvider");
+
+        // output dims: {16, 32, 220, 220}
+        auto conv =
+            Conv(3, 32, {5, 5}, "conv");
+
+        auto convTranspose = ConvTranspose(32,
+                                           3,
+                                           {5, 5},
+                                           std::array<DimSize_t, 2>({1, 1}),
+                                           std::array<DimSize_t, 2>({1, 1}),
+                                           false,
+                                           "convtranspose");
+
+        auto g = std::make_shared<GraphView>("TestGraph");
+
+        prod->addChild(conv, 0);
+        g->add(conv);
+        g->addChild(convTranspose, conv, 0);
+        g->forwardDims();
+
+        auto prodOp =
+            std::dynamic_pointer_cast<OperatorTensor>(prod->getOperator());
+        auto op1 =
+            std::dynamic_pointer_cast<OperatorTensor>(conv->getOperator());
+        auto op2 = std::dynamic_pointer_cast<OperatorTensor>(
+            convTranspose->getOperator());
+
+        REQUIRE(g->forwardDims({prodOp->getOutput(0)->dims()}, true));
+        CHECK(prodOp->getOutput(0)->dims() ==
+              std::dynamic_pointer_cast<OperatorTensor>(
+                  g->getOrderedOutputs()[0].first->getOperator())
+                  ->getOutput(0)
+                  ->dims());
+    }
+    SECTION("1D") {
+        constexpr DimSize_t DIM = 1;
+        SECTION("Test with reference output") {
+            SECTION("no stride / no dilation") {
+                constexpr DimSize_t batchSize = 2;
+                constexpr DimSize_t inChannels = 3;
+                constexpr DimSize_t outChannels = 4;
+                constexpr DimSize_t kernelDims = 2;
+
+                constexpr std::array<DimSize_t, DIM> strideDims{1};
+                constexpr std::array<DimSize_t, DIM> dilationDims{1};
+
+                constexpr DimSize_t inDataSize = 6;
+                constexpr DimSize_t outDataSize = 7;
+
+                auto op = setupTestConvTransposeForwardDims(batchSize,
+                                                            inChannels,
+                                                            outChannels,
+                                                            kernelDims,
+                                                            inDataSize,
+                                                            strideDims,
+                                                            dilationDims);
+                REQUIRE(op->forwardDims());
+
+                CHECK(op->getOutput(0)->dims() ==
+                      std::vector<DimSize_t>(
+                          {batchSize, outChannels, outDataSize}));
+            }
+        }
+        SECTION("stride / no dilation") {
+            constexpr DimSize_t batchSize = 2;
+            constexpr DimSize_t inChannels = 3;
+            constexpr DimSize_t outChannels = 4;
+            constexpr DimSize_t kernelDims = 2;
+
+            constexpr std::array<DimSize_t, DIM> strideDims{3};
+            constexpr std::array<DimSize_t, DIM> dilationDims{1};
+
+            constexpr DimSize_t inDataSize = 6;
+            constexpr DimSize_t outDataSize = 17;
+
+            auto op = setupTestConvTransposeForwardDims(batchSize,
+                                                        inChannels,
+                                                        outChannels,
+                                                        kernelDims,
+                                                        inDataSize,
+                                                        strideDims,
+                                                        dilationDims);
+
+            REQUIRE(op->forwardDims());
+
+            CHECK(
+                op->getOutput(0)->dims() ==
+                std::vector<DimSize_t>({batchSize, outChannels, outDataSize}));
+        }
+        SECTION("no stride / dilation") {
+            constexpr DimSize_t batchSize = 2;
+            constexpr DimSize_t inChannels = 3;
+            constexpr DimSize_t outChannels = 4;
+            constexpr DimSize_t kernelDims = 2;
+
+            constexpr std::array<DimSize_t, DIM> strideDims{1};
+            constexpr std::array<DimSize_t, DIM> dilationDims{3};
+
+            constexpr DimSize_t inDataSize = 6;
+            constexpr DimSize_t outDataSize = 9;
+
+            auto op = setupTestConvTransposeForwardDims(batchSize,
+                                                        inChannels,
+                                                        outChannels,
+                                                        kernelDims,
+                                                        inDataSize,
+                                                        strideDims,
+                                                        dilationDims);
+
+            REQUIRE(op->forwardDims());
+
+            CHECK(
+                op->getOutput(0)->dims() ==
+                std::vector<DimSize_t>({batchSize, outChannels, outDataSize}));
+        }
+        SECTION("stride / dilation") {
+            constexpr DimSize_t batchSize = 2;
+            constexpr DimSize_t inChannels = 3;
+            constexpr DimSize_t outChannels = 4;
+            constexpr DimSize_t kernelDims = 4;
+
+            constexpr std::array<DimSize_t, DIM> strideDims{3};
+            constexpr std::array<DimSize_t, DIM> dilationDims{3};
+
+            constexpr DimSize_t inDataSize = 15;
+            constexpr DimSize_t outDataSize = 52;
+
+            auto op = setupTestConvTransposeForwardDims(batchSize,
+                                                        inChannels,
+                                                        outChannels,
+                                                        kernelDims,
+                                                        inDataSize,
+                                                        strideDims,
+                                                        dilationDims);
+
+            REQUIRE(op->forwardDims());
+
+            CHECK(
+                op->getOutput(0)->dims() ==
+                std::vector<DimSize_t>({batchSize, outChannels, outDataSize}));
+        }
+        SECTION("no stride / no dilation (duplicate of reference case)") {
+            constexpr DimSize_t batchSize = 2;
+            constexpr DimSize_t inChannels = 3;
+            constexpr DimSize_t outChannels = 4;
+
+            constexpr DimSize_t kernelDims = 2;
+
+            constexpr DimSize_t inDataSize = 6;
+            constexpr DimSize_t outDataSize = 7;
+
+            constexpr std::array<DimSize_t, DIM> strideDims{1};
+            constexpr std::array<DimSize_t, DIM> dilationDims{1};
+
+            auto op = setupTestConvTransposeForwardDims(batchSize,
+                                                        inChannels,
+                                                        outChannels,
+                                                        kernelDims,
+                                                        inDataSize,
+                                                        strideDims,
+                                                        dilationDims);
+
+            REQUIRE(op->forwardDims());
+
+            CHECK(
+                op->getOutput(0)->dims() ==
+                std::vector<DimSize_t>({batchSize, outChannels, outDataSize}));
+        }
+    }
+}
+
+} // namespace Aidge
-- 
GitLab


From dc683cfd815aa8404fedd5fb854bf10e963dc55b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gr=C3=A9goire=20KUBLER?= <gregoire.kubler@proton.me>
Date: Thu, 27 Feb 2025 16:07:10 +0000
Subject: [PATCH 02/10] feat : [ADD] PaddedConvTranspose operator

---
 include/aidge/operator/MetaOperatorDefs.hpp   |  42 +++++
 .../operator/pybind_MetaOperatorDefs.cpp      |  69 ++++++-
 .../MetaOperatorDefs/PaddedConvTranspose.cpp  | 177 ++++++++++++++++++
 3 files changed, 286 insertions(+), 2 deletions(-)
 create mode 100644 src/operator/MetaOperatorDefs/PaddedConvTranspose.cpp

diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 975fcffaa..ef0879268 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -211,6 +211,48 @@ PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
 
 ////////////////////////////////////////////////////////////////////////////////
 
+template <std::array<DimSize_t, 1>::size_type DIM>
+extern std::shared_ptr<Node> PaddedConvTranspose(
+                                  const DimSize_t &inChannels,
+                                  const DimSize_t &outChannels,
+                                  const std::array<DimSize_t, DIM> &kernelDims,
+                                  const std::array<DimSize_t, DIM> &strideDims =
+                                      create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, DIM> &dilationDims =
+                                      create_array<DimSize_t,DIM>(1),
+                                  const bool noBias = false,
+                                  const std::array<DimSize_t, 2*DIM> &paddingDims =
+                                      create_array<DimSize_t,2*DIM>(0),
+                                  const std::string& name = "");
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+extern std::shared_ptr<Node> PaddedConvTranspose(const DimSize_t &inChannels,
+                                  const DimSize_t &outChannels,
+                                  DimSize_t const (&kernel_dims)[DIM],
+                                  const std::array<DimSize_t, DIM> &strideDims =
+                                      create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, DIM> &dilationDims =
+                                      create_array<DimSize_t,DIM>(1),
+                                  const bool noBias = false,
+                                  const std::array<DimSize_t, 2*DIM> &paddingDims =
+                                      create_array<DimSize_t,2*DIM>(0),
+                                  const std::string& name = "");
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+extern std::shared_ptr<MetaOperator_Op> PaddedConvTranspose_Op(const DimSize_t &inChannels,
+                                  const DimSize_t &outChannels,
+                                  const std::array<DimSize_t, DIM> &kernelDims,
+                                  const std::array<DimSize_t, DIM> &strideDims =
+                                      create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, DIM> &dilationDims =
+                                      create_array<DimSize_t,DIM>(1),
+                                  const bool noBias = false,
+                                  const std::array<DimSize_t, 2*DIM> &paddingDims =
+                                      create_array<DimSize_t,2*DIM>(0),
+                                  const std::string& name = "");
+
+////////////////////////////////////////////////////////////////////////////////
+
 /**
  * @brief Creates a padded max pooling operation.
  *
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index 9e266cfe2..182a5edaa 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -14,9 +14,7 @@
 
 #include <string>
 #include <vector>
-#include <array>
 
-#include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/MetaOperatorDefs.hpp"
 #include "aidge/utils/Types.h"
 
@@ -193,6 +191,71 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
     )mydelimiter");
 }
 
+template <DimIdx_t DIM> void declare_PaddedConvTransposeOp(py::module &m) {
+  m.def(("PaddedConvTranspose" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t &in_channels,
+                                                         const DimSize_t &out_channels,
+                                                         const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &dilation_dims,
+                                                         const bool no_bias,
+                                                         const std::vector<DimSize_t> &padding_dims,
+                                                         const std::string& name)
+    {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match 2*DIM [{}]", padding_dims.size(), 2*DIM);
+
+        return PaddedConvTranspose<DIM>(in_channels,
+                                        out_channels,
+                                        to_array<DIM>(kernel_dims.begin()),
+                                        to_array<DIM>(stride_dims.begin()),
+                                        to_array<DIM>(dilation_dims.begin()),
+                                        no_bias,
+                                        to_array<2*DIM>(padding_dims.begin()),
+                                        name);
+    }, py::arg("in_channels"),
+       py::arg("out_channels"),
+       py::arg("kernel_dims"),
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("no_bias")= false,
+       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
+       py::arg("name") = "");
+    m.def(("PaddedConvTranspose" + std::to_string(DIM) + "DOp").c_str(), [](
+                                                         const DimSize_t &inChannels,
+                                                         const DimSize_t &outChannels,
+                                                         const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &dilation_dims,
+                                                         const bool no_bias,
+                                                         const std::vector<DimSize_t> &padding_dims,
+	                                                     const std::string &name)
+    {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match 2*DIM [{}]", padding_dims.size(), 2*DIM);
+
+        return PaddedConvTranspose_Op<DIM>(inChannels,
+                                           outChannels,
+                                           to_array<DIM>(kernel_dims.begin()),
+                                           to_array<DIM>(stride_dims.begin()),
+                                           to_array<DIM>(dilation_dims.begin()),
+                                           no_bias,
+                                           to_array<2*DIM>(padding_dims.begin()),
+	                                       name);
+    }, py::arg("in_channels"),
+	   py::arg("out_channels"),
+	   py::arg("kernel_dims"),
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+	   py::arg("no_bias") = false,
+       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
+	   py::arg("name") = "");
+}
+
+
 template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
   m.def(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                          const std::string& name,
@@ -446,6 +509,8 @@ void init_MetaOperatorDefs(py::module &m) {
   declare_PaddedConvDepthWiseOp<1>(m);
   declare_PaddedConvDepthWiseOp<2>(m);
 //   declare_PaddedConvDepthWiseOp<3>(m);
+  declare_PaddedConvTransposeOp<1>(m);
+  declare_PaddedConvTransposeOp<2>(m);
 //   declare_PaddedAvgPoolingOp<1>(m);
   declare_PaddedAvgPoolingOp<2>(m);
 //   declare_PaddedAvgPoolingOp<3>(m);
diff --git a/src/operator/MetaOperatorDefs/PaddedConvTranspose.cpp b/src/operator/MetaOperatorDefs/PaddedConvTranspose.cpp
new file mode 100644
index 000000000..1d43e891a
--- /dev/null
+++ b/src/operator/MetaOperatorDefs/PaddedConvTranspose.cpp
@@ -0,0 +1,177 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MetaOperatorDefs.hpp"
+
+#include <array>
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/ConvTranspose.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+//////////////////////////////////
+// Node functions
+//////////////////////////////////
+
+template <std::array<DimIdx_t, 1>::size_type DIM>
+std::shared_ptr<Node>
+PaddedConvTranspose(const DimSize_t &inChannels,
+                    const DimSize_t &outChannels,
+                    const std::array<DimSize_t, DIM> &kernelDims,
+                    const std::array<DimSize_t, DIM> &strideDims,
+                    const std::array<DimSize_t, DIM> &dilationDims,
+                    const bool noBias,
+                    const std::array<DimSize_t, 2 * DIM> &paddingDims,
+                    const std::string &name) {
+    auto graph = Sequential(
+        {Pad<DIM>(paddingDims, (!name.empty()) ? name + "_pad" : ""),
+         ConvTranspose(inChannels,
+                       outChannels,
+                       kernelDims,
+                       strideDims,
+                       dilationDims,
+                       noBias,
+                       !name.empty() ? name + "_convTranspose" : "")});
+    auto metaOpNode = MetaOperator(
+        ("PaddedConvTranspose" + std::to_string(DIM) + "D").c_str(),
+        graph,
+        {},
+        name);
+    addProducer(metaOpNode,
+                1,
+                append(outChannels, append(inChannels, kernelDims)),
+                "w");
+    if (!noBias) {
+        addProducer(metaOpNode, 2, {outChannels}, "b");
+    }
+    return metaOpNode;
+}
+
+template std::shared_ptr<Node>
+PaddedConvTranspose<1>(const DimSize_t &,
+                       const DimSize_t &,
+                       const std::array<DimSize_t, 1> &,
+                       const std::array<DimSize_t, 1> &,
+                       const std::array<DimSize_t, 1> &,
+                       const bool,
+                       const std::array<DimSize_t, 2> &,
+                       const std::string &);
+template std::shared_ptr<Node>
+PaddedConvTranspose<2>(const DimSize_t &,
+                       const DimSize_t &,
+                       const std::array<DimSize_t, 2> &,
+                       const std::array<DimSize_t, 2> &,
+                       const std::array<DimSize_t, 2> &,
+                       const bool,
+                       const std::array<DimSize_t, 4> &,
+                       const std::string &);
+
+template <std::array<DimIdx_t, 1>::size_type DIM>
+extern std::shared_ptr<Node>
+PaddedConvTranspose(const DimSize_t &inChannels,
+                    const DimSize_t &outChannels,
+                    DimSize_t const (&kernelDims)[DIM],
+                    const std::array<DimSize_t, DIM> &strideDims,
+                    const std::array<DimSize_t, DIM> &dilationDims,
+                    const bool noBias,
+                    const std::array<DimSize_t, 2 * DIM> &paddingDims,
+                    const std::string &name) {
+    return PaddedConvTranspose<DIM>(inChannels,
+                                    outChannels,
+                                    to_array(kernelDims),
+                                    strideDims,
+                                    dilationDims,
+                                    noBias,
+                                    paddingDims,
+                                    name);
+}
+
+template std::shared_ptr<Node>
+PaddedConvTranspose<1>(const DimSize_t &,
+                       const DimSize_t &,
+                       DimSize_t const (&)[1],
+                       const std::array<DimSize_t, 1> &,
+                       const std::array<DimSize_t, 1> &,
+                       const bool,
+                       const std::array<DimSize_t, 2> &,
+                       const std::string &);
+template std::shared_ptr<Node>
+PaddedConvTranspose<2>(const DimSize_t &,
+                       const DimSize_t &,
+                       const DimSize_t (&)[2],
+                       const std::array<DimSize_t, 2> &,
+                       const std::array<DimSize_t, 2> &,
+                       const bool,
+                       const std::array<DimSize_t, 4> &,
+                       const std::string &);
+
+//////////////////////////////////
+// Operator functions
+//////////////////////////////////
+
+template <std::array<DimIdx_t, 1>::size_type DIM>
+std::shared_ptr<MetaOperator_Op>
+PaddedConvTranspose_Op(const DimSize_t &inChannels,
+                       const DimSize_t &outChannels,
+                       const std::array<DimSize_t, DIM> &kernelDims,
+                       const std::array<DimSize_t, DIM> &strideDims,
+                       const std::array<DimSize_t, DIM> &dilationDims,
+                       const bool noBias,
+                       const std::array<DimSize_t, 2 * DIM> &paddingDims,
+                       const std::string &name) {
+    auto pad = Pad<DIM>(paddingDims,
+                        !name.empty() ? name + "_pad" : "pad",
+                        PadBorderType::Constant,
+                        0.0);
+
+    auto convTranspose = ConvTranspose(
+        inChannels,
+        outChannels,
+        kernelDims,
+        strideDims,
+        dilationDims,
+        noBias,
+        !name.empty() ? name + "_convtranspose" : "convTranspose");
+
+    return std::make_shared<MetaOperator_Op>(
+        ("PaddedConvTranspose" + std::to_string(DIM) + "D").c_str(),
+        Sequential({pad, convTranspose}));
+}
+
+template std::shared_ptr<MetaOperator_Op>
+PaddedConvTranspose_Op<1>(const DimSize_t &,
+                          const DimSize_t &,
+                          const std::array<DimSize_t, 1> &,
+                          const std::array<DimSize_t, 1> &,
+                          const std::array<DimSize_t, 1> &,
+                          const bool,
+                          const std::array<DimSize_t, 2> &,
+                          const std::string &);
+
+template std::shared_ptr<MetaOperator_Op>
+PaddedConvTranspose_Op<2>(const DimSize_t &,
+                          const DimSize_t &,
+                          const std::array<DimSize_t, 2> &,
+                          const std::array<DimSize_t, 2> &,
+                          const std::array<DimSize_t, 2> &,
+                          const bool,
+                          const std::array<DimSize_t, 4> &,
+                          const std::string &);
+
+} // namespace Aidge
-- 
GitLab


From 853570d578f1bd0ddcbc3690b9b9a3648d53b530 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gr=C3=A9goire=20KUBLER?= <gregoire.kubler@proton.me>
Date: Tue, 21 Jan 2025 14:15:52 +0100
Subject: [PATCH 03/10] chore : [Conv] updated the method for attribute
 accession to ease code reading

---
 src/operator/Conv.cpp | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index d69aad616..8d5f33322 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -70,14 +70,14 @@ bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     unsigned int in_dims_index = (getInput(0)->dataFormat() == Aidge::DataFormat::NHWC) ? 1 : 2;
     unsigned int out_dims_index = (getOutput(0)->dataFormat() == Aidge::DataFormat::NHWC) ? 1 : 2;
 
-    for (std::size_t dim = 0; dim < mAttributes->template getAttr<Attr::KernelDims>().size(); ++dim) {
-        const DimSize_t kernelExtent = mAttributes->template getAttr<Attr::DilationDims>()[dim] *
-                                    (mAttributes->template getAttr<Attr::KernelDims>()[dim] - 1) +
+    for (std::size_t dim = 0; dim < kernelDims().size(); ++dim) {
+        const DimSize_t kernelExtent = dilationDims()[dim] *
+                                    (kernelDims()[dim] - 1) +
                                     1;
 
         outputDims[dim + out_dims_index] = 1 + static_cast<DimSize_t>(
             floor(static_cast<float>(inputDims[dim + in_dims_index] - kernelExtent) /
-                static_cast<float>(mAttributes->template getAttr<Attr::StrideDims>()[dim]))
+                static_cast<float>(strideDims()[dim]))
         );
     }
 
@@ -123,18 +123,18 @@ Aidge::Conv_Op<DIM>::computeReceptiveField(
         std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
         for (DimIdx_t i = 0; i < DIM; ++i) {
             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                        * mAttributes->template getAttr<Attr::StrideDims>()[static_cast<std::size_t>(i)]
+                        * strideDims()[static_cast<std::size_t>(i)]
                         + 1
-                        + (mAttributes->template getAttr<Attr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                        * mAttributes->template getAttr<Attr::DilationDims>()[static_cast<std::size_t>(i)]);
-            inputIdxDims[2+i] *= mAttributes->template getAttr<Attr::StrideDims>()[static_cast<std::size_t>(i)];
+                        + (kernelDims()[static_cast<std::size_t>(i)] - 1)
+                        * dilationDims()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= strideDims()[static_cast<std::size_t>(i)];
         }
 
         // Weight
         // same output value, every input channel is used
         std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
         for (std::size_t i = 0; i < DIM; ++i) {
-            weightDims.push_back(mAttributes->template getAttr<Attr::KernelDims>()[i]);
+            weightDims.push_back(kernelDims()[i]);
         }
         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
         weightIdxDims[0] = firstEltDims[1];
@@ -242,4 +242,4 @@ std::shared_ptr<Aidge::Node> Aidge::Conv(
 }
 
 template std::shared_ptr<Aidge::Node> Aidge::Conv<1>(Aidge::DimSize_t, Aidge::DimSize_t, Aidge::DimSize_t const (&)[1], const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, bool);
-template std::shared_ptr<Aidge::Node> Aidge::Conv<2>(Aidge::DimSize_t, Aidge::DimSize_t, Aidge::DimSize_t const (&)[2], const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
\ No newline at end of file
+template std::shared_ptr<Aidge::Node> Aidge::Conv<2>(Aidge::DimSize_t, Aidge::DimSize_t, Aidge::DimSize_t const (&)[2], const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
-- 
GitLab


From d2a8b90f35beba65128ecf24adae3ae8caab1ff9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gr=C3=A9goire=20KUBLER?= <gregoire.kubler@proton.me>
Date: Tue, 17 Dec 2024 17:46:28 +0100
Subject: [PATCH 04/10] fix : [Conv] added check to ensure that dilation &
 stride values were all >= 1

Also better warning message for conv operator constructor
---
 include/aidge/operator/Conv.hpp | 13 ++++++++++---
 src/operator/Conv.cpp           | 15 +++++++++++++++
 2 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 283d0136e..0db1e8343 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -12,6 +12,7 @@
 #ifndef AIDGE_CORE_OPERATOR_CONV_H_
 #define AIDGE_CORE_OPERATOR_CONV_H_
 
+#include <algorithm>
 #include <array>
 #include <cstddef>  // std::size_t
 #include <string>
@@ -234,10 +235,16 @@ std::shared_ptr<Node> Conv(DimSize_t inChannels,
                            bool noBias = false);
 
 /**
- * @brief Helper function for Conv with C-style arrays.
+ * @brief Perform a convolution on the input Tensor.
  *
- * This helper function allows automatic template deduction of the number of dimensions (DIM)
- * based on the kernel dimensions provided.
+ * @tparam DIM Number of dimensions for the feature map.
+ * @param inChannels Number of input channels.
+ * @param outChannels Number of output channels.
+ * @param kernelDims Dimensions of the kernel. Must be the same number of dimensions as the feature map.
+ * @param name Name of the operator.
+ * @param strideDims Dimensions of the stride attribute. Must be the same number of dimensions as the feature map.
+ * @param dilationDims Dimensions of the dilation attribute. Must be the same number of dimensions as the feature map.
+ * @return std::shared_ptr<Node> A Node containing the operator.
  */
 template <DimSize_t DIM>
 std::shared_ptr<Node> Conv(
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 8d5f33322..3abd144f8 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -237,6 +237,21 @@ std::shared_ptr<Aidge::Node> Aidge::Conv(
     const std::array<Aidge::DimSize_t, DIM> &dilationDims,
     bool noBias)
 {
+    AIDGE_ASSERT(DIM<=MaxDim,"{}: Too many kernel dimensions required, maximum allowed : {} ", Conv_Op<DIM>::Type, MaxDim);
+    AIDGE_ASSERT(!std::any_of(dilationDims.cbegin(),
+                              dilationDims.cend(),
+                              [](DimSize_t val) { return val == 0; }),
+                 "Conv : at least of of the dilation dimension is 0, expecting "
+                 "strictly positive values. Got {}",
+                 Conv_Op<DIM>::Type,
+                 dilationDims);
+    AIDGE_ASSERT(!std::any_of(strideDims.cbegin(),
+                              strideDims.cend(),
+                              [](DimSize_t val) { return val == 0; }),
+                 "{}: at least one of the stride dimension is 0, expecting "
+                 "strictly positive values. Got {}.",
+                 Conv_Op<DIM>::Type,
+                 strideDims);
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
     return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias);
 }
-- 
GitLab


From cd3da81c76dfd7c0102315c364a5dafa1139f0b6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gr=C3=A9goire=20KUBLER?= <gregoire.kubler@proton.me>
Date: Thu, 27 Feb 2025 16:13:34 +0000
Subject: [PATCH 05/10] feat : [Conv] added check to ensure dilation & stride
 attributes values are strictly positive

---
 src/operator/Conv.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 3abd144f8..42c47cb81 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -240,15 +240,15 @@ std::shared_ptr<Aidge::Node> Aidge::Conv(
     AIDGE_ASSERT(DIM<=MaxDim,"{}: Too many kernel dimensions required, maximum allowed : {} ", Conv_Op<DIM>::Type, MaxDim);
     AIDGE_ASSERT(!std::any_of(dilationDims.cbegin(),
                               dilationDims.cend(),
-                              [](DimSize_t val) { return val == 0; }),
-                 "Conv : at least of of the dilation dimension is 0, expecting "
+                              [](DimSize_t val) { return val <= 0; }),
+                 "Conv : at least of of the dilation dimension is <= 0, expecting "
                  "strictly positive values. Got {}",
                  Conv_Op<DIM>::Type,
                  dilationDims);
     AIDGE_ASSERT(!std::any_of(strideDims.cbegin(),
                               strideDims.cend(),
-                              [](DimSize_t val) { return val == 0; }),
-                 "{}: at least one of the stride dimension is 0, expecting "
+                              [](DimSize_t val) { return val <= 0; }),
+                 "{}: at least one of the stride dimension is 0,  <=expecting "
                  "strictly positive values. Got {}.",
                  Conv_Op<DIM>::Type,
                  strideDims);
-- 
GitLab


From 5e91f6697ef1d0aa385563bd43291f7cbfb45493 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gr=C3=A9goire=20KUBLER?= <gregoire.kubler@proton.me>
Date: Tue, 21 Jan 2025 14:21:23 +0100
Subject: [PATCH 06/10] feat : approxEqual, better error checking & error
 messages

replaced tensor->size by tensor->dims comparison
replaced fmt::print by log::error
replaced assert with AIDGE_ASSERT
---
 include/aidge/utils/TensorUtils.hpp | 51 ++++++++++++++++++-----------
 1 file changed, 32 insertions(+), 19 deletions(-)

diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp
index 794abf763..a62850c73 100644
--- a/include/aidge/utils/TensorUtils.hpp
+++ b/include/aidge/utils/TensorUtils.hpp
@@ -11,25 +11,25 @@
 
 #ifndef AIDGE_CORE_UTILS_TENSOR_UTILS_H_
 #define AIDGE_CORE_UTILS_TENSOR_UTILS_H_
-
-#include <cmath>  // std::abs
-
-#include "aidge/data/DataType.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/utils/ErrorHandling.hpp"
-#include "aidge/utils/Log.hpp"
+#include <cmath> // std::abs
+#include <fmt/base.h>
+#include "aidge/data/DataType.hpp" // NativeType_v
+#include "aidge/utils/Log.hpp"     // Log::error
 
 namespace Aidge {
 
 /**
- * @brief Compare two Aidge::Tensor value wise. The comparison function is:
+ * @brief Compare two :cpp:class:`Aidge::Tensor` value wise. The comparison
+ * function is:
  *
  * |t1-t2| <= absolute + relative * |t2|
  *
  * If a tensor value is different from the other tensor return False
  * If the tensor does not have the same size, return False
  * If the datatype is not the same between each tensor return False
- * If the templated type does not correspond to the datatype of each tensor, raise an assertion error
+ * If the templated type does not correspond to the datatype of each tensor,
+ * raise an assertion error
  *
  * @tparam T1 should correspond to the type of the first tensor, defines part of the type for absolute and relative error
  * @tparam T2 should correspond to the type of the second tensor, defaults to T1
@@ -43,33 +41,42 @@ template <typename T1, typename T2 = T1>
 bool approxEq(const Tensor& t1, const Tensor& t2, float relative = 1e-5f, float absolute = 1e-8f) {
     // Check template type matches tensor datatype
     if (t1.dataType() != NativeType_v<T1>) {
-        Log::error("First tensor datatype ({}) does not match template type", t1.dataType());
+        Log::error("approxEq : First tensor datatype ({}) does not match"
+                   "template type (NativeType_v<T1> = {}) .",
+                    t1.dataType(),
+                    NativeType_v<T1>);
         return false;
     }
 
     if (t2.dataType() != NativeType_v<T2>) {
-        Log::error("Second tensor datatype ({}) does not match template type", t2.dataType());
+        Log::error("approxEq : Second tensor datatype ({}) does not match"
+                   "template type (NativeType_v<T1> = {}) .",
+                    t2.dataType(),
+                    NativeType_v<T1>);
         return false;
     }
 
     // Validate parameters
     if (relative < 0.0f) {
-        Log::error("Relative error must be non-negative (got {})", relative);
+        Log::error("approxEq : Relative error must be non-negative (got  : {}).",
+                    relative);
         return false;
     }
 
     if (absolute < 0.0f || absolute > 1.0f) {
-        Log::error("Absolute error must be between 0 and 1 (got {})", absolute);
+        Log::error("approxEq : Absolute error must be between 0 and 1 (got  : {}).",
+                    absolute);
         return false;
     }
 
-    // Check tensor sizes match
-    if (t1.size() != t2.size()) {
-        Log::error("Tensor sizes do not match: {} vs {}", t1.size(), t2.size());
+    if (t1.dims() != t2.dims()) {
+        Log::error("approxEq: Dimension mismatch.\nt1 :\t{}\nt2 :\t{}",
+                   t1.dims(),
+                   t2.dims());
         return false;
     }
 
-    // Compare values/
+    // Compare values
     for (size_t i = 0; i < t1.size(); ++i) {
         const auto val1 = t1.get<T1>(i);
         const auto val2 = t2.get<T2>(i);
@@ -77,9 +84,15 @@ bool approxEq(const Tensor& t1, const Tensor& t2, float relative = 1e-5f, float
         const float threshold = absolute + (relative * static_cast<float>(std::abs(val2)));
 
         if (diff > threshold) {
-            Log::notice("Tensor values differ at index {}: {} vs {} (diff: {}, threshold: {})\n"
-                "Tensor 1:\n{}\nTensor 2:\n{}",
-                i, val1, val2, diff, threshold, t1, t2);
+            Log::error("approxEq : value mismatch at index {} : {} != "
+                       "{} (diff: {}, threshold: {}) \nt1:\n{}\nt2:\n{}\n",
+                       i,
+                       val1,
+                       val2,
+                       diff,
+                       threshold,
+                       t1,
+                       t2);
             return false;
         }
     }
-- 
GitLab


From 2bdd06b2be812ce010683fbe5894861f891663da Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gr=C3=A9goire=20KUBLER?= <gregoire.kubler@proton.me>
Date: Thu, 27 Feb 2025 13:53:13 +0000
Subject: [PATCH 07/10] chore : [DOC] added pybind documentation to
 graphview.compile

---
 include/aidge/graph/GraphView.hpp         |  2 +-
 python_binding/graph/pybind_GraphView.cpp | 24 ++++++++++++++++++++++-
 2 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index be325cb96..c6e3322ae 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -239,7 +239,7 @@ public:
      * 3 - Assert data format (NCHW, NHWC, ...) of each Operator's input Tensor is
      * compatible with the selected kernel.
      * If not, add a Transpose Operator.
-     * 4 - Propagate Tensor dimensions through the consecutive Operators.
+     * 4 - Propagate Tensor dimensions through the consecutive Operators (also known as "forward dims").
      */
     void compile(const std::string& backend = "cpu",
                  const Aidge::DataType datatype = DataType::Float32,
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 31e3a0099..abb1a9eca 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -188,7 +188,29 @@ void init_GraphView(py::module& m) {
             ... ]
             >>> success = graph.forward_dims(input_dims)
           )mydelimiter")
-          .def("compile", &GraphView::compile, py::arg("backend"), py::arg("datatype"), py::arg("device") = 0, py::arg("dims")=std::vector<std::vector<DimSize_t>>())
+          .def("compile", &GraphView::compile, py::arg("backend"), py::arg("datatype"), py::arg("device") = 0, py::arg("dims")=std::vector<std::vector<DimSize_t>>(),
+                R"mydelimiter(
+                Sets the GraphView ready for computation in four steps:
+                1 - Assert input Tensors' datatype is compatible with each Operator's datatype.
+                If not, a conversion Operator is inserted.
+                2 - Assert input Tensors' backend is compatible with each Operator's backend.
+                If not, add a Transmitter Operator.
+                3 - Assert data format (NCHW, NHWC, ...) of each Operator's input Tensor is
+                compatible with the selected kernel.
+                If not, add a Transpose Operator.
+                4 - Propagate Tensor dimensions through the consecutive Operators (forward dims).
+
+              :param backend: backend on which the graph will run
+              :type backend: str
+              :param datatype: datatype of the graph
+              :type datatype: Aidge.DataType
+              :param device: backend device index
+              :type device: int
+              :param dims: input dimension to forward
+              :type dims: List[List[Int]]
+
+               
+               )mydelimiter")
           .def("__call__", &GraphView::operator(), py::arg("connectors"))
           .def("set_datatype", &GraphView::setDataType, py::arg("datatype"))
           .def("set_backend", &GraphView::setBackend, py::arg("backend"), py::arg("device") = 0)
-- 
GitLab


From ef5bccde705c026733fd1a6f9ff2cdcffef1cea1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gr=C3=A9goire=20KUBLER?= <gregoire.kubler@proton.me>
Date: Thu, 27 Feb 2025 16:05:01 +0000
Subject: [PATCH 08/10] fix : missing header in Types.h

---
 include/aidge/utils/Types.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/aidge/utils/Types.h b/include/aidge/utils/Types.h
index c6b8bd4b2..7296d049f 100644
--- a/include/aidge/utils/Types.h
+++ b/include/aidge/utils/Types.h
@@ -13,6 +13,7 @@
 #ifndef AIDGE_TYPES_H_
 #define AIDGE_TYPES_H_
 
+#include <array>
 #include <cstddef>
 #include <cstdint>
 #include <limits>
-- 
GitLab


From c1c35a65a6656c58a92c799d58411c67de7d70ab Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gr=C3=A9goire=20KUBLER?= <gregoire.kubler@proton.me>
Date: Tue, 4 Mar 2025 16:25:29 +0000
Subject: [PATCH 09/10] fix : Expand shape input was set as Param instead of
 Data

---
 include/aidge/operator/Expand.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/aidge/operator/Expand.hpp b/include/aidge/operator/Expand.hpp
index da87cfb56..95ca72a27 100644
--- a/include/aidge/operator/Expand.hpp
+++ b/include/aidge/operator/Expand.hpp
@@ -73,7 +73,7 @@ class Expand_Op
      */
     Expand_Op()
         : OperatorTensor(Type,
-                         {InputCategory::Data, InputCategory::Param},
+                         {InputCategory::Data, InputCategory::Data},
                          1) {}
 
     Expand_Op(const Expand_Op &op);
-- 
GitLab


From 0e89cde059a5f3e3d0da1582b2af9b3f9e2c7120 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gr=C3=A9goire=20KUBLER?= <gregoire.kubler@proton.me>
Date: Wed, 5 Mar 2025 11:56:13 +0000
Subject: [PATCH 10/10] feat : added getDataType() function to OperatorTensor

---
 include/aidge/operator/Operator.hpp       |  7 +++++++
 include/aidge/operator/OperatorTensor.hpp | 10 ++++++++++
 src/operator/OperatorTensor.cpp           | 19 +++++++++++++++++++
 3 files changed, 36 insertions(+)

diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 81a54620a..5a12cfea2 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -237,6 +237,13 @@ public:
      */
     void setBackend(const std::vector<std::pair<std::string, DeviceIdx_t>>& backends);
 
+    /**
+     * @brief gets the data type of the operator's tensors.
+     * @return a pair whose first object contains inputs' data types
+     * and second object outputs' data types
+     */
+    virtual std::pair<std::vector<Aidge::DataType>, std::vector<Aidge::DataType>>
+    getDataType() const = 0;
     virtual void setDataType(const DataType& dataType) const = 0;
     virtual void setDataFormat(const DataFormat& dataFormat) const = 0;
     /**
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index a515ecb5b..0e3d275eb 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -179,8 +179,18 @@ public:
     virtual bool dimsForwarded() const;
     ///////////////////////////////////////////////////
 
+    /**
+     * @brief gets the data type of the operator's tensors.
+     * @return a pair whose first object contains inputs' data types
+     * and second object outputs' data types
+     */
+    std::pair<std::vector<Aidge::DataType>, std::vector<Aidge::DataType>>
+    getDataType() const;
+
     /**
      * @brief Sets the data type of the operator's tensors.
+     * @warning Sets all outputs but only inputs of category
+     * @code InputCategory::Param @endcode & @code InputCategory::OptionalParam @endcode
      * @param dataType Data type to set.
      */
     virtual void setDataType(const DataType& dataType) const override;
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index cac1ad226..1c5b0a0d3 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -10,13 +10,18 @@
  ********************************************************************************/
 
 #include <memory>
+#include <utility>
+#include <vector>
 
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/data/Data.hpp"
+#include "aidge/data/DataType.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
+namespace Aidge{
+using std::make_pair;
 
 Aidge::OperatorTensor::OperatorTensor(const std::string& type,
                                       const std::vector<InputCategory>& inputsCategory,
@@ -180,6 +185,19 @@ bool Aidge::OperatorTensor::dimsForwarded() const {
     return forwarded;
 }
 
+std::pair<std::vector<DataType>, std::vector<DataType>>
+OperatorTensor::getDataType() const {
+    auto res = make_pair(std::vector<DataType>(nbInputs()),
+                         std::vector<DataType>(nbOutputs()));
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        res.first[i] = getInput(i)->dataType();
+    }
+    for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
+        res.second[i] = getOutput(i)->dataType();
+    }
+    return res;
+}
+
 void Aidge::OperatorTensor::setDataType(const DataType& dataType) const {
     for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
         getOutput(i)->setDataType(dataType);
@@ -222,3 +240,4 @@ void Aidge::OperatorTensor::forward() {
 
     Operator::forward();
 }
+}  // namespace Aidge
-- 
GitLab