From 053a98f9dcd0302719f0c56e29c20d8f1acd59d7 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 12 Oct 2023 15:19:41 +0200
Subject: [PATCH] More fixes

---
 include/aidge/operator/Add.hpp           |   8 +-
 include/aidge/operator/AvgPooling.hpp    |   4 +-
 include/aidge/operator/BatchNorm.hpp     |   4 +-
 include/aidge/operator/Conv.hpp          |   4 +-
 include/aidge/operator/ConvDepthWise.hpp |   4 +-
 include/aidge/operator/FC.hpp            |   4 +-
 include/aidge/operator/LeakyReLU.hpp     |   4 +-
 include/aidge/operator/MatMul.hpp        |   4 +-
 include/aidge/operator/MaxPooling.hpp    |   4 +-
 include/aidge/operator/Pad.hpp           | 233 +++++++++++++++++++++++
 include/aidge/operator/ReLU.hpp          |   4 +-
 include/aidge/operator/Scaling.hpp       |   4 +-
 include/aidge/operator/Softmax.hpp       |   4 +-
 src/recipies/FuseBatchNorm.cpp           |   4 +-
 14 files changed, 261 insertions(+), 28 deletions(-)
 create mode 100644 include/aidge/operator/Pad.hpp

diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 1e0f17e6d..57403270d 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -81,14 +81,14 @@ public:
     //     return *in;
     // }
 
-    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
 
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
     }
 
-    constexpr void computeOutputDims() override final {
+    void computeOutputDims() override final {
         if (!mInputs[0]->empty()) {
             const auto expectedDims =  mInputs[0]->dims();
             std::size_t nonEmptyInputTensor = 1;
@@ -140,7 +140,7 @@ public:
     }
 
 
-    void setBackend(const std::string& name) {
+    void setBackend(const std::string& name) override {
         mImpl = Registrar<Add_Op<NUM>>::create(name)(*this);
         mOutput->setBackend(name);
 
@@ -150,7 +150,7 @@ public:
         }
     }
 
-    void setDatatype(const DataType& datatype) {
+    void setDatatype(const DataType& datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index b29463c67..a5143ab4b 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -84,7 +84,7 @@ public:
         return std::make_shared<AvgPooling_Op<DIM>>(*this);
     }
 
-    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 1 && "operators supports only 3 inputs");
         (void) inputIdx; // avoid unused warning
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
@@ -92,7 +92,7 @@ public:
         mInput = std::dynamic_pointer_cast<Tensor>(data);
     }
 
-    constexpr void computeOutputDims() override final {
+    void computeOutputDims() override final {
         if (!mInput->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 90a6be722..d918c4317 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -87,14 +87,14 @@ public:
     //     return *in;
     // }
 
-    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 5 && "operators supports only 5 inputs");
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
 
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
     }
 
-    constexpr void computeOutputDims() override final {
+    void computeOutputDims() override final {
         if (!mInputs[0]->empty()) {
             for (std::size_t i = nbDataInputs(); i < nbInputs(); ++i) {
                 if(mInputs[i]->size() != mInputs[0]->dims()[1]) {
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 22553080c..b2416220a 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -100,14 +100,14 @@ public:
 
     // }
 
-    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
 
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
     }
 
-    constexpr void computeOutputDims() override final {
+    void computeOutputDims() override final {
         if (!mInputs[0]->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 7a4db68ba..2b03dc419 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -92,14 +92,14 @@ class ConvDepthWise_Op : public Operator,
         return std::make_shared<ConvDepthWise_Op<DIM>>(*this);
     }
 
-    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
 
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
     }
 
-    constexpr void computeOutputDims() override final {
+    void computeOutputDims() override final {
         if (!mInputs[0]->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 127d39a8b..6f5b2a44f 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -135,7 +135,7 @@ public:
     }
 
 
-    void setBackend(const std::string& name) {
+    void setBackend(const std::string& name) override {
         mImpl = Registrar<FC_Op>::create(name)(*this);
         mOutput->setBackend(name);
 
@@ -145,7 +145,7 @@ public:
         mInputs[2]->setBackend(name);
     }
 
-    void setDatatype(const DataType& datatype) {
+    void setDatatype(const DataType& datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index c6ee01239..7a6fc4cbb 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -120,14 +120,14 @@ public:
     }
 
 
-    void setBackend(const std::string& name) {
+    void setBackend(const std::string& name) override {
         mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
         mOutput->setBackend(name);
 
         // FIXME: temporary workaround
         mInput->setBackend(name);
     }
-    void setDatatype(const DataType& datatype) {
+    void setDatatype(const DataType& datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index d0dadd847..4c15f8ce3 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -127,7 +127,7 @@ public:
     }
 
 
-    void setBackend(const std::string& name) {
+    void setBackend(const std::string& name) override {
         mImpl = Registrar<MatMul_Op>::create(name)(*this);
         mOutput->setBackend(name);
 
@@ -136,7 +136,7 @@ public:
         mInputs[1]->setBackend(name);
     }
 
-    void setDatatype(const DataType& datatype) {
+    void setDatatype(const DataType& datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index eae7e30df..fefd112e5 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -85,7 +85,7 @@ public:
         return std::make_shared<MaxPooling_Op<DIM>>(*this);
     }
 
-    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 1 && "operators supports only 3 inputs");
         (void) inputIdx; // avoid unused warning
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
@@ -93,7 +93,7 @@ public:
         mInput = std::dynamic_pointer_cast<Tensor>(data);
     }
 
-    constexpr void computeOutputDims() override final {
+    void computeOutputDims() override final {
         if (!mInput->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
new file mode 100644
index 000000000..9b3a6c24f
--- /dev/null
+++ b/include/aidge/operator/Pad.hpp
@@ -0,0 +1,233 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_PAD_H_
+#define AIDGE_CORE_OPERATOR_PAD_H_
+
+#include <array>
+#include <numeric>
+#include <vector>
+#include <cmath>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
+enum class PadBorderType { Constant, Replicate, Reflect, Wrap };
+
+template <DimIdx_t DIM>
+class Pad_Op : public Operator,
+                public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
+                public StaticAttributes<PadAttr,
+                                       std::array<std::array<DimSize_t, 2>, DIM>,
+                                       PadBorderType,
+                                       double> {
+private:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char *Type = "Pad";
+
+    Pad_Op() = delete;
+
+    using Attributes_ = StaticAttributes<PadAttr,
+                                             std::array<std::array<DimSize_t, 2>, DIM>,
+                                             PadBorderType,
+                                             double>;
+    template <PadAttr e>
+    using attr = typename Attributes_::template attr<e>;
+
+    constexpr Pad_Op(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
+                     const PadBorderType &borderType = PadBorderType::Constant,
+                     double borderValue = 0.0)
+        : Operator(Type),
+          Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples),
+                           attr<PadAttr::BorderType>(borderType),
+                           attr<PadAttr::BorderValue>(borderValue)) {
+        setDatatype(DataType::Float32);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Pad_Op(const Pad_Op& op)
+        : Operator(Type),
+          Attributes_(op),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Pad_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Pad_Op<DIM>>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 1 && "Pad operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInput->empty()) {
+            std::array<DimSize_t, DIM + 2> outputDims = {};
+
+            for (std::size_t dim = 0; dim < DIM; ++dim) {
+                outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[dim][0]
+                                    + mInput->dims()[dim+2]
+                                    + this->template getAttr<PadAttr::BeginEndBorders>()[dim][1];
+            }
+            outputDims[1] = mInput->dims()[1];
+            outputDims[0] = mInput->dims()[0];
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "Pad operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return *(mInput.get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "Pad operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "Pad operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "Pad operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "Pad operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string &name) override {
+        mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+
+    void setDatatype(const DataType &datatype) override {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> Pad(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
+                                           const std::string& name = "",
+                                           const PadBorderType &borderType = PadBorderType::Constant,
+                                           double borderValue = 0.0)
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
+    return pad;
+}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, DIM> &dimBeginEnd,
+                                           const std::string& name = "",
+                                           const PadBorderType &borderType = PadBorderType::Constant,
+                                           double borderValue = 0.0)
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
+    std::array<std::array<DimSize_t, 2>, DIM> beginEndTuples;
+    for (size_t i = 0; i < DIM; ++i) {
+        beginEndTuples[i] = {dimBeginEnd[i], dimBeginEnd[i]};
+    }
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
+    return pad;
+}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> ZeroPad(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
+                                           const std::string& name = "")
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadBorderType::Constant, 0.0), name);
+    return pad;
+}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> ZeroPad(const std::array<DimSize_t, DIM> &dimBeginEnd,
+                                           const std::string& name = "")
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
+    std::array<std::array<DimSize_t, 2>, DIM> beginEndTuples;
+    for (size_t i = 0; i < DIM; ++i) {
+        beginEndTuples[i] = {dimBeginEnd[i], dimBeginEnd[i]};
+    }
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadBorderType::Constant, 0.0), name);
+    return pad;
+}
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> Pad(
+    std::array<DimSize_t, 2> const (&beginEndTuples)[DIM],
+    const std::string& name = "",
+    const PadBorderType &borderType = PadBorderType::Constant,
+    double borderValue = 0.0)
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
+    return Pad(to_array(beginEndTuples), name, borderType, borderValue);
+}
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::PadAttr>::data[] = {"BeginEndBorders", "BorderType", "BorderValue"};
+
+template <>
+const char *const EnumStrings<Aidge::PadBorderType>::data[] = {"Constant", "Replicate", "Reflect", "Wrap"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_PAD_H_ */
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 433e353f0..0a7ec3b4f 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -108,14 +108,14 @@ public:
     }
 
 
-    void setBackend(const std::string& name) {
+    void setBackend(const std::string& name) override {
         mImpl = Registrar<ReLU_Op>::create(name)(*this);
         mOutput->setBackend(name);
 
         // FIXME: temporary workaround
         mInput->setBackend(name);
     }
-    void setDatatype(const DataType& datatype) {
+    void setDatatype(const DataType& datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 0ea6ba39b..f18abaf32 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -130,13 +130,13 @@ public:
     }
 
 
-    void setBackend(const std::string& name) {
+    void setBackend(const std::string& name) override {
         mImpl = Registrar<Scaling_Op>::create(name)(*this);
         mOutput->setBackend(name);
         // FIXME: temporary workaround
         mInput->setBackend(name);
     }
-    void setDatatype(const DataType& datatype) {
+    void setDatatype(const DataType& datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 898bae4c3..095ea0aad 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -108,14 +108,14 @@ public:
     }
 
 
-    void setBackend(const std::string& name) {
+    void setBackend(const std::string& name) override {
         mImpl = Registrar<Softmax_Op>::create(name)(*this);
         mOutput->setBackend(name);
 
         // FIXME: temporary workaround
         mInput->setBackend(name);
     }
-    void setDatatype(const DataType& datatype) {
+    void setDatatype(const DataType& datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/src/recipies/FuseBatchNorm.cpp b/src/recipies/FuseBatchNorm.cpp
index 3a50ec3e7..e5e59582a 100644
--- a/src/recipies/FuseBatchNorm.cpp
+++ b/src/recipies/FuseBatchNorm.cpp
@@ -79,10 +79,10 @@ void Aidge::fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes){
         printf("variance < 1e-12 for all outputs! Is the network correctly trained?\n");
     }
 
-    const DimSize_t channelsSize = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<DimSize_t>("InChannels");
+    const DimSize_t channelsSize = std::dynamic_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<DimSize_t>("InChannels");
 
     // TODO : suppose we have Conv2D ...
-    const std::array<DimSize_t, 2> kernelDims = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+    const std::array<DimSize_t, 2> kernelDims = std::dynamic_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<std::array<DimSize_t, 2>>("KernelDims");
 
     std::shared_ptr<Tensor> weight  = conv->input(1).first->getOperator()->getOutput(conv->input(1).second);
     std::shared_ptr<Tensor> bias  = conv->input(2).first->getOperator()->getOutput(conv->input(2).second);
-- 
GitLab