diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index d6a0df5ab472c4a728e5b5042258d6d2bd34f871..f1c344063acdf1c530f10e5be0629c90bf6235f7 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -30,7 +30,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, NoBias };
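+// InChannels and OutChannels are no longer stored as attributes: they are now
+// deduced from the weight tensor (see Conv_Op::inChannels()/outChannels()).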
+enum class ConvAttr { StrideDims, DilationDims, KernelDims, NoBias };
 
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
@@ -38,8 +38,6 @@ class Conv_Op : public OperatorTensor,
                 public StaticAttributes<ConvAttr,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>,
-                                        DimSize_t,
-                                        DimSize_t,
                                         std::array<DimSize_t, DIM>,
                                         bool> {
 
@@ -51,24 +49,20 @@ public:
     using Attributes_ = StaticAttributes<ConvAttr,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>,
-                                        DimSize_t,
-                                        DimSize_t,
                                         std::array<DimSize_t, DIM>,
                                         bool>;
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    constexpr Conv_Op(DimSize_t inChannels,
-                      DimSize_t outChannels,
-                      const std::array<DimSize_t, DIM> &kernelDims,
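+    // Channel counts were removed from the constructor: they are deduced from
+    // the weight tensor once input #1 (weight) is associated.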
+    constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                       const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
                       bool noBias = false)
         : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvAttr::StrideDims>(strideDims),
                       attr<ConvAttr::DilationDims>(dilationDims),
-                      attr<ConvAttr::InChannels>(inChannels),
-                      attr<ConvAttr::OutChannels>(outChannels),
                       attr<ConvAttr::KernelDims>(kernelDims),
                       attr<ConvAttr::NoBias>(noBias)) {}
 
@@ -76,16 +70,7 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Conv_Op(const Conv_Op<DIM>& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Conv_Op(const Conv_Op<DIM>& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -108,115 +93,28 @@ public:
 
     // }
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        // check inputs have been associated
-        bool associated = true;
-        for (IOIndex_t i = 0; i < 3; ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-            }
-            associated &= !(getInput(i)->empty());
-        }
-        if (associated) {
-            AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
-                     (getInput(0)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()),
-                     "Wrong input size for Conv operator.");
-            AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)) &&
-                        (getInput(1)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()) &&
-                        (getInput(1)->template dims<DIM+2>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
-                        "Wrong weight size for Conv operator.");
-            if(!this->template getAttr<ConvAttr::NoBias>())
-                AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
-                        (getInput(2)->template dims<1>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
-                        "Wrong bias size for Conv operator.");
-            std::array<DimSize_t, DIM + 2> outputDims{};
-            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-
-            for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
-                const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
-                                                       (this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
-                                               1;
-
-                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                              static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
-            }
-
-            outputDims[1] = this->template getAttr<ConvAttr::OutChannels>();
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-        }
-
-        return associated;
-    }
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
+    std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
     computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
                           const std::vector<DimSize_t>& outputDims,
-                          const IOIndex_t outputIdx = 0) const override {
-        if (outputIdx != 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-        }
-        if (firstEltDims.size() != outputDims.size()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
-        }
-        if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
-            // Offset
-            auto inputIdxDims = firstEltDims; // batch idx is the same
-            inputIdxDims[1] = 0; // each channel is used so start with the first one
-
-            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-                if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
-                }
-            }
+                          const IOIndex_t outputIdx = 0) const override;
 
-            // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-            // Input
-            // same batch value, every input channel is used
-            std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
-            for (DimIdx_t i = 0; i < DIM; ++i) {
-                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
-                            + 1
-                            + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-                inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
-            }
 
-            // Weight
-            // same output value, every input channel is used
-            std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
-            for (std::size_t i = 0; i < DIM; ++i) {
-                weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
-            }
-            std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
-            weightIdxDims[0] = firstEltDims[1];
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
-            // Result
-            std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
-
-            // Bias
-            if (! this->template getAttr<ConvAttr::NoBias>()){
-                const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-                const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
-                res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
-            }
-            return res;
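+    // Channel accessors, derived from the weight tensor of shape
+    // [outChannels, inChannels, kernelDims...]; they require input #1 (weight)
+    // to be associated.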
+    DimSize_t inChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated, so the number of input channels is not defined.");
         }
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+        return getInput(1)->template dims<DIM+2>()[1];
     }
 
-
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Conv_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-
-        // By default, automatically set backend for weight and bias inputs
-        getInput(1)->setBackend(name, device);
-        getInput(2)->setBackend(name, device);
+    DimSize_t outChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated, so the number of output channels is not defined.");
+        }
+        return getInput(1)->template dims<DIM+2>()[0];
     }
 
     static const std::vector<std::string> getInputsName(){
@@ -227,8 +125,6 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string Conv_Op<DIM>::Type = "Conv";
 
 /**
  * @brief Perform a convolution on the input Tensor.
@@ -252,7 +148,7 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
                                   bool noBias = false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims, noBias), name);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
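+    // inChannels/outChannels are no longer operator attributes; they are only
+    // used here to size the weight and bias producers below.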
     addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
     addProducer(conv, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
 
@@ -274,13 +170,13 @@ inline std::shared_ptr<Node> Conv(
 }
 }  // namespace Aidge
 
+extern template class Aidge::Conv_Op<2>;
+
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
     "StrideDims",
     "DilationDims",
-    "InChannels",
-    "OutChannels",
     "KernelDims",
     "NoBias"
 };
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 2337ff66f00b932a190d5b1735d53df3da8ffdbf..7091421720aaf4291198823a6d7dcd732a8d9f99 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -29,7 +29,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims, NoBias };
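+// Channels is no longer stored as an attribute: it is now deduced from the
+// weight tensor (see ConvDepthWise_Op::nbChannels()).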
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims, NoBias };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
@@ -37,7 +37,6 @@ class ConvDepthWise_Op : public OperatorTensor,
                 public StaticAttributes<ConvDepthWiseAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
-                                       DimSize_t,
                                        std::array<DimSize_t, DIM>,
                                        bool> {
 public:
@@ -48,21 +47,18 @@ public:
     using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
-                                             DimSize_t,
                                              std::array<DimSize_t, DIM>,
                                              bool>;
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    constexpr ConvDepthWise_Op(const DimSize_t nbChannels,
-                               const std::array<DimSize_t, DIM> &kernel_dims,
+    constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
                                bool no_bias=false)
         : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
                       attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-                      attr<ConvDepthWiseAttr::Channels>(nbChannels),
                       attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
                       attr<ConvDepthWiseAttr::NoBias>(no_bias)) {}
 
@@ -70,16 +66,7 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -90,105 +77,20 @@ public:
     }
 
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        // check inputs have been associated
-        // TODO : add a check of inputs dimensions ?
-        bool associated = true;
-        for (IOIndex_t i = 0; i < 3; ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-            }
-            associated &= !(getInput(i)->empty());
-        }
-        if (associated) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
-            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-
-            for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
-                const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
-                                                       (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
-                                               1;
-
-                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                              static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
-            }
-            // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>()));
-            // if (mInputs[1]->empty()) {
-            //     mInputs[1]->resize(weightDims);
-            // }
-            // if (mInputs[2]->empty()) {
-            //     mInputs[2]->resize({mInputs[0]->dims()[1]});
-            // }
-            outputDims[1] = inputDims[1];
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-        }
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-        return associated;
-    }
-
-    std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
-        if (outputIdx != 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-        }
-        if (firstEltDims.size() != outputDims.size()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
-        }
-        if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
-            // Offset
-            auto inputIdxDims = firstEltDims; // batch idx is the same
-
-            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-                if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
-                }
-            }
-
-            // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-            // Input
-            // same batch value
-            std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
-            for (DimIdx_t i = 0; i < DIM; ++i) {
-                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
-                            + 1
-                            + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-                inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
-            }
-
-            // Weight
-            std::vector<DimSize_t> weightDims{outputDims[1], 1};
-            for (std::size_t i = 0; i < DIM; ++i) {
-                weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
-            }
-            std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
-            weightIdxDims[0] = firstEltDims[1];
-
-
-            // Result
-            std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
-            // Bias
-            if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){
-                const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-                const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
-                res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
-            }
-            return res;
-        }
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
-    }
+    std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
+    computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
+                          const std::vector<DimSize_t>& outputDims,
+                          const IOIndex_t outputIdx = 0) const override;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
-        // By default, automatically set backend for weight and bias inputs
-        getInput(1)->setBackend(name, device);
-        getInput(2)->setBackend(name, device);
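+    // Channel accessor, derived from the weight tensor of shape
+    // [nbChannels, 1, kernelDims...]; it requires input #1 (weight) to be
+    // associated.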
+    DimSize_t nbChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "ConvDepthWise operator has no weight Tensor associated, so the number of channels is not defined.");
+        }
+        return getInput(1)->template dims<DIM+2>()[0];
     }
 
     static const std::vector<std::string> getInputsName(){
@@ -199,9 +101,6 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string ConvDepthWise_Op<DIM>::Type = "ConvDepthWise";
-
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            const std::array<DimSize_t, DIM> &kernelDims,
@@ -211,7 +110,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            bool noBias=false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims, noBias), name);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
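+    // nbChannels is no longer an operator attribute; it is only used here to
+    // size the weight and bias producers below.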
     addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
     addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b");
     return convDW;
@@ -231,9 +130,11 @@ inline std::shared_ptr<Node> ConvDepthWise(
 }
 }  // namespace Aidge
 
+extern template class Aidge::ConvDepthWise_Op<2>;
+
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims", "Channels",
+const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims",
                                                           "KernelDims", "NoBias"};
 }
 
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 0f4b970d6c192ac4c90d7d6d4b8fe5bfa184845d..c1a4f1319e4e715add01417f86d17bddadb992f1 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -30,24 +30,27 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
   py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Attributes, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
-  .def(py::init<DimSize_t,
-                DimSize_t,
-                const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                bool>(),
-        py::arg("in_channels"),
-        py::arg("out_channels"),
-        py::arg("kernel_dims"),
-        py::arg("stride_dims"),
-        py::arg("dilation_dims"),
-        py::arg("no_bias"))
-    .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
-    .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
-    .def_static("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
-    ;
-  declare_registrable<Conv_Op<DIM>>(m, pyClassName);
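+        // Python callers pass plain lists: check their length against DIM at
+        // runtime, then convert them to the fixed-size std::array expected by
+        // the C++ constructor.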
+        .def(py::init([](const std::vector<DimSize_t>& kernel_dims,
+                         const std::vector<DimSize_t> &stride_dims,
+                         const std::vector<DimSize_t> &dilation_dims,
+                         bool no_bias) {
+            AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+            AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+            AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+            return new Conv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+        }), py::arg("kernel_dims"),
+            py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+            py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+            py::arg("no_bias") = false)
+        .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
+        .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
+        .def_static("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
+        .def("in_channels", &Conv_Op<DIM>::inChannels)
+        .def("out_channels", &Conv_Op<DIM>::outChannels)
+        ;
 
+  declare_registrable<Conv_Op<DIM>>(m, pyClassName);
 
   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
                                                          DimSize_t out_channels,
@@ -72,8 +75,9 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
 
 
 void init_Conv(py::module &m) {
-  declare_ConvOp<1>(m);
+  // Only the 2D variant is bound for now: Conv_Op is explicitly instantiated
+  // for DIM = 2 only (see src/operator/Conv.cpp).
   declare_ConvOp<2>(m);
-  declare_ConvOp<3>(m);
 }
+
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index be47f57d9bb36dc249d635bf3afb874d1df51308..c0c494e8db29c9520cc88fb1fb622cfa831fe9f3 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -31,12 +31,10 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
   py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
-  .def(py::init<const DimSize_t,
-                const std::array<DimSize_t, DIM> &,
+  .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
                 bool>(),
-        py::arg("nb_channels"),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
         py::arg("dilation_dims"),
@@ -67,9 +65,9 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
 
 
 void init_ConvDepthWise(py::module &m) {
-  declare_ConvDepthWiseOp<1>(m);
+  // Only the 2D variant is bound for now: ConvDepthWise_Op is explicitly
+  // instantiated for DIM = 2 only (see src/operator/ConvDepthWise.cpp).
   declare_ConvDepthWiseOp<2>(m);
-  declare_ConvDepthWiseOp<3>(m);
 
   // FIXME:
   // m.def("ConvDepthWise1D", static_cast<NodeAPI(*)(const char*, int, int, int const
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..99b40fcb277ce1f22c5cd3a571eaaaa4910b6ba5
--- /dev/null
+++ b/src/operator/Conv.cpp
@@ -0,0 +1,158 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Conv.hpp"
+
+#include <cmath>      // std::floor
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <utility>    // std::pair
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::Conv_Op<DIM>::Type = "Conv";
+
+template <Aidge::DimIdx_t DIM>
+Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
+    : OperatorTensor(op),
+      Attributes_(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    // check inputs have been associated
+    bool associated = true;
+    for (IOIndex_t i = 0; i < 3; ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+        }
+        associated &= !(getInput(i)->empty());
+    }
+    if (associated) {
+        // first check weight since it defines inChannels and outChannels
+        AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
+                    "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
+        // check data
+        AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
+                    (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
+                    "Wrong input size for Conv operator.");
+        // check optional bias
+        if (!this->template getAttr<ConvAttr::NoBias>()) {
+            AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
+                    (getInput(2)->template dims<1>()[0] == outChannels()),
+                    "Wrong bias size for Conv operator.");
+        }
+        std::array<DimSize_t, DIM + 2> outputDims{};
+        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+
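+        // For each spatial dim, without padding (padding is handled by Pad_Op):
+        // out = floor((in - (dilation * (kernel - 1) + 1)) / stride) + 1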
+        for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
+            const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
+                                                    (this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
+                                            1;
+
+            outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                    floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
+                            static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
+        }
+
+        outputDims[1] = outChannels();
+        outputDims[0] = inputDims[0];
+        mOutputs[0]->resize(outputDims);
+    }
+
+    return associated;
+}
+
+
+template <Aidge::DimIdx_t DIM>
+std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>
+Aidge::Conv_Op<DIM>::computeReceptiveField(
+                          const std::vector<Aidge::DimSize_t>& firstEltDims,
+                          const std::vector<Aidge::DimSize_t>& outputDims,
+                          const Aidge::IOIndex_t outputIdx) const
+{
+    if (outputIdx != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
+    }
+    if (firstEltDims.size() != outputDims.size()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
+    }
+    if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
+        // Offset
+        auto inputIdxDims = firstEltDims; // batch idx is the same
+        inputIdxDims[1] = 0; // each channel is used so start with the first one
+
+        for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+            if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
+            }
+        }
+
+        // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
+        // Input
+        // same batch value, every input channel is used
+        std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
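+        // Input extent required per spatial dim:
+        // (outSize - 1) * stride + (kernel - 1) * dilation + 1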
+        for (DimIdx_t i = 0; i < DIM; ++i) {
+            inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+                        * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        + 1
+                        + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
+        }
+
+        // Weight
+        // same output value, every input channel is used
+        std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
+        for (std::size_t i = 0; i < DIM; ++i) {
+            weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
+        }
+        std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
+        weightIdxDims[0] = firstEltDims[1];
+
+        // Result
+        std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>> res;
+        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
+        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
+
+        // Bias
+        if (!this->template getAttr<ConvAttr::NoBias>()) {
+            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
+            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
+            res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+        }
+        return res;
+    }
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Conv_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+
+    // By default, automatically set backend for weight and bias inputs
+    getInput(1)->setBackend(name, device);
+    getInput(2)->setBackend(name, device);
+}
+
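+// Explicit instantiation: the implementation lives in this .cpp file, so only
+// the dimensions instantiated here are usable (matching the "extern template"
+// declaration in Conv.hpp).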
+template class Aidge::Conv_Op<2>;
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..12aa0818b244ef0f3195de49467a464e057f2c73
--- /dev/null
+++ b/src/operator/ConvDepthWise.cpp
@@ -0,0 +1,158 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ConvDepthWise.hpp"
+
+#include <array>
+#include <cmath>      // std::floor
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <utility>    // std::pair
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise";
+
+template <Aidge::DimIdx_t DIM>
+Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op)
+    : OperatorTensor(op),
+      Attributes_(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    // check inputs have been associated
+    bool associated = true;
+    for (IOIndex_t i = 0; i < 3; ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+        }
+        associated &= !(getInput(i)->empty());
+    }
+    if (associated) {
+        // first check weight since it defines nbChannels
+        AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
+                    "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
+        // check data
+        AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
+                    (getInput(0)->template dims<DIM+2>()[1] == nbChannels()),
+                    "Wrong input size for Conv operator.");
+        // check optional bias
+        if (!this->template getAttr<ConvDepthWiseAttr::NoBias>()) {
+            AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
+                    (getInput(2)->template dims<1>()[0] == nbChannels()),
+                    "Wrong bias size for ConvDepthWise operator.");
+        }
+        std::array<DimSize_t, DIM + 2> outputDims = {};
+        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+
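+        // For each spatial dim, without padding (padding is handled by Pad_Op):
+        // out = floor((in - (dilation * (kernel - 1) + 1)) / stride) + 1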
+        for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
+            const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
+                                                    (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
+                                            1;
+
+            outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                    floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
+                            static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
+        }
+
+        outputDims[1] = inputDims[1];
+        outputDims[0] = inputDims[0];
+        mOutputs[0]->resize(outputDims);
+    }
+
+    return associated;
+}
+
+
+template <Aidge::DimIdx_t DIM>
+std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>
+Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
+                          const std::vector<Aidge::DimSize_t>& firstEltDims,
+                          const std::vector<Aidge::DimSize_t>& outputDims,
+                          const Aidge::IOIndex_t outputIdx) const
+{
+    if (outputIdx != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
+    }
+    if (firstEltDims.size() != outputDims.size()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
+    }
+    if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
+        // Offset
+        auto inputIdxDims = firstEltDims; // batch idx is the same
+
+        for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+            if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
+            }
+        }
+
+        // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
+        // Input
+        // same batch value
+        std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
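+        // Input extent required per spatial dim:
+        // (outSize - 1) * stride + (kernel - 1) * dilation + 1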
+        for (DimIdx_t i = 0; i < DIM; ++i) {
+            inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+                        * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        + 1
+                        + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
+        }
+
+        // Weight
+        std::vector<DimSize_t> weightDims{outputDims[1], 1};
+        for (std::size_t i = 0; i < DIM; ++i) {
+            weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
+        }
+        std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
+        weightIdxDims[0] = firstEltDims[1];
+
+
+        // Result
+        std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>> res;
+        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
+        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
+        // Bias
+        if (!this->template getAttr<ConvDepthWiseAttr::NoBias>()) {
+            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
+            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
+            res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+        }
+        return res;
+    }
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::ConvDepthWise_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+
+    // By default, automatically set backend for weight and bias inputs
+    getInput(1)->setBackend(name, device);
+    getInput(2)->setBackend(name, device);
+}
+
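+// Explicit instantiation: the implementation lives in this .cpp file, so only
+// the dimensions instantiated here are usable (matching the "extern template"
+// declaration in ConvDepthWise.hpp).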
+template class Aidge::ConvDepthWise_Op<2>;