From 482557ca88802121bde800ea6dbe104ff703c339 Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Mon, 10 Mar 2025 15:45:28 +0000
Subject: [PATCH] Move code from header to source file

---
 include/aidge/analysis/DynamicAnalysis.hpp |   7 +-
 include/aidge/analysis/OperatorStats.hpp   | 300 +++++++++-----------
 include/aidge/data/Data.hpp                |  23 +-
 include/aidge/data/DataType.hpp            |  11 +-
 include/aidge/data/Elts.hpp                |  80 ++----
 include/aidge/data/Tensor.hpp              | 140 +++-------
 include/aidge/graph/Connector.hpp          |  35 +--
 include/aidge/operator/Abs.hpp             |  19 +-
 include/aidge/operator/And.hpp             |  25 +-
 include/aidge/operator/ArgMax.hpp          |   8 +-
 include/aidge/operator/AvgPooling.hpp      |  11 +-
 include/aidge/operator/BatchNorm.hpp       |  14 +-
 include/aidge/operator/BitShift.hpp        |  22 +-
 include/aidge/operator/Clip.hpp            |  15 +-
 include/aidge/operator/ConstantOfShape.hpp |  21 +-
 src/analysis/DynamicAnalysis.cpp           |  12 +-
 src/analysis/OperatorStats.cpp             | 306 +++++++++++++++++++--
 src/analysis/StaticAnalysis.cpp            |   2 +
 src/data/Data.cpp                          |  35 +++
 src/data/Elts.cpp                          |  91 ++++++
 src/data/Tensor.cpp                        |  77 +++++-
 src/graph/Connector.cpp                    |  63 +++--
 src/operator/Abs.cpp                       |  28 +-
 src/operator/And.cpp                       |  24 ++
 src/operator/ArgMax.cpp                    |   8 +
 src/operator/AvgPooling.cpp                |  13 +
 src/operator/BatchNorm.cpp                 |  16 ++
 src/operator/BitShift.cpp                  |  36 ++-
 src/operator/Clip.cpp                      |  37 ++-
 src/operator/ConstantOfShape.cpp           |  23 +-
 unit_tests/data/Test_Spikegen.cpp          |  31 ++-
 31 files changed, 982 insertions(+), 551 deletions(-)
 create mode 100644 src/data/Data.cpp
 create mode 100644 src/data/Elts.cpp

diff --git a/include/aidge/analysis/DynamicAnalysis.hpp b/include/aidge/analysis/DynamicAnalysis.hpp
index 3dadf79b3..2052f9633 100644
--- a/include/aidge/analysis/DynamicAnalysis.hpp
+++ b/include/aidge/analysis/DynamicAnalysis.hpp
@@ -15,14 +15,9 @@
 
 #include <cstddef>  // std::size_t
 #include <memory>
-#include <string>
 
 #include "aidge/analysis/OperatorStats.hpp"
-#include "aidge/data/Tensor.hpp"
-#include "aidge/graph/GraphView.hpp"
-#include "aidge/operator/Operator.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/Registrar.hpp"
+#include "aidge/scheduler/Scheduler.hpp"
 
 namespace Aidge {
 /**
diff --git a/include/aidge/analysis/OperatorStats.hpp b/include/aidge/analysis/OperatorStats.hpp
index ac1abcee7..c9f2bb7c2 100644
--- a/include/aidge/analysis/OperatorStats.hpp
+++ b/include/aidge/analysis/OperatorStats.hpp
@@ -14,27 +14,20 @@
 #define AIDGE_CORE_ANALYSIS_OPERATORSTATS_H_
 
 #include <cstddef>  // std::size_t
+#include <functional>
 #include <memory>
 #include <string>
 
-#include "aidge/data/Tensor.hpp"
-#include "aidge/graph/GraphView.hpp"
-#include "aidge/operator/AvgPooling.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/operator/Conv.hpp"
-#include "aidge/operator/ConvDepthWise.hpp"
-#include "aidge/operator/FC.hpp"
-#include "aidge/operator/MatMul.hpp"
-#include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/operator/ReduceMean.hpp"
-#include "aidge/operator/ReduceSum.hpp"
-#include "aidge/operator/Softmax.hpp"
-#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
+
+template <std::uint8_t DIM> class AvgPooling_Op;
+template <std::uint8_t DIM> class Conv_Op;
+template <std::uint8_t DIM> class ConvDepthWise_Op;
+template <std::uint8_t DIM> class MaxPooling_Op;
+
 /**
  * @brief Base class to compute statistics from an Operator.
  *
@@ -165,11 +158,12 @@ protected:
 };
 
 ////////////////////////////////////////////////////////////////////////////////
+// MetaOpStats
 
 class MetaOpStats : public OperatorStats {
 public:
     MetaOpStats() = delete;
-    MetaOpStats(const Operator& op) : OperatorStats(op) {}
+    MetaOpStats(const Operator& op);
 
     ~MetaOpStats();
 
@@ -185,264 +179,236 @@ public:
     std::size_t getNbMACOps() const override;
 };
 
+////////////////////////////////////////////////////////////////////////////////
+// ConvStats
+
 template <class OP>
 class ConvStats : public OperatorStats {
 public:
-    ConvStats(const Operator& op) : OperatorStats(op) {}
+    ConvStats() = delete;
+    ConvStats(const Operator& op);
+
+    ~ConvStats();
 
     static std::unique_ptr<ConvStats<OP>> create(const Operator& op) {
         return std::make_unique<ConvStats<OP>>(op);
     }
 
-    std::size_t getNbMACOps() const override {
-        const OP& op_ = dynamic_cast<const OP&>(mOp);
-        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-	    const std::size_t weightsSize = op_.getInput(1)->size();
-        const std::size_t outputSize
-            = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
-                              op_.getOutput(0)->dims().cend(),
-                              1,
-                              std::multiplies<std::size_t>()); // NCHW...
-        const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
-        return batchSize * (weightsSize * outputSize);
-    }
+    std::size_t getNbMACOps() const override;
 };
 
+extern template class ConvStats<Conv_Op<1>>;
+extern template class ConvStats<ConvDepthWise_Op<1>>;
+extern template class ConvStats<Conv_Op<2>>;
+extern template class ConvStats<ConvDepthWise_Op<2>>;
+
 // Beware: cannot use Conv_Op<2>::Type as key because static variable initialization order is undefined!
 REGISTRAR(OperatorStats, "Conv1D", ConvStats<Conv_Op<1>>::create);
 REGISTRAR(OperatorStats, "ConvDepthWise1D", ConvStats<ConvDepthWise_Op<1>>::create);
 REGISTRAR(OperatorStats, "Conv2D", ConvStats<Conv_Op<2>>::create);
 REGISTRAR(OperatorStats, "ConvDepthWise2D", ConvStats<ConvDepthWise_Op<2>>::create);
 
+////////////////////////////////////////////////////////////////////////////////
+// MaxPoolingStats
+
 template <class OP>
 class MaxPoolingStats : public OperatorStats {
 public:
-    MaxPoolingStats(const Operator& op) : OperatorStats(op) {}
+    MaxPoolingStats() = delete;
+    MaxPoolingStats(const Operator& op);
+
+    ~MaxPoolingStats();
 
     static std::unique_ptr<MaxPoolingStats<OP>> create(const Operator& op) {
         return std::make_unique<MaxPoolingStats<OP>>(op);
     }
 
-    std::size_t getNbCompOps() const override {
-        const OP& op_ = dynamic_cast<const OP&>(mOp);
-        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-	    const std::size_t poolSize
-            = std::accumulate(op_.kernelDims().cbegin(),
-                              op_.kernelDims().cend(),
-                              1,
-                              std::multiplies<std::size_t>());
-        const std::size_t outputSize
-            = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
-                              op_.getOutput(0)->dims().cend(),
-                              1,
-                              std::multiplies<std::size_t>()); // NCHW...
-        const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
-        return batchSize * ((poolSize - 1) * outputSize);
-    }
+    std::size_t getNbCompOps() const override;
 };
 
+extern template class MaxPoolingStats<MaxPooling_Op<1>>;
+extern template class MaxPoolingStats<MaxPooling_Op<2>>;
+extern template class MaxPoolingStats<MaxPooling_Op<3>>;
+
 REGISTRAR(OperatorStats, "MaxPooling1D", MaxPoolingStats<MaxPooling_Op<1>>::create);
 REGISTRAR(OperatorStats, "MaxPooling2D", MaxPoolingStats<MaxPooling_Op<2>>::create);
 REGISTRAR(OperatorStats, "MaxPooling3D", MaxPoolingStats<MaxPooling_Op<3>>::create);
 
+////////////////////////////////////////////////////////////////////////////////
+// AvgPoolingStats
+
 template <class OP>
 class AvgPoolingStats : public OperatorStats {
 public:
-    AvgPoolingStats(const Operator& op) : OperatorStats(op) {}
+    AvgPoolingStats() = delete;
+    AvgPoolingStats(const Operator& op);
+
+    ~AvgPoolingStats();
 
     static std::unique_ptr<AvgPoolingStats<OP>> create(const Operator& op) {
         return std::make_unique<AvgPoolingStats<OP>>(op);
     }
 
-    std::size_t getNbArithmOps() const override {
-        const OP& op_ = dynamic_cast<const OP&>(mOp);
-        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-	    const std::size_t poolSize
-            = std::accumulate(op_.kernelDims().cbegin(),
-                              op_.kernelDims().cend(),
-                              1,
-                              std::multiplies<std::size_t>());
-        const std::size_t outputSize
-            = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
-                              op_.getOutput(0)->dims().cend(),
-                              1,
-                              std::multiplies<std::size_t>()); // NCHW...
-        const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
-        // (poolSize - 1) additions + 1 division for each output
-        return batchSize * (poolSize * outputSize);
-    }
+    std::size_t getNbArithmOps() const override;
 };
 
+extern template class AvgPoolingStats<AvgPooling_Op<1>>;
+extern template class AvgPoolingStats<AvgPooling_Op<2>>;
+extern template class AvgPoolingStats<AvgPooling_Op<3>>;
+extern template class AvgPoolingStats<AvgPooling_Op<4>>;
+
 REGISTRAR(OperatorStats, "AvgPooling1D", AvgPoolingStats<AvgPooling_Op<1>>::create);
 REGISTRAR(OperatorStats, "AvgPooling2D", AvgPoolingStats<AvgPooling_Op<2>>::create);
 REGISTRAR(OperatorStats, "AvgPooling3D", AvgPoolingStats<AvgPooling_Op<3>>::create);
 REGISTRAR(OperatorStats, "AvgPooling4D", AvgPoolingStats<AvgPooling_Op<4>>::create);
 
+////////////////////////////////////////////////////////////////////////////////
+// FCStats
+
 class FCStats : public OperatorStats {
 public:
-    FCStats(const Operator& op) : OperatorStats(op) {}
+    FCStats() = delete;
+    FCStats(const Operator& op);
+
+    ~FCStats();
 
     static std::unique_ptr<FCStats> create(const Operator& op) {
         return std::make_unique<FCStats>(op);
     }
 
-    std::size_t getNbMACOps() const override {
-        const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
-        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-	    const std::size_t weightsSize = op_.getInput(1)->size();
-        const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
-        return batchSize * weightsSize;
-    }
+    std::size_t getNbMACOps() const override;
 };
 
 REGISTRAR(OperatorStats, "FC", FCStats::create);
 
+////////////////////////////////////////////////////////////////////////////////
+// MatMulStats
+
 class MatMulStats : public OperatorStats {
 public:
-    MatMulStats(const Operator& op) : OperatorStats(op) {}
+    MatMulStats() = delete;
+    MatMulStats(const Operator& op);
+
+    ~MatMulStats();
 
     static std::unique_ptr<MatMulStats> create(const Operator& op) {
         return std::make_unique<MatMulStats>(op);
     }
 
-    std::size_t getNbMACOps() const override {
-        const MatMul_Op& op_ = dynamic_cast<const MatMul_Op&>(mOp);
-        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        const std::size_t n = (op_.getInput(0)->dims().size() > 1)
-            ? op_.getInput(0)->dims().end()[-2] : 1;
-        const std::size_t k = op_.getInput(0)->dims().back();
-        const std::size_t m = (op_.getInput(1)->dims().size() > 1)
-            ? op_.getInput(1)->dims().back() : 1;
-        const std::size_t nb = (op_.getInput(0)->dims().size() > 2)
-            ? std::accumulate(op_.getInput(0)->dims().cbegin(),
-                              op_.getInput(0)->dims().cend() - 2,
-                              1,
-                              std::multiplies<std::size_t>())
-            : 1;
-
-        return nb * n * m * k;
-    }
+    std::size_t getNbMACOps() const override;
 };
 
 REGISTRAR(OperatorStats, "MatMul", MatMulStats::create);
 
+////////////////////////////////////////////////////////////////////////////////
+// ReLUStats
+
 class ReLUStats : public OperatorStats {
 public:
-    ReLUStats(const Operator& op) : OperatorStats(op) {}
+    ReLUStats() = delete;
+    ReLUStats(const Operator& op);
+
+    ~ReLUStats();
 
     static std::unique_ptr<ReLUStats> create(const Operator& op) {
         return std::make_unique<ReLUStats>(op);
     }
 
-    std::size_t getNbCompOps() const override {
-        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
-        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        return op_.getOutput(0)->size();
-    }
+    std::size_t getNbCompOps() const override;
 };
 
 REGISTRAR(OperatorStats, "ReLU", ReLUStats::create);
 
+////////////////////////////////////////////////////////////////////////////////
+// AbsStats
+
 class AbsStats : public OperatorStats {
 public:
-    AbsStats(const Operator& op) : OperatorStats(op) {}
+    AbsStats() = delete;
+    AbsStats(const Operator& op);
+
+    ~AbsStats();
 
     static std::unique_ptr<AbsStats> create(const Operator& op) {
         return std::make_unique<AbsStats>(op);
     }
 
-    std::size_t getNbCompOps() const override {
-        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
-        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        return op_.getOutput(0)->size();
-    }
+    std::size_t getNbCompOps() const override;
 
     // This is in the worst case (all values are negative)
-    std::size_t getNbArithmOps() const override {
-        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
-        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        return op_.getOutput(0)->size();
-    }
+    std::size_t getNbArithmOps() const override;
 };
 
 REGISTRAR(OperatorStats, "Abs", AbsStats::create);
 
+////////////////////////////////////////////////////////////////////////////////
+// ReduceMeanStats
+
 class ReduceMeanStats : public OperatorStats {
 public:
-    ReduceMeanStats(const Operator& op) : OperatorStats(op) {}
+    ReduceMeanStats() = delete;
+    ReduceMeanStats(const Operator& op);
+
+    ~ReduceMeanStats();
 
     static std::unique_ptr<ReduceMeanStats> create(const Operator& op) {
         return std::make_unique<ReduceMeanStats>(op);
     }
 
-    std::size_t getNbArithmOps() const override {
-        const ReduceMean_Op& op_ = dynamic_cast<const ReduceMean_Op&>(mOp);
-        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        const std::size_t nbIn = op_.getInput(0)->size();
-        const std::size_t nbOut = op_.getOutput(0)->size();
-        const std::size_t nbReduce = nbIn / nbOut;
-        // (nbReduce - 1) additions + 1 division for each output
-        return nbOut * nbReduce;
-    }
+    std::size_t getNbArithmOps() const override;
 };
 
 REGISTRAR(OperatorStats, "ReduceMean", ReduceMeanStats::create);
 
+////////////////////////////////////////////////////////////////////////////////
+// ReduceSumStats
+
 class ReduceSumStats : public OperatorStats {
 public:
-    ReduceSumStats(const Operator& op) : OperatorStats(op) {}
+    ReduceSumStats() = delete;
+    ReduceSumStats(const Operator& op);
+
+    ~ReduceSumStats();
 
     static std::unique_ptr<ReduceSumStats> create(const Operator& op) {
         return std::make_unique<ReduceSumStats>(op);
     }
 
-    std::size_t getNbArithmOps() const override {
-        const ReduceSum_Op& op_ = dynamic_cast<const ReduceSum_Op&>(mOp);
-        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        const std::size_t nbIn = op_.getInput(0)->size();
-        const std::size_t nbOut = op_.getOutput(0)->size();
-        const std::size_t nbReduce = nbIn / nbOut;
-        // (nbReduce - 1) additions for each output
-        return nbOut * (nbReduce - 1);
-    }
+    std::size_t getNbArithmOps() const override;
 };
 
 REGISTRAR(OperatorStats, "ReduceSum", ReduceSumStats::create);
 
+////////////////////////////////////////////////////////////////////////////////
+// SoftmaxStats
+
 class SoftmaxStats : public OperatorStats {
 public:
-    SoftmaxStats(const Operator& op) : OperatorStats(op) {}
+    SoftmaxStats() = delete;
+    SoftmaxStats(const Operator& op);
+
+    ~SoftmaxStats();
 
     static std::unique_ptr<SoftmaxStats> create(const Operator& op) {
         return std::make_unique<SoftmaxStats>(op);
     }
 
-    std::size_t getNbArithmOps() const override {
-        const Softmax_Op& op_ = dynamic_cast<const Softmax_Op&>(mOp);
-        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        const std::size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
-        const std::size_t nbReduce = op_.getInput(0)->dims()[axis];
-        const std::size_t nbOut = op_.getOutput(0)->size();
-        // nbOut divisions + (nbReduce - 1) additions
-        return nbOut + (nbReduce - 1);
-    }
+    std::size_t getNbArithmOps() const override;
 
-    std::size_t getNbNLOps() const override {
-        const Softmax_Op& op_ = dynamic_cast<const Softmax_Op&>(mOp);
-        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        const std::size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
-        const std::size_t nbReduce = op_.getInput(0)->dims()[axis];
-        const std::size_t nbOut = op_.getOutput(0)->size();
-        // nbOut exp + nbReduce exp
-        return nbOut + nbReduce;
-    }
+    std::size_t getNbNLOps() const override;
 };
 
 REGISTRAR(OperatorStats, "Softmax", SoftmaxStats::create);
 
+////////////////////////////////////////////////////////////////////////////////
+// MemOpStats
+
 class MemOpStats : public OperatorStats {
 public:
-    MemOpStats(const Operator& op) : OperatorStats(op) {}
+    MemOpStats() = delete;
+    MemOpStats(const Operator& op);
+
+    ~MemOpStats();
 
     static std::unique_ptr<MemOpStats> create(const Operator& op) {
         return std::make_unique<MemOpStats>(op);
@@ -459,19 +425,21 @@ REGISTRAR(OperatorStats, "Unsqueeze", MemOpStats::create);
 REGISTRAR(OperatorStats, "Gather", MemOpStats::create);
 REGISTRAR(OperatorStats, "Identity", MemOpStats::create);
 
+////////////////////////////////////////////////////////////////////////////////
+// ElemWiseOpStats
+
 class ElemWiseOpStats : public OperatorStats {
 public:
-    ElemWiseOpStats(const Operator& op) : OperatorStats(op) {}
+    ElemWiseOpStats() = delete;
+    ElemWiseOpStats(const Operator& op);
+
+    ~ElemWiseOpStats();
 
     static std::unique_ptr<ElemWiseOpStats> create(const Operator& op) {
         return std::make_unique<ElemWiseOpStats>(op);
     }
 
-    std::size_t getNbArithmOps() const override {
-        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
-        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        return op_.getOutput(0)->size();
-    }
+    std::size_t getNbArithmOps() const override;
 };
 
 REGISTRAR(OperatorStats, "Add", ElemWiseOpStats::create);
@@ -479,36 +447,40 @@ REGISTRAR(OperatorStats, "Sub", ElemWiseOpStats::create);
 REGISTRAR(OperatorStats, "Mul", ElemWiseOpStats::create);
 REGISTRAR(OperatorStats, "Div", ElemWiseOpStats::create);
 
+////////////////////////////////////////////////////////////////////////////////
+// ElemWiseLogicOpStats
+
 class ElemWiseLogicOpStats : public OperatorStats {
 public:
-    ElemWiseLogicOpStats(const Operator& op) : OperatorStats(op) {}
+    ElemWiseLogicOpStats() = delete;
+    ElemWiseLogicOpStats(const Operator& op);
+
+    ~ElemWiseLogicOpStats();
 
     static std::unique_ptr<ElemWiseLogicOpStats> create(const Operator& op) {
         return std::make_unique<ElemWiseLogicOpStats>(op);
     }
 
-    std::size_t getNbArithmOps() const override {
-        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
-        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        return op_.getOutput(0)->size();
-    }
+    std::size_t getNbArithmOps() const override;
 };
 
 REGISTRAR(OperatorStats, "And", ElemWiseLogicOpStats::create);
 
+////////////////////////////////////////////////////////////////////////////////
+// ElemWiseNLOpStats
+
 class ElemWiseNLOpStats : public OperatorStats {
 public:
-    ElemWiseNLOpStats(const Operator& op) : OperatorStats(op) {}
+    ElemWiseNLOpStats() = delete;
+    ElemWiseNLOpStats(const Operator& op);
+
+    ~ElemWiseNLOpStats();
 
     static std::unique_ptr<ElemWiseNLOpStats> create(const Operator& op) {
         return std::make_unique<ElemWiseNLOpStats>(op);
     }
 
-    std::size_t getNbNLOps() const override {
-        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
-        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        return op_.getOutput(0)->size();
-    }
+    std::size_t getNbNLOps() const override;
 };
 
 REGISTRAR(OperatorStats, "Atan", ElemWiseNLOpStats::create);
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index fac8e7fb4..1ca66b2b3 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -22,22 +22,19 @@ namespace Aidge {
 class Data {
 public:
     Data() = delete;
-    Data(Data&& other) = default;
-    Data(const Data& other) = default;
-    Data(const std::string& type): mType(type) {};
-
-    Data& operator=(const Data& other) {
-        AIDGE_ASSERT(other.mType == mType, "Cannot copy a different type fo Data object.");
-        return *this;
-    };
-    Data& operator=(Data&& other) {
-        AIDGE_ASSERT(other.mType == mType, "Cannot copy a different type fo Data object.");
-        return *this;
-    };
+    Data(Data&& other);
+    Data(const Data& other);
+    Data(const std::string& type);
+
+    virtual ~Data();
+
+    Data& operator=(const Data& other);
+
+    Data& operator=(Data&& other);
+
     constexpr const std::string& type() const {
         return mType;
     }
-    virtual ~Data() = default;
     virtual std::string toString(int precision = -1, std::size_t offset = 0) const = 0;
 
 private:
diff --git a/include/aidge/data/DataType.hpp b/include/aidge/data/DataType.hpp
index 9963534d2..7b9637f16 100644
--- a/include/aidge/data/DataType.hpp
+++ b/include/aidge/data/DataType.hpp
@@ -172,12 +172,15 @@ std::size_t getDataTypeBitWidth(const DataType& type);
 
 namespace {
 template <>
+struct EnumStrings<Aidge::DataType> {
+    static const char* const data[];
+};
 const char* const EnumStrings<Aidge::DataType>::data[]
     = {"Float64", "Float32", "Float16", "BFloat16", "Binary", "Octo_Binary",
-       "Ternary", "Int2", "Quad_Int2", "UInt2", "Quad_UInt2", "Int3",
-       "Dual_Int3", "UInt3", "Dual_UInt3", "Int4", "Dual_Int4", "UInt4",
-       "Dual_UInt4", "Int5", "Int6", "Int7", "Int8", "Int16", "Int32", "Int64",
-       "UInt5", "UInt6", "UInt7", "UInt8", "UInt16", "UInt32", "UInt64", "Any"};
+        "Ternary", "Int2", "Quad_Int2", "UInt2", "Quad_UInt2", "Int3",
+        "Dual_Int3", "UInt3", "Dual_UInt3", "Int4", "Dual_Int4", "UInt4",
+        "Dual_UInt4", "Int5", "Int6", "Int7", "Int8", "Int16", "Int32", "Int64",
+        "UInt5", "UInt6", "UInt7", "UInt8", "UInt16", "UInt32", "UInt64", "Any"};
 }
 
 #endif /* AIDGE_CORE_DATA_DATATYPE_H_ */
diff --git a/include/aidge/data/Elts.hpp b/include/aidge/data/Elts.hpp
index 4bfca706e..e9810c124 100644
--- a/include/aidge/data/Elts.hpp
+++ b/include/aidge/data/Elts.hpp
@@ -12,7 +12,8 @@
 #ifndef AIDGE_ELTS_H_
 #define AIDGE_ELTS_H_
 
-#include "aidge/utils/ErrorHandling.hpp"
+#include <fmt/core.h>
+
 #include "aidge/utils/Types.h"
 #include "aidge/utils/logger/EnumString.hpp"
 
@@ -23,81 +24,34 @@ namespace Aidge {
  * with precise data (bytes) or with tokens.
 */
 struct Elts_t {
+    // Define the type of element
     enum EltType {
         Data,
         Token,
         Undef
     };
 
-    NbElts_t data;
-    NbElts_t token;
-    EltType type;
-
-    // Addition operator
-    inline Elts_t operator+(const Elts_t& other) const {
-        AIDGE_ASSERT(type == other.type || other.type == Undef || type == Undef,
-            "Incompatible C-P model types: {} + {}. Data and Token cannot be mixed.", type, other.type);
-        return Elts_t(data + other.data, token + other.token, (other.type == Undef) ? type : other.type);
-    }
+    NbElts_t data;  ///< Amount of data (e.g. bytes)
+    NbElts_t token; ///< Count of tokens
+    EltType type;   ///< The kind of element
 
-    // Addition assignment operator
-    inline Elts_t& operator+=(const Elts_t& other) {
-        AIDGE_ASSERT(type == other.type || other.type == Undef || type == Undef,
-            "Incompatible C-P model types: {} += {}. Data and Token cannot be mixed.", type, other.type);
-        data += other.data;
-        token += other.token;
-        type = (other.type == Undef) ? type : other.type;
-        return *this;
-    }
+    // Arithmetic operators
+    Elts_t operator+(const Elts_t& other) const;
+    Elts_t& operator+=(const Elts_t& other);
 
     // Comparison operators
-    inline bool operator<(const Elts_t& other) const {
-        if (type == Elts_t::Undef || type == Elts_t::Token) {
-            // Nothing, or only a token is required: don't care about how much data has been produced for the token
-            return (token < other.token);
-        }
-        else if (type == Elts_t::Data && other.type != Elts_t::Token) {
-            // A precise amount of data is required, so the amount of produced data must be specified, a token is not enough
-            return (data < other.data);
-        }
-        else {
-            AIDGE_THROW_OR_ABORT(std::runtime_error,
-                "Incompatible C-P model types: {} < {}. Data is expected for right-hand side.", type, other.type);
-        }
-    }
-
-    inline bool operator>(const Elts_t& other) const {
-        if (type == Elts_t::Undef || type == Elts_t::Token) {
-            // Nothing, or only a token is required: don't care about how much data has been produced for the token
-            return (token > other.token);
-        }
-        else if (type == Elts_t::Data && other.type != Elts_t::Token) {
-            // A precise amount of data is required, so the amount of produced data must be specified, a token is not enough
-            return (data > other.data);
-        }
-        else {
-            AIDGE_THROW_OR_ABORT(std::runtime_error,
-                "Incompatible C-P model types: {} > {}. Data is expected for right-hand side.", type, other.type);
-        }
-    }
-
-    inline static Elts_t NoneElts() {
-        return Elts_t(0, 0, Elts_t::Undef);
-    }
+    bool operator<(const Elts_t& other) const;
+    bool operator>(const Elts_t& other) const;
 
-    inline static Elts_t DataElts(NbElts_t data, NbElts_t token = 1) {
-        return Elts_t(data, token, Elts_t::Data);
-    }
-
-    inline static Elts_t TokenElts(NbElts_t token) {
-        return Elts_t(0, token, Elts_t::Token);
-    }
+    // Factory methods to create Elts_t objects
+    static Elts_t NoneElts();
+    static Elts_t DataElts(NbElts_t data, NbElts_t token = 1);
+    static Elts_t TokenElts(NbElts_t token);
 
 private:
-    inline Elts_t(NbElts_t data_, NbElts_t token_, EltType type_):
-        data(data_), token(token_), type(type_) {}
+    Elts_t(NbElts_t data_, NbElts_t token_, EltType type_);
 };
-} // end namespace Aidge
+} // namespace Aidge
 
 template<>
 struct fmt::formatter<Aidge::Elts_t> {
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 76295270e..09c69d64b 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -12,12 +12,12 @@
 #ifndef AIDGE_CORE_DATA_TENSOR_H_
 #define AIDGE_CORE_DATA_TENSOR_H_
 
-#include <algorithm>
-#include <cstddef>      // std::size_t #include <cstring> #include <functional>   // std::multiplies
-#include <set>
+#include <cstddef>      // std::size_t
+#include <functional>   // std::function
 #include <memory>
-#include <numeric>      // std::accumulate
+#include <set>
 #include <string>
+#include <tuple>
 #include <type_traits>  // std::is_arithmetic
 #include <vector>
 
@@ -63,16 +63,7 @@ class Tensor : public Data,
      * It is considered undefined, i.e. dims can't be forwarded from such a Tensor.
      * @ref undefined() method for details
      */
-    Tensor(DataType dtype = DataType::Float32, DataFormat dformat = DataFormat::Default)
-        : Data(Type),
-          mDataType(dtype),
-          mDataFormat(dformat),
-          mDims(std::vector<DimSize_t>({})),
-          mStrides({1}),
-          mSize(0)
-    {
-        // ctor
-    }
+    Tensor(DataType dtype = DataType::Float32, DataFormat dformat = DataFormat::Default);
 
     /**
      * @brief Construct a new Tensor object from an arithmetic parameter.
@@ -85,12 +76,12 @@ class Tensor : public Data,
              typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
     Tensor(T val)
         : Data(Type),
-          mDataType(NativeType_v<VT>),
-          mDataFormat(DataFormat::Default),
-          mDims({}),
-          mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<VT>})(0, std::vector<std::size_t>())),
-          mSize(1)
+        mDataType(NativeType_v<VT>),
+        mDataFormat(DataFormat::Default),
+        mDims({}),
+        mStrides({1}),
+        mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<VT>})(0, std::vector<std::size_t>())),
+        mSize(1)
     {
         *static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
     }
@@ -100,12 +91,7 @@ class Tensor : public Data,
      *
      * @param dims dimensions of the tensor
      */
-    Tensor(const std::vector<DimSize_t>& dims)
-        : Data(Type)
-    {
-        // set mDims, mStrides, mContiguous, mSize
-        resize(dims);
-    }
+    Tensor(const std::vector<DimSize_t>& dims);
 
     /**
      * @brief Construct a new Tensor object from the 1-dimension Vector helper.
@@ -114,11 +100,11 @@ class Tensor : public Data,
     template <typename T>
     Tensor(Vector<T> &&arr)
         : Data(Type),
-          mDataType(NativeType_v<T>),
-          mDims({arr.data.size()}),
-          mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {arr.data.size()})),
-          mSize(arr.data.size())
+        mDataType(NativeType_v<T>),
+        mDims({arr.data.size()}),
+        mStrides({1}),
+        mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {arr.data.size()})),
+        mSize(arr.data.size())
     {
         mImpl->copyFromHost(&arr.data[0], arr.data.size());
     }
@@ -131,12 +117,12 @@ class Tensor : public Data,
     template <typename T, std::size_t SIZE_0>
     constexpr Tensor(Array1D<T, SIZE_0> &&arr)
         : Data(Type),
-          mDataType(NativeType_v<T>),
-          mDataFormat(DataFormat::Default),
-          mDims({SIZE_0}),
-          mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0})),
-          mSize(SIZE_0)
+        mDataType(NativeType_v<T>),
+        mDataFormat(DataFormat::Default),
+        mDims({SIZE_0}),
+        mStrides({1}),
+        mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0})),
+        mSize(SIZE_0)
     {
         mImpl->copyFromHost(&arr.data[0], SIZE_0);
     }
@@ -155,7 +141,8 @@ class Tensor : public Data,
           mDims({SIZE_0, SIZE_1}),
           mStrides({SIZE_1, 1}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1})),
-          mSize(SIZE_0 * SIZE_1) {
+          mSize(SIZE_0 * SIZE_1)
+    {
         mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
     }
 
@@ -169,12 +156,13 @@ class Tensor : public Data,
     template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
     constexpr Tensor(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr)
         : Data(Type),
-          mDataType(NativeType_v<T>),
-          mDataFormat(DataFormat::Default),
-          mDims({SIZE_0, SIZE_1, SIZE_2}),
-          mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1, SIZE_2})),
-          mSize(SIZE_0 * SIZE_1 * SIZE_2) {
+        mDataType(NativeType_v<T>),
+        mDataFormat(DataFormat::Default),
+        mDims({SIZE_0, SIZE_1, SIZE_2}),
+        mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}),
+        mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1, SIZE_2})),
+        mSize(SIZE_0 * SIZE_1 * SIZE_2)
+    {
         mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
     }
 
@@ -189,12 +177,13 @@ class Tensor : public Data,
     template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
     constexpr Tensor(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr)
         : Data(Type),
-          mDataType(NativeType_v<T>),
-          mDataFormat(DataFormat::Default),
-          mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
-          mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
-          mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3) {
+        mDataType(NativeType_v<T>),
+        mDataFormat(DataFormat::Default),
+        mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
+        mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}),
+        mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
+        mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3)
+    {
         mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
     }
 
@@ -256,13 +245,7 @@ class Tensor : public Data,
      * @brief Assess data type, dimensions, backend and data are the same.
      * @param otherTensor
      */
-    bool operator==(const Tensor &otherTensor) const {
-        if ((!mImpl && !otherTensor.mImpl) || (dataType() != otherTensor.dataType()) ||
-            (dims() != otherTensor.dims()) || (mImpl->backend() != otherTensor.mImpl->backend())) {
-            return false;
-        }
-        return *mImpl == *(otherTensor.mImpl);
-    }
+    bool operator==(const Tensor &otherTensor) const;
 
     /**
      * @brief Element-wise addition operation for two ``Tensor``s.
@@ -284,7 +267,7 @@ class Tensor : public Data,
     Tensor& operator+=(const Tensor& other);
     template<typename T,
              typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
-    Tensor& operator+=(T val) {return *this += Tensor(val); }
+    inline Tensor& operator+=(T val) {return *this += Tensor(val); }
 
     /**
      * @brief Element-wise subtraction operation for two ``Tensor``s.
@@ -459,34 +442,7 @@ public:
      *                  format are different from DataFormat::Default, previous
      *                  data is copy-transposed.
      */
-    void setDataFormat(const DataFormat df, bool copyTrans = true) {
-        if (!copyTrans || df == dataFormat() || df == DataFormat::Default || dataFormat() == DataFormat::Default) {
-            mDataFormat = df;
-            return;
-        }
-
-        const auto transpose = getPermutationMapping(dataFormat(), df);
-
-        if (mImpl) {
-            copyTranspose(*this, transpose);
-        } else {
-            std::vector<DimSize_t> newDims;
-            for (std::size_t i = 0; i < dims().size(); ++i) {
-                newDims.push_back(dims()[transpose[i]]);
-            }
-
-            std::vector<std::size_t> newStrides(dims().size(), 1);
-            for (size_t i = 0; i < dims().size(); ++i) {
-                for (size_t j = i + 1; j < dims().size(); ++j) {
-                    newStrides[i] *= newDims[j];
-                }
-            }
-            mDims = std::move(newDims);
-            mStrides = std::move(newStrides);
-        }
-
-        mDataFormat = df;
-    }
+    void setDataFormat(const DataFormat df, bool copyTrans = true);
 
     /**
      * @brief Get the Impl object
@@ -771,13 +727,7 @@ public:
      * @param coordIdx Coordinate to an element in the tensor
      * @return DimSize_t Storage index
      */
-    std::size_t getStorageIdx(const std::vector<std::size_t>& coordIdx) const {
-        AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
-        for(std::size_t i = 0; i < coordIdx.size(); ++i) {
-            AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
-        }
-        return std::inner_product(coordIdx.cbegin(), coordIdx.cend(), mStrides.cbegin(), DimSize_t(0));
-    }
+    std::size_t getStorageIdx(const std::vector<std::size_t>& coordIdx) const;
 
     /**
      * @brief Returns a sub-tensor with equal or lower number of dimensions.
@@ -1005,9 +955,7 @@ private:
      * @note If dimensions are not empty, they are multiplied to get the total number
      * of elements. Else, the Tensor represents a scalar and contains a single element.
      */
-    void computeSize() {
-        mSize = std::accumulate(mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
-    }
+    void computeSize();
 };
 }  // namespace Aidge
 
diff --git a/include/aidge/graph/Connector.hpp b/include/aidge/graph/Connector.hpp
index ec59e1b38..63e31d963 100644
--- a/include/aidge/graph/Connector.hpp
+++ b/include/aidge/graph/Connector.hpp
@@ -14,7 +14,6 @@
 #include <memory>
 #include <vector>
 
-#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -38,40 +37,32 @@ class GraphView;
  *  GraphView g = x.generateGraph();
  */
 class Connector {
-   private:
+private:
+    /// @brief Pointer to the associated Node.
     std::shared_ptr<Node> mNode;
     ///\brief output id
     ///\details gk_IODefaultIndex is reserved for?
     ///\bug Is negative value pertinent?
     IOIndex_t mOutputId = gk_IODefaultIndex;
 
-   public:
-    Connector() : mNode(nullptr) {
-        // ctor
-    }
+public:
+    Connector() noexcept;
     Connector(std::shared_ptr<Node> node);
 
-    ~Connector() = default;
+    ~Connector();
 
-   public:
-    Connector operator[](IOIndex_t index) {
-        AIDGE_ASSERT((size() > 1), "Cannot refer a slice of the output.");
-        return Connector(mNode, index);
-    }
+public:
+    Connector operator[](IOIndex_t index) const;
 
-   public:
+public:
     IOIndex_t size() const;
 
-    inline std::shared_ptr<Node> node() const { return mNode; }
+    inline std::shared_ptr<Node> node() const noexcept { return mNode; }
 
-    inline IOIndex_t index() const { return mOutputId; }
+    inline IOIndex_t index() const noexcept { return mOutputId; }
 
-   private:
-    Connector(std::shared_ptr<Node> node, IOIndex_t index) : mNode(node) {
-        AIDGE_ASSERT((index != gk_IODefaultIndex) && (index < size()),
-               "Non-valid output index.\n");
-        mOutputId = index;
-    }
+private:
+    Connector(std::shared_ptr<Node> node, IOIndex_t index);
 };
 
 /**
@@ -80,7 +71,7 @@ class Connector {
  * @param ctors list of output Connector for the graph to generate.
  * @return std::shared_ptr<GraphView>
  */
-std::shared_ptr<GraphView> generateGraph(std::vector<Connector> ctors);
+std::shared_ptr<GraphView> generateGraph(const std::vector<Connector>& ctors);
 }  // namespace Aidge
 
 #endif /* AIDGE_CORE_GRAPH_CONNECTOR_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Abs.hpp b/include/aidge/operator/Abs.hpp
index d8dc62752..d70387ed0 100644
--- a/include/aidge/operator/Abs.hpp
+++ b/include/aidge/operator/Abs.hpp
@@ -43,7 +43,7 @@ class Abs_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Abs_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+    Abs_Op();
 
     /**
      * @brief Copy-constructor.
@@ -51,15 +51,7 @@ public:
      * @details Copies the operator attributes and its output tensor(s), but not
      * its input tensors. The new operator has no associated input.
      */
-    Abs_Op(const Abs_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Abs_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Abs_Op(const Abs_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -80,9 +72,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Abs(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Abs_Op>(), name);
-}
-}
+std::shared_ptr<Node> Abs(const std::string& name = "");
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_ABS_H_ */
diff --git a/include/aidge/operator/And.hpp b/include/aidge/operator/And.hpp
index 32b6684a0..5c970cc43 100644
--- a/include/aidge/operator/And.hpp
+++ b/include/aidge/operator/And.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
  * @brief Description of an element-wise And operation on input Tensors,
  * supporting NumPy broadcasting.
  *
- * For each pair of elements x and y from the input Tensors, the function 
+ * For each pair of elements x and y from the input Tensors, the function
  * is defined as:
  * `f(x, y) = x && y`
  *
@@ -36,13 +36,13 @@ namespace Aidge {
  * - Tensors are aligned from the rightmost dimensions.
  * - Dimensions are compatible if they are equal, one of them is 1, or missing.
  *
- * The output Tensor shape is determined by taking the maximum size along 
+ * The output Tensor shape is determined by taking the maximum size along
  * each dimension of the input Tensors after broadcasting.
- * 
+ *
  * Examples:
  * 1. Input A: (3, 4, 2), Input B: (2), Output: (3, 4, 2)
  * 2. Input A: (1, 5, 3), Input B: (2, 1, 3), Output: (2, 5, 3)
- * 
+ *
  * @see OperatorTensor
  * @see Registrable
  */
@@ -51,7 +51,7 @@ class And_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    And_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
+    And_Op();
 
     /**
      * @brief Copy-constructor.
@@ -59,15 +59,7 @@ public:
      * @details Copies the operator attributes and its output tensor(s), but not
      * its input tensors. The new operator has no associated input.
      */
-    And_Op(const And_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(And_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    And_Op(const And_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -90,9 +82,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> And(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<And_Op>(), name);
-}
+std::shared_ptr<Node> And(const std::string& name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_AND_H_ */
diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
index 5057310d3..cc520d3fb 100644
--- a/include/aidge/operator/ArgMax.hpp
+++ b/include/aidge/operator/ArgMax.hpp
@@ -107,13 +107,7 @@ public:
      * @param[in] keep_dims Whether to retain reduced dimensions with size 1 (`true`) or remove them (`false`).
      * @param[in] select_last_index Whether to select the last occurrence of the maximum value (`true`) or the first (`false`).
      */
-    ArgMax_Op(std::int32_t axis = 0, bool keep_dims = true, bool select_last_index = false)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ArgMaxAttr::Axis>(axis),
-            attr<ArgMaxAttr::KeepDims>(keep_dims),
-            attr<ArgMaxAttr::SelectLastIndex>(select_last_index)))
-    {}
+    ArgMax_Op(std::int32_t axis = 0, bool keep_dims = true, bool select_last_index = false);
 
     /**
      * @brief Copy constructor for the ArgMax operator.
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 505a06398..86e1946fa 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -131,17 +131,10 @@ public:
      * @param[in] dilations Spatial dilations for the pooling operation.
      * @param[in] ceil_mode Indicates whether to use ceil mode for output size calculation.
      */
-    constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+    AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
                             const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t, DIM>(1),
-                            bool ceil_mode = false)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-                        attr<AvgPoolingAttr::StrideDims>(stride_dims),
-                        attr<AvgPoolingAttr::KernelDims>(kernel_dims),
-                        attr<AvgPoolingAttr::Dilations>(dilations),
-                        attr<AvgPoolingAttr::CeilMode>(ceil_mode)))
-    {}
+                            bool ceil_mode = false);
 
     /**
      * @brief Copy-constructor.
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 81a679502..f3a1a8d56 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -100,19 +100,7 @@ public:
      * @param[in] momentum Momentum for the moving average of statistics.
      * @param[in] trainingMode Flag indicating whether to use current or moving average statistics.
      */
-    constexpr BatchNorm_Op(float epsilon, float momentum, bool trainingMode)
-        : OperatorTensor(Type,
-                            {InputCategory::Data,
-                             InputCategory::Param,
-                             InputCategory::Param,
-                             InputCategory::Param,
-                             InputCategory::Param},
-                            1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<BatchNormAttr::Epsilon>(epsilon),
-            attr<BatchNormAttr::Momentum>(momentum),
-            attr<BatchNormAttr::TrainingMode>(trainingMode)
-            )) {}
+    BatchNorm_Op(float epsilon, float momentum, bool trainingMode);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
index 8ae64ede5..d1a5ec140 100644
--- a/include/aidge/operator/BitShift.hpp
+++ b/include/aidge/operator/BitShift.hpp
@@ -87,27 +87,13 @@ public:
      * @brief Constructor to initialize the `BitShift_Op` with a shift direction.
      * @param[in] direction The direction of the bitwise shift (left or right).
      */
-    BitShift_Op(BitShiftDirection direction, bool rounding = false)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-              attr<BitShiftAttr::BitShiftdirection>(direction),
-              attr<BitShiftAttr::Rounding>(rounding))) 
-              {}
+    BitShift_Op(BitShiftDirection direction, bool rounding = false);
 
     /**
      * @brief Copy-constructor. Copies operator attributes and output tensors but not input tensors.
      * @param[in] op Operator instance to copy.
      */
-    BitShift_Op(const BitShift_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(BitShift_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    BitShift_Op(const BitShift_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -185,9 +171,7 @@
  * @param[in] name (Optional) Name of the node.
  * @return A shared pointer to the created node.
  */
-inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direction,bool rounding = false, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<BitShift_Op>(direction,rounding), name);
-}
+std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direction, bool rounding = false, const std::string& name = "");
 
 } // namespace Aidge
 
diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp
index 4d5d2a93c..886e74ea1 100644
--- a/include/aidge/operator/Clip.hpp
+++ b/include/aidge/operator/Clip.hpp
@@ -98,24 +98,13 @@ public:
      * @param[in] min Minimum value for clipping.
      * @param[in] max Maximum value for clipping.
      */
-    Clip_Op(float min, float max)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData}, 1),
-          mAttributes(std::make_shared<Attributes_>(attr<ClipAttr::Min>(min), attr<ClipAttr::Max>(max))) {}
+    Clip_Op(float min, float max);
 
     /**
      * @brief Copy-constructor. Copies operator attributes and output tensors, but not input tensors.
      * @param op Clip_Op instance to copy.
      */
-    Clip_Op(const Clip_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Clip_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Clip_Op(const Clip_Op& op);
 
     /**
      * @brief Clone the operator using its copy constructor.
diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp
index 886df95a8..a5d30f696 100644
--- a/include/aidge/operator/ConstantOfShape.hpp
+++ b/include/aidge/operator/ConstantOfShape.hpp
@@ -77,10 +77,7 @@ public:
    * @param[in] value : a scalar tensor which holds the value that will
    * fill the output tensor
    */
-  ConstantOfShape_Op(const Tensor &value = Tensor(0.f))
-      : OperatorTensor(Type, {InputCategory::Data}, 1),
-        mAttributes(std::make_shared<Attributes_>(
-            attr<ConstantOfShapeAttr::Value>(value))) {}
+  ConstantOfShape_Op(const Tensor &value = Tensor(0.f));
 
   /**
    * @brief Copy-constructor. Copy the operator attributes and its output
@@ -88,14 +85,7 @@ public:
    * associated).
    * @param op Operator to copy.
    */
-  ConstantOfShape_Op(const ConstantOfShape_Op &op)
-      : OperatorTensor(op), mAttributes(op.mAttributes) {
-    if (op.mImpl) {
-      SET_IMPL_MACRO(ConstantOfShape_Op, *this, op.backend());
-    } else {
-      mImpl = nullptr;
-    }
-  }
+  ConstantOfShape_Op(const ConstantOfShape_Op &op);
 
   /**
    * @brief Clone the operator using its copy-constructor.
@@ -141,11 +131,8 @@ public:
 
 // helper with C-style array instead of std::array for kernel_dims to allow
 // automatic template DIM deduction
-inline std::shared_ptr<Node> ConstantOfShape(const Tensor value = Tensor(0.f),
-                                             const std::string &name = "") {
-  return std::make_shared<Node>(std::make_shared<ConstantOfShape_Op>(value),
-                                name);
-}
+std::shared_ptr<Node> ConstantOfShape(const Tensor value = Tensor(0.f),
+                                             const std::string &name = "");
 } // namespace Aidge
 
 #undef LIST_CONSTANTOFSHAPE_ATTR
diff --git a/src/analysis/DynamicAnalysis.cpp b/src/analysis/DynamicAnalysis.cpp
index 039820154..cb4adf28a 100644
--- a/src/analysis/DynamicAnalysis.cpp
+++ b/src/analysis/DynamicAnalysis.cpp
@@ -14,18 +14,8 @@
 #include <cstddef>  // std::size_t
 #include <memory>
 #include <numeric>  // std::accumulate
-#include <set>
 
-#include <fmt/core.h>  // fmt::println
-#include <fmt/format.h>
-#include <fmt/ranges.h>
-
-#include "aidge/data/DataType.hpp"  // Aidge::isFloatingPoint
-#include "aidge/data/Tensor.hpp"
-#include "aidge/graph/GraphView.hpp"
-#include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/analysis/OperatorStats.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 
 Aidge::DynamicAnalysis::DynamicAnalysis(const Scheduler& scheduler)
diff --git a/src/analysis/OperatorStats.cpp b/src/analysis/OperatorStats.cpp
index a020403ad..846735c47 100644
--- a/src/analysis/OperatorStats.cpp
+++ b/src/analysis/OperatorStats.cpp
@@ -15,26 +15,34 @@
 #include <cstddef>  // std::size_t
 #include <memory>
 #include <numeric>  // std::accumulate
-#include <set>
-
-#include <fmt/core.h>  // fmt::println
-#include <fmt/format.h>
-#include <fmt/ranges.h>
 
 #include "aidge/data/DataType.hpp"  // Aidge::isFloatingPoint
 #include "aidge/data/Tensor.hpp"
-#include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/AvgPooling.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/ReduceMean.hpp"
+#include "aidge/operator/ReduceSum.hpp"
+#include "aidge/operator/Softmax.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+
+#include "aidge/utils/ErrorHandling.hpp"
+
+namespace Aidge {
 
-Aidge::OperatorStats::OperatorStats(const Operator& op)
+OperatorStats::OperatorStats(const Operator& op)
   : mOp(op)
 {
     //ctor
 }
 
-std::shared_ptr<Aidge::OperatorStats> Aidge::OperatorStats::getOpStats(std::shared_ptr<Node> node) {
+std::shared_ptr<OperatorStats> OperatorStats::getOpStats(std::shared_ptr<Node> node) {
     return (Registrar<OperatorStats>::exists(node->type()))
         ? Registrar<OperatorStats>::create(node->type())(*(node->getOperator()))
         : (node->getOperator()->isAtomic())
@@ -42,9 +50,9 @@ std::shared_ptr<Aidge::OperatorStats> Aidge::OperatorStats::getOpStats(std::shar
             : std::make_shared<MetaOpStats>(*(node->getOperator()));
 }
 
-Aidge::OperatorStats::~OperatorStats() = default;
+OperatorStats::~OperatorStats() = default;
 
-std::size_t Aidge::OperatorStats::getNbArithmIntOps() const {
+std::size_t OperatorStats::getNbArithmIntOps() const {
     const auto opTensor = dynamic_cast<const OperatorTensor*>(&mOp);
     if (opTensor) {
         if (!isFloatingPoint(opTensor->getOutput(0)->dataType())) {
@@ -55,12 +63,276 @@ std::size_t Aidge::OperatorStats::getNbArithmIntOps() const {
 }
 
 ////////////////////////////////////////////////////////////////////////////////
+// MetaOpStats
+
+MetaOpStats::MetaOpStats(const Operator& op) : OperatorStats(op) {}
+
+MetaOpStats::~MetaOpStats() = default;
+
+std::size_t MetaOpStats::getNbArithmOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmOps(); }
+std::size_t MetaOpStats::getNbLogicOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbLogicOps(); }
+std::size_t MetaOpStats::getNbCompOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbCompOps(); }
+std::size_t MetaOpStats::getNbNLOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbNLOps(); }
+std::size_t MetaOpStats::getNbArithmIntOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmIntOps(); }
+std::size_t MetaOpStats::getNbMACOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbMACOps(); }
+
+////////////////////////////////////////////////////////////////////////////////
+// ConvStats
+
+template <class OP>
+ConvStats<OP>::ConvStats(const Operator& op) :OperatorStats(op) {}
+template <class OP>
+ConvStats<OP>::~ConvStats() = default;
+
+template <class OP>
+std::size_t ConvStats<OP>::getNbMACOps() const {
+    const OP& op_ = dynamic_cast<const OP&>(mOp);
+    AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+    const std::size_t weightsSize = op_.getInput(1)->size();
+    const std::size_t outputSize
+        = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
+                          op_.getOutput(0)->dims().cend(),
+                          1,
+                          std::multiplies<std::size_t>()); // NCHW...
+    const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
+    return batchSize * (weightsSize * outputSize);
+}
+
+template class ConvStats<Conv_Op<1>>;
+template class ConvStats<ConvDepthWise_Op<1>>;
+template class ConvStats<Conv_Op<2>>;
+template class ConvStats<ConvDepthWise_Op<2>>;
+
+////////////////////////////////////////////////////////////////////////////////
+// MaxPoolingStats
+
+template <class OP>
+MaxPoolingStats<OP>::MaxPoolingStats(const Operator& op) :OperatorStats(op) {}
+template <class OP>
+MaxPoolingStats<OP>::~MaxPoolingStats() = default;
+
+template <class OP>
+std::size_t MaxPoolingStats<OP>::getNbCompOps() const {
+    const OP& op_ = dynamic_cast<const OP&>(mOp);
+    AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+    const std::size_t poolSize
+        = std::accumulate(op_.kernelDims().cbegin(),
+                          op_.kernelDims().cend(),
+                          1,
+                          std::multiplies<std::size_t>());
+    const std::size_t outputSize
+        = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
+                          op_.getOutput(0)->dims().cend(),
+                          1,
+                          std::multiplies<std::size_t>()); // NCHW...
+    const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
+    return batchSize * ((poolSize - 1) * outputSize);
+}
+
+template class MaxPoolingStats<MaxPooling_Op<1>>;
+template class MaxPoolingStats<MaxPooling_Op<2>>;
+template class MaxPoolingStats<MaxPooling_Op<3>>;
+
+////////////////////////////////////////////////////////////////////////////////
+// AvgPoolingStats
+
+template <class OP>
+AvgPoolingStats<OP>::AvgPoolingStats(const Operator& op) :OperatorStats(op) {}
+template <class OP>
+AvgPoolingStats<OP>::~AvgPoolingStats() = default;
+
+template <class OP>
+std::size_t AvgPoolingStats<OP>::getNbArithmOps() const {
+    const OP& op_ = dynamic_cast<const OP&>(mOp);
+    AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+    const std::size_t poolSize
+        = std::accumulate(op_.kernelDims().cbegin(),
+                          op_.kernelDims().cend(),
+                          1,
+                          std::multiplies<std::size_t>());
+    const std::size_t outputSize
+        = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
+                          op_.getOutput(0)->dims().cend(),
+                          1,
+                          std::multiplies<std::size_t>()); // NCHW...
+    const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
+    // (poolSize - 1) additions + 1 division for each output
+    return batchSize * (poolSize * outputSize);
+}
+
+template class AvgPoolingStats<AvgPooling_Op<1>>;
+template class AvgPoolingStats<AvgPooling_Op<2>>;
+template class AvgPoolingStats<AvgPooling_Op<3>>;
+template class AvgPoolingStats<AvgPooling_Op<4>>;
+
+////////////////////////////////////////////////////////////////////////////////
+// FCStats
+
+FCStats::FCStats(const Operator& op) : OperatorStats(op) {}
+FCStats::~FCStats() = default;
+
+std::size_t FCStats::getNbMACOps() const {
+    const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
+    AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+    const std::size_t weightsSize = op_.getInput(1)->size();
+    const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
+    return batchSize * weightsSize;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// MatMulStats
+
+MatMulStats::MatMulStats(const Operator& op) : OperatorStats(op) {}
+MatMulStats::~MatMulStats() = default;
+
+std::size_t MatMulStats::getNbMACOps() const {
+    const MatMul_Op& op_ = dynamic_cast<const MatMul_Op&>(mOp);
+    AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+    const std::size_t n = (op_.getInput(0)->dims().size() > 1)
+        ? op_.getInput(0)->dims().end()[-2] : 1;
+    const std::size_t k = op_.getInput(0)->dims().back();
+    const std::size_t m = (op_.getInput(1)->dims().size() > 1)
+        ? op_.getInput(1)->dims().back() : 1;
+    const std::size_t nb = (op_.getInput(0)->dims().size() > 2)
+        ? std::accumulate(op_.getInput(0)->dims().cbegin(),
+                          op_.getInput(0)->dims().cend() - 2,
+                          1,
+                          std::multiplies<std::size_t>())
+        : 1;
+
+    return nb * n * m * k;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ReLUStats
+
+ReLUStats::ReLUStats(const Operator& op) : OperatorStats(op) {}
+ReLUStats::~ReLUStats() = default;
+
+std::size_t ReLUStats::getNbCompOps() const {
+    const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+    AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+    return op_.getOutput(0)->size();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// AbsStats
+
+AbsStats::AbsStats(const Operator& op) : OperatorStats(op) {}
+AbsStats::~AbsStats() = default;
+
+std::size_t AbsStats::getNbCompOps() const {
+    const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+    AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+    return op_.getOutput(0)->size();
+}
+
+// This is in the worst case (all values are negative)
+std::size_t AbsStats::getNbArithmOps() const {
+    const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+    AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+    return op_.getOutput(0)->size();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ReduceMeanStats
+
+ReduceMeanStats::ReduceMeanStats(const Operator& op) : OperatorStats(op) {}
+ReduceMeanStats::~ReduceMeanStats() = default;
+
+std::size_t ReduceMeanStats::getNbArithmOps() const {
+    const ReduceMean_Op& op_ = dynamic_cast<const ReduceMean_Op&>(mOp);
+    AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+    const std::size_t nbIn = op_.getInput(0)->size();
+    const std::size_t nbOut = op_.getOutput(0)->size();
+    const std::size_t nbReduce = nbIn / nbOut;
+    // (nbReduce - 1) additions + 1 division for each output
+    return nbOut * nbReduce;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ReduceSumStats
+
+ReduceSumStats::ReduceSumStats(const Operator& op) : OperatorStats(op) {}
+ReduceSumStats::~ReduceSumStats() = default;
+
+std::size_t ReduceSumStats::getNbArithmOps() const {
+    const ReduceSum_Op& op_ = dynamic_cast<const ReduceSum_Op&>(mOp);
+    AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+    const std::size_t nbIn = op_.getInput(0)->size();
+    const std::size_t nbOut = op_.getOutput(0)->size();
+    const std::size_t nbReduce = nbIn / nbOut;
+    // (nbReduce - 1) additions for each output
+    return nbOut * (nbReduce - 1);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SoftmaxStats
+
+SoftmaxStats::SoftmaxStats(const Operator& op) : OperatorStats(op) {}
+SoftmaxStats::~SoftmaxStats() = default;
+
+std::size_t SoftmaxStats::getNbArithmOps() const {
+    const Softmax_Op& op_ = dynamic_cast<const Softmax_Op&>(mOp);
+    AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+    const std::size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
+    const std::size_t nbReduce = op_.getInput(0)->dims()[axis];
+    const std::size_t nbOut = op_.getOutput(0)->size();
+    // nbOut divisions + (nbReduce - 1) additions
+    return nbOut + (nbReduce - 1);
+}
 
-Aidge::MetaOpStats::~MetaOpStats() = default;
+std::size_t SoftmaxStats::getNbNLOps() const {
+    const Softmax_Op& op_ = dynamic_cast<const Softmax_Op&>(mOp);
+    AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+    const std::size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
+    const std::size_t nbReduce = op_.getInput(0)->dims()[axis];
+    const std::size_t nbOut = op_.getOutput(0)->size();
+    // nbOut exp + nbReduce exp
+    return nbOut + nbReduce;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// MemOpStats
+
+MemOpStats::MemOpStats(const Operator& op) : OperatorStats(op) {}
+MemOpStats::~MemOpStats() = default;
+
+////////////////////////////////////////////////////////////////////////////////
+// ElemWiseOpStats
+
+ElemWiseOpStats::ElemWiseOpStats(const Operator& op) : OperatorStats(op) {}
+ElemWiseOpStats::~ElemWiseOpStats() = default;
+
+std::size_t ElemWiseOpStats::getNbArithmOps() const {
+    const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+    AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+    return op_.getOutput(0)->size();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ElemWiseLogicOpStats
+
+ElemWiseLogicOpStats::ElemWiseLogicOpStats(const Operator& op) : OperatorStats(op) {}
+ElemWiseLogicOpStats::~ElemWiseLogicOpStats() = default;
+
+std::size_t ElemWiseLogicOpStats::getNbArithmOps() const {
+    const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+    AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+    return op_.getOutput(0)->size();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ElemWiseNLOpStats
+
+ElemWiseNLOpStats::ElemWiseNLOpStats(const Operator& op) : OperatorStats(op) {}
+ElemWiseNLOpStats::~ElemWiseNLOpStats() = default;
+
+
+std::size_t ElemWiseNLOpStats::getNbNLOps() const {
+    const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+    AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+    return op_.getOutput(0)->size();
+}
 
-std::size_t Aidge::MetaOpStats::getNbArithmOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmOps(); }
-std::size_t Aidge::MetaOpStats::getNbLogicOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbLogicOps(); }
-std::size_t Aidge::MetaOpStats::getNbCompOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbCompOps(); }
-std::size_t Aidge::MetaOpStats::getNbNLOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbNLOps(); }
-std::size_t Aidge::MetaOpStats::getNbArithmIntOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmIntOps(); }
-std::size_t Aidge::MetaOpStats::getNbMACOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbMACOps(); }
+} // namespace Aidge
\ No newline at end of file
diff --git a/src/analysis/StaticAnalysis.cpp b/src/analysis/StaticAnalysis.cpp
index 0e32618c2..0e99a975d 100644
--- a/src/analysis/StaticAnalysis.cpp
+++ b/src/analysis/StaticAnalysis.cpp
@@ -24,8 +24,10 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
 
 Aidge::StaticAnalysis::StaticAnalysis(std::shared_ptr<GraphView> graph)
   : mGraph(graph)
diff --git a/src/data/Data.cpp b/src/data/Data.cpp
new file mode 100644
index 000000000..26802b1e8
--- /dev/null
+++ b/src/data/Data.cpp
@@ -0,0 +1,35 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <string>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+namespace Aidge {
+
+Data::Data(Data&& other): mType(other.mType) {};
+Data::Data(const Data& other): mType(other.mType) {};
+Data::Data(const std::string& type): mType(type) {};
+
+Data::~Data() = default;
+
+Data& Data::operator=(const Data& other) {
+    AIDGE_ASSERT(other.mType == mType, "Cannot copy a different type for Data object.");
+    return *this;
+};
+
+Data& Data::operator=(Data&& other) {
+    AIDGE_ASSERT(other.mType == mType, "Cannot move a different type for Data object.");
+    return *this;
+};
+
+} // namespace Aidge
diff --git a/src/data/Elts.cpp b/src/data/Elts.cpp
new file mode 100644
index 000000000..24f95476b
--- /dev/null
+++ b/src/data/Elts.cpp
@@ -0,0 +1,91 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/data/Elts.hpp"
+
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+// Private constructor definition
+Elts_t::Elts_t(NbElts_t data_, NbElts_t token_, EltType type_)
+    : data(data_),
+      token(token_),
+      type(type_)
+{
+    // ctor
+}
+
+// Addition operator: returns a new Elts_t after addition.
+Elts_t Elts_t::operator+(const Elts_t& other) const {
+    AIDGE_ASSERT(type == other.type || other.type ==  EltType::Undef || type ==  EltType::Undef,
+        "Incompatible C-P model types: {} + {}. Data and Token cannot be mixed.", type, other.type);
+    return Elts_t(data + other.data,
+                  token + other.token,
+                  (other.type == Undef) ? type : other.type);
+}
+
+// Addition assignment operator.
+Elts_t& Elts_t::operator+=(const Elts_t& other) {
+    AIDGE_ASSERT(type == other.type || other.type ==  EltType::Undef || type ==  EltType::Undef,
+        "Incompatible C-P model types: {} += {}. Data and Token cannot be mixed.", type, other.type);
+    data += other.data;
+    token += other.token;
+    type = (other.type == Undef) ? type : other.type;
+    return *this;
+}
+
+// Comparison operators
+bool Elts_t::operator<(const Elts_t& other) const {
+    if (type ==  EltType::Undef || type ==  EltType::Token) {
+        // Nothing, or only a token is required: don't care about how much data has been produced for the token
+        return (token < other.token);
+    }
+    else if (type == Elts_t::Data && other.type != Elts_t::Token) {
+        // A precise amount of data is required, so the amount of produced data must be specified, a token is not enough
+        return (data < other.data);
+    }
+    else {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "Incompatible C-P model types: {} < {}. Data is expected for right-hand side.", type, other.type);
+    }
+}
+
+bool Elts_t::operator>(const Elts_t& other) const {
+    if (type ==  EltType::Undef || type ==  EltType::Token) {
+        // Nothing, or only a token is required: don't care about how much data has been produced for the token
+        return (token > other.token);
+    }
+    else if (type ==  EltType::Data && other.type !=  EltType::Token) {
+        // A precise amount of data is required, so the amount of produced data must be specified, a token is not enough
+        return (data > other.data);
+    }
+    else {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "Incompatible C-P model types: {} > {}. Data is expected for right-hand side.", type, other.type);
+    }
+}
+
+// Factory methods.
+Elts_t Elts_t::NoneElts() {
+    return Elts_t(0, 0,  EltType::Undef);
+}
+
+Elts_t Elts_t::DataElts(NbElts_t data, NbElts_t token) {
+    return Elts_t(data, token,  EltType::Data);
+}
+
+Elts_t Elts_t::TokenElts(NbElts_t token) {
+    return Elts_t(0, token,  EltType::Token);
+}
+
+} // namespace Aidge
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index d44168564..b85bb472c 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -28,14 +28,42 @@
 
 namespace Aidge {
 
+Tensor::Tensor(DataType dtype, DataFormat dformat)
+    : Data(Type),
+      mDataType(dtype),
+      mDataFormat(dformat),
+      mDims(std::vector<DimSize_t>({})),
+      mStrides({1}),
+      mSize(0)
+{
+    // ctor
+}
+
+Tensor::Tensor(const std::vector<DimSize_t>& dims)
+    : Data(Type)
+{
+    // set mDims, mStrides, mContiguous, mSize
+    resize(dims);
+}
+
 Tensor::Tensor(const Tensor& other) = default;
 Tensor::Tensor(Tensor&& other) = default;
 
+Tensor::~Tensor() noexcept = default;
+
+///////////////////////////////////////////////
+// operator
+
 Tensor& Tensor::operator=(const Tensor& other) = default;
 Tensor& Tensor::operator=(Tensor&& other) = default;
 
-Tensor::~Tensor() noexcept = default;
-
+bool Tensor::operator==(const Tensor &otherTensor) const {
+    if ((!mImpl || !otherTensor.mImpl) || (dataType() != otherTensor.dataType()) ||
+        (dims() != otherTensor.dims()) || (mImpl->backend() != otherTensor.mImpl->backend())) {
+        return false;
+    }
+    return *mImpl == *(otherTensor.mImpl);
+}
 
 Tensor Tensor::operator+(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
@@ -247,8 +275,41 @@ void Tensor::setBackend(const std::string &name, DeviceIdx_t device, bool copyFr
     else {
         mImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
     }
+}
+
+void Tensor::setDataFormat(const DataFormat df, bool copyTrans) {
+    if (!copyTrans
+        || df == dataFormat()
+        || df == DataFormat::Default
+        || dataFormat() == DataFormat::Default)
+    {
+        mDataFormat = df;
+        return;
     }
 
+    const auto transpose = getPermutationMapping(dataFormat(), df);
+
+    if (mImpl) {
+        copyTranspose(*this, transpose);
+    } else {
+        std::vector<DimSize_t> newDims;
+        for (std::size_t i = 0; i < dims().size(); ++i) {
+            newDims.push_back(dims()[transpose[i]]);
+        }
+
+        std::vector<std::size_t> newStrides(dims().size(), 1);
+        for (size_t i = 0; i < dims().size(); ++i) {
+            for (size_t j = i + 1; j < dims().size(); ++j) {
+                newStrides[i] *= newDims[j];
+            }
+        }
+        mDims = std::move(newDims);
+        mStrides = std::move(newStrides);
+    }
+
+    mDataFormat = df;
+}
+
 void Tensor::resize(const std::vector<DimSize_t>& dims,
                            std::vector<DimSize_t> strides) {
     if (dims.empty()) {  // scalar
@@ -888,6 +949,13 @@ template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const
 template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<float>& coords);
 template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<double>& coords);
 
+std::size_t Tensor::getStorageIdx(const std::vector<std::size_t>& coordIdx) const {
+    AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
+    for(std::size_t i = 0; i < coordIdx.size(); ++i) {
+        AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
+    }
+    return std::inner_product(coordIdx.cbegin(), coordIdx.cend(), mStrides.cbegin(), DimSize_t(0));
+}
 
 std::set<std::string> Tensor::getAvailableBackends() {
     std::set<std::string> backendsList;
@@ -896,4 +964,9 @@ std::set<std::string> Tensor::getAvailableBackends() {
     }
     return backendsList;
 }
+
+void Tensor::computeSize() {
+    mSize = std::accumulate(mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
+}
+
 }  // namespace Aidge
diff --git a/src/graph/Connector.cpp b/src/graph/Connector.cpp
index 98f58259a..be80dc9d0 100644
--- a/src/graph/Connector.cpp
+++ b/src/graph/Connector.cpp
@@ -17,39 +17,68 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Connector::Connector(std::shared_ptr<Aidge::Node> node) {
-    mNode = node;
+namespace Aidge {
+
+Connector::Connector() noexcept : mNode(nullptr) {}
+
+Connector::Connector(std::shared_ptr<Aidge::Node> node)
+    : mNode(std::move(node))
+{
     if (mNode->nbOutputs() == 1U) {
         mOutputId = 0;
     }
 }
 
-Aidge::IOIndex_t Aidge::Connector::size() const { return mNode->nbOutputs(); }
+Connector::Connector(std::shared_ptr<Node> node, IOIndex_t index)
+    : mNode(std::move(node)), mOutputId(index)
+{
+    // AIDGE_ASSERT(mNode, "Node pointer cannot be null.");
+    AIDGE_ASSERT(index < size(), "Non-valid output index.");
+}
+
+Connector::~Connector() = default;
+
+Connector Connector::operator[](IOIndex_t index) const {
+    return Connector(mNode, index);
+}
+
+IOIndex_t Connector::size() const { return mNode->nbOutputs(); };
 
-std::shared_ptr<Aidge::GraphView> Aidge::generateGraph(std::vector<Connector> ctors) {
-    std::shared_ptr<GraphView> graph = std::make_shared<GraphView>();
-    std::vector<std::shared_ptr<Node>> nodesToAdd = std::vector<std::shared_ptr<Node>>();
-    for (const Connector& ctor : ctors) {
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<GraphView> generateGraph(const std::vector<Connector>& ctors) {
+    auto graph = std::make_shared<GraphView>();
+    std::vector<std::shared_ptr<Node>> nodesToAdd;
+    nodesToAdd.reserve(ctors.size());
+
+    // Add initial nodes from the connectors.
+    for (const auto& ctor : ctors) {
         nodesToAdd.push_back(ctor.node());
     }
-    std::vector<std::shared_ptr<Node>> buffer = {};
+
+    std::vector<std::shared_ptr<Node>> buffer;
+    buffer.reserve(ctors.size());
 
     while (!nodesToAdd.empty()) {
         while (!nodesToAdd.empty()) {
-            graph->add(nodesToAdd.back());  // only add, connection already done
-                                            // between nodes
-            std::vector<std::shared_ptr<Node>> parents = nodesToAdd.back()->getParents();
-            const std::set<std::shared_ptr<Node>>& alreadyAdded = graph->getNodes();
-            for (std::shared_ptr<Node> parent : parents) {
+            auto node = nodesToAdd.back();
+            nodesToAdd.pop_back();
+            graph->add(node);  // only add, connection already done
+                               // between nodes
+            // Get parent nodes and add those not already in the graph.
+            const auto& parents = node->getParents();
+            const auto& alreadyAdded = graph->getNodes();
+            for (const auto& parent : parents) {
                 if (!parent) continue;
-                if (alreadyAdded.find(parent) == alreadyAdded.end()) {
+                if (alreadyAdded.find(parent) == alreadyAdded.cend()) {
                     buffer.push_back(parent);
                 }
             }
-            nodesToAdd.pop_back();
         }
-        nodesToAdd.insert(nodesToAdd.end(), buffer.begin(), buffer.end());
-        buffer = {};
+        nodesToAdd.insert(nodesToAdd.cend(), buffer.begin(), buffer.end());
+        buffer.clear();
     }
     return graph;
 }
+
+} // namespace Aidge
diff --git a/src/operator/Abs.cpp b/src/operator/Abs.cpp
index 1dd7836ad..86d4dd94b 100644
--- a/src/operator/Abs.cpp
+++ b/src/operator/Abs.cpp
@@ -17,13 +17,35 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Abs_Op::Type = "Abs";
+namespace Aidge {
 
-void Aidge::Abs_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+const std::string Abs_Op::Type = "Abs";
+
+Abs_Op::Abs_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Abs_Op::Abs_Op(const Abs_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Abs_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+void Abs_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(Abs_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
 
-std::set<std::string> Aidge::Abs_Op::getAvailableBackends() const {
+std::set<std::string> Abs_Op::getAvailableBackends() const {
     return Registrar<Abs_Op>::getKeys();
 }
+
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Node> Abs(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Abs_Op>(), name);
+}
+
+} // namespace Aidge
diff --git a/src/operator/And.cpp b/src/operator/And.cpp
index aebd5a717..1d496e11e 100644
--- a/src/operator/And.cpp
+++ b/src/operator/And.cpp
@@ -21,8 +21,24 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
+namespace Aidge {
+
 const std::string Aidge::And_Op::Type = "And";
 
+And_Op::And_Op()
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1)
+{}
+
+And_Op::And_Op(const And_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(And_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
 bool Aidge::And_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
@@ -60,3 +76,11 @@ void Aidge::And_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
 std::set<std::string> Aidge::And_Op::getAvailableBackends() const {
     return Registrar<And_Op>::getKeys();
 }
+
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Node> And(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<And_Op>(), name);
+}
+
+} // namespace Aidge
\ No newline at end of file
diff --git a/src/operator/ArgMax.cpp b/src/operator/ArgMax.cpp
index 531c41596..8833452a8 100644
--- a/src/operator/ArgMax.cpp
+++ b/src/operator/ArgMax.cpp
@@ -24,6 +24,14 @@
 
 const std::string Aidge::ArgMax_Op::Type = "ArgMax";
 
+Aidge::ArgMax_Op::ArgMax_Op(std::int32_t axis, bool keep_dims, bool select_last_index)
+: OperatorTensor(Type, {InputCategory::Data}, 1),
+  mAttributes(std::make_shared<Attributes_>(
+    attr<ArgMaxAttr::Axis>(axis),
+    attr<ArgMaxAttr::KeepDims>(keep_dims),
+    attr<ArgMaxAttr::SelectLastIndex>(select_last_index)))
+{}
+
 Aidge::ArgMax_Op::ArgMax_Op(const Aidge::ArgMax_Op& op)
     : OperatorTensor(op),
       mAttributes(op.mAttributes)
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 79341687c..7561297ba 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -27,6 +27,19 @@ template <Aidge::DimIdx_t DIM>
 const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling" + std::to_string(DIM) + "D";
 
 
+template <Aidge::DimIdx_t DIM>
+Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+    const std::array<DimSize_t, DIM> &stride_dims,
+    const std::array<DimSize_t, DIM> &dilations,
+    bool ceil_mode)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+    attr<AvgPoolingAttr::StrideDims>(stride_dims),
+    attr<AvgPoolingAttr::KernelDims>(kernel_dims),
+    attr<AvgPoolingAttr::Dilations>(dilations),
+    attr<AvgPoolingAttr::CeilMode>(ceil_mode)))
+{}
+
 template <Aidge::DimIdx_t DIM>
 Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
     : OperatorTensor(op),
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index 24a49e56c..db5ab4d40 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -26,6 +26,22 @@
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::BatchNorm_Op<DIM>::Type = "BatchNorm" + std::to_string(DIM) + "D";
 
+template <Aidge::DimIdx_t DIM>
+Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(float epsilon, float momentum, bool trainingMode)
+    : OperatorTensor(Type,
+                        {InputCategory::Data,
+                        InputCategory::Param,
+                        InputCategory::Param,
+                        InputCategory::Param,
+                        InputCategory::Param},
+                        1),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<BatchNormAttr::Epsilon>(epsilon),
+        attr<BatchNormAttr::Momentum>(momentum),
+        attr<BatchNormAttr::TrainingMode>(trainingMode)
+        ))
+{}
+
 template <Aidge::DimIdx_t DIM>
 Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
     : OperatorTensor(op),
diff --git a/src/operator/BitShift.cpp b/src/operator/BitShift.cpp
index 7595590f7..f500c58ae 100644
--- a/src/operator/BitShift.cpp
+++ b/src/operator/BitShift.cpp
@@ -21,9 +21,29 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::BitShift_Op::Type = "BitShift";
+namespace Aidge {
 
-bool Aidge::BitShift_Op::forwardDims(bool /*allowDataDependency*/) {
+const std::string BitShift_Op::Type = "BitShift";
+
+BitShift_Op::BitShift_Op(BitShiftDirection direction, bool rounding)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1),
+      mAttributes(std::make_shared<Attributes_>(
+        attr<BitShiftAttr::BitShiftdirection>(direction),
+        attr<BitShiftAttr::Rounding>(rounding)))
+{}
+
+BitShift_Op::BitShift_Op(const BitShift_Op& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(BitShift_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+bool BitShift_Op::forwardDims(bool /*allowDataDependency*/) {
     if (!inputsAssociated()) {
     return false;
     }
@@ -54,11 +74,19 @@ bool Aidge::BitShift_Op::forwardDims(bool /*allowDataDependency*/) {
 }
 
 
-void Aidge::BitShift_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+void BitShift_Op::setBackend(const std::string &name, DeviceIdx_t device) {
     SET_IMPL_MACRO(BitShift_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
 
-std::set<std::string> Aidge::BitShift_Op::getAvailableBackends() const {
+std::set<std::string> BitShift_Op::getAvailableBackends() const {
     return Registrar<BitShift_Op>::getKeys();
 }
+
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direction, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<BitShift_Op>(direction), name);
+}
+
+} // namespace Aidge
diff --git a/src/operator/Clip.cpp b/src/operator/Clip.cpp
index 62787ebcf..87ac105da 100644
--- a/src/operator/Clip.cpp
+++ b/src/operator/Clip.cpp
@@ -13,14 +13,32 @@
 
 #include <memory>
 #include <string>
+
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/operator/Clip.hpp"
 
-const std::string Aidge::Clip_Op::Type = "Clip";
+namespace Aidge {
+
+const std::string Clip_Op::Type = "Clip";
+
+Clip_Op::Clip_Op(float min, float max)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData}, 1),
+        mAttributes(std::make_shared<Attributes_>(attr<ClipAttr::Min>(min), attr<ClipAttr::Max>(max))) {}
 
-bool Aidge::Clip_Op::dimsForwarded() const {
+Clip_Op::Clip_Op(const Clip_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Clip_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+bool Clip_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined())
         || (getInput(2) && !getInput(2)->undefined()))
     {
@@ -32,7 +50,7 @@ bool Aidge::Clip_Op::dimsForwarded() const {
 }
 
 
-bool Aidge::Clip_Op::forwardDims(bool allowDataDependency)
+bool Clip_Op::forwardDims(bool allowDataDependency)
 {
     if (getInput(1) )
     {
@@ -80,14 +98,19 @@ bool Aidge::Clip_Op::forwardDims(bool allowDataDependency)
     mOutputs[0] -> resize(getInput(0)->dims());
     return true;
 }
-void Aidge::Clip_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+void Clip_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     mImpl = Registrar<Clip_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
 }
-std::set<std::string> Aidge::Clip_Op::getAvailableBackends() const {
+std::set<std::string> Clip_Op::getAvailableBackends() const {
     return Registrar<Clip_Op>::getKeys();
 }
-std::shared_ptr<Aidge::Node> Aidge::Clip(const std::string &name,float min,float max)
+
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Node> Clip(const std::string &name, float min, float max)
 {
     return std::make_shared<Node>(std::make_shared<Clip_Op>(min, max), name);
-}
\ No newline at end of file
+}
+
+} // namespace Aidge
diff --git a/src/operator/ConstantOfShape.cpp b/src/operator/ConstantOfShape.cpp
index 7fe9dc130..524eb44be 100644
--- a/src/operator/ConstantOfShape.cpp
+++ b/src/operator/ConstantOfShape.cpp
@@ -28,6 +28,21 @@ namespace Aidge {
 
 const std::string ConstantOfShape_Op::Type = "ConstantOfShape";
 
+ConstantOfShape_Op::ConstantOfShape_Op(const Tensor &value)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+      mAttributes(std::make_shared<Attributes_>(
+        attr<ConstantOfShapeAttr::Value>(value))) {}
+
+ConstantOfShape_Op::ConstantOfShape_Op(const ConstantOfShape_Op &op)
+    : OperatorTensor(op), mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(ConstantOfShape_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
 bool ConstantOfShape_Op::forwardDims(bool allowDataDependency) {
   if (!inputsAssociated()) {
     return false;
@@ -68,5 +83,11 @@ std::set<std::string> Aidge::ConstantOfShape_Op::getAvailableBackends() const {
   return Registrar<ConstantOfShape_Op>::getKeys();
 }
 
-} // namespace Aidge
+////////////////////////////////////////////////////////////////////////////////
 
+std::shared_ptr<Node> ConstantOfShape(const Tensor value, const std::string &name) {
+  return std::make_shared<Node>(std::make_shared<ConstantOfShape_Op>(value),
+                                name);
+}
+
+} // namespace Aidge
diff --git a/unit_tests/data/Test_Spikegen.cpp b/unit_tests/data/Test_Spikegen.cpp
index 06f33197d..73bfbcd44 100644
--- a/unit_tests/data/Test_Spikegen.cpp
+++ b/unit_tests/data/Test_Spikegen.cpp
@@ -9,8 +9,7 @@
  *
  ********************************************************************************/
 
-#include <cstdint>     // std::uint8_t, std::uint16_t, std::int32_t
-#include <vector>      // std::vector
+#include <cstdint>     // std::int32_t
 
 #include <catch2/catch_test_macros.hpp>
 
@@ -20,23 +19,25 @@
 #include "aidge/utils/TensorUtils.hpp"
 
 
-namespace Aidge
-{
-TEST_CASE("[core/data] SpikeGen zeros", "[SpikeGen]") {
-    auto input = Tensor(Array1D<float, 3>({0,0,0}));
-    auto expectedOutput = Tensor(Array2D<float, 3, 3>({{{0,0,0}, {0,0,0}, {0,0,0}}}));
+namespace Aidge {
 
-    auto spikes = spikegenRate(std::make_shared<Tensor>(input), 3);
+TEST_CASE("[core/data] SpikeGen zeros", "[SpikeGen]") {
+    SECTION("filled with zeros"){
+        const auto input = std::make_shared<Tensor>(Array1D<float, 3>({0,0,0}));
+        const auto expectedOutput = Tensor(Array2D<float, 3, 3>({{{0,0,0}, {0,0,0}, {0,0,0}}}));
 
-    REQUIRE(approxEq<float>(spikes, expectedOutput));
-}
+        auto spikes = spikegenRate(input, 3);
 
-TEST_CASE("[core/data] SpikeGen ones", "[SpikeGen]") {
-    auto input = Tensor(Array1D<float, 3>({1,1,1}));
-    auto expectedOutput = Tensor(Array2D<float, 3, 3>({{{1,1,1}, {1,1,1}, {1,1,1}}}));
+        REQUIRE(approxEq<float>(spikes, expectedOutput));
+    }
+    SECTION("filled with ones") {
+        auto input = std::make_shared<Tensor>(Array1D<float, 3>({1,1,1}));
+        auto expectedOutput = Tensor(Array2D<float, 3, 3>({{{1,1,1}, {1,1,1}, {1,1,1}}}));
 
-    auto spikes = spikegenRate(std::make_shared<Tensor>(input), 3);
+        auto spikes = spikegenRate(input, 3);
 
-    REQUIRE(approxEq<float>(spikes, expectedOutput));
+        REQUIRE(approxEq<float>(spikes, expectedOutput));
+    }
 }
+
 }  // namespace Aidge
-- 
GitLab