From c12a1a60da3327e784efde1931ae34dc0b2ebbba Mon Sep 17 00:00:00 2001
From: Vincent TEMPLIER <vincent.templier@cea.fr>
Date: Mon, 24 Jul 2023 12:49:15 +0000
Subject: [PATCH] Add Core library

---
 aidge/_Core/CMakeLists.txt                    |  19 +
 aidge/_Core/include/backend/OperatorImpl.hpp  |  60 ++
 aidge/_Core/include/backend/TensorImpl.hpp    |  41 ++
 aidge/_Core/include/data/Data.hpp             |  75 ++
 aidge/_Core/include/data/Tensor.hpp           | 530 ++++++++++++++
 aidge/_Core/include/graph/Connector.hpp       |  84 +++
 aidge/_Core/include/graph/GraphView.hpp       | 324 ++++++++
 aidge/_Core/include/graph/Node.hpp            | 345 +++++++++
 aidge/_Core/include/graph/OpArgs.hpp          |  86 +++
 aidge/_Core/include/graphmatching/GRegex.hpp  |  63 ++
 aidge/_Core/include/graphmatching/Match.hpp   |  44 ++
 .../_Core/include/graphmatching/NodeRegex.hpp |  41 ++
 aidge/_Core/include/graphmatching/SeqStm.hpp  | 127 ++++
 .../include/graphmatching/StmFactory.hpp      |  55 ++
 aidge/_Core/include/graphmatching/Utile.hpp   |  50 ++
 aidge/_Core/include/operator/Add.hpp          | 132 ++++
 aidge/_Core/include/operator/Conv.hpp         | 164 +++++
 aidge/_Core/include/operator/FC.hpp           | 137 ++++
 .../include/operator/GenericOperator.hpp      | 122 ++++
 aidge/_Core/include/operator/Matmul.hpp       | 125 ++++
 aidge/_Core/include/operator/MetaOperator.hpp |  31 +
 aidge/_Core/include/operator/Operator.hpp     |  95 +++
 aidge/_Core/include/operator/Producer.hpp     | 129 ++++
 aidge/_Core/include/operator/ReLU.hpp         | 115 +++
 aidge/_Core/include/scheduler/Scheduler.hpp   |  71 ++
 aidge/_Core/include/utils/CParameter.hpp      | 110 +++
 aidge/_Core/include/utils/Parameter.hpp       | 155 ++++
 aidge/_Core/include/utils/Recipies.hpp        |  27 +
 aidge/_Core/include/utils/Registrar.hpp       |  77 ++
 aidge/_Core/include/utils/Types.h             |  62 ++
 aidge/_Core/src/graph/Connector.cpp           |  54 ++
 aidge/_Core/src/graph/GraphView.cpp           | 691 ++++++++++++++++++
 aidge/_Core/src/graph/Node.cpp                | 312 ++++++++
 aidge/_Core/src/graph/OpArgs.cpp              |  73 ++
 aidge/_Core/src/graphmatching/GRegex.cpp      | 301 ++++++++
 aidge/_Core/src/graphmatching/Match.cpp       |  37 +
 aidge/_Core/src/graphmatching/NodeRegex.cpp   |  46 ++
 aidge/_Core/src/graphmatching/SeqStm.cpp      | 247 +++++++
 aidge/_Core/src/graphmatching/StmFactory.cpp  | 150 ++++
 aidge/_Core/src/operator/Operator.cpp         |  44 ++
 aidge/_Core/src/recipies/FuseMulAdd.cpp       | 103 +++
 aidge/_Core/src/scheduler/Scheduler.cpp       | 235 ++++++
 42 files changed, 5789 insertions(+)
 create mode 100644 aidge/_Core/include/backend/OperatorImpl.hpp
 create mode 100644 aidge/_Core/include/backend/TensorImpl.hpp
 create mode 100644 aidge/_Core/include/data/Data.hpp
 create mode 100644 aidge/_Core/include/data/Tensor.hpp
 create mode 100644 aidge/_Core/include/graph/Connector.hpp
 create mode 100644 aidge/_Core/include/graph/GraphView.hpp
 create mode 100644 aidge/_Core/include/graph/Node.hpp
 create mode 100644 aidge/_Core/include/graph/OpArgs.hpp
 create mode 100644 aidge/_Core/include/graphmatching/GRegex.hpp
 create mode 100644 aidge/_Core/include/graphmatching/Match.hpp
 create mode 100644 aidge/_Core/include/graphmatching/NodeRegex.hpp
 create mode 100755 aidge/_Core/include/graphmatching/SeqStm.hpp
 create mode 100644 aidge/_Core/include/graphmatching/StmFactory.hpp
 create mode 100644 aidge/_Core/include/graphmatching/Utile.hpp
 create mode 100644 aidge/_Core/include/operator/Add.hpp
 create mode 100644 aidge/_Core/include/operator/Conv.hpp
 create mode 100644 aidge/_Core/include/operator/FC.hpp
 create mode 100644 aidge/_Core/include/operator/GenericOperator.hpp
 create mode 100644 aidge/_Core/include/operator/Matmul.hpp
 create mode 100644 aidge/_Core/include/operator/MetaOperator.hpp
 create mode 100644 aidge/_Core/include/operator/Operator.hpp
 create mode 100644 aidge/_Core/include/operator/Producer.hpp
 create mode 100644 aidge/_Core/include/operator/ReLU.hpp
 create mode 100644 aidge/_Core/include/scheduler/Scheduler.hpp
 create mode 100644 aidge/_Core/include/utils/CParameter.hpp
 create mode 100644 aidge/_Core/include/utils/Parameter.hpp
 create mode 100644 aidge/_Core/include/utils/Recipies.hpp
 create mode 100644 aidge/_Core/include/utils/Registrar.hpp
 create mode 100644 aidge/_Core/include/utils/Types.h
 create mode 100644 aidge/_Core/src/graph/Connector.cpp
 create mode 100644 aidge/_Core/src/graph/GraphView.cpp
 create mode 100644 aidge/_Core/src/graph/Node.cpp
 create mode 100644 aidge/_Core/src/graph/OpArgs.cpp
 create mode 100644 aidge/_Core/src/graphmatching/GRegex.cpp
 create mode 100644 aidge/_Core/src/graphmatching/Match.cpp
 create mode 100644 aidge/_Core/src/graphmatching/NodeRegex.cpp
 create mode 100755 aidge/_Core/src/graphmatching/SeqStm.cpp
 create mode 100644 aidge/_Core/src/graphmatching/StmFactory.cpp
 create mode 100644 aidge/_Core/src/operator/Operator.cpp
 create mode 100644 aidge/_Core/src/recipies/FuseMulAdd.cpp
 create mode 100644 aidge/_Core/src/scheduler/Scheduler.cpp

diff --git a/aidge/_Core/CMakeLists.txt b/aidge/_Core/CMakeLists.txt
index e69de29b..53ab8551 100644
--- a/aidge/_Core/CMakeLists.txt
+++ b/aidge/_Core/CMakeLists.txt
@@ -0,0 +1,19 @@
+
+option(PYBIND "python binding" ON)
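+# e.g. toggle the binding at configure time: cmake -DPYBIND=OFF <source-dir>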
+
+project(Aidge_Core)
+
+add_library(aidge_core STATIC)
+
+# Add include directories 
+target_include_directories(aidge_core PUBLIC "include")
+
+# Containers module
+file(GLOB_RECURSE src_files "src/*.cpp")
+target_sources(aidge_core PRIVATE ${src_files})
+
+
+# Activate compile time reducer for aidge_core
+set_target_properties(aidge_core PROPERTIES COTIRE_ADD_UNITY_BUILD FALSE)
+# set_target_properties(n2d2_cpu_lib PROPERTIES COTIRE_CXX_PREFIX_HEADER_INIT "include/utils/Precompiled.hpp")
+cotire(aidge_core)
\ No newline at end of file
diff --git a/aidge/_Core/include/backend/OperatorImpl.hpp b/aidge/_Core/include/backend/OperatorImpl.hpp
new file mode 100644
index 00000000..a2c97c60
--- /dev/null
+++ b/aidge/_Core/include/backend/OperatorImpl.hpp
@@ -0,0 +1,60 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_OPERATORIMPL_H__
+#define __AIDGE_OPERATORIMPL_H__
+
+#include <cstddef>
+#include <vector>
+#include "utils/Types.h"
+
+namespace Aidge {
+class OperatorImpl {
+public:
+    virtual void forward() {}
+    virtual void backward() {}
+
+    /**
+     * @brief Minimum amount of data from a specific input required by the
+     * implementation to be run.
+     *
+     * @param inputIdx Index of the input analysed.
+     * @return NbElts_t
+     */
+    virtual NbElts_t getNbRequiredData(IOIndex_t inputIdx) const = 0;
+
+    // Amount of input data that cannot be overwritten during the execution.
+    virtual NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const = 0;
+
+    // Memory required at an output for a given input size.
+    virtual NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const = 0;
+
+    /**
+     * @brief Total amount of consumed data from a specific input.
+     *
+     * @param inputIdx Index of the input analysed.
+     * @return NbElts_t
+     */
+    virtual NbElts_t getNbConsumedData(IOIndex_t inputIdx) const = 0;
+
+    /**
+     * @brief Total amount of produced data ready to be used on a specific output.
+     *
+     * @param outputIdx Index of the output analysed.
+     * @return NbElts_t
+     */
+    virtual NbElts_t getNbProducedData(IOIndex_t outputIdx) const = 0;
+
+    virtual ~OperatorImpl() = default;
+};
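+
+// A minimal sketch of a backend-side specialization, illustrating only the
+// consumer/producer bookkeeping above; the class name and the constant chunk
+// size of 4 elements are hypothetical, not part of this patch:
+//
+//     class DummyAddImpl : public OperatorImpl {
+//         NbElts_t mConsumed = 0, mProduced = 0;
+//     public:
+//         void forward() override { mConsumed += 4; mProduced += 4; }
+//         NbElts_t getNbRequiredData(IOIndex_t) const override { return 4; }
+//         NbElts_t getNbRequiredProtected(IOIndex_t) const override { return 0; }
+//         NbElts_t getRequiredMemory(IOIndex_t, const std::vector<DimSize_t> &) const override { return 4; }
+//         NbElts_t getNbConsumedData(IOIndex_t) const override { return mConsumed; }
+//         NbElts_t getNbProducedData(IOIndex_t) const override { return mProduced; }
+//     };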
+} // namespace Aidge
+
+#endif /* __AIDGE_OPERATORIMPL_H__ */
diff --git a/aidge/_Core/include/backend/TensorImpl.hpp b/aidge/_Core/include/backend/TensorImpl.hpp
new file mode 100644
index 00000000..f4c38d59
--- /dev/null
+++ b/aidge/_Core/include/backend/TensorImpl.hpp
@@ -0,0 +1,41 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_TENSORIMPL_H__
+#define __AIDGE_TENSORIMPL_H__
+
+#include <cstddef>
+#include <cstdio>
+#include "utils/Types.h"
+
+namespace Aidge {
+class TensorImpl {
+public:
+    TensorImpl() = delete;
+    TensorImpl(const char *backend) : mBackend(backend) {}
+    virtual void copy(const void *src, NbElts_t length) = 0;
+    virtual void *rawPtr() = 0;
+    virtual void setRawPtr(void* /*ptr*/)
+    {
+        printf("Cannot set raw pointer for backend %s\n", mBackend);
+    }
+    virtual std::size_t scalarSize() const = 0; // Size of one scalar (in bytes)
+    constexpr const char *backend() const { return mBackend; }
+    virtual ~TensorImpl() = default;
+    virtual bool operator==(const TensorImpl &othImpl) const = 0;
+
+private:
+    const char *mBackend;
+};
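+
+// A minimal sketch (heap-backed, float-only, needs <cstring>) of a concrete
+// implementation; the class and backend names are hypothetical:
+//
+//     class CpuFloatTensorImpl : public TensorImpl {
+//         float *mData = nullptr;
+//     public:
+//         CpuFloatTensorImpl() : TensorImpl("cpu_float") {}
+//         void copy(const void *src, NbElts_t length) override {
+//             delete[] mData;
+//             mData = new float[length];
+//             std::memcpy(mData, src, length * sizeof(float));
+//         }
+//         void *rawPtr() override { return mData; }
+//         std::size_t scalarSize() const override { return sizeof(float); }
+//         bool operator==(const TensorImpl &oth) const override {
+//             return this == &oth;  // sketch only; real backends compare contents
+//         }
+//         ~CpuFloatTensorImpl() { delete[] mData; }
+//     };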
+
+} // namespace Aidge
+
+#endif /* __AIDGE_TENSORIMPL_H__ */
diff --git a/aidge/_Core/include/data/Data.hpp b/aidge/_Core/include/data/Data.hpp
new file mode 100644
index 00000000..ddf3c3f1
--- /dev/null
+++ b/aidge/_Core/include/data/Data.hpp
@@ -0,0 +1,75 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_DATA_H__
+#define __AIDGE_DATA_H__
+
+#include "utils/Parameter.hpp"
+
+namespace Aidge {
+enum class DataType {
+    Float64,
+    Float32,
+    Float16,
+    BFloat16,
+    Binary,
+    Ternary,
+    Int2,
+    Int3,
+    Int4,
+    Int5,
+    Int6,
+    Int7,
+    Int8,
+    Int16,
+    Int32,
+    Int64,
+    UInt2,
+    UInt3,
+    UInt4,
+    UInt5,
+    UInt6,
+    UInt7,
+    UInt8,
+    UInt16,
+    UInt32,
+    UInt64
+};
+
+class Data {
+public:
+    constexpr Data(const char* type): mType(type) {}
+    constexpr const char* type() const {
+        return mType;
+    }
+    virtual ~Data() = default;
+
+private:
+    const char* mType;
+};
+}
+
+namespace {
+template <typename T> struct NativeType { static const Aidge::DataType type; };
+template <> const Aidge::DataType NativeType<double>::type = Aidge::DataType::Float64;
+template <> const Aidge::DataType NativeType<float>::type = Aidge::DataType::Float32;
+template <> const Aidge::DataType NativeType<long>::type = Aidge::DataType::Int64;
+template <> const Aidge::DataType NativeType<int>::type = Aidge::DataType::Int32;
+
+template <>
+const char* const EnumStrings<Aidge::DataType>::data[]
+    = {"Float64", "Float32", "Float16", "BFloat16", "Binary", "Ternary", 
+       "Int2", "Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16", 
+       "Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6", 
+       "UInt7", "UInt8", "UInt16", "UInt32", "UInt64"};
+}
+
+#endif /* __AIDGE_DATA_H__ */
\ No newline at end of file
diff --git a/aidge/_Core/include/data/Tensor.hpp b/aidge/_Core/include/data/Tensor.hpp
new file mode 100644
index 00000000..d6e8f303
--- /dev/null
+++ b/aidge/_Core/include/data/Tensor.hpp
@@ -0,0 +1,530 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_TENSOR_H__
+#define __AIDGE_TENSOR_H__
+
+#include <cstring>
+#include <set>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "backend/TensorImpl.hpp"
+#include "data/Data.hpp"
+#include "utils/Registrar.hpp"
+#include "utils/Types.h"
+
+namespace Aidge {
+// Helper to convert vector to array
+template <typename T, typename Iter, std::size_t... Is>
+constexpr auto to_array(Iter &iter, std::index_sequence<Is...>) -> std::array<T, sizeof...(Is)> {
+    return {{((void)Is, T(*iter++))...}};
+}
+
+/**
+ * @brief Convert an object with an iterator to an std::array.
+ */
+template <std::size_t N, typename U = void, typename Iter, typename V = typename std::iterator_traits<Iter>::value_type,
+          typename T = std::conditional_t<std::is_same<U, void>{}, V, U>>
+constexpr auto to_array(Iter iter) -> std::array<T, N> {
+    return to_array<T>(iter, std::make_index_sequence<N>{});
+}
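+
+// Usage sketch: for std::vector<DimSize_t> v{2, 3, 4},
+// to_array<3>(v.cbegin()) yields std::array<DimSize_t, 3>{2, 3, 4}.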
+
+namespace detail {
+
+template <class T, std::size_t N, std::size_t... I>
+constexpr std::array<std::remove_cv_t<T>, N> to_array_impl(T (&a)[N], std::index_sequence<I...>) {
+    return {{a[I]...}};
+}
+
+}  // namespace detail
+
+/**
+ * @brief Convert a C-style array into a C++ std::array.
+ *
+ * @tparam T Data type.
+ * @tparam N Number of elements.
+ * @param a C-style array to convert.
+ * @return constexpr std::array<std::remove_cv_t<T>, N>
+ */
+template <class T, std::size_t N>
+constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N]) {
+    return detail::to_array_impl(a, std::make_index_sequence<N>{});
+}
+
+template <typename T, std::size_t N, std::size_t... I>
+constexpr std::array<T, N + 1> append(std::array<T, N> a, T t, std::index_sequence<I...>) {
+    return std::array<T, N + 1>{a[I]..., t};
+}
+
+template <typename T, std::size_t N, std::size_t... I>
+constexpr std::array<T, N + 1> append(T t, std::array<T, N> a, std::index_sequence<I...>) {
+    return std::array<T, N + 1>{t, a[I]...};
+}
+
+/**
+ * @brief Create a new array concatenating the initial one with the value to
+ * add.
+ * @details append([1,2,7], 3) -> [1,2,7,3]
+ *
+ * @tparam T Data type.
+ * @tparam N Number of elements in the initial array.
+ * @param a Initial array.
+ * @param t Element to add.
+ * @return constexpr std::array<T, N + 1>
+ */
+template <typename T, std::size_t N>
+constexpr std::array<T, N + 1> append(std::array<T, N> a, T t) {
+    return append(a, t, std::make_index_sequence<N>());
+}
+
+template <typename T, std::size_t N>
+constexpr std::array<T, N + 1> append(T t, std::array<T, N> a) {
+    return append(t, a, std::make_index_sequence<N>());
+}
+
+// Generic helper for initializing a Tensor
+template <typename T, std::size_t SIZE_0>
+struct Array1D {
+    T data[SIZE_0];
+};
+
+template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
+struct Array2D {
+    T data[SIZE_0][SIZE_1];
+};
+
+template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
+struct Array3D {
+    T data[SIZE_0][SIZE_1][SIZE_2];
+};
+
+template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
+struct Array4D {
+    T data[SIZE_0][SIZE_1][SIZE_2][SIZE_3];
+};
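+
+// These aggregates enable brace-initialization of Tensors through the
+// constructors below, e.g.:
+//     Tensor t = Array2D<int, 2, 2>{{{1, 2}, {3, 4}}};
+// which looks up the TensorImpl registered for {"ref_cpp", Int32} and copies
+// the values into it.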
+
+class Tensor : public Data,
+               public Registrable<std::tuple<std::string, DataType>, std::unique_ptr<TensorImpl>(const Tensor &)> {
+   private:
+    DataType mDataType;
+    std::vector<DimSize_t> mDims;
+    std::unique_ptr<TensorImpl> mImpl;
+    std::shared_ptr<Tensor> mGrad;
+
+    // Cached data
+    std::size_t mSize;    // number of elements in the tensor
+    std::size_t mSizeM1;  // for a tensor of N dimensions, number of elements
+                          // in the N-1 last dimensions (e.g. C*H*W for [N,C,H,W])
+
+   public:
+    static constexpr const char *Type = "Tensor";
+
+    Tensor(DataType dataType = DataType::Float32) : Data(Type), mDataType(dataType), mDims({}), mSize(0), mSizeM1(0) {
+        // ctor
+    }
+
+    template <typename T, std::size_t SIZE_0>
+    constexpr Tensor(Array1D<T, SIZE_0> &&arr)
+        : Data(Type),
+          mDataType(NativeType<T>::type),
+          mDims({SIZE_0}),
+          mImpl(Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this)),
+          mSize(SIZE_0),
+          mSizeM1(SIZE_0) {
+        mImpl->copy(&arr.data[0], SIZE_0);
+    }
+
+    template <typename T, std::size_t SIZE_0>
+    constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
+        resize({SIZE_0});
+        if (!mImpl) {
+            mImpl = Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this);
+        }
+        mImpl->copy(&arr.data[0], SIZE_0);
+        return *this;
+    }
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
+    constexpr Tensor(Array2D<T, SIZE_0, SIZE_1> &&arr)
+        : Data(Type),
+          mDataType(NativeType<T>::type),
+          mDims({SIZE_0, SIZE_1}),
+          mImpl(Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this)),
+          mSize(SIZE_0 * SIZE_1),
+          mSizeM1(SIZE_1) {
+        mImpl->copy(&arr.data[0][0], SIZE_0 * SIZE_1);
+    }
+
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
+    constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
+        resize({SIZE_0, SIZE_1});
+        if (!mImpl) {
+            mImpl = Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this);
+        }
+        mImpl->copy(&arr.data[0][0], SIZE_0 * SIZE_1);
+        return *this;
+    }
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
+    constexpr Tensor(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr)
+        : Data(Type),
+          mDataType(NativeType<T>::type),
+          mDims({SIZE_0, SIZE_1, SIZE_2}),
+          mImpl(Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this)),
+          mSize(SIZE_0 * SIZE_1 * SIZE_2),
+          mSizeM1(SIZE_1 * SIZE_2) {
+        mImpl->copy(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
+    }
+
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
+    constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
+        resize({SIZE_0, SIZE_1, SIZE_2});
+        if (!mImpl) {
+            mImpl = Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this);
+        }
+        mImpl->copy(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
+        return *this;
+    }
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
+    constexpr Tensor(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr)
+        : Data(Type),
+          mDataType(NativeType<T>::type),
+          mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
+          mImpl(Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this)),
+          mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3),
+          mSizeM1(SIZE_1 * SIZE_2 * SIZE_3) {
+        mImpl->copy(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
+    }
+
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
+    constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
+        resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
+        if (!mImpl) {
+            mImpl = Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this);
+        }
+        mImpl->copy(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
+        return *this;
+    }
+
+    bool operator==(const Tensor &otherTensor) const {
+        // Both Tensors must have an implementation with matching data type,
+        // dimensions and backend before comparing contents.
+        if (!mImpl || !otherTensor.mImpl || (dataType() != otherTensor.dataType()) ||
+            (dims() != otherTensor.dims()) ||
+            (strcmp(mImpl->backend(), otherTensor.mImpl->backend()) != 0)) {
+            return false;
+        }
+        return *mImpl == *(otherTensor.mImpl);
+    }
+
+    inline void setBackend(const std::string &name) {
+        if (mImpl) {
+            if (strcmp(mImpl->backend(), name.c_str()) != 0) {
+                // Backend change: create new impl, copy from old to new and replace
+                // impl
+                std::unique_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(*this);
+                newImpl->copy(mImpl->rawPtr(), size());
+                mImpl = std::move(newImpl);
+            }
+        } else
+            mImpl = Registrar<Tensor>::create({name, mDataType})(*this);
+    }
+    static std::set<std::string> getAvailableBackends(){
+        std::set<std::string> backendsList;
+        for(std::tuple<std::string, DataType> tupleKey : Registrar<Tensor>::getKeys())
+            backendsList.insert(std::get<0>(tupleKey));
+        return backendsList;
+    }
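+
+    // Usage sketch (assuming a {"ref_cpp", Float32} implementation has been
+    // registered through Registrar<Tensor>):
+    //     Tensor t = Array1D<float, 4>{{1.0f, 2.0f, 3.0f, 4.0f}};
+    //     t.setBackend("ref_cpp");  // no-op if already on "ref_cpp"
+    //     auto backends = Tensor::getAvailableBackends();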
+
+    constexpr DataType dataType() const { return mDataType; }
+
+    /**
+     * @brief Set the DataType of the Tensor and convert its data
+     * if the Tensor has already been initialized.
+     * @param dt DataType.
+     */
+    void setDatatype(const DataType dt) {
+        if (mImpl && (dataType() != dt)) {
+            // get ptr before changing Tensor backend or the type difference will trigger a warning
+            const void *data = mImpl->rawPtr();
+            mDataType = dt;
+            std::unique_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(*this);
+            newImpl->copy(data, size());  // /!\ it does not cast data but reinterprets them
+            mImpl = std::move(newImpl);
+        }
+        mDataType = dt;
+    }
+
+    constexpr const std::unique_ptr<TensorImpl> &getImpl() { return mImpl; }
+
+    bool hasImpl() const { return static_cast<bool>(mImpl); }
+
+    inline std::size_t nbDims() const { return mDims.size(); }
+
+    template <DimIdx_t DIM>
+    constexpr std::array<DimSize_t, DIM> dims() const {
+        assert(DIM == mDims.size() && "wrong number of dimensions");
+        return to_array<DIM>(mDims.cbegin());
+    }
+
+    constexpr const std::vector<DimSize_t> &dims() const { return mDims; }
+
+    constexpr std::size_t size() const { return mSize; }
+
+    constexpr std::size_t sizeM1() const { return mSizeM1; }
+
+    // deducing std::array size_type and declaring DIM accordingly
+    template <std::array<DimSize_t, 1>::size_type DIM>
+    void resize(const std::array<DimSize_t, DIM> &dims) {
+        static_assert(DIM <= MaxDim, "Too many tensor dimensions required by resize, not supported");
+        mDims.assign(dims.begin(), dims.end());
+        computeSize();
+    }
+    void resize(const std::vector<DimSize_t> &dims) {
+        mDims = dims;
+        computeSize();
+    }
+    bool empty() const { return mDims.empty(); }
+    template <typename expectedType, std::array<std::size_t, 1>::size_type DIM>
+    constexpr expectedType &get(std::array<std::size_t, DIM> idx) {
+        assert(DIM == mDims.size());
+        assert(mImpl);
+        std::size_t unfoldedIdx = 0;
+        for (std::size_t i = 0; i < DIM - std::size_t(1); ++i) {
+            unfoldedIdx = (unfoldedIdx + idx[i]) * mDims[i + 1];
+        }
+        unfoldedIdx += idx[DIM - 1];
+        return static_cast<expectedType *>(mImpl->rawPtr())[unfoldedIdx];
+    }
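+
+    // e.g. for a Tensor of dims {2, 3}, get<float>(std::array<std::size_t, 2>{1, 2})
+    // reads the element at flattened row-major index 1*3 + 2 = 5.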
+
+    std::string toString() {
+        std::string res;
+        std::size_t dim = 0;
+        std::vector<std::size_t> dimVals(nbDims(), 0);  // zero-initialized, freed automatically
+        std::size_t counter = 0;
+        res += "{\n";
+        if (nbDims()>=2){
+            while (counter < mSize) {
+                std::string spaceString = std::string((dim+1)<<1,' ');
+                if (dim < nbDims()-2) {
+                    if (dimVals[dim] == 0) {
+                        res += spaceString + "{\n";
+                        ++dim;
+                    } else if (dimVals[dim] < static_cast<std::size_t>(dims()[dim])) {
+                        res += spaceString + "},\n" + spaceString + "{\n";
+                        ++dim;
+                    } else {
+                        res += spaceString + "}\n";
+                        dimVals[dim--] = 0;
+                        dimVals[dim]++;
+                    }
+                } else {
+                    for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) {
+                        res += spaceString + "{";
+                        for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) {
+                            switch (mDataType)
+                            {
+                            case DataType::Int32:
+                                res += " " + std::to_string(static_cast<int *>(mImpl->rawPtr())[counter++]) + ",";
+                                break;
+                            case DataType::Float64:
+                                res += " " + std::to_string(static_cast<double *>(mImpl->rawPtr())[counter++]) + ",";
+                                break;
+                            default:
+                                res += " " + std::to_string(static_cast<float *>(mImpl->rawPtr())[counter++]) + ",";
+                                break;
+                            }
+                        }
+                        switch (mDataType)
+                        {
+                        case DataType::Int32:
+                            res += " " + std::to_string(static_cast<int *>(mImpl->rawPtr())[counter++]) + "}";
+                            break;
+                        case DataType::Float64:
+                            res += " " + std::to_string(static_cast<double *>(mImpl->rawPtr())[counter++]) + "}";
+                            break;
+                        default:
+                            res += " " + std::to_string(static_cast<float *>(mImpl->rawPtr())[counter++]) + "}";
+                            break;
+                        }
+                        if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) {
+                            res += ",";
+                        }
+                        res += "\n";
+                    }
+                    dimVals[dim--] = 0;
+                    dimVals[dim]++;
+                }
+            }
+            for (int i = static_cast<int>(dim); i >= 0; --i) {
+                res += std::string((i+1)<<1, ' ') + "}\n";
+            }
+        } else {
+            for (DimSize_t j = 0; j < dims()[0]; ++j) {
+                switch (mDataType)
+                {
+                case DataType::Int32:
+                    res += " " + std::to_string(static_cast<int *>(mImpl->rawPtr())[j]) + ((j < dims()[0]-1) ? "," : "\n");
+                    break;
+                case DataType::Float64:
+                    res += " " + std::to_string(static_cast<double *>(mImpl->rawPtr())[j]) + ((j < dims()[0]-1) ? "," : "\n");
+                    break;
+                default:
+                    res += " " + std::to_string(static_cast<float *>(mImpl->rawPtr())[j]) + ((j < dims()[0]-1) ? "," : "\n");
+                    break;
+                }
+            }
+        }
+
+        res += "}";
+        return res;
+    }
+
+    inline void print() { printf("%s\n", toString().c_str()); }
+
+    std::shared_ptr<Tensor> grad() {
+        if (!mGrad) {
+            mGrad = std::make_shared<Tensor>(mDataType);
+            mGrad->resize(mDims);
+
+            if (mImpl) mGrad->setBackend(mImpl->backend());
+        }
+
+        return mGrad;
+    }
+
+private:
+    ///\bug not protected against overflow, see ThaliaCommonPack for a solution
+    std::size_t computeSize() {
+        if (mDims.empty()) {
+            mSizeM1 = DimSize_t(0);
+            mSize = DimSize_t(0);
+        }
+        else if (mDims.size() == 1)
+        {
+            mSizeM1 = mDims[0];
+            mSize = mDims[0];
+        }
+        else {
+            mSizeM1 = std::accumulate(++mDims.begin(),mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
+            mSize = static_cast<std::size_t>(mSizeM1 * mDims[0]);
+        }
+        
+        return mSize;
+    }
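+
+    // A possible overflow-aware variant of the accumulation above (sketch
+    // only, assertion-based):
+    //     std::size_t size = 1;
+    //     for (DimSize_t d : mDims) {
+    //         assert(d == 0 || size <= SIZE_MAX / d);
+    //         size *= d;
+    //     }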
+};
+
+}  // namespace Aidge
+
+#endif /* __AIDGE_TENSOR_H__ */
diff --git a/aidge/_Core/include/graph/Connector.hpp b/aidge/_Core/include/graph/Connector.hpp
new file mode 100644
index 00000000..df99689d
--- /dev/null
+++ b/aidge/_Core/include/graph/Connector.hpp
@@ -0,0 +1,84 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CONNECTOR_H__
+#define __AIDGE_CONNECTOR_H__
+
+#include <cassert>
+#include "utils/Types.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+
+class Node;
+class GraphView;
+/**
+ * @brief Object meant for a simpler, more intuitive user API.
+ *
+ * example:
+ *  Connector x;
+ *  x = Conv(...)(x);
+ *  Connector y = Split(3)(x[0]); // Error! Cannot slice a Connector with one output only
+ *  Connector y = Split(3)(x);
+ *  CustomLayer cl(...);
+ *  Connector z = cl(y); // Error! y has multiple outputs, must specify which one to use
+ *  Connector z1 = cl(y[0]);
+ *  Connector z2 = cl(y[1]);
+ *  Connector z3 = cl(y[2]);
+ *  x = Sum(...)(z1, z2, z3);
+ *  std::shared_ptr<GraphView> g = generateGraph({x});
+ */
+class Connector {
+private:
+    std::shared_ptr<Node> mNode;
+    ///\brief output id
+    ///\details gk_IODefaultIndex is reserved for?
+    ///\bug Is negative value pertinent?
+    IOIndex_t mOutputId = gk_IODefaultIndex;
+
+public:
+    Connector() : mNode(nullptr) {
+        // ctor
+    }
+    Connector(std::shared_ptr<Node> node);
+
+    ~Connector() = default;
+
+    Connector operator[](IOIndex_t index) {
+        assert((size() > 1) && "Cannot slice a Connector with a single output.");
+        return Connector(mNode, index);
+    }
+
+    IONb_t size() const;
+
+    inline std::shared_ptr<Node> node() const { return mNode; }
+
+    inline IOIndex_t index() const { return mOutputId; }
+
+private:
+    Connector(std::shared_ptr<Node> node, IOIndex_t index) : mNode(node) {
+        assert((index >= 0) && ( static_cast<IONb_t>(index) < size()) && "Non-valid output index.\n");
+        mOutputId = index;
+    }
+};
+
+/**
+ * @brief Generate a GraphView from a list of output Connectors
+ *
+ * @param ctors list of output Connector for the graph to generate.
+ * @return std::shared_ptr<GraphView>
+ */
+std::shared_ptr<GraphView> generateGraph(std::vector<Connector> ctors);
+
+} // namespace Aidge
+
+#endif /* __AIDGE_CONNECTOR_H__ */
\ No newline at end of file
diff --git a/aidge/_Core/include/graph/GraphView.hpp b/aidge/_Core/include/graph/GraphView.hpp
new file mode 100644
index 00000000..fda0e983
--- /dev/null
+++ b/aidge/_Core/include/graph/GraphView.hpp
@@ -0,0 +1,324 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_GRAPHVIEW_H__
+#define __AIDGE_GRAPHVIEW_H__
+
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "graph/Node.hpp"
+#include "utils/Types.h"
+
+namespace Aidge {
+enum class DataType;
+class GraphView : public std::enable_shared_from_this<GraphView> {
+private:
+    /// @brief Name of the GraphView
+    std::string mName;
+
+    /// @brief Set of Nodes included in the GraphView
+    std::set<NodePtr> mNodes;
+
+    /// @brief Map of named Nodes included in the GraphView
+    std::map<std::string, NodePtr> mNodeRegistry;
+    
+    /// @brief Nodes without input link
+    std::set<NodePtr> mInputNodes;
+
+    /// @brief Nodes without output link
+    std::set<NodePtr> mOutputNodes;
+
+public:
+    GraphView(std::string name="")
+        : mName(name) 
+    {
+        // ctor
+    }
+
+    GraphView(std::set<NodePtr> nodes, std::string name="")
+        : mName(name) 
+    {
+        add(nodes);
+    }
+
+    bool operator==(const GraphView &gv) const 
+    {
+        return mNodes == gv.mNodes;
+    }
+
+    NodePtr operator[](std::string name) 
+    {
+        assert(mNodeRegistry.find(name) != mNodeRegistry.end() && "Could not find Node in the GraphView.");
+        return mNodeRegistry.at(name);
+    }
+
+///////////////////////////////////////////////////////
+//        FUNCTIONAL DESCRIPTION
+///////////////////////////////////////////////////////
+
+    Connector operator()(const std::vector<Connector> ctors);
+
+///////////////////////////////////////////////////////
+//        INNER
+///////////////////////////////////////////////////////
+public:
+    /**
+     * @brief Name of the GraphView.
+     * @return std::string
+     */
+    std::string name() const;
+
+    /**
+     * @brief Set the GraphView name.
+     * @warning Undefined behaviour when several GraphViews have the same name.
+     * @param name New name for the GraphView.
+     */
+    void setName(const std::string &name);
+
+    /**
+     * @brief Save the GraphView as a Mermaid graph in a .md file at the
+     * specified location.
+     * @param path
+     */
+    void save(std::string path, bool verbose = false) const;
+
+    inline bool inView(NodePtr nodePtr) const {
+        return mNodes.find(nodePtr) != mNodes.end();
+    }
+
+///////////////////////////////////////////////////////
+//        TENSOR MANAGEMENT
+///////////////////////////////////////////////////////
+public:
+    inline std::set<NodePtr> inputNodes() const noexcept { return mInputNodes; }
+    inline std::set<NodePtr> outputNodes() const noexcept { return mOutputNodes; }
+
+    inline bool isInputNode(NodePtr nodePtr) const {
+        return mInputNodes.find(nodePtr) != mInputNodes.end();
+    }
+    inline bool isOutputNode(NodePtr nodePtr) const {
+        return mOutputNodes.find(nodePtr) != mOutputNodes.end();
+    }
+
+    /**
+     * @brief List data input Tensors of the graph input nodes.
+     * @return std::vector<std::pair<NodePtr, IOIndex_t>>
+     */
+    std::vector<std::pair<NodePtr, IOIndex_t>> dataInputs() const;
+
+    /**
+     * @brief List data input Tensors of the graph input nodes.
+     * @param name Name of the Node.
+     * @return std::vector<std::pair<NodePtr, IOIndex_t>>
+     */
+    inline auto dataInputs(std::string name) const { return mNodeRegistry.at(name)->dataInputs(); }
+
+    /**
+     * @brief List input Tensors of the graph input nodes.
+     * @return std::vector<std::pair<NodePtr, IOIndex_t>>
+     */
+    std::vector<std::pair<NodePtr, IOIndex_t>> inputs() const;
+
+    std::vector<std::pair<NodePtr, IOIndex_t>> inputs(std::string name) const;
+
+    /**
+     * @brief List output Tensors of the graph output nodes.
+     * @return std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>
+     */
+    std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> outputs() const;
+
+    /**
+     * @brief Output Tensors of a specific Node of the GraphView.
+     * @param nodeName Name of the Node whose outputs to list.
+     * @return std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>
+     */
+    std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> outputs(
+            std::string nodeName) const;
+
+    void forwardDims();
+
+    void setBackend(const std::string &backend);
+    void setDatatype(const DataType &datatype);
+
+///////////////////////////////////////////////////////
+//        TOPOLOGY
+///////////////////////////////////////////////////////
+public:
+    /**
+     * @brief Get the Parents of inputNodes.
+     * @return std::set<NodePtr>
+     */
+    std::set<NodePtr> getParents() const;
+    std::vector<NodePtr> getParents(const std::string nodeName) const;
+    std::vector<std::vector<NodePtr>> getOrderedParents() const;
+
+    /**
+     * @brief Get the Children of outputNodes.
+     * @return std::set<NodePtr>
+     */
+    std::set<NodePtr> getChildren() const;
+    std::vector<std::vector<NodePtr>> getChildren(const std::string nodeName) const;
+    std::set<NodePtr> getChildren(
+            const NodePtr otherNode) const;  // TODO change it for a vector<vector> ?
+
+    /**
+     * @brief Getter for the Nodes of the GraphView.
+     * @return std::set<NodePtr>
+     */
+    inline std::set<NodePtr> getNodes() const { return mNodes; }
+
+    /**
+     * @brief Get the Node with the corresponding name if it is in the
+     * GraphView.
+     * @param nodeName Name of the Node.
+     * @return NodePtr Returns a new empty Node if the one asked for
+     * was not found.
+     */
+    NodePtr getNode(const char *nodeName) const;
+
+    void remove(NodePtr nodePtr, bool includeLearnableParam = true);
+
+    // Surrounding nodes management
+
+    void setInput(IOIndex_t inID, IOIndex_t newNodeOutID);
+
+    /**
+     * @brief Include a Node in the current GraphView.
+     * @param otherNode Node to add.
+     * @param includeLearnableParam Whether non-data inputs, such as weights
+     * and biases, should automatically be included in the GraphView. Default: true.
+     */
+    void add(NodePtr otherNode, bool includeLearnableParam = true);
+    void add(std::set<NodePtr> otherNodes,
+             bool includeLearnableParam = true);
+
+    /**
+     * @brief Include every Node inside another GraphView in the current
+     * GraphView.
+     * @param otherGraph GraphView containing the Nodes to include.
+     */
+    void add(std::shared_ptr<GraphView> otherGraph);
+
+    /**
+     * @brief Include a Node in the current GraphView and link it to another
+     * already contained Node.
+     *
+     * @param toOtherNode Pointer to the Node to add.
+     * @param fromOutNode Pointer to the already included Node the new Node will
+     * be linked to (it will become a parent of the new Node). If the GraphView
+     * only has one output Node, then default to this Node.
+     * @param fromTensor Output Tensor ID of the already included Node. Default
+     * to 0.
+     * @param toTensor Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning
+     * first available data input for the Node.
+     */
+    void addChild(NodePtr toOtherNode, NodePtr fromOutNode = nullptr,
+                  const IOIndex_t fromTensor = IOIndex_t(0),
+                  IOIndex_t toTensor = gk_IODefaultIndex);
+
+    /**
+     * @brief Include a Node in the current GraphView and link it to another
+     * already contained Node.
+     *
+     * @param toOtherNode Pointer to the Node to add.
+     * @param fromOutNodeName Name of the already included Node the new Node
+     * will be linked to (it will become a parent of the new Node). Since names
+     * are optional, make sure such a Node exists in the GraphView, otherwise
+     * an assertion is triggered.
+     * @param fromTensor Output Tensor ID of the already included Node. Default
+     * to 0.
+     * @param toTensor Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning
+     * first available data input for the Node.
+     */
+    inline void addChild(NodePtr toOtherNode, std::string fromOutNodeName,
+                         const IOIndex_t fromTensor = IOIndex_t(0),
+                         IOIndex_t toTensor = gk_IODefaultIndex) {
+        assert(mNodeRegistry.find(fromOutNodeName) != mNodeRegistry.end() &&
+               "No Node with this name found in the GraphView.");
+        addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor);
+    }
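+
+    // Usage sketch (node names purely illustrative):
+    //     graph->addChild(reluNode, "conv1");
+    // links output 0 of the Node registered as "conv1" to the first available
+    // data input of reluNode.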
+
+    /**
+     * @brief Include a GraphView content in the current GraphView and link
+     * the two sets by linking one Node from each GraphView.
+     * @param toOtherView Pointer to the GraphView whose content should be added.
+     * @param fromOutNode Pair of pointer to Node and Tensor ID for specifying the
+     * connection. If the GraphView including the other one has only one output
+     * Node, then it defaults to the first output Tensor of this Node.
+     * @param toNode Pair of pointer to Node and Tensor ID for specifying the
+     * connection. If the GraphView whose content is included has only one input
+     * Node, then it defaults to the first available data input Tensor of this
+     * Node.
+     */
+    void addChild(std::shared_ptr<GraphView> toOtherView,
+                  std::pair<NodePtr, IOIndex_t> fromOutNode =
+                          std::pair<NodePtr, IOIndex_t>(nullptr, IOIndex_t(0)),
+                  std::pair<NodePtr, IOIndex_t> toNode =
+                          std::pair<NodePtr, IOIndex_t>(nullptr, gk_IODefaultIndex));
+
+    /**
+     * @brief Swap two Node instances if possible.
+     * @param node
+     * @param otherNode
+     * @return true
+     * @return false
+     */
+    bool swap(Node &node, Node &otherNode);
+
+    void link(std::string name1_inID, std::string name2_outID);
+
+    void insert(Node &newNode, Node &inNode, std::initializer_list<Node> outNodes,
+                IOIndex_t tensorIdx);
+
+    bool replaceWith(std::set<NodePtr> newNodes);
+
+private:
+///////////////////////////////////////////////////////
+//        TENSOR MANAGEMENT
+///////////////////////////////////////////////////////
+
+    IONb_t getNbDataInputs() const;
+
+    IONb_t getNbFreeDataInputs() const;
+
+    void updateInputNodes();
+
+    void updateInputNodes(NodePtr node);
+
+    /**
+     * @brief Process from zero the set of output Nodes.
+     */
+    void updateOutputNodes();
+
+    /**
+     * @brief Update the set of output Nodes with a new Node, checking whether
+     * it can be added and removing any Node no longer part of mOutputNodes.
+     * @param node
+     */
+    void updateOutputNodes(NodePtr node);
+
+    ///////////////////////////////////////////////////////
+    //        TOPOLOGY
+    ///////////////////////////////////////////////////////
+
+    void _forwardDims(std::set<NodePtr> listNodes);
+
+    void removeInputNode(const std::string nodeName);
+    void removeOutputNode(const std::string nodeName);
+};
+}  // namespace Aidge
+
+#endif /* __AIDGE_GRAPHVIEW_H__ */
\ No newline at end of file
diff --git a/aidge/_Core/include/graph/Node.hpp b/aidge/_Core/include/graph/Node.hpp
new file mode 100644
index 00000000..da47bab2
--- /dev/null
+++ b/aidge/_Core/include/graph/Node.hpp
@@ -0,0 +1,345 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_NODE_H__
+#define __AIDGE_NODE_H__
+
+#include <cassert>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+#include <utility>
+
+#include "graph/Connector.hpp"
+#include "operator/Operator.hpp"
+#include "utils/Types.h"
+
+namespace Aidge {
+
+using NodePtr = std::shared_ptr<Node>;
+
+class GraphView;
+
+class Node : public std::enable_shared_from_this<Node> {
+private:
+    /// @brief Name of the Node. Should be unique
+    std::string mName; 
+
+    /// @brief Set of pointers to GraphView (instances including this Node)
+    std::set<std::shared_ptr<GraphView>> mViews = std::set<std::shared_ptr<GraphView>>(); 
+
+    /// @brief Pointer to the associated Operator
+    const std::shared_ptr<Operator> mOperator;
+
+    /// @brief List of parent nodes (Parent --> Node --> Child)
+    std::vector<NodePtr> mParents;
+
+    /// @brief List of child nodes for each output (Parent --> Node --> Child)
+    std::vector<std::vector<NodePtr>> mChildren;
+
+    /// @brief Input index, in each child Node, of each output connection.
+    std::vector<std::vector<IOIndex_t>> mIdInChildren;
+
+    /// @brief Output index, in each parent Node, of each input connection. Default: gk_IODefaultIndex.
+    std::vector<IOIndex_t> mIdOutParents;
+
+public:
+    Node() = delete;
+    Node(std::shared_ptr<Operator> op, const char *name = nullptr);
+
+    virtual ~Node() = default;
+
+    friend bool operator==(const Node &lhs, const Node &rhs) {
+        return lhs.shared_from_this() == rhs.shared_from_this();
+    }
+
+    ///////////////////////////////////////////////////////
+    //        FUNCTIONAL DESCRIPTION
+    ///////////////////////////////////////////////////////
+
+    Connector operator()(const std::vector<Connector> ctors);
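+
+    // See the example in graph/Connector.hpp: calling a Node as a functor,
+    // e.g. x = Conv(...)(x), consumes input Connectors and returns the
+    // Connector of its output for further chaining.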
+
+    ///////////////////////////////////////////////////////
+    //        INNER
+    ///////////////////////////////////////////////////////
+
+    /**
+     * @brief Name of the node.
+     * @return std::string
+     */
+    inline std::string name() const noexcept { return mName; }
+
+    /**
+     * @brief Set the node name.
+     * @warning Undefined behaviour when several Nodes have the same name.
+     * @param name New name for the node.
+     */
+    void setName(const std::string &name);
+
+    /**
+     * @brief Type of the node.
+     * @return std::string
+     */
+    inline std::string type() const { return mOperator->type(); }
+
+    ///////////////////////////////////////////////////////
+    //        OPERATORS
+    ///////////////////////////////////////////////////////
+
+    /**
+     * @brief Run forward() function of the associated Operator
+     */
+    void forward();
+
+    /**
+     * @brief Run backward() function of the associated Operator
+     */
+    void backward();
+
+    /**
+     * @brief Get the Operator object of the Node
+     * @return std::shared_ptr<Operator>
+     */
+    inline std::shared_ptr<Operator> getOperator() const { return mOperator; }
+
+    ///////////////////////////////////////////////////////
+    //        TENSOR MANAGEMENT
+    ///////////////////////////////////////////////////////
+
+    /**
+     * @brief Whether or not every input of the Node is linked to a Tensor.
+     * If true then the Node is ready to be executed.
+     * @return true
+     * @return false
+     */
+    bool valid() const;
+
+    /**
+     * @brief List of pair <Parent, ID of the data input>. When an input is not
+     * linked to any Parent, the pair is <nullptr, gk_IODefaultIndex>.
+     * @return std::vector<std::pair<NodePtr, IOIndex_t>>
+     */
+    std::vector<std::pair<NodePtr, IOIndex_t>> dataInputs() const;
+
+    /**
+     * @brief List of pair <Parent, ID of the input>. When an input is not linked
+     * to any Parent, the pair is <nullptr, gk_IODefaultIndex>.
+     * @return std::vector<std::pair<NodePtr, IOIndex_t>>
+     */
+    std::vector<std::pair<NodePtr, IOIndex_t>> inputs() const;
+
+    /**
+     * @brief Parent and its output Tensor ID linked to the inID-th input Tensor.
+     * If the input is not linked to any Parent, the pair is <nullptr, gk_IODefaultIndex>.
+     * @param inID
+     * @return std::pair<NodePtr, IOIndex_t>
+     */
+    inline std::pair<NodePtr, IOIndex_t> input(IOIndex_t inID) const {
+        assert((inID != gk_IODefaultIndex) && (static_cast<IONb_t>(inID) < nbInputs()) && "Input index out of bound.");
+        return std::pair<NodePtr, IOIndex_t>(mParents[inID], mIdOutParents[inID]);
+    }
+
+    /**
+     * @brief Get the lowest index in the data input Parent list that is
+     * still a nullptr (i.e. not linked to any Parent).
+     * @return IOIndex_t
+     */
+    inline IOIndex_t getFirstFreeDataInput() const {
+        IOIndex_t i = 0;
+        for (; (static_cast<IONb_t>(i) < nbDataInputs()) && (input(i).second >= 0); ++i) {}
+        // assert((i<nbDataInputs()) && "No free data input for Node");
+        return (static_cast<IONb_t>(i) < nbDataInputs()) ? i : gk_IODefaultIndex;
+    }
+
+    IONb_t getNbFreeDataInputs() const;
+
+    /**
+     * @brief List the input indices of children linked to each output of the Node.
+     * @return std::vector<std::vector<std::pair<NodePtr,
+     * IOIndex_t>>>
+     */
+    std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> outputs() const;
+
+    /**
+     * @brief Children and their input Tensor ID linked to the outID-th output
+     * Tensor.
+     * @param outID
+     * @return std::vector<std::pair<NodePtr, IOIndex_t>>
+     */
+    std::vector<std::pair<NodePtr, IOIndex_t>> output(IOIndex_t outID) const;
+
+    /**
+     * @brief Total number of inputs, data and learnable parameters included.
+     * @details [data, data, weight, bias] => 4
+     * @return IONb_t
+     */
+    inline IONb_t nbInputs() const noexcept { return getOperator()->nbInputs(); }
+
+    /**
+     * @brief Number of inputs specifically for data.
+     * @details [data, data, weight, bias] => 2
+     * @return IONb_t
+     */
+    inline IONb_t nbDataInputs() const noexcept {
+        return getOperator()->nbDataInputs();
+    }
+
+    /**
+     * @brief Number of inputs linked to a Parent's output.
+     * @return IONb_t
+     */
+    IONb_t nbValidInputs() const;
+
+    /**
+     * @brief Getter for the number of Output Tensors of the Node.
+     * @return IONb_t
+     */
+    inline IONb_t nbOutputs() const noexcept { return getOperator()->nbOutputs(); }
+
+    IONb_t nbValidOutputs() const;
+
+    ///////////////////////////////////////////////////////
+    //        TOPOLOGY
+    ///////////////////////////////////////////////////////
+
+    /**
+     * @brief Set of pointers to each GraphView containing this Node.
+     * @return std::set<std::shared_ptr<GraphView>>
+     */
+    inline std::set<std::shared_ptr<GraphView>> views() const noexcept {
+        return mViews;
+    }
+
+    /**
+     * @brief Add a GraphView pointer to the list of GraphView containing
+     * the current Node. This feature allows transparent GraphViews.
+     * @param graphPtr Pointer to GraphView to add to the list.
+     */
+    inline void addView(const std::shared_ptr<GraphView> graphPtr) {
+        mViews.insert(graphPtr);
+    }
+
+    inline void removeView(const std::shared_ptr<GraphView> graphPtr) {
+        if (mViews.find(graphPtr) != mViews.end()) {
+            mViews.erase(graphPtr);
+        }
+    }
+
+    /**
+     * @brief Link another Node to an output of the current Node.
+     * @param otherNode Pointer to the other Node.
+     * @param outId ID of the output Tensor to connect to the other Node.
+     * Default to 0.
+     * @param otherInId ID of the input Tensor to connect to the current Node.
+     * Default to the first available data input.
+     */
+    void addChild(NodePtr otherNode,
+                    const IOIndex_t outId = IOIndex_t(0),
+                    IOIndex_t otherInId = gk_IODefaultIndex);
+
+    /**
+     * @brief Link a Node from a specific GraphView to the current Node.
+     * @param otherView Pointer to the GraphView whose content should be
+     * linked to the current Node.
+     * @param outId ID of the output Tensor to connect to the other Node.
+     * Default to 0.
+     * @param otherInId Pair of pointer to Node and Tensor ID for specifying the
+     * connection. If the GraphView whose content is linked has only one input
+     * Node, then it defaults to the first available data input Tensor of this
+     * Node.
+     */
+    void addChild(std::shared_ptr<GraphView> otherView,
+                    const IOIndex_t outId = IOIndex_t(0),
+                    std::pair<NodePtr, IOIndex_t> otherInId =
+                    std::pair<NodePtr, IOIndex_t>(nullptr, gk_IODefaultIndex));
+
+    /**
+     * @brief Get the list of parent Nodes. Since each input can be linked to
+     * a single Node, if none is linked the parent is a nullptr.
+     * @return std::vector<NodePtr>
+     */
+    std::vector<NodePtr> getParents() const;
+
+    inline NodePtr &getParents(IOIndex_t inID) {
+        assert(inID != gk_IODefaultIndex);
+        return mParents.at(inID);
+    }
+
+    NodePtr popParent(const IOIndex_t inID);
+
+    void removeParent(IOIndex_t inID);
+
+    /**
+     * @brief Get the Children object. Children do not include any nullptr since
+     * an output may be linked to nothing and the Node would still work fine.
+     * @return std::set<NodePtr>
+     */
+    std::set<NodePtr> getChildren() const;
+
+    std::vector<std::vector<NodePtr>> getOrderedChildren() const;
+
+    std::vector<NodePtr> getChildren(IOIndex_t outID) const;
+
+    void removeChild(IOIndex_t outID, std::size_t childId);
+
+    /**
+     * @brief Remove every link from surrounding Nodes to this one, and conversely.
+     */
+    void resetConnections(bool includeLearnableParam = false);
+
+private:
+    ///////////////////////////////////////////////////////
+    //        OPERATORS
+    ///////////////////////////////////////////////////////
+
+    // void setOperator(const std::shared_ptr<Operator> op_ptr);
+
+    ///////////////////////////////////////////////////////
+    //        TENSOR MANAGEMENT
+    ///////////////////////////////////////////////////////
+
+    void setInput(IOIndex_t inID, IOIndex_t newNodeOutID);
+
+    ///////////////////////////////////////////////////////
+    //        TOPOLOGY
+    ///////////////////////////////////////////////////////
+
+    /**
+     * @brief addChild specialization for Node targets.
+     * @param other_node
+     * @param outID
+     * @param other_inID
+     */
+    void addChildOp(NodePtr other_node, const IOIndex_t outID,
+                    IOIndex_t other_inID);
+
+    /**
+     * @brief addChild specialization for GraphView targets.
+     *
+     * @param other_graph
+     * @param outID
+     * @param other_inID
+     */
+    void addChildView(std::shared_ptr<GraphView> other_graph,
+                        const IOIndex_t outID,
+                        std::pair<NodePtr, IOIndex_t> other_inID);
+
+    /**
+     * @brief Add a Node to the list of parents.
+     * @param other_node Node to add to parents list.
+     * @param inID index for adding the parent.
+     */
+    void addParent(const NodePtr other_node, const IOIndex_t inID);
+};
+} // namespace Aidge
+
+#endif /* __AIDGE_NODE_H__ */
\ No newline at end of file
diff --git a/aidge/_Core/include/graph/OpArgs.hpp b/aidge/_Core/include/graph/OpArgs.hpp
new file mode 100644
index 00000000..9b61cc2c
--- /dev/null
+++ b/aidge/_Core/include/graph/OpArgs.hpp
@@ -0,0 +1,86 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_OPARGS_H__
+#define __AIDGE_OPARGS_H__
+
+#include <memory>
+#include <cassert>
+
+namespace Aidge {
+class Node;
+class GraphView;
+
+/**
+ * @brief Intermediate representation for the structural description of a graph.
+ */
+class OpArgs {
+private:
+    std::shared_ptr<Node> mNode = nullptr;
+    std::shared_ptr<GraphView> mView = nullptr;
+
+public:
+    OpArgs(const std::shared_ptr<GraphView>& view_)
+     : mView(view_) {assert(mView && "The GraphView provided should not be a nullptr.");}
+    
+    OpArgs(const std::shared_ptr<Node>& node_)
+     : mNode(node_) {assert(mNode && "The Node provided should not be a nullptr.");}
+
+    inline std::shared_ptr<Node> node() const noexcept {
+        return mNode;
+    }
+
+    inline std::shared_ptr<GraphView> view() const noexcept {
+        return mView;
+    }
+};
+
+
+/////////////////////////////
+// Sequential
+
+/**
+ * @brief Create a GraphView by linking every input with the next
+ * one in a sequential way. Nodes linked with the Sequential graph
+ * generation instructions must have a single output.
+ * Sequential(A, B, C) returns A-->B-->C.
+ * @param inputs List of Node and GraphView to link sequentially.
+ * @return std::shared_ptr<GraphView> Pointer to the generated view.
+ */
+std::shared_ptr<GraphView> Sequential(std::initializer_list<OpArgs> inputs);
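+
+// A minimal usage sketch, assuming the operator factories declared elsewhere
+// in this patch (layer parameters are illustrative):
+//   std::shared_ptr<GraphView> g =
+//       Sequential({Conv(3, 32, {3, 3}), ReLU(), FC(10)});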
+
+/////////////////////////////
+// Parallel
+
+/**
+ * @brief Creates a GraphView with provided Nodes without linking them.
+ * @param inputs List of Node and GraphView to add in parallel.
+ * @return std::shared_ptr<GraphView> pointer to the generated view.
+ */
+std::shared_ptr<GraphView> Parallel(std::initializer_list<OpArgs> inputs);
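+
+// Illustrative sketch: both Nodes end up in the same view, left unconnected
+// (parameters are illustrative):
+//   auto branches = Parallel({Conv(3, 16, {1, 1}), Conv(3, 16, {3, 3})});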
+
+/////////////////////////////
+// Residual
+
+/**
+ * @brief Create a GraphView by linking every input with the next
+ * one in a sequential way. Finally the first element output is used
+ * as another input for the last element. Nodes linked with the Residual graph
+ * generation instructions must have a single output.
+ * Residual(A, B, C) returns A-->B-->C , A-->C.
+ * @param inputs List of Node and GraphView to link sequentially.
+ * @return std::shared_ptr<GraphView> pointer to the generated view.
+ */
+std::shared_ptr<GraphView> Residual(std::initializer_list<OpArgs> inputs);
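+
+// Illustrative sketch: the last element must accept the extra data input,
+// e.g. an Add operator (assuming the Add factory from operator/Add.hpp):
+//   auto block = Residual({Conv(64, 64, {3, 3}), ReLU(), Add<2>()});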
+
+}
+
+#endif /* __AIDGE_OPARGS_H__ */
\ No newline at end of file
diff --git a/aidge/_Core/include/graphmatching/GRegex.hpp b/aidge/_Core/include/graphmatching/GRegex.hpp
new file mode 100644
index 00000000..5a49bcd8
--- /dev/null
+++ b/aidge/_Core/include/graphmatching/GRegex.hpp
@@ -0,0 +1,63 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+
+#ifndef __AIDGE_GREGEX_H__
+#define __AIDGE_GREGEX_H__
+
+#include <stdexcept>    // for exception, runtime_error, out_of_range
+#include <regex>
+#include <memory>       // for shared_ptr
+#include <algorithm>    // for next_permutation
+
+#include "graphmatching/Utile.hpp"
+#include "graphmatching/StmFactory.hpp"
+#include "graphmatching/SeqStm.hpp"
+#include "graphmatching/NodeRegex.hpp"
+#include "graphmatching/Match.hpp"
+
+
+namespace Aidge{
+
+class GRegex {
+// __init__(self,nodes_regex:dict,seq_regexps:list)
+
+    StmFactory mStmFab;
+    std::vector<SeqStm*> mStmInit;
+
+public:
+    GRegex(const std::map<std::string,NodeRegex*>& nodesRegex,std::vector<std::string>& seqRegexps );
+
+    std::set<NodeTmp> matchFromStartNodes(const std::vector<NodeTmp> startNodes,const std::shared_ptr<GraphView> graphToMatch);
+
+    bool walk_validation_all_stm_are_valid(const std::vector<std::vector<SeqStm*>> all_stm);
+
+    bool walk_validation_all_node_read_validate_by_one_stm(const std::vector<std::vector<SeqStm*>> all_stm);
+
+    bool walk_validation_common_nodes_same_tag_for_all_stm(const std::vector<std::vector<SeqStm*>> all_stm);
+    
+    std::set<NodeTmp> get_all_validate_nodes(const std::vector<std::vector<SeqStm*>> all_stm);
+
+    std::vector<SeqStm*> getStmInit() const {
+        return mStmInit;
+    }
+
+    StmFactory getStmFab() const {
+        return mStmFab;
+    }
+    
+    //std::set<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>> match(const std::shared_ptr<GraphView> graphToMatch);
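+    // Illustrative: a GRegex is built from a map such as
+    // {"A": new NodeRegex("Conv")} and sequence expressions such as {"A->A"};
+    // match() then returns the matched subgraphs.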
+    Match match(const std::shared_ptr<GraphView> graphToMatch);
+
+};
+
+}
+#endif //__AIDGE_GREGEX_H__
\ No newline at end of file
diff --git a/aidge/_Core/include/graphmatching/Match.hpp b/aidge/_Core/include/graphmatching/Match.hpp
new file mode 100644
index 00000000..2651bf3a
--- /dev/null
+++ b/aidge/_Core/include/graphmatching/Match.hpp
@@ -0,0 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_MATCH_H__
+#define __AIDGE_MATCH_H__
+
+#include <vector>
+#include <set>
+#include <iostream>
+#include <cassert>
+#include "graphmatching/Utile.hpp"
+
+
+namespace Aidge{
+
+class Match {
+
+public:
+    Match();
+
+    size_t getNbMatch();
+
+    void insert(std::vector<NodeTmp> startnodes, std::set<NodeTmp> matchnodes);
+
+    std::vector<std::vector<NodeTmp>> getStartNodes();
+
+    std::vector<std::set<NodeTmp>> getMatchNodes();
+
+protected:
+    std::vector<std::vector<NodeTmp>> mStartNodes;
+    std::vector<std::set<NodeTmp>> mMatchNodes;
+
+};
+
+}
+#endif //__AIDGE_MATCH_H__
\ No newline at end of file
diff --git a/aidge/_Core/include/graphmatching/NodeRegex.hpp b/aidge/_Core/include/graphmatching/NodeRegex.hpp
new file mode 100644
index 00000000..24a8ed22
--- /dev/null
+++ b/aidge/_Core/include/graphmatching/NodeRegex.hpp
@@ -0,0 +1,41 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_NODEREGEX_H__
+#define __AIDGE_NODEREGEX_H__
+#include <cstdlib>
+#include <iostream>
+#include <cstring>
+#include "graph/Node.hpp"
+
+
+namespace Aidge {
+
+class NodeRegex
+{
+    public:
+    std::string mCondition;
+
+    NodeRegex(const std::string c){
+        mCondition = c;
+    };
+    
+    // Version 1 - Only test the type of the node (no need for a lexer)
+    // Input : Node_op
+    // Output : bool
+    // return mCondition == Node_op.type
+    bool _is(std::shared_ptr<Node> &Node_op);
+    bool isA(std::string NodeType);
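+
+    // Illustrative: per the comment above, NodeRegex("Conv")._is(node) is
+    // expected to test node->type() == "Conv".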
+};
+
+}
+
+#endif /* __AIDGE_NODEREGEX_H__ */
\ No newline at end of file
diff --git a/aidge/_Core/include/graphmatching/SeqStm.hpp b/aidge/_Core/include/graphmatching/SeqStm.hpp
new file mode 100755
index 00000000..0abcc3d0
--- /dev/null
+++ b/aidge/_Core/include/graphmatching/SeqStm.hpp
@@ -0,0 +1,127 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_SEQSTM_H__
+#define __AIDGE_SEQSTM_H__
+
+#include <iostream>
+#include <map>
+#include <regex>
+#include <set>
+#include <stdexcept> // for exception, runtime_error, out_of_range
+#include <string>
+#include <utility>
+#include <vector>
+
+
+#include "graphmatching/NodeRegex.hpp"
+#include "graphmatching/Utile.hpp"
+
+
+namespace Aidge {
+
+class SeqStm {
+
+private:
+  const int mStmIdx;
+  const std::vector<std::vector<int>> mTransitionMatrix;
+  // str key of type like 'A' that we use in the A->B .. expr
+  const std::map<std::string, NodeRegex *> mNodesRegex;
+  // mTypeToIdxTransition.first  = std::pair(node_type, common_tag)
+  // mTypeToIdxTransition.second = idx in the transition matrix
+  const std::map<NodeTypeKey, int> mTypeToIdxTransition;
+
+  int mActSt;
+  std::set<NodeTmp> mAllNodeValidated;
+  std::set<NodeTmp> mAllNodeTested;
+  std::set<std::pair<NodeTmp, std::string>> mAllCommonNode;
+  bool mStmIsValid;
+
+  std::pair<NodeRegex *, std::string> getNodeRegexAndCommonAt(int idxType);
+
+  /**
+   * @brief test the stm on a type
+   * @return the common tag
+   */
+  std::string transitionOnNodeType(NodeType nodeType);
+
+public:
+  SeqStm(const int mStmIdx,
+         const std::vector<std::vector<int>> &mTransitionMatrix,
+         const std::map<std::string, NodeRegex *> &mNodesRegex,
+         const std::map<NodeTypeKey, int> &mTypeToIdxTransition, int mActSt,
+         std::set<NodeTmp> mAllNodeValidated, std::set<NodeTmp> mAllNodeTested,
+         std::set<std::pair<NodeTmp, std::string>> mAllCommonNode,
+         bool mStmIsValid);
+
+  //////////////////////////////////////
+  // STM test
+  /////////////////////////////////////
+
+  /**
+   * @brief Check whether a state is a valid (final) one.
+   * @return bool
+   */
+  bool isAValidSt(int st) {
+    std::size_t size = mTransitionMatrix.size();
+    return st == static_cast<int>(size - 1);
+  }
+
+  /**
+   * @brief true if the stm is blocked in its current state
+   * @return bool
+   */
+  bool isStmBlocked() { return mActSt == -1; }
+
+  /**
+   * @brief true if the stm is in a valid state
+   * @return bool
+   */
+  bool isValid() { return mStmIsValid; }
+
+  /////////////////////////////////////
+  // utility
+  /////////////////////////////////////
+  /**
+   * @brief Extract the type of a node.
+   * @return NodeType
+   */
+  NodeType getTheNodeType(NodeTmp node);
+
+  void drawStm();
+  /////////////////////////////////////
+  // getters
+  /////////////////////////////////////
+
+  std::set<std::pair<NodeTmp, std::string>> getAllCommonNode() {
+    return mAllCommonNode;
+  }
+  std::set<NodeTmp> getAllNodeTested() { return mAllNodeTested; }
+
+  std::set<NodeTmp> getAllNodeValidated() { return mAllNodeValidated; }
+
+  SeqStm *duplicateStm();
+
+  int getStmIdx() { return mStmIdx; }
+
+  int getState() { return mActSt; }
+  //////////////////////////////////////////
+  // USE
+  //////////////////////////////////////////
+  /**
+   * @brief test the stm on a node
+   * @return  pair new stm state, the common tag
+   */
+  std::pair<int, std::string> testNode(const NodeTmp node);
+};
+} // namespace Aidge
+
+#endif /* __AIDGE_SEQSTM_H__ */
\ No newline at end of file
diff --git a/aidge/_Core/include/graphmatching/StmFactory.hpp b/aidge/_Core/include/graphmatching/StmFactory.hpp
new file mode 100644
index 00000000..2e5e8451
--- /dev/null
+++ b/aidge/_Core/include/graphmatching/StmFactory.hpp
@@ -0,0 +1,55 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_STMFACTORY_H__
+#define __AIDGE_STMFACTORY_H__
+
+#include <map>
+#include <utility>
+#include <set>
+#include <string>
+#include <vector>
+#include <iostream>
+#include <stdexcept>   // for exception, runtime_error, out_of_range
+#include <regex>
+
+#include "graphmatching/NodeRegex.hpp"
+#include "graphmatching/SeqStm.hpp"
+#include "graphmatching/Utile.hpp"
+
+namespace Aidge{
+
+
+
+class StmFactory {
+
+    const std::map<std::string,NodeRegex*>& mNodesRegex;
+    std::size_t mCmptStm = 0;
+public:
+    StmFactory(const std::map<std::string,NodeRegex*>& nodesRegex);
+    //StmFactory(){};
+
+    SeqStm* makeNewStm(const std::string& sequRegex);
+    SeqStm* duplicateStm(SeqStm* stm);
+
+    std::size_t getNumberOfStm(){
+        return mCmptStm;
+    }
+private:
+
+    ParsingReturn initParsingSequRegex(const std::string& sequRegex);
+
+    std::vector<std::vector<int>> initTransitionMatrix(ParsingReturn& parsing);
+
+};
+}
+
+#endif //__AIDGE_STMFACTORY_H__
\ No newline at end of file
diff --git a/aidge/_Core/include/graphmatching/Utile.hpp b/aidge/_Core/include/graphmatching/Utile.hpp
new file mode 100644
index 00000000..251eafd8
--- /dev/null
+++ b/aidge/_Core/include/graphmatching/Utile.hpp
@@ -0,0 +1,50 @@
+
+/**
+ * @file
+ * @brief Shared type aliases and helper structures for graph matching.
+ * @version file 1.0.0
+ * @author vl241552
+ * @copyright
+ *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory.
+ * All rights reserved.
+ */
+
+#ifndef __AIDGE_UTILE_H__
+#define __AIDGE_UTILE_H__
+
+#include <map>
+
+#include "graph/Node.hpp"
+
+namespace Aidge {
+
+using NodeTmp = std::shared_ptr<Node>;
+using NodeType = std::string;
+using CommonTag = std::string;
+using NodeTypeKey = std::pair<NodeType, CommonTag>;
+
+// type def
+// struct NodeTypeKey {
+//     NodeType nodeType;
+//     std::string commonTag;
+
+//     // for map find
+//     bool operator<(const NodeTypeKey& other) const {
+//         if (nodeType != other.nodeType or commonTag != other.commonTag) {
+//             return false;
+//         } else {
+//             return true;
+//         }
+//     }
+
+// };
+
+struct ParsingReturn {
+  std::map<NodeTypeKey, int> typeToIdxTransition;
+  std::vector<std::pair<NodeTypeKey, std::string>> transition;
+};
+
+} // namespace Aidge
+
+#endif //__AIDGE_UTILE_H__
\ No newline at end of file
diff --git a/aidge/_Core/include/operator/Add.hpp b/aidge/_Core/include/operator/Add.hpp
new file mode 100644
index 00000000..62ef492a
--- /dev/null
+++ b/aidge/_Core/include/operator/Add.hpp
@@ -0,0 +1,132 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_ADD_H__
+#define __AIDGE_ADD_H__
+
+#include <numeric>
+#include <vector>
+#include <cmath>
+
+#include "utils/Parameter.hpp"
+#include "utils/Registrar.hpp"
+#include "operator/Operator.hpp"
+#include "operator/Producer.hpp"
+#include "data/Tensor.hpp"
+#include "graph/Node.hpp"
+
+namespace Aidge {
+
+template <std::size_t NUM>
+class Add_Op : public Operator,
+    public Registrable<std::string, std::unique_ptr<OperatorImpl>(const Add_Op<NUM>&)> {
+public:
+    // FIXME: change accessibility
+    std::array<std::shared_ptr<Tensor>, NUM> mInputs;
+    std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Add";
+
+    constexpr Add_Op()
+            : Operator(Type),
+            mOutput(std::make_shared<Tensor>())
+    {
+        assert(NUM > 0 && "Add should have at least one input");
+        for (std::size_t i = 0; i<NUM; ++i) {
+            mInputs[i] = std::make_shared<Tensor>();
+        }
+        setDatatype(DataType::Float32);
+    }
+
+    // Data operator[](const char* inputName) override final {
+    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
+    //         (strcmp(inputName, "weight") ? mInputs[1] :
+    //         (strcmp(inputName, "bias") ? mInputs[2] :
+    //         nullptr));
+    //     assert((in!=nullptr) && "No such parameter");
+    //     return *in;
+    // }
+
+    constexpr void setInput(IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    constexpr void computeOutputDims() override final {
+        if (!mInputs[0]->empty()) {
+            const auto expectedDims =  mInputs[0]->dims();
+            std::size_t nonEmptyInputTensor = 1;
+            for (; nonEmptyInputTensor<NUM && (!mInputs[nonEmptyInputTensor]->empty()); ++nonEmptyInputTensor) {
+                assert(expectedDims == mInputs[nonEmptyInputTensor]->dims());
+            }
+            if (nonEmptyInputTensor == NUM) {
+                mOutput->resize(expectedDims);
+            }
+        }
+    }
+
+    bool outputDimsForwarded() const override final {
+        std::size_t forwarded = 0;
+        for (; forwarded < NUM && (!mInputs[forwarded]->empty()); ++forwarded) {}
+        return ((forwarded==NUM) && !(mOutput->empty()));
+    }
+
+    // void checkDims() const override final {
+    //     assert(outputDimsForwarded());
+    //     for (const auto& in : mInputs) {
+    //         assert(in->dims() == mOutput->dims());
+    //     }
+    // }
+
+    std::shared_ptr<Data> getInput(IOIndex_t inputIdx) const override final {
+        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
+        return mInputs[inputIdx];
+    }
+
+    std::shared_ptr<Data> getOutput(IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return mOutput;
+    }
+
+    void setBackend(const std::string& name) {
+        mImpl = Registrar<Add_Op<NUM>>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        for (std::size_t i = 0; i < NUM; ++i) {
+            mInputs[i]->setBackend(name);
+        }
+    }
+
+    void setDatatype(const DataType& datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        for (std::size_t i = 0; i < NUM; ++i) {
+            mInputs[i]->setDatatype(datatype);
+        }
+    }
+
+    IONb_t nbInputs() const override final { return NUM; }
+    IONb_t nbDataInputs() const override final { return NUM; }
+    IONb_t nbOutputs() const override final { return 1; }
+};
+
+template <std::size_t NUM>
+inline std::shared_ptr<Node> Add(const char* name = nullptr) {
+    return std::make_shared<Node>(std::make_shared<Add_Op<NUM>>(), name);
+}
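+
+// Hedged usage sketch: element-wise sum of two upstream tensors (node names
+// are illustrative):
+//   auto sum = Add<2>("sum");
+//   n1->addChild(sum, 0, 0);
+//   n2->addChild(sum, 0, 1);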
+}
+
+#endif /* __AIDGE_ADD_H__ */
diff --git a/aidge/_Core/include/operator/Conv.hpp b/aidge/_Core/include/operator/Conv.hpp
new file mode 100644
index 00000000..c925945c
--- /dev/null
+++ b/aidge/_Core/include/operator/Conv.hpp
@@ -0,0 +1,164 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CONV_H__
+#define __AIDGE_CONV_H__
+
+#include <array>
+#include <cmath>
+#include <numeric>
+#include <vector>
+
+#include "data/Tensor.hpp"
+#include "graph/Node.hpp"
+#include "operator/Operator.hpp"
+#include "operator/Producer.hpp"
+#include "utils/Parameter.hpp"
+#include "utils/Registrar.hpp"
+#include "utils/Types.h"
+
+namespace Aidge {
+enum class ConvParam { StrideDims, DilationDims, InChannels, OutChannels, KernelDims };
+
+template <DimIdx_t DIM>
+class Conv_Op : public Operator,
+                public Registrable<std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
+                public Parameterizable<ConvParam, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
+                                       DimSize_t, std::array<DimSize_t, DIM>> {
+   public:
+    // FIXME: change accessibility
+    std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
+                                                      std::make_shared<Tensor>()};
+    std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+   public:
+    static constexpr const char *Type = "Conv";
+
+    Conv_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<ConvParam, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
+                                             DimSize_t, DimSize_t, std::array<DimSize_t, DIM>>;
+    template <ConvParam e>
+    using param = typename Parameterizable_::template param<e>;
+
+    constexpr Conv_Op(DimSize_t in_channels, DimSize_t out_channels, const std::array<DimSize_t, DIM> &kernel_dims)
+        : Operator(Type),
+          Parameterizable_(param<ConvParam::StrideDims>({}), param<ConvParam::DilationDims>({}),
+                           param<ConvParam::InChannels>(in_channels), param<ConvParam::OutChannels>(out_channels),
+                           param<ConvParam::KernelDims>(kernel_dims)),
+          mOutput(std::make_shared<Tensor>()) {
+        setDatatype(DataType::Float32);
+        this->template get<ConvParam::StrideDims>().fill(1);
+        this->template get<ConvParam::DilationDims>().fill(1);
+    }
+
+    // Data operator[](const char* inputName) override final {
+    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
+    //         (strcmp(inputName, "weight") ? mInputs[1] :
+    //         (strcmp(inputName, "bias") ? mInputs[2] :
+    //         nullptr));
+    //     assert((in!=nullptr) && "No such parameter");
+    //     return *in;
+    // }
+
+    constexpr void setInput(IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 3 && "operator supports only 3 inputs");
+        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    constexpr void computeOutputDims() override final {
+        if (!mInputs[0]->empty()) {
+            std::array<DimSize_t, DIM + 2> outputDims = {};
+
+            for (std::size_t dim = 0; dim < this->template get<ConvParam::KernelDims>().size() ; ++dim) {
+                const DimSize_t kernelExtent = this->template get<ConvParam::DilationDims>()[dim] *
+                                                       (this->template get<ConvParam::KernelDims>()[dim] - 1) +
+                                               1;
+
+                outputDims[dim+2] = static_cast<DimSize_t>(
+                        floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent +
+                                                 this->template get<ConvParam::StrideDims>()[dim]) /
+                              static_cast<float>(this->template get<ConvParam::StrideDims>()[dim])));
+            }
+
+            outputDims[1] = this->template get<ConvParam::OutChannels>();
+            outputDims[0] = mInputs[0]->dims()[0];
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
+
+    std::shared_ptr<Data> getInput(IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 3 && "operator supports only 3 inputs");
+        return mInputs[inputIdx];
+    }
+
+    std::shared_ptr<Data> getOutput(IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return mOutput;
+    }
+
+    void setBackend(const std::string &name) {
+        mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[1]->setBackend(name);
+        mInputs[2]->setBackend(name);
+    }
+
+    void setDatatype(const DataType &datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+        mInputs[1]->setDatatype(datatype);
+        mInputs[2]->setDatatype(datatype);
+    }
+
+    IONb_t nbInputs() const override final { return 3; }
+    IONb_t nbDataInputs() const override final { return 1; }
+    IONb_t nbOutputs() const override final { return 1; }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> Conv(DimSize_t in_channels, DimSize_t out_channels,
+                                  const std::array<DimSize_t, DIM> &kernel_dims, const char *name = nullptr) {
+    // FIXME: properly handle default w&b initialization in every cases
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims), name);
+    // addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w");
+    addProducer(conv, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
+    addProducer(conv, 2, {out_channels}, "b");
+    return conv;
+}
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> Conv(
+    DimSize_t in_channels,
+    DimSize_t out_channels,
+    DimSize_t const (&kernel_dims)[DIM],
+    const char *name = nullptr) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
+    return Conv(in_channels, out_channels, to_array(kernel_dims), name);
+}
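+
+// Hedged usage sketch (the brace overload above deduces the kernel rank):
+//   auto conv = Conv(3, 32, {5, 5}, "conv1");  // 3 -> 32 channels, 5x5 kernel
+// The factory also attaches the "w" and "b" Producer nodes via addProducer.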
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ConvParam>::data[] = {"StrideDims", "DilationDims", "InChannels", "OutChannels",
+                                                          "KernelDims"};
+}
+
+#endif /* __AIDGE_CONV_H__ */
diff --git a/aidge/_Core/include/operator/FC.hpp b/aidge/_Core/include/operator/FC.hpp
new file mode 100644
index 00000000..a44affcd
--- /dev/null
+++ b/aidge/_Core/include/operator/FC.hpp
@@ -0,0 +1,137 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_FC_H__
+#define __AIDGE_FC_H__
+
+#include <array>
+#include <cmath>
+#include <numeric>
+#include <memory>
+#include <vector>
+
+#include "utils/Types.h"
+#include "data/Tensor.hpp"
+#include "graph/Node.hpp"
+#include "operator/Operator.hpp"
+#include "operator/Producer.hpp"
+#include "utils/Parameter.hpp"
+#include "utils/Registrar.hpp"
+
+namespace Aidge {
+enum class FCParam { OutChannels, NoBias };
+
+class FC_Op : public Operator,
+              public Registrable<std::string,
+                                 std::unique_ptr<OperatorImpl>(const FC_Op &)>,
+              public Parameterizable<FCParam, DimSize_t, bool> {
+public:
+    // FIXME: change accessibility
+    std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(), std::make_shared<Tensor>()};
+    std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "FC";
+
+    FC_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<FCParam, DimSize_t, bool>;
+    template <FCParam e> using param = typename Parameterizable_::template param<e>;
+
+    FC_Op(DimSize_t out_channels, bool noBias)
+            : Operator(Type),
+            Parameterizable_(
+                param<FCParam::OutChannels>(out_channels),
+                param<FCParam::NoBias>(noBias)),
+            mOutput(std::make_shared<Tensor>())
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    void setInput(IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 3 && "operator supports only 3 inputs");
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        if (inputIdx == 2) {
+            assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template get<FCParam::NoBias>()) == false ? static_cast<std::size_t>(this->template get<FCParam::OutChannels>()) : 0));
+            assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
+        }
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+        if (inputIdx == 0 && mInputs[0]->nbDims() == 1)
+            mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, mInputs[inputIdx]->size()}));
+    }
+
+    void computeOutputDims() override final {
+        if (!mInputs[0]->empty()) {
+            // weight dims: <out_channels, in_features>
+            std::array<DimSize_t, 2> weightDims = {this->template get<FCParam::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
+            // output dims: <batch, out_channels>
+            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template get<FCParam::OutChannels>()};
+            
+            mInputs[1]->resize(weightDims);
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+    std::shared_ptr<Data> getInput(IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 3 && "operator supports only 3 inputs");
+        return mInputs[inputIdx];
+    }
+
+    std::shared_ptr<Data> getOutput(IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return mOutput;
+    }
+
+    void setBackend(const std::string& name) {
+        mImpl = Registrar<FC_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setBackend(name);
+        mInputs[1]->setBackend(name);
+        mInputs[2]->setBackend(name);
+    }
+
+    void setDatatype(const DataType& datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+        mInputs[1]->setDatatype(datatype);
+        mInputs[2]->setDatatype(datatype);
+    }
+
+
+    IONb_t nbInputs() const override final { return 3; }
+    IONb_t nbDataInputs() const override final { return 1; }
+    IONb_t nbOutputs() const override final { return 1; }
+};
+
+inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const char* name = nullptr) {
+    // FIXME: properly handle default w&b initialization in every cases
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name);
+    addProducer(fc, 1, {out_channels, 1}, "w");
+    addProducer(fc, 2, {(noBias ? 0 : out_channels)}, "b"); // already sets bias dims
+    return fc;
+}
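+
+// Hedged usage sketch: a 10-unit fully-connected head without bias.
+//   auto fc = FC(10, /*noBias=*/true, "fc1");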
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::FCParam>::data[] = {"OutChannels",
+                                                        "NoBias"};
+}
+
+#endif /* __AIDGE_FC_H__ */
\ No newline at end of file
diff --git a/aidge/_Core/include/operator/GenericOperator.hpp b/aidge/_Core/include/operator/GenericOperator.hpp
new file mode 100644
index 00000000..fcfd71a5
--- /dev/null
+++ b/aidge/_Core/include/operator/GenericOperator.hpp
@@ -0,0 +1,122 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_GENERIC_OPERATOR_H__
+#define __AIDGE_GENERIC_OPERATOR_H__
+
+#include <iostream>
+
+#include "graph/Node.hpp"
+#include "operator/Operator.hpp"
+#include "utils/CParameter.hpp"
+#include "utils/Registrar.hpp"
+#include "utils/Types.h"
+
+namespace Aidge {
+class GenericOperator_Op
+    : public Operator,
+      public Registrable<std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)> {
+   private:
+    CParameter mParams;
+    IONb_t mNbDataIn;
+    IONb_t mNbIn;
+    IONb_t mNbOut;
+
+   public:
+    GenericOperator_Op(const char *type, IONb_t nbDataIn, IONb_t nbIn, IONb_t nbOut)
+        : Operator(type), mNbDataIn(nbDataIn), mNbIn(nbIn), mNbOut(nbOut) {
+        // ctor
+    }
+
+    /**
+     * @brief Get the Parameter object identified by its name.
+     * @tparam T expected parameter type.
+     * @param key Parameter name.
+     * @details assert if T is not the actual parameter type, if the parameter
+     * does not exist or internal parameter position is invalid.
+     * @todo Returning a T const&? Dangerous => may get an address within the
+     * param buffer that becomes invalid after the CParam death.
+     * @note at() throws if the parameter does not exist, using find to test
+     * for parameter existence
+     * @return template<class T> The parameter.
+     */
+    template <class T>
+    T getParameter(std::string const &key) const {
+        return mParams.Get<T>(key);
+    }
+
+    ///\brief Add a parameter value, identified by its name
+    ///\tparam T expected parameter type
+    ///\param i_ParamName Parameter name
+    ///\param i_Value Parameter value
+    ///\todo Pass i_Value by ref if large or not trivial
+    ///\bug If parameter already exists, its value is changed but written in the
+    /// internal buffer in a new location (previous value is still in memory at
+    /// its previous location)
+    template <class T>
+    void addParameter(std::string const &key, T const &value) {
+        mParams.Add<T>(key, value);
+    }
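+
+    // Illustrative sketch of the parameter API (the key and type are
+    // hypothetical):
+    //   op.addParameter<int>("NbChannels", 64);
+    //   int c = op.getParameter<int>("NbChannels");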
+
+    std::string getParameterType(std::string const &key) { return mParams.getParamType(key); }
+
+    std::vector<std::string> getParametersName() { return mParams.getParametersName(); }
+
+    // Override virtual Operator methods
+    void setInput(IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final {
+        printf("Not available yet.\n");
+    }
+
+    void computeOutputDims() override final { printf("Not available yet.\n"); }
+
+    bool outputDimsForwarded() const override final {
+        assert(false && "GenericOperator cannot forward dims");
+        return false;
+    }
+
+    std::shared_ptr<Data> getInput(IOIndex_t /*inputIdx*/) const override final {
+        printf("Not available yet.\n");
+        return nullptr;
+    }
+
+    std::shared_ptr<Data> getOutput(IOIndex_t /*outputIdx*/) const override final {
+        printf("Not available yet.\n");
+        return nullptr;
+    }
+    ~GenericOperator_Op() = default;
+
+    void setBackend(const std::string & /*name*/) { printf("Not available yet.\n"); }
+    void setDatatype(const DataType & /*datatype*/) { printf("Not available yet.\n"); }
+    void forward() override final { printf("Not available yet.\n"); }
+    void backward() override final { printf("Not available yet.\n"); }
+
+    IONb_t nbInputs() const override final { return mNbIn; };
+    IONb_t nbDataInputs() const override final { return mNbDataIn; };
+    IONb_t nbOutputs() const override final { return mNbOut; };
+};
+
+/**
+ * @brief Fictitious custom operator not associated with any implementation.
+ * Allows importing unknown operators and simulating new ones.
+ * @param type Type of the fictitious operator.
+ * @param nbDataIn Number of input data.
+ * @param nbIn Number of input data + number of learnable parameters.
+ * @param nbOut Number of output data.
+ * @param name (optional) name of the Operator.
+ * @return std::shared_ptr<Node> Node associated with the Generic Operator.
+ */
+inline std::shared_ptr<Node> GenericOperator(const char *type, IONb_t nbDataIn, IONb_t nbIn, IONb_t nbOut,
+                                             const char *name = nullptr) {
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbDataIn, nbIn, nbOut), name);
+}
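+
+// Hedged usage sketch: import an unsupported operator with 1 data input,
+// 3 total inputs and 1 output (values are illustrative):
+//   auto custom = GenericOperator("MyOp", 1, 3, 1, "myop");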
+}  // namespace Aidge
+
+#endif /* __AIDGE_GENERIC_OPERATOR_H__ */
diff --git a/aidge/_Core/include/operator/Matmul.hpp b/aidge/_Core/include/operator/Matmul.hpp
new file mode 100644
index 00000000..d009fa73
--- /dev/null
+++ b/aidge/_Core/include/operator/Matmul.hpp
@@ -0,0 +1,125 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_MATMUL_H__
+#define __AIDGE_MATMUL_H__
+
+#include <array>
+#include <cmath>
+#include <numeric>
+#include <memory>
+#include <vector>
+
+#include "utils/Types.h"
+#include "data/Tensor.hpp"
+#include "graph/Node.hpp"
+#include "operator/Operator.hpp"
+#include "operator/Producer.hpp"
+#include "utils/Parameter.hpp"
+#include "utils/Registrar.hpp"
+
+namespace Aidge {
+enum class MatmulParam { OutChannels };
+
+class Matmul_Op : public Operator,
+              public Registrable<std::string,
+                                 std::unique_ptr<OperatorImpl>(const Matmul_Op &)>,
+              public Parameterizable<MatmulParam, DimSize_t> {
+public:
+    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
+    std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Matmul";
+
+    Matmul_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<MatmulParam, DimSize_t>;
+    template <MatmulParam e> using param = typename Parameterizable_::template param<e>;
+
+    Matmul_Op(DimSize_t out_channels)
+            : Operator(Type),
+            Parameterizable_(
+                param<MatmulParam::OutChannels>(out_channels)),
+            mOutput(std::make_shared<Tensor>())
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    void setInput(IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInputs[0]->empty()) {
+            // weight dims: <in_features, out_channels>
+            std::array<DimSize_t, 2> weightDims = {static_cast<DimSize_t>(mInputs[0]->size()), this->template get<MatmulParam::OutChannels>()};
+            // output dims: <out_channels>
+            std::array<DimSize_t, 1> outputDims = {this->template get<MatmulParam::OutChannels>()};
+            
+            mInputs[1]->resize(weightDims);
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+    std::shared_ptr<Data> getInput(IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        return mInputs[inputIdx];
+    }
+
+    std::shared_ptr<Data> getOutput(IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return mOutput;
+    }
+
+    void setBackend(const std::string& name) {
+        mImpl = Registrar<Matmul_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setBackend(name);
+        mInputs[1]->setBackend(name);
+    }
+
+    void setDatatype(const DataType& datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+        mInputs[1]->setDatatype(datatype);
+    }
+
+
+    IONb_t nbInputs() const override final { return 2; }
+    IONb_t nbDataInputs() const override final { return 1; }
+    IONb_t nbOutputs() const override final { return 1; }
+};
+
+inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const char* name = nullptr) {
+    // FIXME: properly handle default w&b initialization in every cases
+    auto matmul = std::make_shared<Node>(std::make_shared<Matmul_Op>(out_channels), name);
+    addProducer(matmul, 1, {1, out_channels}, "w");
+    return matmul;
+}
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::MatmulParam>::data[] = {"OutChannels"};
+}
+
+#endif /* __AIDGE_MATMUL_H__ */
diff --git a/aidge/_Core/include/operator/MetaOperator.hpp b/aidge/_Core/include/operator/MetaOperator.hpp
new file mode 100644
index 00000000..e2ff8781
--- /dev/null
+++ b/aidge/_Core/include/operator/MetaOperator.hpp
@@ -0,0 +1,31 @@
+///\file MetaOperator.h
+///\brief MetaOperator base Operator class.
+///\version file 1.0.0
+///\date Creation 09 February 2023
+///\date 09 February 2023
+///\par ChangeLog
+///\par
+/// v1.0.0, 09 February 2023<br>
+/// - Initial version.
+///\author mn271187
+///\copyright
+/// Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
+/// rights reserved.
+
+#ifndef __AIDGE_METAOPERATOR_H__
+#define __AIDGE_METAOPERATOR_H__
+
+#include "operator/Operator.hpp"
+
+namespace Aidge {
+class MetaOperator : public Operator {
+public:
+    MetaOperator()
+        : Operator("MetaOp")
+    {
+    }
+    ~MetaOperator() = default;
+};
+}
+
+#endif /* __AIDGE_METAOPERATOR_H__ */
diff --git a/aidge/_Core/include/operator/Operator.hpp b/aidge/_Core/include/operator/Operator.hpp
new file mode 100644
index 00000000..1dcaf9b5
--- /dev/null
+++ b/aidge/_Core/include/operator/Operator.hpp
@@ -0,0 +1,95 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_OPERATOR_H__
+#define __AIDGE_OPERATOR_H__
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "backend/OperatorImpl.hpp"
+#include "data/Data.hpp"
+#include "data/Tensor.hpp"
+#include "utils/Types.h"
+
+namespace Aidge {
+
+class Operator : public std::enable_shared_from_this<Operator> {
+protected:
+    std::unique_ptr<OperatorImpl> mImpl; // implementation of the operator
+
+private:
+    std::string mType;
+
+public:
+    Operator() = delete;
+    Operator(const char* type) : mType(type) {}
+    virtual ~Operator();
+
+
+public:
+
+    virtual void setInput(IOIndex_t inputIdx, std::shared_ptr<Data> data) = 0;
+    virtual void computeOutputDims() = 0;
+    virtual bool outputDimsForwarded() const = 0;
+    virtual std::shared_ptr<Data> getInput(IOIndex_t inputIdx) const = 0;
+    virtual std::shared_ptr<Data> getOutput(IOIndex_t outputIdx) const = 0;
+
+///////////////////////////////////////////////////////
+//        IMPLEMENTATION
+///////////////////////////////////////////////////////
+
+    virtual void setBackend(const std::string& name) = 0;
+    virtual void setDatatype(const DataType& datatype) = 0;
+
+    /**
+     * @brief Minimum amount of data from a specific input for one computation pass.
+     * @param inputIdx Index of the input analysed.
+     * @return NbElts_t 
+     */
+    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const;
+
+    /**
+     * @brief Amount of data from a specific input actually used in one computation pass.
+     * 
+     * @param inputIdx Index of the input analysed.
+     * @return NbElts_t 
+     */
+    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const;
+
+    /**
+     * @brief Amount of data ready to be used on a specific output.
+     * 
+     * @param outputIdx Index of the output analysed.
+     * @return NbElts_t 
+     */
+    NbElts_t getNbProducedData(IOIndex_t outputIdx) const;
+
+    virtual void forward();
+
+    virtual void backward();
+
+///////////////////////////////////////////////////////
+//        INNER
+///////////////////////////////////////////////////////
+
+    std::string type() const {
+        return mType;
+    }
+
+    virtual IONb_t nbInputs() const = 0;
+    virtual IONb_t nbDataInputs() const = 0;
+    virtual IONb_t nbOutputs() const = 0;
+};
+} // namespace Aidge
+
+#endif /* __AIDGE_OPERATOR_H__ */
diff --git a/aidge/_Core/include/operator/Producer.hpp b/aidge/_Core/include/operator/Producer.hpp
new file mode 100644
index 00000000..e81c35f1
--- /dev/null
+++ b/aidge/_Core/include/operator/Producer.hpp
@@ -0,0 +1,129 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_PRODUCER_H__
+#define __AIDGE_PRODUCER_H__
+
+#include <array>
+#include <vector>
+
+#include "utils/Types.h"
+#include "data/Tensor.hpp"
+#include "graph/Node.hpp"
+#include "operator/Operator.hpp"
+#include "utils/Parameter.hpp"
+#include "utils/Registrar.hpp"
+
+namespace Aidge {
+enum class ProducerParam { Dims };
+
+template <std::size_t DIM>
+class Producer_Op
+    : public Operator,
+      public Registrable<std::string, std::unique_ptr<OperatorImpl>(
+                                          const Producer_Op<DIM> &)>,
+      public Parameterizable<ProducerParam, std::array<DimSize_t, DIM>> {
+private:
+    std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Producer";
+
+    using Parameterizable_ = Parameterizable<ProducerParam, std::array<DimSize_t, DIM>>;
+    template <ProducerParam e> using param = typename Parameterizable_::template param<e>;
+
+    Producer_Op(const std::array<DimSize_t, DIM>& dims)
+        : Operator(Type),
+          Parameterizable_(
+              param<ProducerParam::Dims>(dims)),
+          mOutput(std::make_shared<Tensor>())
+    {
+        //ctor
+        setDatatype(DataType::Float32);
+        mOutput->resize(this->template get<ProducerParam::Dims>());
+    }
+
+    void setInput(IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final {
+        assert(false && "Producer operator takes no input");
+    }
+
+    constexpr void computeOutputDims() override final {}
+
+    constexpr bool outputDimsForwarded() const override final {return true;}
+
+    std::shared_ptr<Data> getInput(IOIndex_t /*inputIdx*/) const override final {
+        assert(false && "Producer operator takes no input");
+        return nullptr;
+    }
+
+    std::shared_ptr<Data> getOutput(IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return mOutput;
+    }
+
+    void setOutput(std::shared_ptr<Data> outputData) {
+        mOutput = std::static_pointer_cast<Aidge::Tensor>(outputData);
+    }
+
+    void setBackend(const std::string& name) {
+        mImpl = Registrar<Producer_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) {
+        mOutput->setDatatype(datatype);
+    }
+
+    IONb_t nbInputs() const override final { return 0; };
+    IONb_t nbDataInputs() const override final { return 0; };
+    IONb_t nbOutputs() const override final { return 1; };
+
+public:
+  void forward() override final {
+    printf("Basic Producer forward() function.\n");
+  }
+  void backward() override final {
+    printf("Basic Producer backward() function.\n");
+  }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const char *name = nullptr) {
+  static_assert(DIM<=MaxDim,"Too many tensor dimensions required by Producer, not supported");
+  return std::make_shared<Node>(std::make_shared<Producer_Op<DIM>>(dims), name);
+}
+
+template <std::size_t DIM>
+inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const char *name = nullptr) {
+  return Producer(to_array(dims), name);
+}
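+
+// Hedged usage sketch: a constant 4-D tensor producer (dims are illustrative):
+//   auto weights = Producer({32, 3, 3, 3}, "w0");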
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const char* extension) {
+    assert(inputIdx != gk_IODefaultIndex);
+    static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
+    // Keep the generated name alive until Producer() is called: taking c_str()
+    // of a temporary std::string would leave the pointer dangling.
+    const std::string prodName = otherNode->name().empty() ? std::string() : (otherNode->name() + "_" + std::string(extension));
+    auto prod = Producer(dims, prodName.empty() ? nullptr : prodName.c_str());
+    prod->addChild(otherNode, 0, inputIdx);
+    otherNode->getOperator()->setInput(inputIdx, std::static_pointer_cast<Producer_Op<DIM>>(prod->getOperator())->getOutput(0));
+}
+
+template <std::size_t DIM>
+void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const char* extension) {
+    addProducer(otherNode, inputIdx, to_array(dims), extension);
+}
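+
+// Illustrative: attach a learnable bias tensor to input #2 of an existing
+// node, as the Conv and FC factories above do:
+//   addProducer(conv, 2, {32}, "b");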
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ProducerParam>::data[] = {"Dims"};
+}
+
+#endif /* __AIDGE_PRODUCER_H__ */
\ No newline at end of file
diff --git a/aidge/_Core/include/operator/ReLU.hpp b/aidge/_Core/include/operator/ReLU.hpp
new file mode 100644
index 00000000..a375821d
--- /dev/null
+++ b/aidge/_Core/include/operator/ReLU.hpp
@@ -0,0 +1,115 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_RELU_H__
+#define __AIDGE_RELU_H__
+
+#include <vector>
+#include <memory>
+
+#include "utils/Parameter.hpp"
+#include "utils/Registrar.hpp"
+#include "operator/Operator.hpp"
+#include "backend/OperatorImpl.hpp"
+#include "data/Tensor.hpp"
+#include "data/Data.hpp"
+#include "graph/Node.hpp"
+#include "utils/Types.h"
+
+namespace Aidge {
+
+enum class ReLUParam {
+    Alpha
+};
+
+class ReLU_Op : public Operator,
+    public Registrable<std::string, std::unique_ptr<OperatorImpl>(const ReLU_Op&)>,
+    public Parameterizable<ReLUParam, float> {
+public:
+    // FIXME: change accessibility
+    std::array<std::shared_ptr<Tensor>, 1> mInputs = {std::make_shared<Tensor>()};
+    std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "ReLU";
+
+    ReLU_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<ReLUParam, float>;
+    template <ReLUParam e> using param = typename Parameterizable_::template param<e>;
+
+    ReLU_Op(float alpha)
+            : Operator(Type),
+            Parameterizable_(
+                param<ReLUParam::Alpha>(alpha)),
+            mOutput(std::make_shared<Tensor>())
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    void setInput(IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInputs[0]->empty())
+            mOutput->resize(mInputs[0]->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+    std::shared_ptr<Data> getInput(IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        return mInputs[inputIdx];
+    }
+
+    std::shared_ptr<Data> getOutput(IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return mOutput;
+    }
+
+    void setBackend(const std::string& name) {
+        mImpl = Registrar<ReLU_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+    }
+
+    IONb_t nbInputs() const override final { return 1; }
+    IONb_t nbDataInputs() const override final { return 1; }
+    IONb_t nbOutputs() const override final { return 1; }
+};
+
+inline std::shared_ptr<Node> ReLU(float alpha = 0.0f, const char* name = nullptr) {
+    // FIXME: properly handle default w&b initialization in every cases
+    return std::make_shared<Node>(std::make_shared<ReLU_Op>(alpha), name);
+}
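+
+// Hedged usage sketch: leaky variant with a small negative slope.
+//   auto relu = ReLU(0.01f, "relu1");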
+
+}
+
+namespace {
+template <>
+const char* const EnumStrings<Aidge::ReLUParam>::data[]
+    = {"Alpha"};
+}
+
+#endif /* __AIDGE_RELU_H__ */
diff --git a/aidge/_Core/include/scheduler/Scheduler.hpp b/aidge/_Core/include/scheduler/Scheduler.hpp
new file mode 100644
index 00000000..2abe90e1
--- /dev/null
+++ b/aidge/_Core/include/scheduler/Scheduler.hpp
@@ -0,0 +1,71 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_SCHEDULER_H__
+#define __AIDGE_SCHEDULER_H__
+
+#include <chrono>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+namespace Aidge {
+class Node;
+class GraphView;
+
+class SequentialScheduler {
+public:
+    struct SchedulingElement {
+        SchedulingElement(
+            std::shared_ptr<Node> node_,
+            std::chrono::time_point<std::chrono::high_resolution_clock> start_,
+            std::chrono::time_point<std::chrono::high_resolution_clock> end_)
+            : node(node_), start(start_), end(end_) {}
+
+        std::shared_ptr<Node> node;
+        std::chrono::time_point<std::chrono::high_resolution_clock> start;
+        std::chrono::time_point<std::chrono::high_resolution_clock> end;
+    };
+
+    SequentialScheduler(std::shared_ptr<GraphView> graphView)
+        : mGraphView(graphView)
+    {
+        // ctor
+    }
+    ~SequentialScheduler() = default;
+
+    /**
+     * @brief Run the provided Computational Graph with a batch of data
+     */
+    void forward(bool forwardDims = true, bool verbose = false);
+
+    /**
+     * @brief Save in a Markdown file the order of layers execution.
+     * @param fileName Name of the generated file.
+     */
+    void saveSchedulingDiagram(const std::string& fileName) const;
+
+private:
+    /**
+     * @brief Get the set of layers receiving at least one input from the
+     * layers currently being processed.
+     *
+     * @param producers Set of layers ready to run.
+     * @return std::set<std::shared_ptr<Node>>
+     */
+    std::set<std::shared_ptr<Node>> getConsumers(const std::set<std::shared_ptr<Node>>& producers) const;
+
+    std::shared_ptr<GraphView> mGraphView;
+    std::vector<SchedulingElement> mScheduling;
+};
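+
+// Usage sketch ("g" being some std::shared_ptr<GraphView> built elsewhere):
+//
+//     SequentialScheduler scheduler(g);
+//     scheduler.forward();                         // forwards dims, then runs every node
+//     scheduler.saveSchedulingDiagram("schedule"); // saves the execution order to a file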
+} // namespace Aidge
+
+#endif /* __AIDGE_SCHEDULER_H__ */
\ No newline at end of file
diff --git a/aidge/_Core/include/utils/CParameter.hpp b/aidge/_Core/include/utils/CParameter.hpp
new file mode 100644
index 00000000..c7d0ea23
--- /dev/null
+++ b/aidge/_Core/include/utils/CParameter.hpp
@@ -0,0 +1,110 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CPARAMETER_H__
+#define __AIDGE_CPARAMETER_H__
+
+#include <assert.h>
+#include <cstdint>
+#include <map>
+#include <string>
+#include <typeinfo>
+#include <vector>
+#include <numeric>
+
+namespace Aidge {
+
+///\todo store also a fix-sized code that indicates the type
+///\todo managing complex types or excluding non-trivial, non-aggregate types
+class CParameter
+{
+public:
+    // not copyable, not movable
+    CParameter(CParameter const &) = delete;
+    CParameter(CParameter &&) = delete;
+    CParameter &operator=(CParameter const &) = delete;
+    CParameter &operator=(CParameter &&) = delete;
+    CParameter() : m_Params({}) {}
+
+    /**
+     * \brief Return a parameter identified by its name
+     * \tparam T expected parameter type
+     * \param i_ParamName Parameter name
+     * \details asserts if T is not the actual parameter type, if the parameter
+     *  does not exist or if the internal parameter position is invalid.
+     * \todo Return a T const&? But dangerous => the client may get an address within
+     *  the param buffer that becomes invalid after the CParameter dies.
+     * \note at() throws if the parameter does not exist; using find to test for parameter existence
+     */
+    template<class T> T Get(std::string const &i_ParamName) const
+    {
+        assert(m_Params.find(i_ParamName) != m_Params.end());
+        assert(m_Types.find(i_ParamName) != m_Types.end());
+        assert(m_Params.at(i_ParamName) <= m_OffSet);
+        assert(typeid(T).name() == m_Types.at(i_ParamName));
+        return *reinterpret_cast<T *>(m_BeginBuffer + m_Params.at(i_ParamName));
+    }
+
+    ///\brief Add a parameter value, identified by its name
+    ///\tparam T expected parameter type
+    ///\param i_ParamName Parameter name
+    ///\param i_Value Parameter value
+    ///\todo Pass i_Value by ref if large or not trivial
+    ///\bug If parameter already exists, its value is changed but written in the
+    /// internal buffer in a new location (previous value is still in memory at its previous location)
+    template<class T> void Add(std::string const &i_ParamName, T const &i_Value)
+    {
+        m_Buffer.resize(m_Buffer.size() + (sizeof(T) / sizeof(uint8_t)));
+        m_BeginBuffer = m_Buffer.data(); // Update buffer ptr in case of memory reordering
+        *reinterpret_cast<T *>(m_BeginBuffer + m_OffSet)
+            = i_Value; // Write the raw bytes of any trivially-copyable value into the buffer
+        m_Params[i_ParamName] = m_OffSet; // Copy pointer offset
+        m_OffSet += sizeof(T); // Increment offset
+        m_Types[i_ParamName] = typeid(i_Value).name();
+    }
+    
+    std::string getParamType(std::string const &i_ParamName){
+        return m_Types[i_ParamName];
+    }
+
+    std::vector<std::string> getParametersName(){
+        std::vector<std::string> parametersName;
+        for(auto const& it: m_Params)
+            parametersName.push_back(it.first);
+        return parametersName;
+    }
+
+
+    ~CParameter() = default;
+
+private:
+    // Offsets are stored rather than raw addresses so the buffer can be safely reallocated
+    std::map<std::string, std::size_t> m_Params; // { Param name : offset }
+
+    ///\brief Map used to check for type errors
+    /* Note: storing `std::map<std::string, std::type_info const *>` was tried,
+    but the type_info object appeared to be destroyed. Storing a string and
+    doing string comparisons is not ideal; a custom enum type (or a standard
+    solution, if one exists) could replace it.
+    */
+    std::map<std::string, std::string> m_Types;
+
+    ///\brief All parameters values concatenated in raw binary form.
+    std::vector<uint8_t> m_Buffer = {};
+
+    ///\brief Starting address of the buffer
+    uint8_t *m_BeginBuffer = m_Buffer.data();
+
+    ///\brief Offset, in number of uint8_t, of the next parameter to write
+    std::size_t m_OffSet = 0;
+};
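+
+// Usage sketch: Add() appends the raw bytes of a value to the internal buffer
+// and Get<T>() reinterprets them, so T must match the stored type exactly
+// (e.g. 1.0e-5f, not 1.0e-5). Parameter names below are illustrative.
+//
+//     CParameter params;
+//     params.Add<float>("Epsilon", 1.0e-5f);
+//     params.Add<int>("NbChannels", 32);
+//     float eps = params.Get<float>("Epsilon"); // asserts on a type mismatch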
+
+}
+
+
+#endif /* __AIDGE_CPARAMETER_H__ */
diff --git a/aidge/_Core/include/utils/Parameter.hpp b/aidge/_Core/include/utils/Parameter.hpp
new file mode 100644
index 00000000..98325c61
--- /dev/null
+++ b/aidge/_Core/include/utils/Parameter.hpp
@@ -0,0 +1,155 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_PARAMETER_H__
+#define __AIDGE_PARAMETER_H__
+
+#include <array>
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <tuple>
+
+namespace {
+// This is the type that will hold all the strings. Each enumerate type will
+// declare its own specialization.
+template <typename T> struct EnumStrings {
+    static const char* const data[];
+};
+}
+
+namespace Aidge {
+template<class T, std::size_t N>
+constexpr std::size_t size(T (&)[N]) { return N; }
+
+template <class PARAM_ENUM, class ...T>
+class Parameterizable {
+public:
+    typedef std::tuple<T...> Parameters;
+
+    // Helper class to pass to the constructor
+    template <PARAM_ENUM paramEnum>
+    class param {
+    public:
+        constexpr param(const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& v) : value(v) {}
+        const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type value;
+    };
+
+/*
+    // Direct tuple initialization
+    Parameterizable(T... params) : mParams({params...}) {
+
+    }
+*/
+
+    // Constructor for parameters initialization.
+    // Compile-time guarantee that every parameter is initialized.
+    template <PARAM_ENUM ...paramEnum> // non-type parameter pack
+    constexpr Parameterizable(const param<paramEnum>&&... params) {
+        // Check number of params consistency
+        static_assert(sizeof...(params) == std::tuple_size<std::tuple<T...>>::value, "wrong number of parameters in constructor");
+        static_assert(size(EnumStrings<PARAM_ENUM>::data) == std::tuple_size<std::tuple<T...>>::value, "wrong number of parameters in enum string");
+
+        // Check no duplicates
+        constexpr std::array<PARAM_ENUM, std::tuple_size<std::tuple<T...>>::value> pe = { paramEnum... };
+        static_assert(!hasDuplicates(pe), "duplicate parameter"); // requires C++14
+
+        // Init params with constructor arguments
+        const std::array<PARAM_ENUM, std::tuple_size<std::tuple<T...>>::value> p = { ((void)(get<paramEnum>() = params.value), paramEnum) ... };
+        (void)p; // avoid unused warning
+    }
+
+    // Compile-time access with enum
+    template <PARAM_ENUM paramEnum>
+    constexpr typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() {
+        return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
+    }
+    
+    template <PARAM_ENUM paramEnum>
+    constexpr const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() const {
+        return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
+    }
+
+    // Runtime access with enum
+    template <typename R>
+    constexpr R& get(PARAM_ENUM paramEnum) {
+        return get<R>(static_cast<std::size_t>(paramEnum));
+    }
+
+    template <typename R>
+    constexpr const R& get(PARAM_ENUM paramEnum) const {
+        return get<R>(static_cast<std::size_t>(paramEnum));
+    }
+
+    // Runtime existence check by name
+    constexpr bool isParam(const char* name) const {
+        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
+            if (strcmp(EnumStrings<PARAM_ENUM>::data[i], name) == 0) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+
+    // Runtime access with name
+    template <typename R>
+    constexpr R& get(const char* name) {
+        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
+            if (strcmp(EnumStrings<PARAM_ENUM>::data[i], name) == 0) {
+                return get<R>(i);
+            }
+        }
+
+        assert(false && "parameter not found");
+    }
+
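+    // Runtime access by tuple index: the overloads below unroll the tuple
+    // recursively at compile time and check R against the stored type at index i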
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
+    constexpr typename std::enable_if<(SIZE > 0), R&>::type get(std::size_t i) {
+        if (i == SIZE) {
+            if (std::is_same<R, typename std::tuple_element<SIZE,std::tuple<T...>>::type>::value) {
+                return reinterpret_cast<R&>(std::get<SIZE>(mParams));
+            }
+            else {
+                assert(false && "wrong parameter type");
+            }
+        }
+        else {
+            return get<R, SIZE-1>(i);
+        }
+    }
+
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
+    constexpr typename std::enable_if<(SIZE <= 0), R&>::type get(std::size_t i) {
+        assert(false && "parameter not found");
+    }
+
+    constexpr const std::tuple<T...>& getParams() const {
+        return mParams;
+    }
+
+private:
+    template <typename V, std::size_t N>
+    static constexpr bool hasDuplicates(const std::array<V, N>& array) {
+        for (std::size_t i = 1; i < N; i++) {
+            for (std::size_t j = 0; j < i; j++) {
+                if (array[i] == array[j]) {
+                    return true;
+                }
+            }
+        }
+
+        return false;
+    }
+
+    std::tuple<T...> mParams;
+};
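+
+// Usage sketch (illustrative operator, mirroring the pattern used by ReLU_Op):
+// declare an enum, specialize EnumStrings for it in an anonymous namespace,
+// and forward one param<> per enum value to the Parameterizable constructor.
+//
+//     enum class MyOpParam { Alpha, Beta };
+//     // namespace { template <> const char* const
+//     //             EnumStrings<MyOpParam>::data[] = {"Alpha", "Beta"}; }
+//     struct MyOp : public Parameterizable<MyOpParam, float, int> {
+//         using Parameterizable_ = Parameterizable<MyOpParam, float, int>;
+//         template <MyOpParam e>
+//         using param = typename Parameterizable_::template param<e>;
+//         MyOp(float a, int b)
+//             : Parameterizable_(param<MyOpParam::Alpha>(a),
+//                                param<MyOpParam::Beta>(b)) {}
+//     };
+//     // myOp.get<MyOpParam::Alpha>() -> float&, resolved at compile time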
+}
+
+#endif /* __AIDGE_PARAMETER_H__ */
diff --git a/aidge/_Core/include/utils/Recipies.hpp b/aidge/_Core/include/utils/Recipies.hpp
new file mode 100644
index 00000000..a53e8f25
--- /dev/null
+++ b/aidge/_Core/include/utils/Recipies.hpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_RECIPIES_H__
+#define __AIDGE_RECIPIES_H__
+
+#include "graph/Node.hpp"
+#include "graph/GraphView.hpp"
+
+namespace Aidge{
+
+void fuseMulAdd(std::set<std::shared_ptr<Node>> nodes);
+
+
+
+}
+
+
+#endif /* __AIDGE_RECIPIES_H__ */
\ No newline at end of file
diff --git a/aidge/_Core/include/utils/Registrar.hpp b/aidge/_Core/include/utils/Registrar.hpp
new file mode 100644
index 00000000..86c90410
--- /dev/null
+++ b/aidge/_Core/include/utils/Registrar.hpp
@@ -0,0 +1,77 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_REGISTRAR_H__
+#define __AIDGE_REGISTRAR_H__
+
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#endif
+
+#include <cassert>
+#include <cstdlib>
+#include <functional>
+#include <iostream>
+#include <map>
+#include <tuple>
+#include <vector>
+
+namespace Aidge {
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
+
+template <class Key, class Func>
+class Registrable {
+public:
+    typedef Key registrar_key;
+    typedef std::function<Func> registrar_type;
+
+    static std::map<Key, std::function<Func>>& registry()
+    {
+        #ifdef PYBIND
+        if (std::getenv("AIDGE_CORE_WITH_PYBIND")){
+            std::string name = std::string("registrar_")+typeid(Registrable<Key, Func>).name();
+            static auto shared_data = reinterpret_cast<std::map<Key, std::function<Func>> *>(py::get_shared_data(name));
+            if (!shared_data)
+                shared_data = static_cast<std::map<Key, std::function<Func>> *>(py::set_shared_data(name, new std::map<Key, std::function<Func>>()));
+            return *shared_data;
+        }
+        #endif // PYBIND
+        static std::map<Key, std::function<Func>> rMap;
+        return rMap;
+    }
+
+};
+
+template <class C>
+struct Registrar {
+    Registrar(const typename C::registrar_key& key, typename C::registrar_type func) {
+        //printf("REGISTRAR: %s\n", key.c_str());
+        bool newInsert;
+        std::tie(std::ignore, newInsert) = C::registry().insert(std::make_pair(key, func));
+        //assert(newInsert && "registrar already exists");
+    }
+
+    static auto create(const typename C::registrar_key& key){
+        const auto it = C::registry().find(key);
+        assert(it != C::registry().end() && "invalid registrar key");
+
+        return (*it).second;
+    }
+    static std::vector<typename C::registrar_key> getKeys(){
+        std::vector<typename C::registrar_key> keys;
+        for(auto keyValue : C::registry())
+            keys.push_back(keyValue.first);
+        return keys;
+    }    
+};
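+
+// Usage sketch, mirroring ReLU_Op's Registrable base ("cpu" and ReLUImpl_cpu
+// are hypothetical): a backend registers a factory at static-initialization
+// time; create() then retrieves it by key.
+//
+//     static Registrar<ReLU_Op> registrarReLUImplCpu("cpu",
+//         [](const ReLU_Op& op) -> std::unique_ptr<OperatorImpl> {
+//             return std::make_unique<ReLUImpl_cpu>(op);
+//         });
+//     auto factory = Registrar<ReLU_Op>::create("cpu");
+//     std::unique_ptr<OperatorImpl> impl = factory(myReLUOp);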
+}
+
+
+#endif // __AIDGE_REGISTRAR_H__
\ No newline at end of file
diff --git a/aidge/_Core/include/utils/Types.h b/aidge/_Core/include/utils/Types.h
new file mode 100644
index 00000000..f626c635
--- /dev/null
+++ b/aidge/_Core/include/utils/Types.h
@@ -0,0 +1,62 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+
+#ifndef __AIDGE_TYPES_H__
+#define __AIDGE_TYPES_H__
+
+#include <limits>
+#include <type_traits>
+#include <cstddef>
+#include <cstdint>
+
+namespace Aidge
+{
+//////////////////////////////////////
+///          Tensor
+//////////////////////////////////////
+
+/// @brief Number of elements used for scheduling
+using NbElts_t = std::size_t;
+constexpr NbElts_t MaxElts = std::numeric_limits<NbElts_t>::max();
+
+///\brief Signed coordinate type for Tensor (allows negative coordinates).
+using Coord_t = std::make_signed<std::size_t>::type;
+constexpr Coord_t MaxCoord = std::numeric_limits<Coord_t>::max();
+
+///\brief Unsigned value for the size of each dimension for a Tensor.
+using DimSize_t = std::size_t;
+constexpr DimSize_t MaxDimSize = std::numeric_limits<DimSize_t>::max();
+
+///\brief Unsigned index for a Tensor's number of dimension.
+using DimIdx_t = std::uint8_t;
+constexpr DimIdx_t MaxDim = std::numeric_limits<DimIdx_t>::max();
+
+//////////////////////////////////////
+///          Operator/Nodes
+//////////////////////////////////////
+
+///\brief Signed integral type to hold an IO index.
+///\details <0 values reserved
+///\todo Change it to an unsigned type, with the default set to numeric_limits<IOIndex_t>::max() and the maximum valid index to numeric_limits<IOIndex_t>::max()-1
+using IOIndex_t = std::make_signed<std::uint16_t>::type;
+/// @brief Default for absence of connection
+constexpr IOIndex_t gk_IODefaultIndex = -1;
+constexpr IOIndex_t gk_IOMaxIndex = std::numeric_limits<IOIndex_t>::max();
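+
+// Usage sketch: a free (unconnected) input reports gk_IODefaultIndex
+// (Node::input is declared in graph/Node.hpp):
+//
+//     std::pair<std::shared_ptr<Node>, IOIndex_t> in = node->input(0);
+//     if (in.second == gk_IODefaultIndex) { /* input 0 is free */ }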
+
+///\brief Number of input/output connections for a Node/Operator
+using IONb_t = std::uint16_t;
+constexpr IONb_t gk_IOMaxNb = std::numeric_limits<IONb_t>::max();
+
+
+} // namespace Aidge
+
+#endif // __AIDGE_TYPES_H__
\ No newline at end of file
diff --git a/aidge/_Core/src/graph/Connector.cpp b/aidge/_Core/src/graph/Connector.cpp
new file mode 100644
index 00000000..4297453f
--- /dev/null
+++ b/aidge/_Core/src/graph/Connector.cpp
@@ -0,0 +1,54 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "graph/Connector.hpp"
+
+#include <map>
+
+#include "graph/GraphView.hpp"
+#include "graph/Node.hpp"
+#include "utils/Types.h"
+
+Aidge::Connector::Connector(std::shared_ptr<Aidge::Node> node) {
+    mNode = node;
+    if (mNode->nbOutputs() == 1U) {
+        mOutputId = 0;
+    }
+}
+
+Aidge::IONb_t Aidge::Connector::size() const { return mNode->nbOutputs(); }
+
+std::shared_ptr<Aidge::GraphView> Aidge::generateGraph(std::vector<Connector> ctors) {
+    std::shared_ptr<GraphView> graph = std::make_shared<GraphView>();
+    std::vector<std::shared_ptr<Node>> nodesToAdd = std::vector<std::shared_ptr<Node>>();
+    for (const Connector& ctor : ctors) {
+        nodesToAdd.push_back(ctor.node());
+    }
+    std::vector<std::shared_ptr<Node>> buffer = {};
+
+    while (!nodesToAdd.empty()) {
+        while (!nodesToAdd.empty()) {
+            graph->add(nodesToAdd.back());  // only add, connection already done
+                                            // between nodes
+            std::vector<std::shared_ptr<Node>> parents = nodesToAdd.back()->getParents();
+            std::set<std::shared_ptr<Node>> alreadyAdded = graph->getNodes();
+            for (std::shared_ptr<Node> parent : parents) {
+                if (alreadyAdded.find(parent) == alreadyAdded.end()) {
+                    buffer.push_back(parent);
+                }
+            }
+            nodesToAdd.pop_back();
+        }
+        nodesToAdd.insert(nodesToAdd.end(), buffer.begin(), buffer.end());
+        buffer = {};
+    }
+    return graph;
+}
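+
+// Usage sketch (names hypothetical): Connectors remember their producing Node,
+// so the graph can be rebuilt by walking parents backward from the final
+// Connector(s):
+//
+//     Connector x(dataProviderNode);
+//     Connector y = (*reluNode)({x});                    // wires x into reluNode
+//     std::shared_ptr<GraphView> g = generateGraph({y}); // collects all ancestors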
\ No newline at end of file
diff --git a/aidge/_Core/src/graph/GraphView.cpp b/aidge/_Core/src/graph/GraphView.cpp
new file mode 100644
index 00000000..7d98d00a
--- /dev/null
+++ b/aidge/_Core/src/graph/GraphView.cpp
@@ -0,0 +1,691 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <algorithm>
+#include <cassert>
+#include <cstdio>
+#include <iterator>
+#include <utility>
+
+#include "utils/Types.h"
+#include "graph/GraphView.hpp"
+#include "data/Tensor.hpp"
+
+///////////////////////////////////////////////////////
+//        FUNCTIONAL DESCRIPTION
+///////////////////////////////////////////////////////
+
+Aidge::Connector Aidge::GraphView::operator()(
+    const std::vector<Aidge::Connector> ctors) {
+  // TODO: allow for multiple inputNodes?
+  assert((inputNodes().size() == 1U) && "Too many input Nodes for the GraphView, undefined behaviour");
+  std::shared_ptr<Node> inNode = *inputNodes().begin();
+  assert((ctors.size() == static_cast<std::size_t>(inNode->nbDataInputs())) && "Wrong number of arguments.\n");
+  for (std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inNode->inputs()) {
+    assert((gk_IODefaultIndex == input.second) && "At least one input connection is not free.\n");
+  }
+
+  for (const Connector &ctor : ctors) {
+    assert((ctor.node() != nullptr) &&
+           "Input Connector must be associated with a node");
+  }
+  IOIndex_t inID = 0;
+  for (const Connector &ctor : ctors) {
+    ctor.node()->addChild(shared_from_this(), static_cast<std::size_t>(ctor.index()),
+                          {inNode, inID++});
+  }
+  return Connector(*(outputNodes().begin()));
+}
+
+///////////////////////////////////////////////////////
+//        INNER
+///////////////////////////////////////////////////////
+
+std::string Aidge::GraphView::name() const { return mName; }
+
+void Aidge::GraphView::setName(const std::string &name) { mName = name; }
+
+
+void Aidge::GraphView::save(std::string path, bool verbose) const {
+    FILE *fp = std::fopen((path + ".mmd").c_str(), "w");
+    assert(fp != nullptr && "Could not open file for saving the GraphView");
+    std::fprintf(fp,
+                "%%%%{init: {'flowchart': { 'curve': 'monotoneY'}, "
+                "'fontFamily': 'Verdana' } }%%%%\nflowchart TB\n\n");
+
+    std::map<const std::string, std::size_t> typeCounter;
+    std::map<std::shared_ptr<Node>, std::string> namePtrTable;
+
+    // Start by creating every node
+    for (const std::shared_ptr<Node> &node_ptr : mNodes) {
+        const std::string currentType = node_ptr->type();
+        if (typeCounter.find(currentType) == typeCounter.end())
+            typeCounter[currentType] = 0;
+        ++typeCounter[currentType];
+
+        const std::string givenName =
+            (node_ptr->name().empty())
+                ? currentType + std::to_string(typeCounter[currentType])
+                : node_ptr->name();
+        namePtrTable[node_ptr] =
+            (currentType + "_" + std::to_string(typeCounter[currentType]));
+        std::fprintf(fp, "%s(%s)\n", namePtrTable[node_ptr].c_str(),
+                    givenName.c_str());
+    }
+    // Write every link
+    std::size_t emptyInputCounter = 0;
+    for (const std::shared_ptr<Node> &node_ptr : mNodes) {
+        for (const std::shared_ptr<Node> &pa_ptr : node_ptr->getParents()) {
+            if ((pa_ptr == nullptr) || !inView(pa_ptr)) {
+                std::fprintf(fp, "input%zu((in - %zu))-->%s\n", emptyInputCounter,
+                             emptyInputCounter, namePtrTable[node_ptr].c_str());
+                ++emptyInputCounter;
+            } else {
+                std::fprintf(fp, "%s-->%s\n", namePtrTable[pa_ptr].c_str(),
+                             namePtrTable[node_ptr].c_str());
+            }
+        }
+    }
+    if (verbose) {
+        for (const auto &c : typeCounter) {
+            std::printf("%s - %zu\n", c.first.c_str(), c.second);
+        }
+    }
+
+    std::fprintf(fp, "\n");
+    std::fclose(fp);
+}
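+
+// Note: for a GraphView g, g->save("myGraph") writes "myGraph.mmd", a Mermaid
+// flowchart ("flowchart TB") that any Mermaid renderer can display.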
+
+///////////////////////////////////////////////////////
+//        TENSOR MANAGEMENT
+///////////////////////////////////////////////////////
+
+Aidge::IONb_t Aidge::GraphView::getNbDataInputs() const {
+  IONb_t nbDataInput = static_cast<IONb_t>(0);
+  assert(outputNodes().size() == static_cast<std::size_t>(1));
+  for (const std::shared_ptr<Node> &inNode : inputNodes()) {
+    nbDataInput += inNode->nbDataInputs();
+  }
+  return nbDataInput;
+}
+
+Aidge::IONb_t Aidge::GraphView::getNbFreeDataInputs() const {
+  IONb_t nbIn = 0;
+  for (const std::shared_ptr<Node> inputNode : mInputNodes) {
+    nbIn += inputNode->getNbFreeDataInputs();
+  }
+  return nbIn;
+}
+
+
+std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>
+Aidge::GraphView::dataInputs() const {
+  IONb_t nbDataIn = 0U;
+  for (const std::shared_ptr<Node> inputNode : mInputNodes) {
+    nbDataIn += inputNode->nbDataInputs();
+  }
+  std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res =
+      std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbDataIn);
+  nbDataIn = 0U;
+  for (const std::shared_ptr<Node> inputNode : mInputNodes) {
+    std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> inputNodeinputs =
+        inputNode->dataInputs();
+    std::move(inputNodeinputs.begin(), inputNodeinputs.end(),
+              res.begin() + nbDataIn);
+    nbDataIn += inputNode->nbDataInputs();
+    // res.insert(res.end(), (inputNode -> inputs()).begin(), (inputNode ->
+    // inputs()).end());
+  }
+  return res;
+}
+
+
+std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>
+Aidge::GraphView::inputs() const {
+  std::size_t nbIn = 0U;
+  for (const std::shared_ptr<Node> inputNode : mInputNodes) {
+    nbIn += inputNode->nbInputs();
+  }
+  std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res =
+      std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbIn);
+  nbIn = 0U;
+  for (const std::shared_ptr<Node> inputNode : mInputNodes) {
+    std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> inputNodeinputs =
+        inputNode->inputs();
+    std::move(inputNodeinputs.begin(), inputNodeinputs.end(),
+              res.begin() + nbIn);
+    nbIn += inputNode->nbInputs();
+    // res.insert(res.end(), (inputNode -> inputs()).begin(), (inputNode ->
+    // inputs()).end());
+  }
+  return res;
+}
+
+
+std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>
+Aidge::GraphView::inputs(std::string name) const {
+  return mNodeRegistry.at(name)->inputs();
+}
+
+void Aidge::GraphView::forwardDims() {
+    // setInputs
+    // Link every input Tensor to the right output pointer,
+    // following parent - child information
+    for (std::shared_ptr<Node> nodePtr : getNodes()) {
+        for (IOIndex_t i = 0; static_cast<IONb_t>(i) < nodePtr->nbInputs(); ++i) {
+            // if the input is a Tensor and was not already set, link it to its parent's output
+            if (strcmp(nodePtr->getOperator()->getInput(i)->type(), Tensor::Type) == 0 && std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getInput(i))->empty()) {
+                std::pair<std::shared_ptr<Node>, IOIndex_t> inputI = nodePtr->input(i);
+                // assert provided Data is of "Tensor" type
+                if (strcmp(inputI.first->getOperator()->getOutput(inputI.second)->type(), Tensor::Type)==0) {
+                    nodePtr->getOperator()->setInput(i, inputI.first->getOperator()->getOutput(inputI.second));
+                }
+                else {
+                    assert(false && "Non-tensor entries not handled yet.\n");
+                }
+            }
+        }
+    }
+    // Compute dimensions of every node
+    _forwardDims(inputNodes());
+}
+
+void Aidge::GraphView::_forwardDims(std::set<std::shared_ptr<Node>> listNodes) {
+  // TODO: support multi-inputs/outputs
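+  // Fixpoint propagation: compute dims where possible; nodes whose dims could
+  // not be forwarded yet are retried on a later pass until all are resolved.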
+  std::set<std::shared_ptr<Node>> nextList = std::set<std::shared_ptr<Node>>();
+  for (std::shared_ptr<Node> nodePtr : listNodes) {
+    if (!nodePtr->getOperator()->outputDimsForwarded()) {
+      nodePtr->getOperator()->computeOutputDims();
+    }
+    if (!nodePtr->getOperator()->outputDimsForwarded()) {
+      nextList.insert(nodePtr);
+    } else {
+      std::set<std::shared_ptr<Node>> children = nodePtr->getChildren();
+      nextList.insert(children.begin(), children.end());
+    }
+  }
+  if (nextList.empty()) {
+    for (std::shared_ptr<Node> nodePtr : getNodes()) {
+      if (!nodePtr->getOperator()->outputDimsForwarded()) {
+        nextList.insert(nodePtr);
+      }
+    }
+  }
+  if (!nextList.empty()) {
+    _forwardDims(nextList);
+  }
+}
+
+void Aidge::GraphView::setBackend(const std::string &backend) {
+  for (auto node : getNodes()) {
+    node->getOperator()->setBackend(backend);
+  }
+}
+
+void Aidge::GraphView::setDatatype(const DataType &datatype) {
+  for (auto node : getNodes()) {
+    node->getOperator()->setDatatype(datatype);
+  }
+}
+
+void Aidge::GraphView::updateOutputNodes() {
+  mOutputNodes.clear();
+  for (const std::shared_ptr<Node> go_it : mNodes) {
+    if (go_it->nbOutputs() !=
+        go_it->nbValidOutputs()) { // an output linked to nothing
+      mOutputNodes.insert(go_it);
+      continue;
+    }
+    for (const std::shared_ptr<Node> ch_ptr : go_it->getChildren()) {
+      if (mNodes.find(ch_ptr) == mNodes.end()) { // Child not in the graph
+        mOutputNodes.insert(go_it);
+        break;
+      }
+    }
+  }
+}
+
+void Aidge::GraphView::updateOutputNodes(std::shared_ptr<Node> node) {
+  if (node->nbOutputs() !=
+      node->nbValidOutputs()) { // an output linked to nothing
+    mOutputNodes.insert(node);
+  } else { // don't enter if was already added to outputNodes
+    for (const std::shared_ptr<Node> &ch_ptr : node->getChildren()) {
+      if (mNodes.find(ch_ptr) == mNodes.end()) { // Child not in the graph
+        mOutputNodes.insert(node);
+        break;
+      }
+    }
+  }
+  // update other outputNodes
+  for (const std::shared_ptr<Node> &pa_ptr :
+       node->getParents()) { // check if any parent is in OutputNodes too
+    if ((pa_ptr != nullptr) &&
+        (mOutputNodes.find(pa_ptr) !=
+         mOutputNodes.end())) { // it's a match! Must check if the outputNode
+                                // found is still an outputNode
+      bool remove = (pa_ptr->nbOutputs() == pa_ptr->nbValidOutputs());
+      for (const std::shared_ptr<Node> ch_ptr : pa_ptr->getChildren()) {
+        if (mNodes.find(ch_ptr) == mNodes.end()) { // Child not in the graph
+          remove = false;
+          break;
+        }
+      }
+      if (remove) {
+        mOutputNodes.erase(pa_ptr);
+      }
+    }
+  }
+}
+
+std::vector<
+    std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>>
+Aidge::GraphView::outputs() const {
+  std::vector<std::vector<std::pair<std::shared_ptr<Node>, Aidge::IOIndex_t>>>
+      outputTensors;
+  for (const std::shared_ptr<Node> outputNode : mOutputNodes) {
+    std::vector<std::vector<std::pair<std::shared_ptr<Node>, Aidge::IOIndex_t>>>
+        tmpOutputs = (outputNode->outputs());
+    outputTensors.insert(outputTensors.end(), tmpOutputs.begin(),
+                         tmpOutputs.end());
+  }
+  return outputTensors;
+}
+
+std::vector<
+    std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>>
+Aidge::GraphView::outputs(std::string nodeName) const {
+  return mNodeRegistry.at(nodeName)->outputs();
+}
+
+void Aidge::GraphView::setInput(Aidge::IOIndex_t /*inID*/,
+                               Aidge::IOIndex_t /*newNodeOutID*/) {
+  printf("Not implemented yet.\n");
+}
+
+void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnableParam) {
+  // add to the GraphView nodes
+  node->addView(shared_from_this());
+  mNodes.insert(node);
+  if (!(node->name()).empty())
+    mNodeRegistry.insert(std::make_pair(node->name(), node));
+  // add learnable parameters to the graph
+  if (includeLearnableParam) {
+    for (IONb_t i = node->nbDataInputs(); i < node->nbInputs(); ++i) {
+      std::shared_ptr<Node> parentNode = node->getParents(static_cast<IOIndex_t>(i));
+      if (parentNode) {
+          parentNode->addView(shared_from_this());
+          mNodes.insert(parentNode);
+          if (!(parentNode->name()).empty())
+            mNodeRegistry.insert(std::make_pair(parentNode->name(), parentNode));
+          // check if the Node is an input node
+          updateInputNodes(parentNode);
+      }
+    }
+  }
+  // check if the Node is an input node
+  updateInputNodes(node);
+  // check if the Node is an output node
+  updateOutputNodes(node);
+}
+
+void Aidge::GraphView::add(std::set<std::shared_ptr<Node>> otherNodes, bool includeLearnableParam) {
+    for (auto& nodePtr : otherNodes) { add(nodePtr, includeLearnableParam); }
+}
+
+void Aidge::GraphView::add(std::shared_ptr<GraphView> graph) {
+  for (const std::shared_ptr<Node> &node_ptr : graph->getNodes()) {
+    node_ptr->addView(shared_from_this());
+    mNodes.insert(node_ptr);
+    if (!(node_ptr->name()).empty())
+      mNodeRegistry.insert(std::make_pair(node_ptr->name(), node_ptr));
+  }
+  // update inputNodes/outputNodes once, after every Node has been added
+  updateInputNodes();
+  updateOutputNodes();
+}
+
+void Aidge::GraphView::addChild(std::shared_ptr<Node> toOtherNode,
+                               std::shared_ptr<Node> fromOutNode,
+                               const Aidge::IOIndex_t fromTensor,
+                               Aidge::IOIndex_t toTensor) {
+  if (fromOutNode)
+    assert(inView(fromOutNode) && "Output Node not found in the GraphView.");
+  else {
+    assert((outputNodes().size() == 1U) &&
+           "Must specify an outputNode or have only one.");
+    fromOutNode = *(outputNodes().begin());
+  }
+  fromOutNode->addChild(toOtherNode, fromTensor, toTensor);
+  add(toOtherNode);
+}
+
+void Aidge::GraphView::addChild(
+    std::shared_ptr<GraphView> toOtherView,
+    std::pair<std::shared_ptr<Node>, Aidge::IOIndex_t> fromOutNode,
+    std::pair<std::shared_ptr<Node>, Aidge::IOIndex_t> toNode) {
+  // assert output node is valid
+  if (!fromOutNode.first) {
+    assert(outputNodes().size() == 1U &&
+           "If no output node is provided, the graph should have only one to "
+           "make the choice explicit.");
+    fromOutNode.first = *(outputNodes().begin());
+  } else
+    assert(inView(fromOutNode.first));
+  // assert input node is valid
+  if (!toNode.first) {
+    assert(toOtherView->inputNodes().size() == 1U &&
+           "If no intput node is provided, the other graph should have only "
+           "one to make the choice explicit.");
+    toNode.first = *(toOtherView->inputNodes().begin());
+  } else {
+    assert(toOtherView->inView(toNode.first));
+  }
+  // Tensor assertions are performed in the Node addChild method
+  fromOutNode.first->addChild(toNode.first, fromOutNode.second, toNode.second);
+  // once linking performed, add other graph to current graph
+  add(toOtherView);
+}
+
+std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getParents() const {
+  // TODO: choose if we return a set or a vector
+  std::set<std::shared_ptr<Node>> parents;
+  for (const std::shared_ptr<Node> inputNode : mInputNodes) {
+    parents.insert(inputNode->getParents().begin(),
+                   inputNode->getParents().end());
+  }
+  return parents;
+}
+
+std::vector<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getParents(const std::string nodeName) const {
+  std::map<std::string, std::shared_ptr<Node>>::const_iterator it = mNodeRegistry.find(nodeName);
+  if (it == mNodeRegistry.end()) {
+    printf("No such node a %s in %s graph.\n", nodeName.c_str(), name().c_str());
+    exit(-1);
+  }
+  return (it->second)->getParents();
+}
+
+std::vector<std::vector<std::shared_ptr<Aidge::Node>>>
+Aidge::GraphView::getOrderedParents() const {
+  std::vector<std::vector<std::shared_ptr<Node>>> parents;
+  for (const std::shared_ptr<Node> inputNode : mInputNodes) {
+    parents.push_back(inputNode->getParents());
+  }
+  return parents;
+}
+
+std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getChildren() const {
+  std::set<std::shared_ptr<Node>> children;
+  for (const std::shared_ptr<Node> outputNode : mOutputNodes) {
+    children.insert((outputNode->getChildren()).begin(),
+                    (outputNode->getChildren()).end());
+  }
+  return children;
+}
+
+std::vector<std::vector<std::shared_ptr<Aidge::Node>>>
+Aidge::GraphView::getChildren(const std::string nodeName) const {
+  std::map<std::string, std::shared_ptr<Node>>::const_iterator it =
+      mNodeRegistry.find(nodeName);
+  if (it == mNodeRegistry.end()) {
+    printf("No such node a %s in %s graph.\n", nodeName.c_str(),
+           name().c_str());
+    exit(-1);
+  }
+  return (it->second)->getOrderedChildren();
+}
+
+std::set<std::shared_ptr<Aidge::Node>>
+Aidge::GraphView::getChildren(const std::shared_ptr<Node> otherNode) const {
+  std::set<std::shared_ptr<Node>>::const_iterator it = mNodes.find(otherNode);
+  if (it == mNodes.end()) {
+    printf("No such node in graph.\n");
+    exit(-1);
+  }
+  return (*it)->getChildren();
+}
+
+
+std::shared_ptr<Aidge::Node>
+Aidge::GraphView::getNode(const char *nodeName) const {
+  std::map<std::string, std::shared_ptr<Node>>::const_iterator it =
+      mNodeRegistry.find(std::string(nodeName));
+  if (it != mNodeRegistry.end()) {
+    return it->second;
+  } else {
+    printf("No Node named %s in the current GraphView.\n", nodeName);
+    exit(-1);
+  }
+}
+
+
+void Aidge::GraphView::remove(std::shared_ptr<Node> nodePtr, bool includeLearnableParam) {
+  if (mNodes.find(nodePtr) != mNodes.end()) {
+    mNodes.erase(nodePtr);
+    nodePtr->removeView(shared_from_this());
+  }
+  if (!nodePtr->name().empty()) { mNodeRegistry.erase(nodePtr->name()); }
+  // same for learnable params
+
+  if (includeLearnableParam) {
+    for (IONb_t i = nodePtr->nbDataInputs(); i < nodePtr->nbInputs(); ++i) {
+      auto inputI = nodePtr->input(i);
+      bool removeNode = true;
+      for (const auto& parentOutput : inputI.first->outputs()) {
+        for (const auto& childOfParentOutput : parentOutput) {
+          if (childOfParentOutput.first != nodePtr) {
+            removeNode = false;
+            break;
+          }
+        }
+      }
+      if (removeNode) {
+        if (mNodes.find(inputI.first) != mNodes.end()) {
+          mNodes.erase(inputI.first);
+          inputI.first->removeView(shared_from_this());
+        }
+        if (!inputI.first->name().empty()) { mNodeRegistry.erase(inputI.first->name()); }
+      }
+    }
+  }
+  updateInputNodes();
+  updateOutputNodes();
+}
+
+
+bool Aidge::GraphView::swap(Node & /*node*/, Node & /*otherNode*/) {
+  printf("Swap() not implementated yet. Return false.\n");
+  return false;
+}
+
+void Aidge::GraphView::link(std::string /*name1_inID*/,
+                           std::string /*name2_outID*/) {
+  printf("Not implemented yet.\n");
+}
+
+void Aidge::GraphView::insert(Node & /*newNode*/, Node & /*inNode*/,
+                             std::initializer_list<Node> /*outNodes*/,
+                             IOIndex_t /*tensorIdx*/) {
+  printf("Not implemented yet.\n");
+}
+
+bool Aidge::GraphView::replaceWith(std::set<std::shared_ptr<Node>> newNodes) {
+  // TODO : only supports one input/output node for now
+  assert(mNodes.size()>0 && "There must be at least one Node to replace");
+
+  auto gNew = std::make_shared<GraphView>();
+  gNew->add(newNodes, false);
+
+  // finding the input connection. There should be a better way
+  // std::map<std::shared_ptr<Node>, std::size_t> inputNodesUnknownParents;
+  // for (const auto& inputNode : inputNodes()) {
+  //   for (IONb_t i = 0; i<inputNode->nbInputs(); ++i) {
+  //     auto parent = inputNode->getParents(static_cast<IOIndex_t>(i));
+  //     if(mNodes.find(parent) == mNodes.end()) { // input not replaced
+  //       if (inputNodesUnknownParents.find(parent) == inputNodesUnknownParents.end()) {
+  //         inputNodesUnknownParents[parent] = 0;
+  //       }
+  //       ++inputNodesUnknownParents[parent];
+  //     }
+  //   }
+  // }
+  // for (const auto& inputNode : gNew->inputNodes()) {
+  //   for (IONb_t i = 0; i<inputNode->nbInputs(); ++i) {
+  //     auto parent = inputNode->getParents(static_cast<IOIndex_t>(i));
+  //     if (--inputNodesUnknownParents[parent] == 0) {
+  //       inputNodesUnknownParents.erase(parent);
+  //     }
+  //   }
+  // }
+  // assert(inputNodesUnknownParents.size() == 1 && "Only one parent node should remain");
+  // assert((*inputNodesUnknownParents.begin()).second == 1 && "Only one connection should remain");
+
+
+
+
+  // auto gPrevNodeWithProd = std::make_shared<GraphView>();
+  // for (auto& nodePtr : mNodes) { gPrevNodeWithProd->add(nodePtr); }
+
+
+  bool replacable = ((outputNodes().size() == gNew->outputNodes().size()) &&
+                     (outputNodes().size() == 1));
+  // auto previousInputNode = (*inputNodes().begin());
+  auto previousOutputNode = (*outputNodes().begin());
+  // auto newInputNode = (*gNew->inputNodes().begin());
+  auto newOutputNode = (*gNew->outputNodes().begin());
+
+  replacable = replacable && (
+    // previousInputNode->nbDataInputs() == newInputNode->nbDataInputs() &&
+    // previousInputNode->nbInputs() == newInputNode->nbInputs() &&
+    previousOutputNode->nbOutputs() == newOutputNode->nbOutputs()
+  );
+
+  if (replacable) {
+    auto copyOutputs = previousOutputNode->outputs();
+    // auto copyInputs = previousInputNode->inputs();
+
+    // manage Views for newNodes
+    // only keep common views to each node for the new set
+    std::set<std::shared_ptr<GraphView>> commonGraphViews =  (*mNodes.begin())->views();
+    for (const auto& nodePtr : mNodes) {
+      const auto nodeView = nodePtr->views();
+      std::set<std::shared_ptr<GraphView>> intersection;
+      std::set_intersection(commonGraphViews.begin(), commonGraphViews.end(),
+                          nodeView.begin(), nodeView.end(),
+                          std::inserter(intersection, intersection.begin()));
+      commonGraphViews = intersection;
+    }
+
+    // clean Nodes to replace
+    std::set<std::shared_ptr<Node>> copyNode = mNodes;
+    for (auto& nodePtr : copyNode) { nodePtr->resetConnections(true); }
+
+    // copy output connections
+    for (IONb_t o = 0; o < previousOutputNode->nbOutputs(); ++o) {
+      auto outputPairs = copyOutputs[o];
+      for (const auto& onePair : outputPairs) {
+        newOutputNode->addChild(onePair.first, o, onePair.second);
+      }
+    }
+    //copy input connections
+    // for (IONb_t i = 0; i < previousInputNode->nbInputs(); ++i) {
+    //   auto inputPair = copyInputs[i];
+    //   if (inputPair.first) {
+    //     inputPair.first->addChild(newInputNode, inputPair.second, static_cast<IOIndex_t>(i));
+    //   }
+    // }
+
+    // insert new Nodes in the right GraphViews
+    for (auto& graphPtr : commonGraphViews) {
+      graphPtr->add(newNodes, false);
+    }
+  }
+  return replacable;
+}
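+
+// Usage sketch (illustrative; a caller such as the fuseMulAdd recipe follows
+// this shape). "matmulNode", "addNode" and "fusedNode" are hypothetical:
+//
+//     auto view = std::make_shared<GraphView>();
+//     view->add({matmulNode, addNode}, false);
+//     bool ok = view->replaceWith({fusedNode}); // rewires outputs to fusedNode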
+
+void Aidge::GraphView::updateInputNodes() {
+  mInputNodes.clear();
+  for (const std::shared_ptr<Node> go_ptr : mNodes) {
+    for (const std::shared_ptr<Node> pa_ptr : go_ptr->getParents()) {
+      if ((pa_ptr == nullptr) ||
+          (mNodes.find(pa_ptr) ==
+           mNodes.end())) { // Parent doesn't exist || Parent not in the graph
+        mInputNodes.insert(go_ptr);
+        break;
+      }
+    }
+  }
+}
+
+void Aidge::GraphView::updateInputNodes(std::shared_ptr<Node> node) {
+  // add node_ptr to inputNode if it can
+  std::size_t filledWithKnownInputs = 0U;
+  bool wasAdded = mInputNodes.find(node) != mInputNodes.end();
+  for (const std::shared_ptr<Node> pa_ptr : node->getParents()) {
+    if ((pa_ptr == nullptr) ||
+        (mNodes.find(pa_ptr) ==
+         mNodes.end())) { // Parent doesn't exist || Parent not in the graph
+      mInputNodes.insert(node);
+      wasAdded = true;
+      break;
+    }
+    ++filledWithKnownInputs;
+  }
+  if (filledWithKnownInputs == node->nbInputs() && wasAdded) {
+    mInputNodes.erase(node);
+  }
+  // update other inputNodes
+  for (const std::shared_ptr<Node> ch_ptr :
+       node->getChildren()) { // check if any child is in InputNodes too
+    if (mInputNodes.find(ch_ptr) !=
+        mInputNodes.end()) { // it's a match! Must check if the inputNode found
+                             // is still an inputNode
+      bool remove = true;
+      for (const std::shared_ptr<Node> pa_ptr : ch_ptr->getParents()) {
+        if (pa_ptr == nullptr ||
+            mNodes.find(pa_ptr) ==
+                mNodes
+                    .end()) { // Parent doesn't exist || Parent not in the graph
+          remove = false;
+          break;
+        }
+      }
+      if (remove) {
+        mInputNodes.erase(ch_ptr);
+      }
+    }
+  }
+}
+
+
+void Aidge::GraphView::removeInputNode(const std::string nodeName) {
+  std::map<std::string, std::shared_ptr<Node>>::iterator it =
+      mNodeRegistry.find(nodeName);
+  if (it != mNodeRegistry.end()) {
+    const std::shared_ptr<Node> val = (*it).second;
+    if (mInputNodes.find(val) != mInputNodes.end()) {
+      mInputNodes.erase(val);
+    }
+  }
+}
+
+void Aidge::GraphView::removeOutputNode(const std::string nodeName) {
+  std::map<std::string, std::shared_ptr<Node>>::iterator it =
+      mNodeRegistry.find(nodeName);
+  if (it != mNodeRegistry.end()) {
+    const std::shared_ptr<Node> val = (*it).second;
+    if (mOutputNodes.find(val) != mOutputNodes.end()) {
+      mOutputNodes.erase(val);
+    }
+  }
+}
\ No newline at end of file
diff --git a/aidge/_Core/src/graph/Node.cpp b/aidge/_Core/src/graph/Node.cpp
new file mode 100644
index 00000000..6ec77217
--- /dev/null
+++ b/aidge/_Core/src/graph/Node.cpp
@@ -0,0 +1,312 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "graph/Node.hpp"
+
+#include "graph/GraphView.hpp"
+#include <memory>
+#include <vector>
+#include "utils/Types.h"
+
+Aidge::Node::Node(std::shared_ptr<Operator> op, const char *name)
+    : mName((name == nullptr) ? std::string() : std::string(name)),
+      mOperator(op),
+      mParents(std::vector<std::shared_ptr<Node>>(static_cast<std::size_t>(op->nbInputs()), nullptr)),
+      mChildren(std::vector<std::vector<std::shared_ptr<Node>>>(static_cast<std::size_t>(op->nbOutputs()),
+                                                                std::vector<std::shared_ptr<Node>>())),
+      mIdInChildren(
+              std::vector<std::vector<IOIndex_t>>(static_cast<std::size_t>(op->nbOutputs()), std::vector<IOIndex_t>())),
+      mIdOutParents(std::vector<IOIndex_t>(static_cast<std::size_t>(op->nbInputs()), gk_IODefaultIndex)) {
+    // ctor
+}
+
+///////////////////////////////////////////////////////
+//        FUNCTIONAL DESCRIPTION
+///////////////////////////////////////////////////////
+
+Aidge::Connector Aidge::Node::operator()(const std::vector<Connector> ctors) {
+    assert((ctors.size() == nbDataInputs()) && "Wrong number of arguments.\n");
+    for (std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inputs()) {
+        assert((gk_IODefaultIndex == input.second) && "At least one input connection is not free.\n");
+    }
+    IOIndex_t i = 0;
+    for (const Connector &ctor : ctors) {
+        if (ctor.node() != nullptr) {  // ctor must be associated with a node
+            ctor.node()->addChild(shared_from_this(), ctor.index(), i++);
+        }
+    }
+    return Connector(shared_from_this());
+}
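+
+// Usage sketch (names hypothetical): a Node with two data inputs can be called
+// like a function on Connectors; each argument claims one free data input.
+//
+//     Connector a(producerA), b(producerB);
+//     Connector out = (*addNode)({a, b}); // asserts unless addNode has 2 free data inputs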
+
+///////////////////////////////////////////////////////
+//        INNER
+///////////////////////////////////////////////////////
+
+void Aidge::Node::setName(const std::string &name) { mName = name; }
+
+///////////////////////////////////////////////////////
+//        OPERATORS
+///////////////////////////////////////////////////////
+
+void Aidge::Node::forward() {
+    assert((mOperator != nullptr) && "No Operator interface provided, can't run forward().\n");
+    mOperator->forward();
+}
+
+void Aidge::Node::backward() {
+    assert((mOperator != nullptr) && "No Operator interface provided, can't run backward().\n");
+    mOperator->backward();
+}
+
+///////////////////////////////////////////////////////
+//        TENSOR MANAGEMENT
+///////////////////////////////////////////////////////
+
+bool Aidge::Node::valid() const {
+    for (IOIndex_t i = 0; static_cast<IONb_t>(i) < nbInputs(); ++i) {
+        if (mIdOutParents[static_cast<std::size_t>(i)] == gk_IODefaultIndex) {
+            return false;
+        }
+    }
+    return true;
+}
+
+Aidge::IONb_t Aidge::Node::getNbFreeDataInputs() const {
+    IONb_t nbFreeDataIn = 0;
+    for (IOIndex_t i = 0; static_cast<IONb_t>(i) < nbInputs(); ++i) {
+        if (input(i).second < 0) {
+            ++nbFreeDataIn;
+        }
+    }
+    return nbFreeDataIn;
+}
+
+std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>
+Aidge::Node::dataInputs() const {
+    std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res =
+            std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbDataInputs());
+    for (std::size_t i = 0; i < static_cast<std::size_t>(nbDataInputs()); ++i) {
+        res[i] = std::pair<std::shared_ptr<Node>, IOIndex_t>(mParents[i], mIdOutParents[i]);
+    }
+    return res;
+}
+
+std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> Aidge::Node::inputs() const {
+    std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res =
+        std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbInputs());
+    for (std::size_t i = 0; i < nbInputs(); ++i) {
+      res[i] =
+          std::pair<std::shared_ptr<Node>, IOIndex_t>(mParents[i], mIdOutParents[i]);
+    }
+    return res;
+}
+
+std::vector<std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>>
+Aidge::Node::outputs() const {
+    std::vector<std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>> listOutputs =
+            std::vector<std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>>(mIdInChildren.size());
+    for (std::size_t i = 0; i < mIdInChildren.size(); ++i) {
+        listOutputs[i] = output(static_cast<IOIndex_t>(i));
+    }
+    return listOutputs;
+}
+
+std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>
+Aidge::Node::output(Aidge::IOIndex_t outID) const {
+    std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> listOutputs =
+            std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(mIdInChildren[outID].size());
+    for (std::size_t i = 0; i < mIdInChildren[outID].size(); ++i) {
+        listOutputs[i] =
+                std::pair<std::shared_ptr<Node>, IOIndex_t>(mChildren[outID][i], mIdInChildren[outID][i]);
+    }
+    return listOutputs;
+}
+
+Aidge::IONb_t Aidge::Node::nbValidInputs() const {
+    IONb_t counter = 0;
+    for (IONb_t i = 0; i < nbInputs(); ++i) {
+        // an input is valid when it is connected to a parent output (index >= 0)
+        if (mIdOutParents[static_cast<std::size_t>(i)] >= 0) ++counter;
+    }
+    return counter;
+}
+
+Aidge::IONb_t Aidge::Node::nbValidOutputs() const {
+    IONb_t counter = 0;
+    if (mIdInChildren.size() == 0) return 0;
+    for (std::size_t i = 0; i < nbOutputs(); ++i) {
+        if (mIdInChildren[i].size() > 0U) counter++;
+    }
+    return counter;
+}
+
+void Aidge::Node::setInput(IOIndex_t inID, IOIndex_t newNodeOutID) {
+    assert(inID != gk_IODefaultIndex && (static_cast<IONb_t>(inID) < nbInputs()) && "Must be a valid index");
+    if (mIdOutParents[inID] != gk_IODefaultIndex) {
+        std::printf("Warning: filling a Tensor already attributed\n");
+        auto originalParent = input(inID);
+        // remove original parent reference to child
+        // find the output ID for original Parent
+        // find first occurence of child in the output's children
+        std::size_t j = 0;
+        for (; (originalParent.first->getChildren(originalParent.second))[j] != shared_from_this(); ++j) {
+        }
+        originalParent.first->removeChild(originalParent.second, j);
+    }
+    mIdOutParents[inID] = newNodeOutID;
+}
+
+///////////////////////////////////////////////////////
+// TOPOLOGY
+///////////////////////////////////////////////////////
+
+void Aidge::Node::addChildOp(std::shared_ptr<Node> otherNode, const IOIndex_t outId, IOIndex_t otherInId) {
+    assert((otherInId != gk_IODefaultIndex) && (static_cast<IONb_t>(otherInId) < otherNode->nbInputs()) &&
+           "Input index out of bound.");
+    assert((outId != gk_IODefaultIndex) && (static_cast<IONb_t>(outId) < nbOutputs()) && "Output index out of bound.");
+    if (otherNode->input(otherInId).second >= 0) {
+        std::printf("Warning, the %d-th Parent of the child node already existed.\n", otherInId);
+    }
+    // manage tensors and potential previous parent
+    otherNode->setInput(otherInId, outId);
+    // manage nodes
+    mChildren[outId].push_back(otherNode);
+    mIdInChildren[outId].push_back(otherInId);
+    otherNode->addParent(shared_from_this(), otherInId);
+}
+
+void Aidge::Node::addChildView(std::shared_ptr<GraphView> other_graph, const IOIndex_t outID,
+                              std::pair<std::shared_ptr<Node>, IOIndex_t> otherInId) {
+    assert((otherInId.second != gk_IODefaultIndex) &&
+           (static_cast<IONb_t>(otherInId.second) < otherInId.first->nbInputs()) &&
+           "Other graph input index out of bound.");
+    assert((outID != gk_IODefaultIndex) && (static_cast<IONb_t>(outID) < nbOutputs()) && "Output index out of bound.");
+    std::set<std::shared_ptr<Node>> inNodes = other_graph->inputNodes();
+    if (inNodes.size() == std::size_t(0)) {  // no input Node
+        printf("Cannot add GraphView to the Node. No input node detected.\n");
+    } else  // inNodes.size() >= 1
+    {
+        assert((inNodes.find(otherInId.first) != inNodes.end()));  // assert it really is an input node
+        addChildOp(otherInId.first, outID, otherInId.second);
+    }
+}
+
+void Aidge::Node::addChild(std::shared_ptr<Node> otherNode, const IOIndex_t outId, IOIndex_t otherInId) {
+    otherInId = (otherInId >= 0) ? otherInId : otherNode->getFirstFreeDataInput();
+    addChildOp(otherNode, outId, otherInId);
+}
+
+void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t outId,
+                          std::pair<std::shared_ptr<Node>, IOIndex_t> otherInId) {
+    if (!otherInId.first) {
+        assert((otherView->inputNodes().size() == 1U) &&
+               "Specify an input Node for the GraphView. More or less than one "
+               "Node is not explicit.");
+        otherInId.first = *(otherView->inputNodes().begin());
+    }
+    otherInId.second = (otherInId.second >= 0) ? otherInId.second : otherInId.first->getFirstFreeDataInput();
+    addChildView(otherView, outId, otherInId);
+}
+
+void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOIndex_t inID) {
+    if (getParents(inID) != nullptr) {
+        printf("Warning, you're replacing a Parent.\n");
+    }
+    assert((inID != gk_IODefaultIndex) && (static_cast<IONb_t>(inID) < nbInputs()) && "Input index out of bound.");
+    mParents[inID] = other_node;
+}
+
+std::vector<std::shared_ptr<Aidge::Node>> Aidge::Node::getParents() const { return mParents; }
+
+std::shared_ptr<Aidge::Node> Aidge::Node::popParent(const IOIndex_t inID) {
+    assert((inID != gk_IODefaultIndex) && (static_cast<IONb_t>(inID) < nbInputs()) && "Input index out of bound.");
+    std::shared_ptr<Node> val = mParents[inID];
+    removeParent(inID);
+    return val;
+}
+
+void Aidge::Node::removeParent(IOIndex_t inID) {
+    assert((inID != gk_IODefaultIndex) && (static_cast<IONb_t>(inID) < nbInputs()) && "Parent index out of bound.");
+    mParents[inID] = nullptr;
+    mIdOutParents[inID] = gk_IODefaultIndex;
+}
+
+std::set<std::shared_ptr<Aidge::Node>> Aidge::Node::getChildren() const {
+    std::set<std::shared_ptr<Node>> children;
+    for (const std::vector<std::shared_ptr<Node>> &childrenOfOneOutput : mChildren) {
+        children.insert(childrenOfOneOutput.begin(), childrenOfOneOutput.end());
+    }
+    return children;
+}
+
+std::vector<std::vector<std::shared_ptr<Aidge::Node>>> Aidge::Node::getOrderedChildren() const { return mChildren; }
+
+std::vector<std::shared_ptr<Aidge::Node>> Aidge::Node::getChildren(IOIndex_t outID) const {
+    assert((outID != gk_IODefaultIndex) && (static_cast<IONb_t>(outID) < nbOutputs()) && "Output index out of bound.");
+    return mChildren[outID];
+}
+
+void Aidge::Node::removeChild(IOIndex_t outID, std::size_t childId) {
+    assert((outID != gk_IODefaultIndex) && (static_cast<IONb_t>(outID) < nbOutputs()) && "Child index out of bound.");
+    std::size_t validOutID = static_cast<std::size_t>(outID);
+    mChildren[validOutID].erase(mChildren[validOutID].begin() + childId);
+    mIdInChildren[validOutID].erase(mIdInChildren[validOutID].begin() + childId);
+}
+
+void Aidge::Node::resetConnections(bool includeLearnableParam) {
+    // remove every parent's reference to this Node
+    IONb_t nbRemovedInputs = includeLearnableParam ? nbInputs() : nbDataInputs();
+    for (IOIndex_t i = 0; static_cast<IONb_t>(i) < nbRemovedInputs; ++i) {
+        std::pair<std::shared_ptr<Node>, IOIndex_t> parent = input(i);
+        if (parent.first) {
+            IOIndex_t nbChild = 0U;
+            // number of children linked to the parent's output
+            while (static_cast<IONb_t>(nbChild) < (parent.first->getChildren(parent.second)).size()) {
+                if ((parent.first->getChildren(parent.second))[static_cast<std::size_t>(nbChild)] == shared_from_this()) {
+                    parent.first->removeChild(parent.second, nbChild);
+                    nbChild = IOIndex_t(0);  // start from the beginning of the list
+                } else {
+                    ++nbChild;
+                }
+            }
+        }
+        // every reference to this object as child has been removed
+        // removing reference to parents.
+        mParents[i] = nullptr;
+        mIdOutParents[i] = gk_IODefaultIndex;
+    }
+    for (IOIndex_t i = 0; static_cast<IONb_t>(i) < nbOutputs(); ++i) {
+        for (std::pair<std::shared_ptr<Node>, IOIndex_t> child : output(i)) {
+            child.first->removeParent(child.second);
+        }
+        mChildren[i] = std::vector<std::shared_ptr<Node>>();
+        mIdInChildren[i] = std::vector<IOIndex_t>();
+    }
+    // removing this Node from every GraphView it belongs to
+    for (auto& graph : views()) {
+        // if keeping connections with Learnable Parameters, then also remove them from the graph
+        graph->remove(shared_from_this(), !includeLearnableParam);
+    }
+}
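+
+// Note: after resetConnections(), the Node no longer belongs to any GraphView;
+// connections to learnable-parameter Producers are kept unless
+// includeLearnableParam is true, in which case they are dropped as well.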
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+// private
+
+///////////////////////////////////////////////////////
+//        FUNCTIONAL DESCRIPTION
+///////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////
+//        OPERATORS
+///////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////
+//        TENSOR MANAGEMENT
+///////////////////////////////////////////////////////
diff --git a/aidge/_Core/src/graph/OpArgs.cpp b/aidge/_Core/src/graph/OpArgs.cpp
new file mode 100644
index 00000000..93ceff0a
--- /dev/null
+++ b/aidge/_Core/src/graph/OpArgs.cpp
@@ -0,0 +1,73 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "graph/Node.hpp"
+#include "graph/GraphView.hpp"
+#include "graph/OpArgs.hpp"
+
+
+std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::initializer_list<OpArgs> inputs) {
+    std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
+    for (const OpArgs& elt : inputs) {
+        if(elt.node() != nullptr) {
+            // >= to allow incomplete graphViews
+            assert(static_cast<std::size_t>(elt.node()->getNbFreeDataInputs()) >= gv->outputNodes().size());
+            /*
+            *  /!\ mn.view()->outputNodes() is a set: the order of Nodes cannot be guaranteed.
+            *  Prefer a functional description for detailed inputs.
+            */
+            for (const std::shared_ptr<Node>& node_ptr : gv->outputNodes()) {
+                node_ptr->addChild(elt.node()); // already checked that node_ptr->nbOutputs() == 1
+            }
+            gv->add(elt.node());
+        }
+        else {
+            for (std::shared_ptr<Node> node_in : elt.view()->inputNodes()) {
+                // >= to allow incomplete graphViews
+                assert(static_cast<std::size_t>(node_in->getNbFreeDataInputs()) >= gv->outputNodes().size());
+                for (std::shared_ptr<Node> node_out : gv->outputNodes()) {
+                    node_out->addChild(node_in); // asserts one output Tensor per output Node
+                }
+            }
+            gv->add(elt.view());
+        }
+    }
+    return gv;
+}
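+
+// Usage sketch (illustrative, not part of the API): given two Nodes n1 and n2
+// obtained from any Node factory of this library,
+//   std::shared_ptr<GraphView> g = Sequential({n1, n2});
+// builds a GraphView where every output Node accumulated so far (here n1) is
+// connected to the next element (here n2) on its first free data input.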
+
+
+std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::initializer_list<OpArgs> inputs) {
+    std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
+    for(const OpArgs& elt : inputs) {
+        if (elt.node()!=nullptr)
+            gv->add(elt.node());
+        else
+            gv->add(elt.view());
+    }
+    return gv;
+}
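+
+// Note: unlike Sequential, Parallel only groups its arguments into a single
+// GraphView and creates no connection between them; any wiring must already
+// exist or be added afterwards.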
+
+
+std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::initializer_list<OpArgs> inputs) {
+    std::shared_ptr<GraphView> gv = Sequential(inputs);
+    assert(gv->outputNodes().size() == 1U && "Zero or more than one output Node for the GraphView: cannot determine which one to use for the residual connection");
+    std::shared_ptr<Node> lastNode = *gv->outputNodes().begin();
+    assert(gv->inputNodes().size() == 2U && "A residual block requires exactly two input Nodes: cannot determine where to branch the residual connection");
+    std::shared_ptr<Node> firstNode = nullptr;
+    for (const std::shared_ptr<Node> node_ptr : gv->inputNodes()) {
+        if (node_ptr != lastNode) {
+            firstNode = node_ptr;
+        }
+    }
+    assert(lastNode->getNbFreeDataInputs()>=1);
+    gv->addChild(lastNode, firstNode, 0U, gk_IODefaultIndex);
+    return gv;
+}
\ No newline at end of file
diff --git a/aidge/_Core/src/graphmatching/GRegex.cpp b/aidge/_Core/src/graphmatching/GRegex.cpp
new file mode 100644
index 00000000..80bc724c
--- /dev/null
+++ b/aidge/_Core/src/graphmatching/GRegex.cpp
@@ -0,0 +1,301 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "graphmatching/GRegex.hpp"
+#include "graph/GraphView.hpp"
+
+using namespace Aidge; 
+
+GRegex::GRegex(const std::map<std::string,NodeRegex*>& nodesRegex, std::vector<std::string>& seqRegexps) : mStmFab(nodesRegex) {
+
+    //setup all the STMs
+    for (const std::string& sequRegex : seqRegexps) {
+        mStmInit.push_back(mStmFab.makeNewStm(sequRegex));
+    }
+}
+
+bool GRegex::walk_validation_all_stm_are_valid(const std::vector<std::vector<SeqStm*>> all_stm){
+    //test if all stm type are in a valid state
+    std::vector<int> number_of_valid;
+    number_of_valid.resize(all_stm.size());
+
+    for (std::size_t i = 0; i < all_stm.size(); ++i) {
+        number_of_valid[i] = 0;
+        for (auto it = all_stm[i].begin(); it != all_stm[i].end(); ++it) {
+            SeqStm* stm = *it;
+            if (stm->isValid()){ 
+                number_of_valid[i] +=1;
+            }
+        }   
+    }
+
+    for (std::size_t i = 0; i < number_of_valid.size(); ++i) {
+        if (number_of_valid[i] == 0) {
+            //std::cout << "NO MATCH at least one stm are not valid" << std::endl;
+            return false;
+        }
+        if (number_of_valid[i] > 1) {
+            //std::cout << "NO MATCH multiple brach match of stm (// quantification)" << std::endl;
+            return false;
+        }
+    }
+    return true;
+}
+
+bool GRegex::walk_validation_all_node_read_validate_by_one_stm(const std::vector<std::vector<SeqStm*>> all_stm){
+    std::set<NodeTmp> all_stm_node_tested;
+    std::set<NodeTmp> all_stm_node_validated;
+
+    for (std::size_t i = 0; i < all_stm.size(); ++i) {
+        //std::cout << "all stm index " << i <<  " on dimension 1 of size " << all_stm.size() <<std::endl;
+        for (std::size_t j = 0; j < all_stm[i].size(); ++j) {
+            //std::cout << "all stm index " << j <<  " on dimension 2 of size " << all_stm[i].size() <<std::endl;
+
+            std::set<NodeTmp> stm_node_tested = all_stm[i][j]->getAllNodeTested();
+            std::set<NodeTmp> stm_node_validated = all_stm[i][j]->getAllNodeValidated();
+
+            all_stm_node_tested.insert(stm_node_tested.begin(), stm_node_tested.end()); 
+            all_stm_node_validated.insert(stm_node_validated.begin(), stm_node_validated.end()); 
+        }   
+    }
+    
+
+    std::set<NodeTmp> test_but_not_valid;
+    for (const auto& x : all_stm_node_tested) {
+        if (all_stm_node_validated.find(x) == all_stm_node_validated.end()) {
+            test_but_not_valid.insert(x);
+        }
+    }
+
+
+    if (!test_but_not_valid.empty()) {
+        std::cout << "NO MATCH. The node(s) ";
+        for (const auto& x : test_but_not_valid) {
+            std::cout << x.get() << ", ";
+        }
+        std::cout << " have been tested but not validated." << std::endl;
+        return false;
+    }
+    return true;
+
+}
+
+bool GRegex::walk_validation_common_nodes_same_tag_for_all_stm(const std::vector<std::vector<SeqStm*>> all_stm){
+    std::map<NodeTmp, std::pair<std::string,int>> node_to_common_tag;
+    for (std::size_t i = 0; i < all_stm.size(); ++i) {
+        for (auto it = all_stm[i].begin(); it != all_stm[i].end(); ++it) {
+            SeqStm* stm = *it;
+            
+            if (!stm->isValid()){ 
+                continue;
+            }
+            
+            for (const auto& pair : stm->getAllCommonNode()) {
+                const NodeTmp node = pair.first;
+                const std::string common_tag = pair.second;
+
+                if (node_to_common_tag.find(node) != node_to_common_tag.end()) {
+                    std::string tag = node_to_common_tag[node].first;
+                    int& occurence = node_to_common_tag[node].second;
+                    if (tag!=common_tag){
+                        std::cout << "NO MATCH. The node " << node << " have two different tags "<< tag << " and " << common_tag  << std::endl;
+                        return false;
+                    } else {
+                        occurence += 1;
+                    }
+                } else {
+                    node_to_common_tag.insert(std::make_pair(node, std::make_pair(common_tag, 1)));
+                }
+            }
+        }   
+    }
+    /*std::cout << "Node to common tag ";
+    for (const auto& x : node_to_common_tag) {
+        std::cout << "(" << x.first << ", " << "[" << x.second.first << ", " << x.second.second << "]" << ") ; ";
+    }
+    std::cout << std::endl;*/
+
+
+    for (const auto& pair : node_to_common_tag) {
+        const std::pair<std::string, int> tag_occurence_pair = pair.second;
+        if (tag_occurence_pair.second < 1){
+            //std::cout << "NO MATCH. The common tag " << tag_occurence_pair.first  << " did not match " << std::endl;
+            return false;
+        }
+    }
+    
+    return true;
+}
+
+std::set<NodeTmp> GRegex::get_all_validate_nodes(const std::vector<std::vector<SeqStm*>> all_stm){
+    std::set<NodeTmp> all_stm_node_validated;
+
+    for (std::size_t i = 0; i < all_stm.size(); ++i) {
+        for (std::size_t j = 0; j < all_stm[i].size(); ++j) {
+            std::set<NodeTmp> stm_node_validated = all_stm[i][j]->getAllNodeValidated();
+            all_stm_node_validated.insert(stm_node_validated.begin(), stm_node_validated.end()); 
+        }   
+    }
+    return all_stm_node_validated;
+}
+
+
+std::set<NodeTmp> GRegex::matchFromStartNodes(const std::vector<NodeTmp> startNodes,const std::shared_ptr<GraphView> graphToMatch){
+    std::set<NodeTmp> empty_set_return;
+    //ASSERT
+    if(startNodes.size() != mStmInit.size()){
+        throw std::runtime_error ("bad GRegex start nodes");
+    }
+
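+    // Walk sketch: each (node, stm) pair advances one step per iteration.
+    // When a node has several children, the STM is duplicated so that every
+    // branch is explored independently; exhausted branches are archived in
+    // allStm, indexed by their STM of origin, and validated afterwards.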
+    //init the walk
+    std::vector<std::vector<SeqStm*>> allStm;
+    std::vector<std::pair<NodeTmp,SeqStm*>> currentWalk;
+
+    for (SeqStm* seqStmPtr : mStmInit) {
+        SeqStm* newStm = mStmFab.duplicateStm(seqStmPtr);
+        std::size_t idxStart = newStm->getStmIdx();
+        currentWalk.push_back(std::make_pair(startNodes[idxStart],newStm));
+        allStm.push_back(std::vector<SeqStm*>());
+    }
+
+    //walk
+    while (!currentWalk.empty())
+    {
+        std::vector<std::pair<NodeTmp,SeqStm*>> newWalk;
+        for (const auto& pair : currentWalk) {
+            const NodeTmp node = pair.first;
+            SeqStm* stmPtr = pair.second;
+
+            std::pair<int,std::string> test = stmPtr->testNode(node);
+            int res = test.first;
+            std::string commonTag = test.second;
+
+            std::set<NodeTmp> next_nodes = graphToMatch->getChildren(node);
+
+            /*std::cout << "Next nodes : " ;
+            for (const auto& x : next_nodes) {
+                std::cout << x->name() << ", ";
+            }
+            std::cout << std::endl;*/
+
+            // Test Match
+            if (commonTag == "" && next_nodes.size() > 1) {
+                std::cout << "NO MATCH. The node " << node.get() << " is not common and has more than one child" << std::endl;
+                return empty_set_return;
+            }
+
+            // If there are no more nodes --> archive the branch
+            if (res == -1 || next_nodes.empty()) {
+                int indexToInsert = stmPtr->getStmIdx();
+                allStm[indexToInsert].push_back(stmPtr);
+                //std::cout << "No more nodes --> STM archived : " << indexToInsert << std::endl;
+                continue; // TODEV: replace this with an 'else' that encapsulates the rest of the loop body?
+            }
+
+            bool first = true;
+
+            // Use an iterator to read through the next_nodes
+            std::set<NodeTmp>::iterator it;
+            for (it = next_nodes.begin(); it != next_nodes.end(); ++it) {
+                // Access the current element using the iterator
+                std::shared_ptr<Aidge::Node> next_node = *it;
+                if (first){
+                    newWalk.push_back(std::make_pair(next_node, stmPtr));
+                    first = false;
+                } else {
+                    SeqStm* new_stmPtr = mStmFab.duplicateStm(stmPtr);
+                    newWalk.push_back(std::make_pair(next_node, new_stmPtr));
+                }
+            }
+        }
+        currentWalk = newWalk;
+    }
+    
+    //std::cout << "Walk finished" << std::endl;    
+
+    if (!walk_validation_all_stm_are_valid(allStm)){
+        return empty_set_return;
+    }
+    //std::cout << "walk_validation_all_stm_are_valid finished" << std::endl;
+    
+
+    if (!walk_validation_all_node_read_validate_by_one_stm(allStm)){
+        return empty_set_return;
+    }
+    //std::cout << "walk_validation_all_node_read_validate_by_one_stm finished" << std::endl;
+    
+
+    if (!walk_validation_common_nodes_same_tag_for_all_stm(allStm)){
+        return empty_set_return;
+    }
+    //std::cout << "walk_validation_common_nodes_same_tag_for_all_stm finished" << std::endl;
+
+    //std::cout << "MATCH" << std::endl;
+    
+    return get_all_validate_nodes(allStm);
+        
+}
+
+
+
+Match GRegex::match(const std::shared_ptr<GraphView> graphToMatch){
+
+    //std::vector<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>>  matches;
+    //std::set<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>>  matches;
+    Match  matches;
+    std::size_t nbStartNodes = mStmInit.size();
+    std::set<NodeTmp> allNodes = graphToMatch->getNodes();
+    std::size_t nbAllNodes = allNodes.size();
+
+    std::vector<std::size_t> indices(nbStartNodes, 0);
+
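+    // Enumeration sketch: indices is a combination (with replacement) of
+    // start-node positions in allNodes; std::next_permutation then yields
+    // every ordering of that combination, so every assignment of start nodes
+    // to STMs is tried. E.g. with 2 start nodes over 3 graph nodes, the
+    // combinations (0,0), (0,1), (0,2), (1,1), (1,2), (2,2) are generated,
+    // each permuted before being handed to matchFromStartNodes.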
+    while (true) {
+        // Generate all permutations of the current combination
+        do {
+            std::vector<NodeTmp> startNodes;
+            //std::cout <<"start nodes :";
+            for (std::size_t i = 0; i < nbStartNodes; ++i) {
+                auto it = std::begin(allNodes);
+                std::advance(it, indices[i]);
+                //std::cout << (*it).get() << " ";
+                startNodes.push_back(*it);
+            }
+            //std::cout <<"\n";
+
+            std::set<NodeTmp> match = matchFromStartNodes(startNodes, graphToMatch);
+            //std::cout << "match size : " << match.size() << " ";
+            if (!match.empty()) {
+                //matches.push_back(std::make_pair(startNodes,match));
+                //matches.insert(std::make_pair(startNodes,match));
+                matches.insert(startNodes, match);
+            }
+
+        } while (std::next_permutation(indices.begin(), indices.end()));
+
+        // Generate the next combination with replacement
+        std::size_t i = nbStartNodes - 1;
+        while (true) {
+            if (indices[i] < nbAllNodes - 1) {
+                ++indices[i];
+                break;
+            }
+            if (i == 0) {
+                return matches;
+            }
+            --i;
+        }
+        std::fill(indices.begin() + i + 1, indices.end(), indices[i]);
+    }
+
+    return matches;
+}
\ No newline at end of file
diff --git a/aidge/_Core/src/graphmatching/Match.cpp b/aidge/_Core/src/graphmatching/Match.cpp
new file mode 100644
index 00000000..9a87fac7
--- /dev/null
+++ b/aidge/_Core/src/graphmatching/Match.cpp
@@ -0,0 +1,37 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "graphmatching/Match.hpp"
+
+using namespace Aidge; 
+
+Match::Match(){
+    // ctor
+}
+
+size_t Match::getNbMatch(){
+    assert(mStartNodes.size() == mMatchNodes.size() && "Match corrupted");
+    return mStartNodes.size();
+}
+
+void Match::insert(std::vector<NodeTmp> startnodes, std::set<NodeTmp> matchnodes){
+    assert(mStartNodes.size() == mMatchNodes.size() && "Match corrupted");
+    mStartNodes.push_back(startnodes);
+    mMatchNodes.push_back(matchnodes);
+}
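+
+// Note: insert() stores startnodes and matchnodes at the same index, so the
+// i-th entry of getStartNodes() corresponds to the i-th entry of
+// getMatchNodes(); getNbMatch() relies on the two vectors staying in sync.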
+
+std::vector<std::vector<NodeTmp>> Match::getStartNodes(){
+    return mStartNodes;
+}
+
+std::vector<std::set<NodeTmp>> Match::getMatchNodes(){
+    return mMatchNodes;
+}
\ No newline at end of file
diff --git a/aidge/_Core/src/graphmatching/NodeRegex.cpp b/aidge/_Core/src/graphmatching/NodeRegex.cpp
new file mode 100644
index 00000000..8ba6332d
--- /dev/null
+++ b/aidge/_Core/src/graphmatching/NodeRegex.cpp
@@ -0,0 +1,46 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "graphmatching/NodeRegex.hpp"
+
+
+// Verification done by the Parameter system
+
+
+// Version 1 - Only test the type of the node (no need for a lexer)
+// Input : Node_op
+// Output : bool
+// return mCondition == Node_op.type
+bool Aidge::NodeRegex::_is(std::shared_ptr<Node> &Node_op){
+
+    std::string NodeType = Node_op->type();
+
+    return NodeType == mCondition;
+}
+
+
+bool Aidge::NodeRegex::isA(std::string NodeType){
+
+    return NodeType == mCondition;
+}
+
+// Version 2 - Test the node to an advanced condition
+// Input : Node_op
+// Output : bool
+// return mCondition applied on Node
+/**bool NodeRegex::_is(string &Node_op){
+    // Parsing the condition is done in the initialization of the NodeRegex
+    
+    // assert parameters exist in the node with the parameter function isParam()
+
+    // get the parameters
+
+}*/
diff --git a/aidge/_Core/src/graphmatching/SeqStm.cpp b/aidge/_Core/src/graphmatching/SeqStm.cpp
new file mode 100755
index 00000000..89c932bc
--- /dev/null
+++ b/aidge/_Core/src/graphmatching/SeqStm.cpp
@@ -0,0 +1,247 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "graphmatching/SeqStm.hpp"
+
+using namespace Aidge; 
+
+///////////////////////////////////////////////////////
+
+SeqStm::SeqStm(
+    const int stmIdx,
+    const std::vector<std::vector<int>>& transitionMatrix,
+    const std::map<std::string,NodeRegex*>& nodesRegex,
+    const std::map<NodeTypeKey,int>& typeToIdxTransition,
+    int actSt,
+    std::set<NodeTmp> allNodeValidated,
+    std::set<NodeTmp> allNodeTested,
+    std::set<std::pair<NodeTmp,std::string>> allCommonNode,
+    bool stmIsValid):mStmIdx(stmIdx),
+    mTransitionMatrix(transitionMatrix),
+    mNodesRegex(nodesRegex),
+    mTypeToIdxTransition(typeToIdxTransition)
+{
+    //assert
+    if (transitionMatrix.size() == 0){
+        throw std::runtime_error ("no transitionMatrix");
+    }
+    if(transitionMatrix[0].size() == 0 || transitionMatrix[0].size() != typeToIdxTransition.size()){
+        throw std::runtime_error ("bad transitionMatrix");
+    }
+    int size = static_cast<int>(transitionMatrix.size());
+    if (actSt >= size){
+        throw std::runtime_error ("bad actSt");
+    }
+
+    mActSt              = actSt;
+    mAllNodeValidated   = allNodeValidated;
+    mAllNodeTested      = allNodeTested;
+    mAllCommonNode      = allCommonNode;
+    mStmIsValid         = stmIsValid;
+}
+
+SeqStm* SeqStm::duplicateStm(){
+
+    //deep copy of the sets
+    // std::set<Node> cAllNodeValidated(mAllNodeValidated.begin(), mAllNodeValidated.end());
+    // std::set<Node> cAllNodeTested(mAllNodeTested.begin(), mAllNodeTested.end());
+
+    // std::set<std::pair<Node,std::string>> cAllCommonNode;
+    // for (const auto& p : mAllCommonNode) {
+    //     cAllCommonNode.insert(p);
+    // }
+
+    auto newStm = new SeqStm(
+        mStmIdx,
+        mTransitionMatrix,
+        mNodesRegex,
+        mTypeToIdxTransition,
+        mActSt,
+        mAllNodeValidated,
+        mAllNodeTested,
+        mAllCommonNode,
+        mStmIsValid
+    );
+
+    return newStm;
+}
+
+
+std::pair<NodeRegex*,std::string> SeqStm::getNodeRegexAndCommonAt(int idxType)
+{
+    //std::cout << "!" << idxType << "\n";
+    for (auto const& x : mTypeToIdxTransition)
+    {
+        //x.second is the value : idx in mTransitionMatrix for the type
+        //x.first is a pair of the node regex key and a string that is the common tag '', #, #n
+        if (x.second == idxType){
+            if (mNodesRegex.find(x.first.first) != mNodesRegex.end()){
+                return std::make_pair(mNodesRegex.find(x.first.first)->second, x.first.second);
+            }else{
+                throw std::runtime_error ("a type is not defined in NodesRegex");
+            }
+        }
+    }
+    throw std::runtime_error ("bad idx in mNodesRegex");
+}
+
+
+NodeType SeqStm::getTheNodeType(NodeTmp node)
+{
+    //the node used to be a string '{type}{idx}' and we just want the type
+    // // std::regex re("([a-zA-Z]+)[0-9]+");
+    // // std::smatch match;
+    // // if (std::regex_search(node, match, re) == true) {
+    // //     return match.str(1);
+    // // }
+    // // throw std::runtime_error ("Type node not found");
+    // // return "";
+
+    //return node->name();
+    return node->type();
+}
+
+
+std::string SeqStm::transitionOnNodeType(NodeType nodeType){
+
+    if (!isStmBlocked()){
+        int idxType = 0;
+        for (auto & nextSt : mTransitionMatrix[mActSt]) {
+            // There is a transition for this type
+            //std::cout << "transition matrix next state -> "<< nextSt<<"\n" ;
+            if (nextSt != -1){
+                //std::cout << "next -> "<< nextSt<< " "<< isAValidSt(nextSt) <<"\n" ;
+                auto nodeRegex = getNodeRegexAndCommonAt(idxType);
+                //std::cout << "-> "<< nodeRegex.second<<"\n" ;
+                if (nodeRegex.first->isA(nodeType)){
+                    //std::cout << "nodetype tested !"<<"\n" ;
+                    if(isAValidSt(nextSt)){
+                        //std::cout << "Valid state !"<<"\n" ;
+                        mStmIsValid = true;
+                    }
+                    mActSt = nextSt;
+                    return nodeRegex.second;
+                }
+            }
+            idxType += 1;
+        }
+
+        mActSt = -1;
+    }
+
+    return "";
+}
+
+
+std::pair<int,std::string> SeqStm::testNode(const NodeTmp node){
+
+    std::string commonTag = "";
+    //std::cout << "0\n" ;
+    if (!isStmBlocked()){
+        bool isNextStEnd = std::all_of(mTransitionMatrix[mActSt].begin(), mTransitionMatrix[mActSt].end(), [&](int x){ return x == -1; });
+        //std::cout << "1:"<< isNextStEnd <<"\n" ;
+        //if every transition from the current state is -1, the node is not
+        //recorded as tested: it could never be validated anyway
+        if(!isNextStEnd){
+            mAllNodeTested.insert(node);
+        }
+        //std::cout << "2\n" ;
+        //recursion avoidance
+        if(mAllNodeValidated.find(node) == mAllNodeValidated.end()){
+
+            NodeType nodeType = getTheNodeType(node);
+            //std::cout << "3 " << nodeType << "\n" ;
+            commonTag = transitionOnNodeType(nodeType);
+            //after the transition, if the state is != -1 the node is valid for this stm
+            //std::cout << " mActSt = " << mActSt << "\n" ;
+            if( mActSt != -1 ){
+                mAllNodeValidated.insert(node);
+            }
+        }else{
+            mActSt = -1;
+        }
+    }
+
+    if(commonTag != ""){
+        mAllCommonNode.insert(std::make_pair(node,commonTag));
+    }
+    return std::make_pair(mActSt,commonTag);
+}
+
+
+void SeqStm::drawStm(){
+
+    // Find the maximum width of each column
+    std::vector<std::size_t> max_widths(mTransitionMatrix[0].size(), 0);
+    for (std::size_t i = 0; i < mTransitionMatrix.size(); ++i)
+    {
+        for (std::size_t j = 0; j < mTransitionMatrix[i].size(); ++j)
+        {
+            std::size_t width = std::to_string(mTransitionMatrix[i][j]).length();
+            if (width > max_widths[j])
+            {
+                max_widths[j] = width;
+            }
+        }
+    }
+
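+    // Colour legend (ANSI 256-colour backgrounds): once the STM is blocked
+    // (mActSt == -1), every cell is green if the STM ended in a valid state
+    // and red otherwise; while walking, the row of the current state is
+    // highlighted and all other rows keep the default blue background.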
+    // Print the vector with aligned columns
+    for (std::size_t i = 0; i < mTransitionMatrix.size(); ++i)
+    {
+        for (std::size_t j = 0; j < mTransitionMatrix[i].size(); ++j)
+        {
+            int i_int = static_cast<int>(i);
+            if (mActSt == -1 ){
+                if(mStmIsValid){
+                    std::cout << "\033[48;5;40m";
+                }else{
+                     std::cout << "\033[48;5;9m";
+                }
+            }
+            else if (mActSt == i_int){
+                std::cout << "\033[48;5;30m";
+            }else{
+                std::cout << "\033[48;5;27m";
+            }
+
+            // Pad the value with spaces to align it with the maximum width
+            std::size_t width = std::to_string(mTransitionMatrix[i][j]).length();
+            std::string padding(max_widths[j] - width, ' ');
+            std::cout << padding << mTransitionMatrix[i][j] << " ";
+            std::cout << "\033[0m";
+        }
+        std::cout << "\n";
+    }
+
+    std::cout << "mAllNodeTested : ";
+    for (const auto& x : mAllNodeTested) {
+        std::cout << x << ", ";
+    }
+    std::cout << "\n";
+
+
+    std::cout << "mAllNodeValidated : ";
+    for (const auto& x : mAllNodeValidated) {
+        std::cout << x << ", ";
+    }
+    std::cout << "\n";
+}
+
diff --git a/aidge/_Core/src/graphmatching/StmFactory.cpp b/aidge/_Core/src/graphmatching/StmFactory.cpp
new file mode 100644
index 00000000..4ca9c6d2
--- /dev/null
+++ b/aidge/_Core/src/graphmatching/StmFactory.cpp
@@ -0,0 +1,150 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "graphmatching/StmFactory.hpp"
+
+using namespace Aidge;
+
+StmFactory::StmFactory(const std::map<std::string, NodeRegex *> &nodesRegex)
+    : mNodesRegex(nodesRegex) {}
+
+SeqStm *StmFactory::duplicateStm(SeqStm *stm) { return stm->duplicateStm(); }
+
+SeqStm *StmFactory::makeNewStm(const std::string &sequRegex) {
+
+  ParsingReturn parsing = initParsingSequRegex(sequRegex);
+  std::vector<std::vector<int>> transitionMatrix =
+      initTransitionMatrix(parsing);
+
+  std::set<NodeTmp> allNodeValidated;
+  std::set<NodeTmp> allNodeTested;
+  std::set<std::pair<NodeTmp, std::string>> allCommonNode;
+
+  SeqStm *newStm = new SeqStm(static_cast<int>(mCmptStm), transitionMatrix, mNodesRegex,
+                              parsing.typeToIdxTransition, 0, allNodeValidated,
+                              allNodeTested, allCommonNode, false);
+  mCmptStm += 1;
+
+  return newStm;
+}
+
+ParsingReturn StmFactory::initParsingSequRegex(const std::string &sequRegex) {
+
+  std::string toMatch;
+  std::regex re("\\s*([A-Za-z]+)(#\\d*)?([+*])?\\s*(->|;)");
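+  // The pattern recognises one step of the sequential regex, e.g. "MatMul->"
+  // or "Add;": group 1 is the node type, group 2 an optional common tag
+  // ("#", "#0", ...), group 3 an optional quantifier ("+" or "*") and
+  // group 4 the separator ("->" between steps, ";" at the end of the sequence).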
+  std::smatch matches;
+
+  int idxType = 0;
+  // return
+  ParsingReturn parsing;
+  // std::map<std::pair<NodeType,std::string>,int> typeToIdxTransition;
+  // std::vector<std::pair<std::pair<NodeType,std::string>,std::string>>
+  // transition;
+  // assert
+  std::map<NodeType, std::string> assertCommonNodeTypes;
+
+  for (std::size_t i = 0; i < sequRegex.length(); i++) {
+    toMatch += sequRegex[i];
+    if (std::regex_match(toMatch, matches, re)) {
+
+      std::string type = matches.str(1);
+      std::string commonTag = matches.str(2);
+      std::string quantification = matches.str(3);
+
+      if ((commonTag != "") && (quantification != "")) {
+        throw std::runtime_error("bad commonTag and quantification");
+      }
+
+      // make the typeToIdxTransition
+      NodeTypeKey typeTag = std::make_pair(type, commonTag);
+      /*std::cout << "              typeTag: " << type << "  " << commonTag
+                << parsing.typeToIdxTransition.size() << std::endl;*/
+      if (parsing.typeToIdxTransition.find(typeTag) ==
+          parsing.typeToIdxTransition.end()) {
+        parsing.typeToIdxTransition[typeTag] = idxType;
+        idxType += 1;
+      }
+      ////////////////////////////////////////////////////////////
+      // ASSERT
+      // SAME Common node in the sequ
+      if (commonTag != "") {
+        if (assertCommonNodeTypes.find(type) != assertCommonNodeTypes.end()) {
+          if (assertCommonNodeTypes[type] == commonTag) {
+            throw std::runtime_error("same common node in the sequ regex");
+          }
+        } else {
+          assertCommonNodeTypes[type] = commonTag;
+        }
+      }
+
+      // save all transition
+      parsing.transition.push_back(std::make_pair(typeTag, quantification));
+
+      /*std::cout << "Match found: " << matches.str() << std::endl;
+      std::cout << "Type: " << matches.str(1) << std::endl;
+      std::cout << "Common tag: " << matches.str(2) << std::endl;
+      std::cout << "Quantification: " << matches.str(3) << std::endl;*/
+
+      toMatch = "";
+    }
+  }
+  if (parsing.transition.size() == 0) {
+    throw std::runtime_error("Bad Parsing SequRegex ");
+  }
+
+  return parsing;
+}
+
+std::vector<std::vector<int>>
+StmFactory::initTransitionMatrix(ParsingReturn &parsing) {
+
+  // std::pair<NodeTypeKey,std::string>
+  std::vector<std::vector<int>> transitionMatrix;
+  std::size_t numberOfType = parsing.typeToIdxTransition.size();
+
+  if (numberOfType == 0) {
+    throw std::runtime_error("Bad number Of Type ");
+  }
+  // init start st
+  transitionMatrix.push_back(std::vector<int>(numberOfType, -1));
+
+  std::size_t idxTransition = 0;
+  int idxState = 0;
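+  // Worked example (sketch): for the parsed sequence "A->B;" with type
+  // indices A=0 and B=1, the loop produces
+  //     state 0: [ 1, -1]   (reading A goes to state 1)
+  //     state 1: [-1,  2]   (reading B goes to state 2)
+  //     state 2: [-1, -1]   (final state, no transition)
+  // With "B+" the final state additionally gets the self-transition [-1, 2],
+  // and with "B*" the loop stays on the current state instead of creating a
+  // new one.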
+  for (const auto &pair : parsing.transition) {
+    const NodeTypeKey &nodeTypeKey = pair.first;
+    const std::string &quant = pair.second;
+
+    /*std::cout << "Key: {" << nodeTypeKey.first << ", " << nodeTypeKey.second
+              << "}, Value: " << quant << std::endl;
+    std::cout << "idxState " << idxState << " TM: " << transitionMatrix.size()
+              << std::endl;*/
+    std::size_t idxType = parsing.typeToIdxTransition[nodeTypeKey];
+    /*std::cout << "idxType " << idxType << " TM: " << transitionMatrix[0].size()
+              << "type" << numberOfType << std::endl;*/
+
+    if (quant == "*") {
+      transitionMatrix[idxTransition][idxType] = idxState;
+    } else if (quant == "+") {
+      idxState += 1;
+      transitionMatrix[idxTransition][idxType] = idxState;
+      transitionMatrix.push_back(std::vector<int>(numberOfType, -1));
+      idxTransition += 1;
+      transitionMatrix[idxTransition][idxType] = idxState;
+    } else {
+
+      idxState += 1;
+      transitionMatrix[idxTransition][idxType] = idxState;
+      transitionMatrix.push_back(std::vector<int>(numberOfType, -1));
+      idxTransition += 1;
+    }
+  }
+  return transitionMatrix;
+}
\ No newline at end of file
diff --git a/aidge/_Core/src/operator/Operator.cpp b/aidge/_Core/src/operator/Operator.cpp
new file mode 100644
index 00000000..1db2feeb
--- /dev/null
+++ b/aidge/_Core/src/operator/Operator.cpp
@@ -0,0 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+
+#include "backend/OperatorImpl.hpp"
+#include "operator/Operator.hpp"
+#include "utils/Types.h"
+
+// constexpr Aidge::Operator::Operator(const char* type)
+//     : mType(type)
+// {
+// 	// ctor
+// }
+
+Aidge::Operator::~Operator() = default;
+
+///////////////////////////////////////////////////////
+//        IMPLEMENTATION
+///////////////////////////////////////////////////////
+
+Aidge::NbElts_t Aidge::Operator::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
+    return mImpl->getNbRequiredData(inputIdx);
+}
+
+Aidge::NbElts_t Aidge::Operator::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
+    return mImpl->getNbConsumedData(inputIdx);
+}
+
+Aidge::NbElts_t Aidge::Operator::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
+    return mImpl->getNbProducedData(outputIdx);
+}
+
+void Aidge::Operator::forward() { mImpl->forward(); }
+
+void Aidge::Operator::backward() { mImpl->backward(); }
diff --git a/aidge/_Core/src/recipies/FuseMulAdd.cpp b/aidge/_Core/src/recipies/FuseMulAdd.cpp
new file mode 100644
index 00000000..ff57bb48
--- /dev/null
+++ b/aidge/_Core/src/recipies/FuseMulAdd.cpp
@@ -0,0 +1,103 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <set>
+#include <cassert>
+#include <memory>
+#include <string>
+#include <iostream>
+
+#include "operator/FC.hpp"
+#include "utils/Recipies.hpp"
+#include "graph/GraphView.hpp"
+#include "graph/Node.hpp"
+#include "operator/Producer.hpp"
+#include "operator/GenericOperator.hpp"
+
+using namespace Aidge;
+
+/**
+ * @brief Merge MatMul and Add Node into FC.
+ * 
+ * @param nodes Strict set of Node to merge.
+ */
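+// Typical use (sketch): the node set would come from a graph match, e.g. a
+// GRegex built on the sequence "MatMul->Add;"; the variables below are
+// hypothetical handles on the matched Nodes.
+//
+//   std::set<std::shared_ptr<Node>> nodes{matmulNode, addNode};
+//   fuseMulAdd(nodes);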
+void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
+    // Fuse MatMul & Add into FC
+    // Inputs : old nodes (pointers to MatMul & Add)
+    
+    assert(nodes.size() == 2 && "Wrong number of nodes to replace\n");
+    // TODO: we lose type information after matching; how can we keep information (not only the type) after matching?
+    
+    // Step 0 : Assert the nodes types are correct to be fused
+    std::shared_ptr<Node> add;
+    std::shared_ptr<Node> matmul;
+    for (const auto& element : nodes) {
+        assert((element->type() == "MatMul" || element->type() == "Add") && "Wrong type for the nodes to replace");
+        if (element->type() == "MatMul"){
+            matmul = element;
+        }
+        else if (element->type() == "Add") {
+            add = element;
+        }
+    }
+    // auto matmul_parent0 = matmul->input(0);
+    // if (matmul_parent0.first) {
+    //     auto tensor0 = std::static_pointer_cast<Tensor>(std::static_pointer_cast<Producer_Op<2>>(matmul_parent0.first->getOperator())->getOutput(0));
+    //     auto d0 = tensor0->dims();
+    //     for (int i = 0; i< d0.size(); ++i)
+    //         std::cout << d0[i] << ", ";
+    //     std::cout << std::endl;
+    // }
+    // auto matmul_parent1 = matmul->input(1);
+    // if (matmul_parent1.first) {
+    //     auto tensor1 = std::static_pointer_cast<Tensor>(std::static_pointer_cast<Producer_Op<2>>(matmul_parent1.first->getOperator())->getOutput(0));
+    //     auto d1 = tensor1->dims();
+    //     for (int i = 0; i< d1.size(); ++i)
+    //         std::cout << d1[i] << ", ";
+    //     std::cout << std::endl;
+    // }
+
+    // Step 1 : Create FC
+    // Fetch the output dimension through the bias size
+    auto producer_add_bias = add->input(1);
+    auto data_pointer = (producer_add_bias.first)->getOperator()->getOutput(0);
+    std::shared_ptr<Tensor> bias_tensor = std::static_pointer_cast<Tensor>(data_pointer);
+    auto dim = bias_tensor->dims();
+
+    // Instantiate FC
+    //std::shared_ptr<Node> fc = FC(dim[0], false, "Fc");
+    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(dim[0], false));
+
+    // Step 2 : Branch existing producers & create the others
+    // link weights & bias
+    auto producer_matmul_weights = matmul->input(1);
+    //addProducer(fc, 1, producer_matmul_weights.first);
+    //addProducer(fc, 2, producer_add_bias.first);
+    if (matmul->getParents(1)==nullptr) {
+        matmul->getParents(0)->addChild(fc, 0, 1);
+    } else {
+        if (matmul->getParents(0)!=nullptr)
+            matmul->getParents(0)->addChild(fc, 0, 0);
+        // (producer_matmul_weights.first)->addChild(fc,0,1);
+        matmul->getParents(1)->addChild(fc, 0, 1);
+    }
+    (producer_add_bias.first)->addChild(fc,0,2);
+
+
+    // Step 3 : Update all graphviews that contain at least one node to replace
+        // Case 1 : If all nodes are in a graph view : delete old nodes & branch input & output
+        // Case 2 : If not all nodes are in a graph view : only delete the nodes from the graphview
+        // Maybe create a central mechanism to update all graph views automatically rather than having each node keep track of its graphview presence?
+    auto nodeToReplace = std::make_shared<GraphView>();
+    nodeToReplace->add(nodes);
+    nodeToReplace->replaceWith({fc});
+
+}
\ No newline at end of file
diff --git a/aidge/_Core/src/scheduler/Scheduler.cpp b/aidge/_Core/src/scheduler/Scheduler.cpp
new file mode 100644
index 00000000..5f4b0295
--- /dev/null
+++ b/aidge/_Core/src/scheduler/Scheduler.cpp
@@ -0,0 +1,235 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "scheduler/Scheduler.hpp"
+
+#include <chrono>
+#include <memory>
+#include <set>
+#include <string>
+
+#include "graph/GraphView.hpp"
+#include "graph/Node.hpp"
+#include "utils/Types.h"
+
+void drawProgressBar(double progress, int barWidth, const char* additionalInfo = nullptr) {
+    putchar('[');
+    int pos = static_cast<int>(barWidth * progress);
+    for (int i = 0; i < barWidth; ++i) {
+        if (i <= pos)
+            putchar('#');
+        else
+            putchar(' ');
+    }
+    printf("] %d%% | %s\r", static_cast<int>(progress * 100), (additionalInfo ? additionalInfo : ""));
+    fflush(stdout);
+}
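+
+// e.g. drawProgressBar(0.5, 50, "halfway") redraws a 50-character bar at 50%.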
+
+// TODO: handle multiple inputs/outputs
+void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
+    if (forwardDims) { mGraphView->forwardDims(); }
+
+    mScheduling.clear();
+
+    // setup initial producers list
+    // add each Producer Node.
+    std::set<std::shared_ptr<Node>> computationOver;
+    std::size_t computationNumber = 0;
+    std::set<std::shared_ptr<Node>> producers;
+    for (const std::shared_ptr<Node>& nodePtr : mGraphView->getNodes()) {
+        if (nodePtr->type() == "Producer") {
+            producers.insert(nodePtr);
+        } else {
+            ++computationNumber;
+        }
+    }
+    // add Data Input
+    // FIXME: should be changed when the real system for providing
+    // data is implemented
+    for (const std::shared_ptr<Node>& nodePtr : mGraphView->inputNodes()) {
+        for (const auto& parentPtr : nodePtr->getParents()) {
+            if ((mGraphView->getNodes()).find(parentPtr) == (mGraphView->getNodes()).end()) {
+                // Node not found in the graph, it's an outside producer
+                producers.insert(parentPtr);
+            }
+        }
+    }
+
+    // setup consumer list
+    // std::set<std::shared_ptr<Node>> consumers = getConsumers(producers);
+
+    /* It may not be necessary to initialize producer */
+    std::set<std::shared_ptr<Node>> consumers = mGraphView->inputNodes();
+    do {
+        // find runnable consumers
+        std::set<std::shared_ptr<Node>> runnableConsumers;
+        if (verbose) printf("List of layers receiving data:\n");
+        for (const auto& consumer : consumers) {
+            if (verbose) {
+                printf("\t- consumer: "
+                       "\x1b[1;37m"
+                       "%s"
+                       "\x1b[0m"
+                       "\n\t\tR/C:\t",
+                       (consumer->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(consumer.get()))).c_str());
+                for (IOIndex_t inId = 0; static_cast<IONb_t>(inId) < consumer->nbInputs() - 1; ++inId) {
+                    printf("%ld/%ld\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
+                           consumer->getOperator()->getNbRequiredData(inId));
+                }
+                printf("%ld/%ld", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
+                       consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1));
+                printf("\n\t\tP:\t");
+                for (IOIndex_t outId = 0; static_cast<IONb_t>(outId) < consumer->nbOutputs() - 1; ++outId) {
+                    printf("%ld\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
+                }
+                printf("%ld", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
+                printf("\n");
+            }
+            bool isRunnable = true;
+
+            IOIndex_t parentID = 0;  // FIXME: handle this correctly
+            // Check that every input has enough data to run
+            for (const auto& consumerParent : consumer->dataInputs()) {
+                if (consumerParent.first &&
+                    consumer->getOperator()->getNbRequiredData(parentID++) >
+                            consumerParent.first->getOperator()->getNbProducedData(consumerParent.second)) {
+                    // not enough data to run
+                    isRunnable = false;
+                    break;
+                }
+            }
+
+            if (isRunnable) {
+                runnableConsumers.insert(consumer);
+            }
+        }
+
+        // run every runnable consumer once, sequentially
+        // TODO: handle memory allocation in scheduler
+        // TODO: optimize memory usage
+        for (const auto& runnable : runnableConsumers) {
+            if (verbose)
+                printf("run: %s\n",
+                       (runnable->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))).c_str());
+            else
+                drawProgressBar(static_cast<float>(computationOver.size()) / static_cast<float>(computationNumber), 50,
+                                (std::string("running ") + runnable->type() + "_" +
+                                 std::to_string(reinterpret_cast<uintptr_t>(runnable.get())))
+                                        .c_str());
+            const auto tStart = std::chrono::high_resolution_clock::now();
+            runnable->forward();
+            const auto tEnd = std::chrono::high_resolution_clock::now();
+            mScheduling.push_back(SchedulingElement(runnable, tStart, tEnd));
+        }
+
+        // update producers and consumers list
+        if (verbose) printf("Updating producer and consumer lists...\n");
+        const auto oldConsumers = consumers;
+
+        for (const auto& consumer : oldConsumers) {
+            if (verbose) {
+                printf("\t- consumer: %s\n\t\tR/C:\t",
+                       (consumer->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(consumer.get()))).c_str());
+                for (IOIndex_t inId = 0; static_cast<IONb_t>(inId) < consumer->nbInputs() - 1; ++inId) {
+                    printf("%ld/%ld\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
+                           consumer->getOperator()->getNbRequiredData(inId));
+                }
+                printf("%ld/%ld", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
+                       consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1));
+                printf("\n\t\tP:\t");
+                for (IOIndex_t outId = 0; static_cast<IONb_t>(outId) < consumer->nbOutputs() - 1; ++outId) {
+                    printf("%ld\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
+                }
+                printf("%ld", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
+                printf("\n");
+            }
+            bool isStillConsumer = false;
+
+            IOIndex_t parentID = 0;  // FIXME: handle this correctly
+            // should we check input or dataInput ?
+            for (const auto& consumerParent : consumer->inputs()) {
+                if (consumerParent.first &&
+                    consumer->getOperator()->getNbConsumedData(parentID++) <
+                            consumerParent.first->getOperator()->getNbProducedData(consumerParent.second)) {
+                    // there is still data to consume
+                    isStillConsumer = true;
+                    break;
+                }
+            }
+
+            bool computationOverForConsumer = true;
+            for (IOIndex_t parentIDi = 0; static_cast<IONb_t>(parentIDi) < consumer->nbInputs(); ++parentIDi) {
+                if (consumer->getOperator()->getNbConsumedData(parentIDi) <
+                    consumer->getOperator()->getNbRequiredData(parentIDi)) {
+                    computationOverForConsumer = false;
+                    break;
+                }
+            }
+            if (computationOverForConsumer) {
+                computationOver.insert(consumer);
+            }
+
+            for (IOIndex_t outId = 0; static_cast<IONb_t>(outId) < consumer->nbOutputs(); ++outId) {
+                if (consumer->getOperator()->getNbProducedData(outId) > 0) {
+                    if (verbose) printf("  also producer\n");
+                    // make sure consumer is also a producer
+                    producers.insert(consumer);
+
+                    const auto& childs = consumer->getChildren();
+                    consumers.insert(childs.begin(), childs.end());
+                    break;
+                }
+            }
+
+            if (!isStillConsumer) {
+                if (verbose) printf("  no more consumer\n");
+                // consumer is no longer a consumer, only a producer
+                consumers.erase(consumer);
+            }
+        }
+
+        if (verbose) printf("*************\n");
+    } while (!consumers.empty());
+    if (!verbose) drawProgressBar(1.0, 50, "                                   ");
+    printf("\n");
+}
+
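+// Output sketch: the generated <fileName>.mmd is a Mermaid Gantt chart, e.g.
+// (illustrative names and numbers)
+//
+//   gantt
+//   dateFormat x
+//   axisFormat %s ms
+//
+//   Conv_140211234 :0, 1250
+//   ReLU_140211867 :1250, 1310
+//
+// with one line per scheduled node, labelled "<type>_<address>" and spanning
+// its start/end offsets (in microseconds) from the first scheduled element.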
+void Aidge::SequentialScheduler::saveSchedulingDiagram(const std::string& fileName) const {
+    FILE* fp = std::fopen((fileName + ".mmd").c_str(), "w");
+    if (!fp) { return; }  // give up if the output file cannot be opened
+    std::fprintf(fp, "gantt\ndateFormat x\naxisFormat %%s ms\n\n");
+
+    if (!mScheduling.empty()) {
+        const auto globalStart = mScheduling[0].start;
+
+        for (const auto& element : mScheduling) {
+            std::fprintf(fp, "%s :%ld, %ld\n",
+                         (element.node->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(element.node.get())))
+                                 .c_str(),
+                         std::chrono::duration_cast<std::chrono::microseconds>(element.start - globalStart).count(),
+                         std::chrono::duration_cast<std::chrono::microseconds>(element.end - globalStart).count());
+        }
+    }
+
+    std::fprintf(fp, "\n");
+    std::fclose(fp);
+}
+
+std::set<std::shared_ptr<Aidge::Node>> Aidge::SequentialScheduler::getConsumers(
+        const std::set<std::shared_ptr<Node>>& producers) const {
+    std::set<std::shared_ptr<Node>> consumers;
+
+    for (const auto& producer : producers) {
+        const auto& childs = producer->getChildren();
+        consumers.insert(childs.begin(), childs.end());
+    }
+
+    return consumers;
+}
\ No newline at end of file
-- 
GitLab