diff --git a/aidge/include/aidge/aidge.hpp b/aidge/include/aidge/aidge.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..7f32d695a41d954e9f31c6682e3cc6fc0226aed9
--- /dev/null
+++ b/aidge/include/aidge/aidge.hpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_IMPORTS_H__
+#define __AIDGE_IMPORTS_H__
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/TensorImpl.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Connector.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/graphmatching/GRegex.hpp"
+#include "aidge/graphmatching/Match.hpp"
+#include "aidge/graphmatching/NodeRegex.hpp"
+#include "aidge/graphmatching/SeqStm.hpp"
+#include "aidge/graphmatching/StmFactory.hpp"
+#include "aidge/graphmatching/Utile.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/AvgPooling.hpp"
+#include "aidge/operator/BatchNorm.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+#include "aidge/operator/Matmul.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/operator/Softmax.hpp"
+#include "aidge/scheduler/Scheduler.hpp"
+#include "aidge/utils/CParameter.hpp"
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Recipies.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+//#include "aidge/utilsParsing/AstNode.hpp"
+//#include "aidge/utilsParsing/ParsingToken.hpp"
+
+#endif /* __AIDGE_IMPORTS_H__ */
diff --git a/aidge/include/aidge/backend/OperatorImpl.hpp b/aidge/include/aidge/backend/OperatorImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..7e022145d1eeaa8a2bd79afe69ca06ca57a62651
--- /dev/null
+++ b/aidge/include/aidge/backend/OperatorImpl.hpp
@@ -0,0 +1,60 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_OPERATORIMPL_H__
+#define __AIDGE_OPERATORIMPL_H__
+
+#include <cstddef>
+#include <vector>
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+class OperatorImpl {
+public:
+    virtual void forward(){};
+    virtual void backward() {}
+
+    /**
+     * @brief Minimum amount of data from a specific input required by the
+     * implementation to be run.
+     *
+     * @param inputIdx Index of the input analysed.
+     * @return NbElts_t
+     */
+    virtual NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const = 0;
+
+    // Amount of input data that cannot be overwritten during the execution.
+    virtual NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const = 0;
+
+    // Memory required at an output for a given input size.
+    virtual NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const = 0;
+
+    /**
+     * @brief Total amount of consumed data from a specific input.
+     *
+     * @param inputIdx Index of the input analysed.
+     * @return NbElts_t
+     */
+    virtual NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const = 0;
+
+    /**
+     * @brief Total amount of produced data ready to be used on a specific output.
+     *
+     * @param outputIdx Index of the output analysed.
+     * @return NbElts_t
+     */
+    virtual NbElts_t getNbProducedData(const IOIndex_t outputIdx) const = 0;
+
+    virtual ~OperatorImpl() = default;
+};
+} // namespace Aidge
+
+#endif /* __AIDGE_OPERATORIMPL_H__ */
diff --git a/aidge/include/aidge/backend/TensorImpl.hpp b/aidge/include/aidge/backend/TensorImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..58f2d547e513d540a491155045c463f9a7199578
--- /dev/null
+++ b/aidge/include/aidge/backend/TensorImpl.hpp
@@ -0,0 +1,41 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_TENSORIMPL_H__
+#define __AIDGE_TENSORIMPL_H__
+
+#include <cstddef>
+#include <cstdio>
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+class TensorImpl {
+public:
+    TensorImpl() = delete;
+    TensorImpl(const char *backend) : mBackend(backend){};
+    virtual void copy(const void *src, NbElts_t length) = 0;
+    virtual void *rawPtr() = 0;
+    virtual void setRawPtr(void* /*ptr*/)
+    {
+        printf("Cannot set raw pointer for backend %s\n", mBackend);
+    };  
+    virtual std::size_t scalarSize() const = 0; // Size of one scalar (in bytes)
+    constexpr const char *backend() const { return mBackend; }
+    virtual ~TensorImpl() = default;
+    virtual bool operator==(const TensorImpl &othImpl) const = 0;
+
+private:
+    const char *mBackend;
+};
+
+} // namespace Aidge
+
+#endif /* __AIDGE_TENSORIMPL_H__ */
diff --git a/aidge/include/aidge/data/Data.hpp b/aidge/include/aidge/data/Data.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4edc4b9a5a9fd877cf9a3e84c7f644be2a11534a
--- /dev/null
+++ b/aidge/include/aidge/data/Data.hpp
@@ -0,0 +1,75 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_DATA_H__
+#define __AIDGE_DATA_H__
+
+#include "aidge/utils/Parameter.hpp"
+
+namespace Aidge {
+enum class DataType {
+    Float64,
+    Float32,
+    Float16,
+    BFloat16,
+    Binary,
+    Ternary,
+    Int2,
+    Int3,
+    Int4,
+    Int5,
+    Int6,
+    Int7,
+    Int8,
+    Int16,
+    Int32,
+    Int64,
+    UInt2,
+    UInt3,
+    UInt4,
+    UInt5,
+    UInt6,
+    UInt7,
+    UInt8,
+    UInt16,
+    UInt32,
+    UInt64
+};
+
+class Data {
+public:
+    constexpr Data(const char* type): mType(type) {};
+    constexpr const char* type() const {
+        return mType;
+    }
+    virtual ~Data() = default;
+
+private:
+    const char* mType;
+};
+}
+
+namespace {
+template <typename T> struct NativeType { static const Aidge::DataType type; };
+template <> const Aidge::DataType NativeType<double>::type = Aidge::DataType::Float64;
+template <> const Aidge::DataType NativeType<float>::type = Aidge::DataType::Float32;
+template <> const Aidge::DataType NativeType<long>::type = Aidge::DataType::Int64;
+template <> const Aidge::DataType NativeType<int>::type = Aidge::DataType::Int32;
+
+template <>
+const char* const EnumStrings<Aidge::DataType>::data[]
+    = {"Float64", "Float32", "Float16", "BFloat16", "Binary", "Ternary", 
+       "Int2", "Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16", 
+       "Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6", 
+       "UInt7", "UInt8", "UInt16", "UInt32", "UInt64"};
+}
+
+#endif /* __AIDGE_DATA_H__ */
\ No newline at end of file
diff --git a/aidge/include/aidge/data/Tensor.hpp b/aidge/include/aidge/data/Tensor.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..01e2a5a51d86c28d3a89bd9085c60bfad297623f
--- /dev/null
+++ b/aidge/include/aidge/data/Tensor.hpp
@@ -0,0 +1,584 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_DATA_TENSOR_H__
+#define __AIDGE_CORE_DATA_TENSOR_H__
+
+#include <cstring>
+#include <set>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/TensorImpl.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+// Helper to create default arrays
+template <typename T, std::size_t ... Is>
+constexpr std::array<T, sizeof...(Is)>
+create_array_impl(T value, std::index_sequence<Is...>)
+{
+    // cast Is to void to remove the warning: unused value
+    return {{(static_cast<void>(Is), value)...}};
+}
+
+template <typename T, std::size_t N>
+constexpr std::array<T, N> create_array(const T& value)
+{
+    return create_array_impl(value, std::make_index_sequence<N>());
+}
+
+
+// Helper to convert vector to array
+template <typename T, typename Iter, std::size_t... Is>
+constexpr auto to_array(Iter &iter, std::index_sequence<Is...>) -> std::array<T, sizeof...(Is)> {
+    return {{((void)Is, T(*iter++))...}};
+}
+
+/**
+ * @brief Convert an object with an iterator to an std::array.
+ */
+template <std::size_t N, typename U = void, typename Iter, typename V = typename std::iterator_traits<Iter>::value_type,
+          typename T = std::conditional_t<std::is_same<U, void>{}, V, U>>
+constexpr auto to_array(Iter iter) -> std::array<T, N> {
+    return to_array<T>(iter, std::make_index_sequence<N>{});
+}
+
+namespace detail {
+
+template <class T, std::size_t N, std::size_t... I>
+constexpr std::array<std::remove_cv_t<T>, N> to_array_impl(T (&a)[N], std::index_sequence<I...>) {
+    return {{a[I]...}};
+}
+
+}  // namespace detail
+
+/**
+ * @brief Convert a C-style array into a C++ std::array.
+ *
+ * @tparam T Data type.
+ * @tparam N Number of elements.
+ * @param a C-style array to convert.
+ * @return constexpr std::array<std::remove_cv_t<T>, N>
+ */
+template <class T, std::size_t N>
+constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N]) {
+    return detail::to_array_impl(a, std::make_index_sequence<N>{});
+}
+
+template <typename T, std::size_t N, std::size_t... I>
+constexpr std::array<T, N + 1> append(std::array<T, N> a, T t, std::index_sequence<I...>) {
+    return std::array<T, N + 1>{a[I]..., t};
+}
+
+template <typename T, std::size_t N, std::size_t... I>
+constexpr std::array<T, N + 1> append(T t, std::array<T, N> a, std::index_sequence<I...>) {
+    return std::array<T, N + 1>{t, a[I]...};
+}
+
+/**
+ * @brief Create a new array concatenating the initial one with the value to
+ * add.
+ * @details append({1,2,7}, 3) -> {1,2,7,3}
+ *
+ * @tparam T Data type.
+ * @tparam N Number of elements in the initial array.
+ * @param a Initial array.
+ * @param t Element to add.
+ * @return constexpr std::array<T, N + 1>
+ */
+template <typename T, std::size_t N>
+constexpr std::array<T, N + 1> append(std::array<T, N> a, T t) {
+    return append(a, t, std::make_index_sequence<N>());
+}
+
+template <typename T, std::size_t N>
+constexpr std::array<T, N + 1> append(T t, std::array<T, N> a) {
+    return append(t, a, std::make_index_sequence<N>());
+}
+
+// Generic helper for initializing a Tensor
+template <typename T, std::size_t SIZE_0>
+struct Array1D {
+    T data[SIZE_0];
+};
+
+template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
+struct Array2D {
+    T data[SIZE_0][SIZE_1];
+};
+
+template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
+struct Array3D {
+    T data[SIZE_0][SIZE_1][SIZE_2];
+};
+
+template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
+struct Array4D {
+    T data[SIZE_0][SIZE_1][SIZE_2][SIZE_3];
+};
+
+/**
+ * @brief Description for the tensor data structure.
+ * @details Sets the properties of the tensor without actually containing any data.
+ * Contains a pointer to an actual contiguous implementation of data.
+ */
+class Tensor : public Data,
+               public Registrable<Tensor, std::tuple<std::string, DataType>, std::unique_ptr<TensorImpl>(const Tensor &)> {
+   private:
+    DataType mDataType; /** enum to specify data type. */
+    std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */
+    std::unique_ptr<TensorImpl> mImpl; /** Pointer to the actual data implementation. */
+    std::shared_ptr<Tensor> mGrad; /** Pointer to the associated gradient Tensor instance. */
+
+    // Cached data
+    std::size_t mSize;    /** Number of elements in the Tensor. */
+    std::size_t mSizeM1;  /** Number of elements in the N-1 first dimensions */
+
+   public:
+    static constexpr const char *Type = "Tensor";
+
+    /**
+     * @brief Construct a new empty Tensor object.
+     * @param dataType Sets the type of inserted data.
+     */
+    Tensor(DataType dataType = DataType::Float32)
+        : Data(Type), 
+          mDataType(dataType), 
+          mDims({}), 
+          mSize(0), 
+          mSizeM1(0)
+    {
+        // ctor
+    }
+
+    /**
+     * @brief Construct a new Tensor object copied from another one.
+     * @param otherTensor 
+     */
+    Tensor(const Tensor& otherTensor)
+        : Data(Type), 
+          mDataType(otherTensor.mDataType), 
+          mDims(otherTensor.mDims), 
+          mSize(otherTensor.mSize), 
+          mSizeM1(otherTensor.mSizeM1) 
+    {
+        if (otherTensor.hasImpl()) {
+            mImpl = Registrar<Tensor>::create({otherTensor.mImpl->backend(), dataType()})(*this);
+            mImpl->copy(otherTensor.mImpl->rawPtr(), mSize);
+        }
+    }
+
+    /**
+     * @brief Construct a new Tensor object from the 1-dimension Array helper.
+     * @tparam T datatype
+     * @tparam SIZE_0 first array dimension.
+     */
+    template <typename T, std::size_t SIZE_0>
+    constexpr Tensor(Array1D<T, SIZE_0> &&arr)
+        : Data(Type),
+          mDataType(NativeType<T>::type),
+          mDims({SIZE_0}),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
+          mSize(SIZE_0),
+          mSizeM1(SIZE_0) {
+        mImpl->copy(&arr.data[0], SIZE_0);
+    }
+
+    template <typename T, std::size_t SIZE_0>
+    constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
+        resize({SIZE_0});
+        if (!mImpl) {
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
+        }
+        mImpl->copy(&arr.data[0], SIZE_0);
+        return *this;
+    }
+
+    /**
+     * @brief Construct a new Tensor object from the 2-dimensions Array helper.
+     * @tparam T datatype
+     * @tparam SIZE_0 first array dimension.
+     * @tparam SIZE_1 second array dimension.
+     */
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
+    constexpr Tensor(Array2D<T, SIZE_0, SIZE_1> &&arr)
+        : Data(Type),
+          mDataType(NativeType<T>::type),
+          mDims({SIZE_0, SIZE_1}),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
+          mSize(SIZE_0 * SIZE_1),
+          mSizeM1(SIZE_1) {
+        mImpl->copy(&arr.data[0][0], SIZE_0 * SIZE_1);
+    }
+
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
+    constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
+        resize({SIZE_0, SIZE_1});
+        if (!mImpl) {
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
+        }
+        mImpl->copy(&arr.data[0][0], SIZE_0 * SIZE_1);
+        return *this;
+    }
+
+    /**
+     * @brief Construct a new Tensor object from the 3-dimensions Array helper.
+     * @tparam T datatype
+     * @tparam SIZE_0 first array dimension.
+     * @tparam SIZE_1 second array dimension.
+     * @tparam SIZE_2 third array dimension.
+     */
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
+    constexpr Tensor(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr)
+        : Data(Type),
+          mDataType(NativeType<T>::type),
+          mDims({SIZE_0, SIZE_1, SIZE_2}),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
+          mSize(SIZE_0 * SIZE_1 * SIZE_2),
+          mSizeM1(SIZE_1 * SIZE_2) {
+        mImpl->copy(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
+    }
+
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
+    constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
+        resize({SIZE_0, SIZE_1, SIZE_2});
+        if (!mImpl) {
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
+        }
+        mImpl->copy(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
+        return *this;
+    }
+
+    /**
+     * @brief Construct a new Tensor object from the 4-dimensions Array helper.
+     * @tparam T datatype
+     * @tparam SIZE_0 first array dimension.
+     * @tparam SIZE_1 second array dimension.
+     * @tparam SIZE_2 third array dimension.
+     * @tparam SIZE_3 fourth array dimension.
+     */
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
+    constexpr Tensor(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr)
+        : Data(Type),
+          mDataType(NativeType<T>::type),
+          mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
+          mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3),
+          mSizeM1(SIZE_1 * SIZE_2 * SIZE_3) {
+        mImpl->copy(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
+    }
+
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
+    constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
+        resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
+        if (!mImpl) {
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
+        }
+        mImpl->copy(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
+        return *this;
+    }
+
+    /**
+     * @brief Copy dimensions, datatype and data of another Tensor.
+     * @param t other Tensor object.
+     * @return Tensor&
+     */
+    Tensor &operator=(const Tensor &t) {
+        resize(t.dims());
+        setDatatype(t.dataType());
+        if (t.hasImpl()) {
+            setBackend(t.mImpl->backend());
+            mImpl->copy(t.mImpl->rawPtr(), size());
+        }
+        else {
+            mImpl = nullptr;
+        }
+        return *this;
+    }
+
+    /**
+     * @brief Assess data type, dimensions, backend and data are the same.
+     * @param otherTensor 
+     */
+    bool operator==(const Tensor &otherTensor) const {
+        if ((!mImpl && !otherTensor.mImpl) || (dataType() != otherTensor.dataType()) ||
+            (dims() != otherTensor.dims()) || (mImpl->backend() != otherTensor.mImpl->backend())) {
+            return false;
+        }
+        return *mImpl == *(otherTensor.mImpl);
+    }
+
+    /**
+     * @brief Set the backend of the Tensor associated implementation
+     * @details Creates and initializes an implementation if none was associated.
+     * @param name 
+     */
+    inline void setBackend(const std::string &name) {
+        if (mImpl) {
+            if (strcmp(mImpl->backend(), name.c_str()) != 0) {
+                // Backend change: create new impl, copy from old to new and replace
+                // impl
+                std::unique_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(*this);
+                newImpl->copy(mImpl->rawPtr(), size());
+                mImpl = std::move(newImpl);
+            }
+        } else
+            mImpl = Registrar<Tensor>::create({name, mDataType})(*this);
+    }
+
+    /**
+     * @brief Get a list of available backends.
+     * @return std::set<std::string> 
+     */
+    static std::set<std::string> getAvailableBackends(){
+        std::set<std::string> backendsList;
+        for(std::tuple<std::string, DataType> tupleKey : Registrar<Tensor>::getKeys())
+            backendsList.insert(std::get<0>(tupleKey));
+        return backendsList;
+    }
+
+    /**
+     * @brief Get the data type enum.
+     * @return constexpr DataType 
+     */
+    constexpr DataType dataType() const { return mDataType; }
+
+    /**
+     * @brief Set the DataType of the Tensor and converts data
+     * if the Tensor has already been initialized.
+     * @param dt DataType.
+     */
+    void setDatatype(const DataType dt) {
+        if (mImpl && (dataType() != dt)) {
+            // get ptr before changing Tensor backend or the type difference will trigger a warning
+            const void *data = mImpl->rawPtr();
+            mDataType = dt;
+            std::unique_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(*this);
+            newImpl->copy(data, size());  // /!\ it does not cast data but reinterpret them
+            mImpl = std::move(newImpl);
+        }
+        mDataType = dt;
+    }
+
+    /**
+     * @brief Get the Impl object
+     * @return constexpr const std::unique_ptr<TensorImpl>& 
+     */
+    constexpr const std::unique_ptr<TensorImpl> &getImpl() { return mImpl; }
+
+    /**
+     * @brief Return whether an implementation has been associated.
+     * @return true 
+     * @return false 
+     */
+    bool hasImpl() const { return (mImpl) ? true : false; }
+
+    /**
+     * @brief Get number of dimensions of the Tensor.
+     * @return std::size_t 
+     */
+    inline std::size_t nbDims() const { return mDims.size(); }
+
+    /**
+     * @brief Get dimensions of the Tensor object.
+     * @tparam DIM number of dimensions.
+     * @return constexpr std::array<DimSize_t, DIM> 
+     */
+    template <DimIdx_t DIM>
+    constexpr std::array<DimSize_t, DIM> dims() const {
+        assert(DIM == mDims.size() && "wrong number of dimensions");
+        return to_array<DIM>(mDims.cbegin());
+    }
+
+    /**
+     * @brief Get dimensions of the Tensor object.
+     * @return constexpr const std::vector<DimSize_t>& 
+     */
+    constexpr const std::vector<DimSize_t> &dims() const { return mDims; }
+
+    /**
+     * @brief Get the number of elements in the Tensor object.
+     * @return constexpr std::size_t 
+     */
+    constexpr std::size_t size() const { return mSize; }
+
+    /**
+     * @brief Get the number of elements in the N-1 dimensions of the Tensor object.
+     * @return constexpr std::size_t 
+     */
+    constexpr std::size_t sizeM1() const { return mSizeM1; }
+
+    /**
+     * @brief Change the shape of the Tensor object according to the given argument.
+     * @tparam DIM new dimensions.
+     * @param dims 
+     */
+    template <std::array<DimSize_t, 1>::size_type DIM> // deducing std::array size_type and declaring DIM accordingly
+    void resize(const std::array<DimSize_t, DIM> &dims) {
+        static_assert(DIM<=MaxDim,"Too many tensor dimensions required by resize, not supported");
+        mDims.assign(dims.begin(), dims.end());
+        computeSize();
+    }
+
+    void resize(const std::vector<DimSize_t> &dims) {
+        mDims = dims;
+        computeSize();
+    }
+
+    /**
+     * @brief Return whether the Tensor has no dimensions (and therefore no elements).
+     * @return true 
+     * @return false 
+     */
+    bool empty() const { return mDims.empty(); }
+
+    template <typename expectedType, std::array<std::size_t, 1>::size_type DIM>
+    constexpr expectedType &get(std::array<std::size_t, DIM> idx) {
+        assert(DIM == mDims.size());
+        assert(mImpl);
+        std::size_t unfoldedIdx = 0;
+        for (std::size_t i = 0; i < DIM - std::size_t(1); ++i) {
+            unfoldedIdx = (unfoldedIdx + idx[i]) * mDims[i + 1];
+        }
+        unfoldedIdx += idx[DIM - 1];
+        return static_cast<expectedType *>(mImpl->rawPtr())[unfoldedIdx];
+    }
+
+    std::string toString() {
+        if (dims().empty()) { return "{}"; }
+        std::string res;
+        std::size_t dim = 0;
+        std::size_t *dimVals = new std::size_t[nbDims()];
+        for (std::size_t i = 0; i < nbDims(); ++i) {
+            dimVals[i] = 0;
+        }
+        std::size_t counter = 0;
+        res += "{\n";
+        if (nbDims()>=2){
+            while (counter < mSize) {
+                std::string spaceString = std::string((dim+1)<<1,' ');
+                if (dim < nbDims()-2) {
+                    if (dimVals[dim] == 0) {
+                        res += spaceString + "{\n";
+                        ++dim;
+                    } else if (dimVals[dim] < static_cast<std::size_t>(dims()[dim])) {
+                        res += spaceString + "},\n" + spaceString + "{\n";
+                        ++dim;
+                    } else {
+                        res += spaceString + "}\n";
+                        dimVals[dim--] = 0;
+                        dimVals[dim]++;
+                    }
+                } else {
+                    for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) {
+                        res += spaceString + "{";
+                        for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) {
+                            switch (mDataType)
+                            {
+                            case DataType::Int32:
+                                res += " " + std::to_string(static_cast<int *>(mImpl->rawPtr())[counter++]) + ",";
+                                break;
+                            case DataType::Float64:
+                                res += " " + std::to_string(static_cast<double *>(mImpl->rawPtr())[counter++]) + ",";
+                                break;
+                            default:
+                                res += " " + std::to_string(static_cast<float *>(mImpl->rawPtr())[counter++]) + ",";
+                                break;
+                            }
+                        }
+                        switch (mDataType)
+                        {
+                        case DataType::Int32:
+                            res += " " + std::to_string(static_cast<int *>(mImpl->rawPtr())[counter++]) + "}";
+                            break;
+                        case DataType::Float64:
+                            res += " " + std::to_string(static_cast<double *>(mImpl->rawPtr())[counter++]) + "}";
+                            break;
+                        default:
+                            res += " " + std::to_string(static_cast<float *>(mImpl->rawPtr())[counter++]) + "}";
+                            break;
+                        }
+                        if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) {
+                            res += ",";
+                        }
+                        res += "\n";
+                    }
+                    dimVals[dim--] = 0;
+                    dimVals[dim]++;
+                }
+            }
+            for(int i = static_cast<int>(dim); i>=0; --i) {
+                res += std::string((dim+1)<<1,' ') + "}\n";
+            }
+        }else{
+            for (DimSize_t j = 0; j < dims()[0]; ++j) {
+                switch (mDataType)
+                {
+                case DataType::Int32:
+                    res += " " + std::to_string(static_cast<int *>(mImpl->rawPtr())[j]) + ((j < dims()[0]-1) ? "," : "\n");
+                    break;
+                case DataType::Float64:
+                    res += " " + std::to_string(static_cast<double *>(mImpl->rawPtr())[j]) + ((j < dims()[0]-1) ? "," : "\n");
+                    break;
+                default:
+                    res += " " + std::to_string(static_cast<float *>(mImpl->rawPtr())[j]) + ((j < dims()[0]-1) ? "," : "\n");
+                    break;
+                }
+            }
+        }
+        
+        
+        res += "}";
+        return res;
+    }
+
+    inline void print() { printf("%s\n", toString().c_str()); }
+
+    std::shared_ptr<Tensor> grad() {
+        if (!mGrad) {
+            mGrad = std::make_shared<Tensor>(mDataType);
+            mGrad->resize(mDims);
+
+            if (mImpl) mGrad->setBackend(mImpl->backend());
+        }
+
+        return mGrad;
+    }
+
+private:
+    ///\bug not protected against overflow
+    std::size_t computeSize() {
+        if (mDims.empty()) {
+            mSizeM1 = DimSize_t(0);
+            mSize = DimSize_t(0);
+        }
+        else if (mDims.size() == 1)
+        {
+            mSizeM1 = mDims[0];
+            mSize = mDims[0];
+        }
+        else {
+            mSizeM1 = std::accumulate(++mDims.begin(),mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
+            mSize = static_cast<std::size_t>(mSizeM1 * mDims[0]);
+        }
+        
+        return mSize;
+    }
+};
+}  // namespace Aidge
+
+#endif /* __AIDGE_CORE_DATA_TENSOR_H__ */
diff --git a/aidge/include/aidge/graph/Connector.hpp b/aidge/include/aidge/graph/Connector.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c5dde5c97c61d3661c1ee9cebe7cc17080950eb9
--- /dev/null
+++ b/aidge/include/aidge/graph/Connector.hpp
@@ -0,0 +1,86 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#ifndef __AIDGE_CORE_GRAPH_CONNECTOR_H__
+#define __AIDGE_CORE_GRAPH_CONNECTOR_H__
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+    
+class Node;
+class GraphView;
+/**
+ * @brief Object meant for a simpler and more intuitive user API.
+ *
+ * example:
+ *  Connector x();
+ *  x = Conv(...)(x);
+ *  Connector y = Split(3)(x[0]); // Error! Cannot slice a Connector with one output only
+ *  Connector y = Split(3)(x);
+ *  CustomLayer cl(...);
+ *  Connector z = cl(y) // Error! y has multiple outputs, must specify which one to use
+ *  Connector z1 = cl(y[0]);
+ *  Connector z2 = cl(y[1]);
+ *  Connector z3 = cl(y[2]);
+ *  x = Sum(...)(z1, z2, z3);
+ *  GraphView g = x.generateGraph();
+ */
+class Connector {
+   private:
+    std::shared_ptr<Node> mNode;
+    ///\brief output id
+    ///\details gk_IODefaultIndex is reserved for?
+    ///\bug Is negative value pertinent?
+    IOIndex_t mOutputId = gk_IODefaultIndex;
+
+   public:
+    Connector() : mNode(nullptr) {
+        // ctor
+    }
+    Connector(std::shared_ptr<Node> node);
+
+    ~Connector() = default;
+
+   public:
+    Connector operator[](IOIndex_t index) {
+        assert((size() > 1) && "Cannot refer a slice of the output.");
+        return Connector(mNode, index);
+    }
+
+   public:
+    IOIndex_t size() const;
+
+    inline std::shared_ptr<Node> node() const { return mNode; }
+
+    inline IOIndex_t index() const { return mOutputId; }
+
+   private:
+    Connector(std::shared_ptr<Node> node, IOIndex_t index) : mNode(node) {
+        assert((index != gk_IODefaultIndex) && (index < size()) &&
+               "Non-valid output index.\n");
+        mOutputId = index;
+    }
+};
+
+/**
+ * @brief Generate a GraphView from a list of output Connectors
+ *
+ * @param ctors list of output Connector for the graph to generate.
+ * @return std::shared_ptr<GraphView>
+ */
+std::shared_ptr<GraphView> generateGraph(std::vector<Connector> ctors);
+}  // namespace Aidge
+
+#endif /* __AIDGE_CORE_GRAPH_CONNECTOR_H__ */
\ No newline at end of file
diff --git a/aidge/include/aidge/graph/GraphView.hpp b/aidge/include/aidge/graph/GraphView.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e5fa35354968963859d0b4cbbc01139cbc309250
--- /dev/null
+++ b/aidge/include/aidge/graph/GraphView.hpp
@@ -0,0 +1,381 @@
+
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_GRAPH_GRAPHVIEW_H__
+#define __AIDGE_CORE_GRAPH_GRAPHVIEW_H__
+
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class DataType;
+
+/**
+ * @brief Groupement of Nodes forming a computational graph on which properties and functions
+ * can easily and safely be applied or run.
+ */
+class GraphView : public std::enable_shared_from_this<GraphView> {
+private:
+    /// @brief Name of the graphview
+    std::string mName; 
+
+    /// @brief Set of nodes included in the GraphView
+    std::set<NodePtr> mNodes; 
+
+    /// @brief Set of nodes included in the graphview with names
+    std::map<std::string, NodePtr> mNodeRegistry;
+    
+    /// @brief Nodes without input link
+    std::set<NodePtr> mInputNodes;
+
+    /// @brief Nodes without output link
+    std::set<NodePtr> mOutputNodes;
+
+public:
+    GraphView(std::string name="")
+        : mName(name) 
+    {
+        // ctor
+    }
+
+    // GraphView(std::set<NodePtr> nodes, std::string name="")
+    //     : mName(name) 
+    // {
+    //     add(nodes);
+    // }
+
+    bool operator==(const GraphView &gv) const 
+    {
+        return mNodes == gv.mNodes;
+    }
+
+    NodePtr operator[](std::string name) 
+    {
+        assert(mNodeRegistry.find(name) != mNodeRegistry.end() && "Could not find Node in the GraphView.");
+        return mNodeRegistry.at(name);
+    }
+
+///////////////////////////////////////////////////////
+//        FUNCTIONAL DESCRIPTION
+///////////////////////////////////////////////////////
+
+    Connector operator()(const std::vector<Connector> ctors);
+
+///////////////////////////////////////////////////////
+//        INNER
+///////////////////////////////////////////////////////
+public:
+    /**
+     * @brief Name of the GraphView.
+     * @return std::string
+     */
+    std::string name() const;
+
+    /**
+     * @brief Set the GraphView name.
+     * @warning Undefined behaviour when several GraphViews have the same name.
+     * @param name New name for the GraphView.
+     */
+    void setName(const std::string &name);
+
+    /**
+     * @brief Save the GraphView as a Mermaid graph in a .md file at the
+     * specified location.
+     * @param path
+     */
+    void save(std::string path, bool verbose = false) const;
+
+    inline bool inView(NodePtr nodePtr) const {
+        return mNodes.find(nodePtr) != mNodes.end();
+    }
+
+///////////////////////////////////////////////////////
+//        TENSOR MANAGEMENT
+///////////////////////////////////////////////////////
+public:
+    /** @brief Get reference to the set of input Nodes. */
+    inline const std::set<NodePtr>& inputNodes() const noexcept { return mInputNodes; }
+    /** @brief Get reference to the set of output Nodes. */
+    inline const std::set<NodePtr>& outputNodes() const noexcept { return mOutputNodes; }
+
+    /** @brief Assess if the given Node is an input Node of the GraphView object. */
+    inline bool isInputNode(NodePtr nodePtr) const {
+        return (mInputNodes.find(nodePtr) != mInputNodes.end()) ? true : false;
+    }
+    /** @brief Assess if the given Node is an output Node of the GraphView object. */
+    inline bool isOutputNode(NodePtr nodePtr) const {
+        return (mOutputNodes.find(nodePtr) != mOutputNodes.end()) ? true : false;
+    }
+
+    /**
+     * @brief List dataInput connections of the GraphView object's inputNodes.
+     * @return std::vector<std::pair<NodePtr, IOIndex_t>>
+     */
+    std::vector<std::pair<NodePtr, IOIndex_t>> dataInputs() const;
+
+    /**
+     * @brief List dataInput connections of the Node with the given name.
+     * @param name Name of the Node.
+     * @return std::vector<std::pair<NodePtr, IOIndex_t>>
+     */
+    inline auto dataInputs(const std::string name) const { return mNodeRegistry.at(name)->dataInputs(); }
+
+    /**
+     * @brief List input connections of the GraphView object's inputNodes.
+     * @return std::vector<std::pair<NodePtr, IOIndex_t>>
+     */
+    std::vector<std::pair<NodePtr, IOIndex_t>> inputs() const;
+
+    /**
+     * @brief List input connections of the specified GraphView object's inputNode.
+     * @return std::vector<std::pair<NodePtr, IOIndex_t>>
+     */
+    std::vector<std::pair<NodePtr, IOIndex_t>> inputs(std::string name) const;
+
+    /**
+     * @brief List output connections of the GraphView object's outputNodes.
+     * @return std::vector<std::pair<NodePtr, IOIndex_t>>
+     */
+    std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> outputs() const;
+
+    /**
+     * @brief Specific i-th output connection of the GraphView object.
+     * @param nodeName Name of the Node of which to show the output.
+     * @return std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>
+     */
+    std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> outputs(
+            std::string nodeName) const;
+
+    /**
+     * @brief Compute dimensions of input/output Tensors for each Operator of the
+     * GraphView object's Nodes.
+     */
+    void forwardDims();
+
+    /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
+    void setBackend(const std::string &backend);
+    /** @brief Set the same datatype for each Operator of the GraphView object's Nodes. */
+    void setDatatype(const DataType &datatype);
+
+///////////////////////////////////////////////////////
+//        TOPOLOGY
+///////////////////////////////////////////////////////
+public:
+    /**
+     * @brief Get the parents Nodes of inputNodes.
+     * @return std::set<NodePtr>
+     */
+    std::set<NodePtr> getParents() const;
+    /**
+     * @brief Get parents Nodes of the specified Node.
+     * @param nodeName Name of the Node.
+     * @return std::vector<NodePtr> 
+     */
+    std::vector<NodePtr> getParents(const std::string nodeName) const;
+    std::vector<std::vector<NodePtr>> getOrderedParents() const;
+
+    /**
+     * @brief Get the children Nodes of outputNodes.
+     * @return std::set<NodePtr>
+     */
+    std::set<NodePtr> getChildren() const;
+    /**
+     * @brief Get children Nodes of the specified Node.
+     * @param nodeName Name of the Node.
+     * @return std::vector<std::vector<NodePtr>>
+     */
+    std::vector<std::vector<NodePtr>> getChildren(const std::string nodeName) const;
+    std::set<NodePtr> getChildren(
+            const NodePtr otherNode) const;  // TODO change it for a vector<vector> ?
+
+    /**
+     * @brief Get the Nodes pointed to by the GraphView object.
+     * @return std::set<NodePtr> 
+     */
+    inline std::set<NodePtr> getNodes() const { return mNodes; }
+
+    /**
+     * @brief Get the operator with the corresponding name if it is in the
+     * GraphView.
+     * @param nodeName Name of the node.
+     * @return NodePtr returns a new empty node if the one asked for
+     * was not found.
+     */
+    NodePtr getNode(const char *nodeName) const;
+
+    /**
+     * @brief Remove a Node from the current GraphView scope without affecting its connections.
+     * @param nodePtr Node to remove
+     * @param includeLearnableParam Whether learnable parameters should also be removed. Default true.
+     */
+    void remove(NodePtr nodePtr, bool includeLearnableParam = true);
+
+    // Surrounding nodes management
+
+    void setInputId(IOIndex_t inID, IOIndex_t newNodeOutID);
+
+    /**
+     * @brief Include a Node to the current GraphView object.
+     * @param otherNode Node to add.
+     * @param includeLearnableParam Include non-data inputs, like weights and biases 
+     * in the GraphView automatically. Default: true.
+     */
+    void add(NodePtr otherNode, bool includeLearnableParam = true);
+    /**
+     * @brief Include a set of Nodes to the current GraphView object.
+     * @param otherNodes 
+     * @param includeLearnableParam 
+     */
+    void add(std::set<NodePtr> otherNodes,
+             bool includeLearnableParam = true);
+
+    /**
+     * @brief Include every Node inside another GraphView to the current
+     * GraphView.
+     * @param otherGraph GraphView containing the Nodes to include.
+     */
+    void add(std::shared_ptr<GraphView> otherGraph);
+
+    /**
+     * @brief Include a Node in the current GraphView and link it to another
+     * already contained Node.
+     *
+     * @param toOtherNode Pointer to the Node to add.
+     * @param fromOutNode Pointer to the already included Node the new Node will
+     * be linked to (it will become a parent of the new Node). If the GraphView
+     * only has one output Node, then default to this Node.
+     * @param fromTensor Output Tensor ID of the already included Node. Default to
+     * 0.
+     * @param toTensor Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning
+     * first available data input for the Node.
+     */
+    void addChild(NodePtr toOtherNode, NodePtr fromOutNode = nullptr,
+                  const IOIndex_t fromTensor = IOIndex_t(0),
+                  IOIndex_t toTensor = gk_IODefaultIndex);
+
+    /**
+     * @brief Include a Node in the current GraphView and link it to another
+     * already contained Node.
+     *
+     * @param toOtherNode Pointer to the Node to add.
+     * @param fromOutNodeName Name of the already included Node the new Node will
+     * be linked to (it will become a parent of the new Node). As a name is
+     * optional, ensure such Node is in the GraphView or it will send back an
+     * error message.
+     * @param fromTensor Output Tensor ID of the already included Node. Default to
+     * 0.
+     * @param toTensor Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning
+     * first available data input for the Node.
+     */
+    inline void addChild(NodePtr toOtherNode, std::string fromOutNodeName,
+                         const IOIndex_t fromTensor = IOIndex_t(0),
+                         IOIndex_t toTensor = gk_IODefaultIndex) {
+        assert(mNodeRegistry.find(fromOutNodeName) != mNodeRegistry.end() &&
+               "No Node with this name found in the GraphView.");
+        addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor);
+    }
+
+    /**
+     * @brief Include a GraphView content in the current GraphView and link
+     * the two sets by linking one Node from each GraphView.
+     * @param toOtherView Pointer to the GraphView whose content should be added.
+     * @param fromOutNode Pair of pointer to Node and Tensor ID for specifying the
+     * connection. If the GraphView including the other one has only one output
+     * Node, then it defaults to the first output Tensor of this Node.
+     * @param toNode Pair of pointer to Node and Tensor ID for specifying the
+     * connection. If the GraphView whose content is included has only one input
+     * Node, then it defaults to the first available data input Tensor of this
+     * Node.
+     */
+    void addChild(std::shared_ptr<GraphView> toOtherView,
+                  std::pair<NodePtr, IOIndex_t> fromOutNode =
+                          std::pair<NodePtr, IOIndex_t>(nullptr, IOIndex_t(0)),
+                  std::pair<NodePtr, IOIndex_t> toNode =
+                          std::pair<NodePtr, IOIndex_t>(nullptr, gk_IODefaultIndex));
+
+    /**
+     * @brief Swap two Node instances if possible.
+     * @param node
+     * @param otherNode
+     * @return true
+     * @return false
+     */
+    bool swap(Node &node, Node &otherNode);
+
+    void link(std::string name1_inID, std::string name2_outID);
+
+    void insert(Node &newNode, Node &inNode, std::initializer_list<Node> outNodes,
+                IOIndex_t tensorIdx);
+
+    /**
+     * @brief Replace the current GraphView with the set of given Nodes if possible
+     * @param newNodes Set of Nodes.
+     * @return true 
+     * @return false 
+     */
+    bool replaceWith(std::set<NodePtr> newNodes);
+    void updateInputNodes();
+    /**
+     * @brief Process from zero the set of output Nodes.
+     */
+    void updateOutputNodes();
+
+private:
+///////////////////////////////////////////////////////
+//        TENSOR MANAGEMENT
+///////////////////////////////////////////////////////
+
+    /**
+     * @brief Get the sum of the number of dataInput Nodes for all inputNodes of the GraphView object.
+     * @return IOIndex_t 
+     */
+    IOIndex_t getNbDataInputs() const;
+
+    /**
+     * @brief Get the sum of the number of free dataInput connection for all inputNodes of the GraphView object.
+     * @return IOIndex_t 
+     */
+    IOIndex_t getNbFreeDataInputs() const;
+
+    /**
+     * @brief Update the set of inputNodes with a new Node, checking if it can be
+     * added and removing any Node not part of mInputNode anymore.
+     * @param node
+     */
+    void updateInputNodes(NodePtr node);
+
+    /**
+     * @brief Update the set of outputNodes with a new Node, checking if it can be
+     * added and removing any Node not part of mOutputNode anymore.
+     * @param node
+     */
+    void updateOutputNodes(NodePtr node);
+
+    ///////////////////////////////////////////////////////
+    //        TOPOLOGY
+    ///////////////////////////////////////////////////////
+
+    void _forwardDims(std::set<NodePtr> listNodes);
+
+    void removeInputNode(const std::string nodeName);
+    void removeOutputNode(const std::string nodeName);
+};
+}  // namespace Aidge
+
+#endif /* __AIDGE_CORE_GRAPH_GRAPHVIEW_H__ */
\ No newline at end of file
diff --git a/aidge/include/aidge/graph/Node.hpp b/aidge/include/aidge/graph/Node.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0780ce9a24da0ceb0c42b32944021f5df2fa9726
--- /dev/null
+++ b/aidge/include/aidge/graph/Node.hpp
@@ -0,0 +1,405 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_GRAPH_NODE_H__
+#define __AIDGE_CORE_GRAPH_NODE_H__
+
+#include <cassert>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+#include <utility>
+
+#include "aidge/graph/Connector.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+using NodePtr = std::shared_ptr<Node>;
+
+class GraphView;
+
+/**
+ * @brief Object carrying the topological information of the computational graph.
+ */
+class Node : public std::enable_shared_from_this<Node> {
+private:
+  struct weakCompare {
+      bool operator()(const std::weak_ptr<Aidge::GraphView>& a, const std::weak_ptr<Aidge::GraphView>& b) const {
+          // Compare the content of the weak_ptrs
+          auto sharedA = a.lock();
+          auto sharedB = b.lock();
+          if (!sharedB) return false; // nothing after expired pointer 
+          if (!sharedA) return true;
+          return sharedA < sharedB; // shared_ptr has a valid comparison operator
+      }
+  };
+  std::string mName; /** Name of the Node. Should be unique. */
+
+  std::set<std::weak_ptr<GraphView>, weakCompare> mViews; /** Set of pointers to GraphView instances including this Node instance. */
+  const std::shared_ptr<Operator> mOperator; // Pointer to the associated Operator
+
+  std::vector<NodePtr> mParents; /** List of parent node for each input (Parent --> Node --> Child) */
+  std::vector<std::vector<std::weak_ptr<Node>>> mChildren; /** List of children nodes for each output (Parent --> Node --> Child) */
+  std::vector<std::vector<IOIndex_t>> mIdInChildren; /** List of input index for each Node linked to each output of the Node. */
+  std::vector<IOIndex_t> mIdOutParents; /** index of the output linked to each input of the Node. Default: gk_IODefaultIndex. */
+
+public:
+  Node() = delete;
+
+  /**
+   * @brief Construct a new Node object associated with the input Operator.
+   * @param op Operator giving the Node its number of connections.
+   * @param name (optional) name for the Node.
+   */
+  Node(std::shared_ptr<Operator> op, const char *name = nullptr);
+
+  virtual ~Node() = default;
+
+  friend bool operator==(const Node &lhs, const Node &rhs) {
+    return lhs.shared_from_this() == rhs.shared_from_this();
+  }
+
+public:
+  ///////////////////////////////////////////////////////
+  //        FUNCTIONAL DESCRIPTION
+  ///////////////////////////////////////////////////////
+
+  /**
+   * @brief Functional operator for user-friendly connection interface using an ordered set of Connectors.
+   * @param ctors Ordered Connectors linking their associated Node to the input of the current Node with the same index.
+   * @return Connector 
+   */
+  Connector operator()(const std::vector<Connector> &ctors);
+
+public:
+  ///////////////////////////////////////////////////////
+  //        INNER
+  ///////////////////////////////////////////////////////
+
+  /**
+   * @brief Name of the Node.
+   * @return std::string
+   */
+  inline std::string name() const noexcept { return mName; }
+
+  /**
+   * @brief Set the Node name.
+   * @warning Undefined behaviour when several Nodes have the same name.
+   * @param name New name for the node.
+   */
+  void setName(const std::string &name);
+
+  /**
+   * @brief Type of the node.
+   * @return std::string
+   */
+  inline std::string type() const { return mOperator->type(); }
+
+  ///////////////////////////////////////////////////////
+  //        OPERATORS
+  ///////////////////////////////////////////////////////
+
+  /**
+   * @brief Run forward() function of the associated Operator.
+   */
+  void forward();
+
+  /**
+   * @brief Run backward() function of the associated Operator.
+   */
+  void backward();
+
+  /**
+   * @brief Get the Operator object of the Node.
+   * @return std::shared_ptr<Operator>
+   */
+  inline std::shared_ptr<Operator> getOperator() const { return mOperator; }
+
+  ///////////////////////////////////////////////////////
+  //        TENSOR MANAGEMENT
+  ///////////////////////////////////////////////////////
+
+  /**
+   * @brief Whether or not every input of the Node is linked to a Parent.
+   * If true then the Node is ready to be executed.
+   * @return true
+   * @return false
+   */
+  bool valid() const;
+
+  /**
+   * @brief List of pair <Parent, ID of the data input>. When an input is not
+   * linked to any Parent, the pair is <nullptr, gk_IODefaultIndex>.
+   * @return std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>
+   */
+  std::vector<std::pair<NodePtr, IOIndex_t>> dataInputs() const;
+
+  /**
+   * @brief List of pair <Parent, ID of the parent output>. When an input is not linked
+   * to any Parent, the pair is <nullptr, gk_IODefaultIndex>.
+   * @return std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>
+   */
+  std::vector<std::pair<NodePtr, IOIndex_t>> inputs() const;
+
+  /**
+   * @brief Parent and its output Tensor ID linked to the inID-th input Tensor.
+   * If the input is not linked to any Parent, the pair is <nullptr, gk_IODefaultIndex>.
+   * @param inID
+   * @return std::pair<std::shared_ptr<Node>, IOIndex_t>
+   */
+  inline std::pair<NodePtr, IOIndex_t> input(const IOIndex_t inID) const {
+    assert((inID != gk_IODefaultIndex) && (inID < nbInputs()) && "Input index out of bound.");
+    return std::pair<NodePtr, IOIndex_t>(mParents[inID], mIdOutParents[inID]);
+  }
+
+  /**
+   * @brief Set fix value for the specified input by creating a Producer wrapping the given Tensor.
+   * 
+   * @param idx Input index.
+   * @param tensor Constant Tensor to add as parent for specified index.
+   */
+  void setInput(const IOIndex_t idx, const std::shared_ptr<Tensor> tensor);
+
+  /**
+   * @brief Get the lowest index in the InputData Parent list equal to the
+   * nullptr.
+   * @return std::size_t
+   */
+  inline IOIndex_t getFirstFreeDataInput() const {
+    IOIndex_t i = 0;
+    for (; (i < nbDataInputs()) && (input(i).second != gk_IODefaultIndex); ++i) {}
+    // assert((i<nbDataInputs()) && "No free data input for Node");
+    return (i < nbDataInputs()) ? i : gk_IODefaultIndex;
+  }
+
+
+  IOIndex_t getNbFreeDataInputs() const;
+
+  /**
+   * @brief List input ids of children linked to outputs of the node
+   * @return std::vector<std::vector<std::pair<std::shared_ptr<Node>,
+   * IOIndex_t>>>
+   */
+  std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> outputs() const;
+
+  /**
+   * @brief Children and their input Tensor ID linked to the outId-th output
+   * Tensor.
+   * @param outId
+   * @return std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>
+   */
+  std::vector<std::pair<NodePtr, IOIndex_t>>
+  output(IOIndex_t outId) const;
+
+  /**
+   * @brief Number of inputs, including both data and learnable parameters.
+   * @details [data, data, weight, bias] => 4
+   * @return IOIndex_t
+   */
+  inline IOIndex_t nbInputs() const noexcept { return getOperator()->nbInputs(); }
+
+  /**
+   * @brief Number of input specifically for data
+   * @details [data, data, weight, bias] => 2
+   * @return IOIndex_t
+   */
+  inline IOIndex_t nbDataInputs() const noexcept {
+    return getOperator()->nbDataInputs();
+  }
+
+  /**
+   * @brief Number of inputs linked to a Parent's output.
+   * @return IOIndex_t
+   */
+  IOIndex_t nbValidInputs() const;
+
+  /**
+   * @brief Getter for the number of Output Tensors of the Node.
+   * @return IOIndex_t
+   */
+  inline IOIndex_t nbOutputs() const noexcept { return getOperator()->nbOutputs(); }
+
+  IOIndex_t nbValidOutputs() const;
+
+  ///////////////////////////////////////////////////////
+  //        TOPOLOGY
+  ///////////////////////////////////////////////////////
+
+  /**
+   * @brief Vector of pointers to each GraphView containing the object
+   * @return std::vector<GraphView>
+   */
+  inline std::set<std::shared_ptr<GraphView>> views() const noexcept {
+    std::set<std::shared_ptr<GraphView>> res;
+    for (const auto &v : mViews) {
+      res.insert(v.lock());
+    }
+    return res;
+  }
+
+  /**
+   * @brief Add a GraphView pointer to the list of GraphView containing
+   * the current Node. This feature allows transparent GraphViews.
+   * @param graphPtr Pointer to GraphView to add to the list.
+   */
+  inline void addView(const std::shared_ptr<GraphView> &graphPtr) {
+    mViews.insert(std::weak_ptr<GraphView>(graphPtr));
+  }
+
+  inline void removeView(const std::shared_ptr<GraphView> &graphPtr) {
+    std::set<std::weak_ptr<GraphView>, weakCompare>::const_iterator viewIt = mViews.cbegin();
+    for (; (viewIt != mViews.cend()) && ((*viewIt).lock() != graphPtr) ; ++viewIt) {}
+    mViews.erase(*viewIt);
+  }
+
+  /**
+   * @brief Link another Node to an output of the current Node.
+   * @param otherNode Pointer to the other Node.
+   * @param outId ID of the current Node output to connect to the other Node.
+   * Default to 0.
+   * @param otherInId ID of the other Node input to connect to the current Node.
+   * Default to the first available data input.
+   */
+  void addChild(NodePtr otherNode,
+                const IOIndex_t outId = IOIndex_t(0),
+                IOIndex_t otherInId = gk_IODefaultIndex);
+
+  /**
+   * @brief Link a Node from a specific GraphView to the current Node.
+   * @param otherView Pointer to the GraphView whose content should be
+   * linked to the current Node.
+   * @param outId ID of the output Tensor to connect to the other Node.
+   * Default to 0.
+   * @param otherInId Pair of pointer to Node and Tensor ID for specifying the
+   * connection. If the GraphView whose content is linked has only one input
+   * Node, then it defaults to the first available data input Tensor of this
+   * Node.
+   */
+  void addChild(std::shared_ptr<GraphView> otherView,
+                const IOIndex_t outId = IOIndex_t(0),
+                std::pair<NodePtr, IOIndex_t> otherInId =
+                std::pair<NodePtr, IOIndex_t>(nullptr, gk_IODefaultIndex));
+
+  /**
+   * @brief Get the list of parent Nodes. As an input is linked to a unique Node,
+   * if none is linked then the parent is a nullptr.
+   * @return std::vector<std::shared_ptr<Node>>
+   */
+  std::vector<NodePtr> getParents() const;
+
+  /**
+   * @brief Get the pointer to parent of the specified input index. This pointer is nullptr if no parent is linked.
+   * @param inId Input index.
+   * @return std::shared_ptr<Node>& 
+   */
+  inline NodePtr &getParents(const IOIndex_t inId) {
+    assert(inId != gk_IODefaultIndex);
+    return mParents.at(inId);
+  }
+
+  /**
+   * @brief Unlink the parent Node at the specified input index and return its pointer.
+   * Return a nullptr is no parent was linked.
+   * @param inId Input index.
+   * @return std::shared_ptr<Node> 
+   */
+  NodePtr popParent(const IOIndex_t inId);
+
+  bool removeParent(const IOIndex_t inId);
+
+  /**
+   * @brief Get the set of pointers to children Nodes linked to the current Node.object.
+   * @details The returned set does not include any nullptr as an output may be linked to
+   * an undefined number of Nodes. It does not change the computation of its associated Operator.
+   * @return std::set<std::shared_ptr<Node>>>
+   */
+  std::set<NodePtr> getChildren() const;
+
+  std::vector<std::vector<NodePtr>> getOrderedChildren() const;
+
+  /**
+   * @brief Get the list of children Nodes linked to the output at specified index.
+   * @param outId Output index.
+   * @return std::vector<std::shared_ptr<Node>> 
+   */
+  std::vector<NodePtr> getChildren(const IOIndex_t outId) const;
+
+  /**
+   * @brief Remove registered child from children list of specified output if possible.
+   * If so, also remove current Node from child Node from parent.
+   * @param std::shared_ptr<Node> Node to remove.
+   * @param outId Output index. Default 0.
+   * @return true Child found and removed for given output index.
+   * @return false Child not found at given index. Nothing removed.
+   */
+  bool removeChild(const NodePtr nodePtr, const IOIndex_t outId = 0);
+
+  /**
+   * @brief Remove every link of surrounding nodes to it and conversely
+   */
+  void resetConnections(bool includeLearnableParam = false);
+
+private:
+  ///////////////////////////////////////////////////////
+  //        OPERATORS
+  ///////////////////////////////////////////////////////
+
+  // cannot change operator for now
+  // void setOperator(const std::shared_ptr<Operator> op_ptr);
+
+  ///////////////////////////////////////////////////////
+  //        TENSOR MANAGEMENT
+  ///////////////////////////////////////////////////////
+
+  /**
+   * @brief Set the idInChildren parameter.
+   * @param inID 
+   * @param newNodeOutID 
+   */
+  void setInputId(const IOIndex_t inID, const IOIndex_t newNodeOutID);
+
+  ///////////////////////////////////////////////////////
+  //        TOPOLOGY
+  ///////////////////////////////////////////////////////
+
+  /**
+   * @brief Add the given Node as a child for the current Node.
+   * @param otherNode 
+   * @param outId 
+   * @param otherInId 
+   */
+  void addChildOp(NodePtr otherNode, const IOIndex_t outId,
+                  const IOIndex_t otherInId);
+
+  /**
+   * @brief Add the given GraphView's input Node as a child for the current Node
+   * @param otherGraph 
+   * @param outId 
+   * @param otherInId pointer the GraphView's input Node and its input index. Defaults to the
+   * only input Node if the GraphView has got one.
+   */
+  void addChildView(std::shared_ptr<GraphView> otherGraph,
+                    const IOIndex_t outId,
+                    std::pair<NodePtr, IOIndex_t> otherInId);
+
+  /**
+   * @brief Add a Node to the list of parents.
+   * @param otherNode Node to add to parents list.
+   * @param inId index for adding the parent.
+   */
+  void addParent(const NodePtr otherNode, const IOIndex_t inId);
+};
+} // namespace Aidge
+
+#endif /* __AIDGE_CORE_GRAPH_NODE_H__ */
diff --git a/aidge/include/aidge/graph/OpArgs.hpp b/aidge/include/aidge/graph/OpArgs.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..dd0cfe1cca8a3f487c18875cff3f90cc56291107
--- /dev/null
+++ b/aidge/include/aidge/graph/OpArgs.hpp
@@ -0,0 +1,86 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_GRAPH_OPARGS_H__
+#define __AIDGE_CORE_GRAPH_OPARGS_H__
+
+#include <memory>
+#include <cassert>
+
+namespace Aidge {
+class Node;
+class GraphView;
+
+/**
+ * @brief Intermediate representation for Structural description.
+ */
+class OpArgs {
+private:
+    std::shared_ptr<Node> mNode = nullptr;
+    std::shared_ptr<GraphView> mView = nullptr;
+
+public:
+    OpArgs(const std::shared_ptr<GraphView>& view_)
+     : mView(view_) {assert(mView && "The GraphView provided should not be a nullptr.");}
+    
+    OpArgs(const std::shared_ptr<Node>& node_)
+     : mNode(node_) {assert(mNode && "The Node provided should not be a nullptr.");}
+
+    inline std::shared_ptr<Node> node() const noexcept {
+        return mNode;
+    }
+
+    inline std::shared_ptr<GraphView> view() const noexcept {
+        return mView;
+    }
+};
+
+
+/////////////////////////////
+// Sequential
+
+/**
+ * @brief Create a GraphView by linking every input with the next
+ * one in a sequential way. Nodes linked with the Sequential graph
+ * generation instructions must have a single output.
+ * Sequential(A, B, C) returns A-->B-->C.
+ * @param inputs List of Node and GraphView to link sequentially.
+ * @return std::shared_ptr<GraphView> Pointer to the generated view.
+ */
+std::shared_ptr<GraphView> Sequential(std::initializer_list<OpArgs> inputs);
+
+/////////////////////////////
+// Parallel
+
+/**
+ * @brief Creates a GraphView with provided Nodes without linking them.
+ * @param inputs List of Node and GraphView to assemble without linking them.
+ * @return std::shared_ptr<GraphView> pointer to the generated view.
+ */
+std::shared_ptr<GraphView> Parallel(std::initializer_list<OpArgs> inputs);
+
+/////////////////////////////
+// Residual
+
+/**
+ * @brief Create a GraphView by linking every input with the next
+ * one in a sequential way. Finally the first element output is used
+ * as another input for the last element. Nodes linked with the Residual graph
+ * generation instructions must have a single output.
+ * Residual(A, B, C) returns A-->B-->C , A-->C.
+ * @param inputs List of Node and GraphView to link sequentially.
+ * @return std::shared_ptr<GraphView> pointer to the generated view.
+ */
+std::shared_ptr<GraphView> Residual(std::initializer_list<OpArgs> inputs);
+
+}
+
+#endif /* __AIDGE_CORE_GRAPH_OPARGS_H__ */
\ No newline at end of file
diff --git a/aidge/include/aidge/graphmatching/GRegex.hpp b/aidge/include/aidge/graphmatching/GRegex.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..1292b607cee35f50dc0acc5f5113946be103065e
--- /dev/null
+++ b/aidge/include/aidge/graphmatching/GRegex.hpp
@@ -0,0 +1,63 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+
+#ifndef __AIDGE_GREGEX_H__
+#define __AIDGE_GREGEX_H__
+
+#include <stdexcept>    // for exception, runtime_error, out_of_range
+#include <regex>
+#include <memory>       // for shared_ptr
+#include <algorithm>    // for next_permutation
+
+#include "aidge/graphmatching/Utile.hpp"
+#include "aidge/graphmatching/StmFactory.hpp"
+#include "aidge/graphmatching/SeqStm.hpp"
+#include "aidge/graphmatching/NodeRegex.hpp"
+#include "aidge/graphmatching/Match.hpp"
+
+
+namespace Aidge{
+
+class GRegex {
+// __init__(self,nodes_regex:dict,seq_regexps:list)
+
+    StmFactory mStmFab;
+    std::vector<SeqStm*> mStmInit;
+
+public:
+    GRegex(const std::map<std::string,NodeRegex*>& nodesRegex,std::vector<std::string>& seqRegexps );
+
+    std::set<NodeTmp> matchFromStartNodes(const std::vector<NodeTmp> startNodes,const std::shared_ptr<GraphView> graphToMatch);
+
+    bool walk_validation_all_stm_are_valid(const std::vector<std::vector<SeqStm*>> all_stm);
+
+    bool walk_validation_all_node_read_validate_by_one_stm(const std::vector<std::vector<SeqStm*>> all_stm);
+
+    bool walk_validation_common_nodes_same_tag_for_all_stm(const std::vector<std::vector<SeqStm*>> all_stm);
+    
+    std::set<NodeTmp> get_all_validate_nodes(const std::vector<std::vector<SeqStm*>> all_stm);
+
+    std::vector<SeqStm*> getStmInit() const {
+        return mStmInit;
+    }
+
+    StmFactory getStmFab() const {
+        return mStmFab;
+    }
+    
+    //std::set<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>> match(const std::shared_ptr<GraphView> graphToMatch);
+    Match match(const std::shared_ptr<GraphView> graphToMatch);
+
+};
+
+}
+#endif //__AIDGE_GREGEX_H__
\ No newline at end of file
diff --git a/aidge/include/aidge/graphmatching/Match.hpp b/aidge/include/aidge/graphmatching/Match.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..27acc2e8a0880f8c62d0ba995fcde5479bdcb501
--- /dev/null
+++ b/aidge/include/aidge/graphmatching/Match.hpp
@@ -0,0 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_MATCH_H__
+#define __AIDGE_MATCH_H__
+
+#include <vector>
+#include <set>
+#include <iostream>
+#include <cassert>
+#include "aidge/graphmatching/Utile.hpp"
+
+
+namespace Aidge{
+
+class Match {
+
+public:
+    Match();
+
+    size_t getNbMatch();
+
+    void insert(std::vector<NodeTmp> startnodes, std::set<NodeTmp> matchnodes);
+
+    std::vector<std::vector<NodeTmp>> getStartNodes();
+
+    std::vector<std::set<NodeTmp>> getMatchNodes();
+
+protected:
+    std::vector<std::vector<NodeTmp>> mStartNodes;
+    std::vector<std::set<NodeTmp>> mMatchNodes;
+
+};
+
+}
+#endif //__AIDGE_MATCH_H__
\ No newline at end of file
diff --git a/aidge/include/aidge/graphmatching/NodeRegex.hpp b/aidge/include/aidge/graphmatching/NodeRegex.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..387bfea46f0147613a116beac1f9c6102ed661e5
--- /dev/null
+++ b/aidge/include/aidge/graphmatching/NodeRegex.hpp
@@ -0,0 +1,41 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_NODEREGEX_H__
+#define __AIDGE_NODEREGEX_H__
+#include <cstdlib>
+#include <iostream>
+#include <cstring>
+#include "aidge/graph/Node.hpp"
+
+
+namespace Aidge {
+
+class NodeRegex
+{
+    public:
+    std::string mCondition;
+
+    NodeRegex(const std::string c){
+        mCondition = c;
+    };
+    
+    // Version 1 - Only test the type of the node (no need for a lexer)
+    // Input : Node_op
+    // Output : bool
+    // return mCondition == Node_op.type
+    bool _is(std::shared_ptr<Node> &Node_op);
+    bool isA(std::string NodeType);
+};
+
+}
+
+#endif /* ___AIDGE_NODEREGEX_H___ */
\ No newline at end of file
diff --git a/aidge/include/aidge/graphmatching/SeqStm.hpp b/aidge/include/aidge/graphmatching/SeqStm.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6ccd6cfcd322c4d38af2ad04cd2b3a96d839e6cd
--- /dev/null
+++ b/aidge/include/aidge/graphmatching/SeqStm.hpp
@@ -0,0 +1,127 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_SEQSTM_H__
+#define __AIDGE_SEQSTM_H__
+
+#include <iostream>
+#include <map>
+#include <regex>
+#include <set>
+#include <stdexcept> // for exception, runtime_error, out_of_range
+#include <string>
+#include <utility>
+#include <vector>
+
+
+#include "aidge/graphmatching/NodeRegex.hpp"
+#include "aidge/graphmatching/Utile.hpp"
+
+
+namespace Aidge {
+
+class SeqStm {
+
+private:
+  const int mStmIdx;
+  const std::vector<std::vector<int>> mTransitionMatrix;
+  // string key of a type, like 'A', that we use in the A->B ... expression
+  const std::map<std::string, NodeRegex *> mNodesRegex;
+  // mTypeToIdxTransition.first = std::pair node_type , common_tag
+  // mTypeToIdxTransition.second = idx in the transition matrix
+  const std::map<NodeTypeKey, int> mTypeToIdxTransition;
+
+  int mActSt;
+  std::set<NodeTmp> mAllNodeValidated;
+  std::set<NodeTmp> mAllNodeTested;
+  std::set<std::pair<NodeTmp, std::string>> mAllCommonNode;
+  bool mStmIsValid;
+
+  std::pair<NodeRegex *, std::string> getNodeRegexAndCommonAt(int idxType);
+
+  /**
+   * @brief test the stm on a type
+   * @return the common tag
+   */
+  std::string transitionOnNodeType(NodeType nodeType);
+
+public:
+  SeqStm(const int mStmIdx,
+         const std::vector<std::vector<int>> &mTransitionMatrix,
+         const std::map<std::string, NodeRegex *> &mNodesRegex,
+         const std::map<NodeTypeKey, int> &mTypeToIdxTransition, int mActSt,
+         std::set<NodeTmp> mAllNodeValidated, std::set<NodeTmp> mAllNodeTested,
+         std::set<std::pair<NodeTmp, std::string>> mAllCommonNode,
+         bool mStmIsValid);
+
+  //////////////////////////////////////
+  // STM test
+  /////////////////////////////////////
+
+  /**
+   * @brief check whether a state is a valid one
+   * @return bool
+   */
+  bool isAValidSt(int st) {
+    std::size_t size = mTransitionMatrix.size();
+    return st == static_cast<int>(size - 1) ? true : false;
+  }
+
+  /**
+   * @brief true if the stm is blocked in its current state
+   * @return bool
+   */
+  bool isStmBlocked() { return mActSt == -1 ? true : false; }
+
+  /**
+   * @brief true if the stm is in a valid state
+   * @return bool
+   */
+  bool isValid() { return mStmIsValid; }
+
+  /////////////////////////////////////
+  // utils
+  /////////////////////////////////////
+  /**
+   * @brief extract a node's type
+   * @return NodeType
+   */
+  NodeType getTheNodeType(NodeTmp node);
+
+  void drawStm();
+  /////////////////////////////////////
+  // getters
+  /////////////////////////////////////
+
+  std::set<std::pair<NodeTmp, std::string>> getAllCommonNode() {
+    return mAllCommonNode;
+  }
+  std::set<NodeTmp> getAllNodeTested() { return mAllNodeTested; }
+
+  std::set<NodeTmp> getAllNodeValidated() { return mAllNodeValidated; }
+
+  SeqStm *duplicateStm();
+
+  int getStmIdx() { return mStmIdx; }
+
+  int getState() { return mActSt; }
+  //////////////////////////////////////////
+  // USE
+  //////////////////////////////////////////
+  /**
+   * @brief test the stm on a node
+   * @return  pair new stm state, the common tag
+   */
+  std::pair<int, std::string> testNode(const NodeTmp node);
+};
+} // namespace Aidge
+
+#endif /* __AIDGE_SEQSTM_H__ */
\ No newline at end of file
diff --git a/aidge/include/aidge/graphmatching/StmFactory.hpp b/aidge/include/aidge/graphmatching/StmFactory.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..929fdaf3595038f21367768254040c45b291641b
--- /dev/null
+++ b/aidge/include/aidge/graphmatching/StmFactory.hpp
@@ -0,0 +1,55 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_STMFACTORY_H__
+#define __AIDGE_STMFACTORY_H__
+
+#include <map>
+#include <utility>
+#include <set>
+#include <string>
+#include <vector>
+#include <iostream>
+#include <stdexcept>   // for exception, runtime_error, out_of_range
+#include <regex>
+
+#include "aidge/graphmatching/NodeRegex.hpp"
+#include "aidge/graphmatching/SeqStm.hpp"
+#include "aidge/graphmatching/Utile.hpp"
+
+namespace Aidge{
+
+
+
+class StmFactory {
+
+    const std::map<std::string,NodeRegex*>& mNodesRegex;
+    std::size_t mCmptStm = 0;
+public:
+    StmFactory(const std::map<std::string,NodeRegex*>& nodesRegex);
+    //StmFactory(){};
+
+    SeqStm* makeNewStm(const std::string& sequRegex);
+    SeqStm* duplicateStm(SeqStm* stm);
+
+    std::size_t getNumberOfStm(){
+        return mCmptStm;
+    }
+private:
+
+    ParsingReturn initParsingSequRegex(const std::string& sequRegex);
+
+    std::vector<std::vector<int>> initTransitionMatrix(ParsingReturn& parsing);
+
+};
+}
+
+#endif //__AIDGE_STMFACTORY_H__
\ No newline at end of file
diff --git a/aidge/include/aidge/graphmatching/Utile.hpp b/aidge/include/aidge/graphmatching/Utile.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..acda78cd181519c86ab0b14d5b01bf91223cec9d
--- /dev/null
+++ b/aidge/include/aidge/graphmatching/Utile.hpp
@@ -0,0 +1,50 @@
+
+/**
+ * @file
+ * @brief Type aliases and helper structures for the graph-matching module.
+ * @version file 1.0.0
+ * @author vl241552
+ * @copyright
+ *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory.
+ * All rights reserved.
+ */
+
+#ifndef _utile_H_
+#define _utile_H_
+
+#include <map>
+
+#include "aidge/graph/Node.hpp"
+#include <map>
+
+namespace Aidge {
+
+using NodeTmp = std::shared_ptr<Node>;
+using NodeType = std::string;
+using CommonTag = std::string;
+using NodeTypeKey = std::pair<NodeType, CommonTag>;
+
+// type def
+// struct NodeTypeKey {
+//     NodeType nodeType;
+//     std::string commonTag;
+
+//     // for map find
+//     bool operator<(const NodeTypeKey& other) const {
+//         if (nodeType != other.nodeType or commonTag != other.commonTag) {
+//             return false;
+//         } else {
+//             return true;
+//         }
+//     }
+
+// };
+
+struct ParsingReturn {
+  std::map<NodeTypeKey, int> typeToIdxTransition;
+  std::vector<std::pair<NodeTypeKey, std::string>> transition;
+};
+
+} // namespace Aidge
+
+#endif //_utile_H_
\ No newline at end of file
diff --git a/aidge/include/aidge/operator/Add.hpp b/aidge/include/aidge/operator/Add.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..36e592682e61fbc178ed4623f88e9fa5f446f25d
--- /dev/null
+++ b/aidge/include/aidge/operator/Add.hpp
@@ -0,0 +1,147 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_OPERATOR_ADD_H__
+#define __AIDGE_CORE_OPERATOR_ADD_H__
+
+#include <numeric>
+#include <vector>
+#include <cmath>
+#include <memory>
+#include <array>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+template <std::size_t NUM>
+class Add_Op : public Operator,
+    public Registrable<Add_Op<NUM>, std::string, std::unique_ptr<OperatorImpl>(const Add_Op<NUM>&)> {
+public:
+    // FIXME: change accessibility
+    std::array<std::shared_ptr<Tensor>, NUM> mInputs;
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>(shared_from_this());
+
+public:
+    static constexpr const char* Type = "Add";
+
+    constexpr Add_Op()
+            : Operator(Type),
+            mOutput(std::make_shared<Tensor>())
+    {
+        assert(NUM > 0 && "Add should have at least one input");
+        for (std::size_t i = 0; i<NUM; ++i) {
+            mInputs[i] = std::make_shared<Tensor>();
+        }
+        setDatatype(DataType::Float32);
+    }
+
+    // Data operator[](const char* inputName) override final {
+    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
+    //         (strcmp(inputName, "weight") ? mInputs[1] :
+    //         (strcmp(inputName, "bias") ? mInputs[2] :
+    //         nullptr));
+    //     assert((in!=nullptr) && "No such parameter");
+    //     return *in;
+    // }
+
+    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    constexpr void computeOutputDims() override final {
+        if (!mInputs[0]->empty()) {
+            const auto expectedDims =  mInputs[0]->dims();
+            std::size_t nonEmptyInputTensor = 1;
+            for (; nonEmptyInputTensor<NUM && (!mInputs[nonEmptyInputTensor]->empty()); ++nonEmptyInputTensor) {
+                assert(expectedDims == mInputs[nonEmptyInputTensor]->dims());
+            }
+            if (nonEmptyInputTensor == NUM) {
+                mOutput->resize(expectedDims);
+            }
+        }
+    }
+
+    bool outputDimsForwarded() const override final {
+        std::size_t forwarded = 0;
+        for (; forwarded < NUM && (!mInputs[forwarded]->empty()); ++forwarded) {}
+        return ((forwarded==NUM) && !(mOutput->empty()));
+    }
+
+    // void checkDims() const override final {
+    //     assert(outputDimsForwarded());
+    //     for (const auto& in : mInputs) {
+    //         assert(in->dims() == mOutput->dims());
+    //     }
+    // }
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
+        return *(mInputs[inputIdx].get());
+    }
+    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
+        return mInputs[inputIdx];
+    }
+    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "Add Operators has only 1 outputs");
+        return mOutput;
+    }
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string& name) {
+        mImpl = Registrar<Add_Op<NUM>>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        for (std::size_t i = 0; i < NUM; ++i) {
+            mInputs[i]->setBackend(name);
+        }
+    }
+
+    void setDatatype(const DataType& datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        for (std::size_t i = 0; i < NUM; ++i) {
+            mInputs[i]->setDatatype(datatype);
+        }
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return NUM; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return NUM; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+template <std::size_t NUM>
+inline std::shared_ptr<Node> Add(const char* name = nullptr) {
+    return std::make_shared<Node>(std::make_shared<Add_Op<NUM>>(), name);
+}
+}
+
+#endif /* __AIDGE_CORE_OPERATOR_ADD_H__ */
diff --git a/aidge/include/aidge/operator/AvgPooling.hpp b/aidge/include/aidge/operator/AvgPooling.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a86942d14e531e5974c8924d8dafb8a4d0bebf85
--- /dev/null
+++ b/aidge/include/aidge/operator/AvgPooling.hpp
@@ -0,0 +1,169 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_OPERATOR_AVGPOOLING_H__
+#define __AIDGE_CORE_OPERATOR_AVGPOOLING_H__
+
+#include <array>
+#include <numeric>
+#include <vector>
+#include <cmath>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class AvgPoolingParam { StrideDims, KernelDims, PaddingDims };
+
+template <DimIdx_t DIM>
+class AvgPooling_Op : public Operator,
+                public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
+                public Parameterizable<AvgPoolingParam,
+                                       std::array<DimSize_t, DIM>,
+                                       std::array<DimSize_t, DIM>,
+                                       std::array<DimSize_t, (DIM<<1) >> {
+private:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char *Type = "AvgPooling";
+
+    AvgPooling_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<AvgPoolingParam,
+                                             std::array<DimSize_t, DIM>, 
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, (DIM<<1)> >;
+    template <AvgPoolingParam e>
+    using param = typename Parameterizable_::template param<e>;
+
+    constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
+        : Operator(Type),
+          Parameterizable_(param<AvgPoolingParam::StrideDims>(stride_dims),
+                           param<AvgPoolingParam::KernelDims>(kernel_dims),
+                           param<AvgPoolingParam::PaddingDims>(padding_dims)),
+          mOutput(std::make_shared<Tensor>()) {
+        setDatatype(DataType::Float32);
+    }
+
+    constexpr void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 1 && "operators supports only 3 inputs");
+        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    constexpr void computeOutputDims() override final {
+        if (!mInput->empty()) {
+            std::array<DimSize_t, DIM + 2> outputDims = {};
+
+            for (std::size_t dim = 0; dim < this->template get<AvgPoolingParam::KernelDims>().size() ; ++dim) {
+                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                                            std::floor(static_cast<float>(mInput->dims()[dim+2] - 
+                                                                    this->template get<AvgPoolingParam::KernelDims>()[dim] +
+                                                                    this->template get<AvgPoolingParam::PaddingDims>()[dim] +
+                                                                    this->template get<AvgPoolingParam::PaddingDims>()[dim+DIM]) /
+                                            static_cast<float>(this->template get<AvgPoolingParam::StrideDims>()[dim])));
+            }
+            outputDims[1] = mInput->dims()[1];
+            outputDims[0] = mInput->dims()[0];
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
+
+
+    inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operators supports only 1 inputs");
+        return *(mInput.get());
+    }
+    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "AvgPooling Operators supports only 1 inputs");
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "AvgPooling Operators has only 1 outputs");
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operators supports only 1 inputs");
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string &name) {
+        mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+
+    void setDatatype(const DataType &datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+                                           const char *name = nullptr,
+                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                           const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+    // FIXME: properly handle default w&b initialization in every case
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
+    auto avgPool = std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
+    return avgPool;
+}
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> AvgPooling(
+    DimSize_t const (&kernel_dims)[DIM],
+    const char *name = nullptr,
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
+    return AvgPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+}
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::AvgPoolingParam>::data[] = {"StrideDims",
+                                                          "KernelDims", "PaddingDims"};
+}
+
+#endif /* __AIDGE_CORE_OPERATOR_AVGPOOLING_H__ */
diff --git a/aidge/include/aidge/operator/BatchNorm.hpp b/aidge/include/aidge/operator/BatchNorm.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6c64ae44c04f9a8f37d0dde14b251da94ce72a3f
--- /dev/null
+++ b/aidge/include/aidge/operator/BatchNorm.hpp
@@ -0,0 +1,161 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_OPERATOR_BATCHNORM_H__
+#define __AIDGE_CORE_OPERATOR_BATCHNORM_H__
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Types.h"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+
+namespace Aidge {
+enum class BatchNormParam { Epsilon, Momentum };
+
+
+template <DimIdx_t DIM>
+class BatchNorm_Op : public Operator,
+                public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
+                public Parameterizable<BatchNormParam, float, float> {
+public:
+    // FIXME: change accessibility
+    std::array<std::shared_ptr<Tensor>, 5> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
+                                                      std::make_shared<Tensor>(), std::make_shared<Tensor>(),
+                                                      std::make_shared<Tensor>()};
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+   public:
+    static constexpr const char *Type = "BatchNorm";
+
+    BatchNorm_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<BatchNormParam, float, float>;
+    template <BatchNormParam e>
+    using param = typename Parameterizable_::template param<e>;
+
+    constexpr BatchNorm_Op(float epsilon, float momentum)
+        : Operator(Type),
+          Parameterizable_(param<BatchNormParam::Epsilon>(epsilon),
+                           param<BatchNormParam::Momentum>(momentum)),
+          mOutput(std::make_shared<Tensor>()) {
+        setDatatype(DataType::Float32);        
+    }
+
+    // Data operator[](const char* inputName) override final {
+    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
+    //         (strcmp(inputName, "weight") ? mInputs[1] :
+    //         (strcmp(inputName, "bias") ? mInputs[2] :
+    //         nullptr));
+    //     assert((in!=nullptr) && "No such parameter");
+    //     return *in;
+    // }
+
+    constexpr void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 5 && "operators supports only 5 inputs");
+        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    constexpr void computeOutputDims() override final {
+        if (!mInputs[0]->empty()) {
+            for (std::size_t i = nbDataInputs(); i < nbInputs(); ++i) {
+                if(mInputs[i]->size() != mInputs[0]->dims()[1]) {
+                    assert(!mInputs[0]->hasImpl() && "Incompatible size with already implemented learnable parameter");
+                    mInputs[i]->resize(std::array<DimSize_t, 1>({mInputs[0]->dims()[1]}));
+                }
+            }
+            mOutput->resize(mInputs[0]->dims());
+        }
+    }
+
+    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 5 && "operators supports only 5 inputs");
+        return *(mInputs[inputIdx].get()); }
+
+    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 5 && "BatchNorm Operators supports only 5 inputs");
+        return mInputs[inputIdx];
+    }
+    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "BatchNorm Operator has only 1 output");
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 5 && "operators supports only 5 inputs");
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string &name) {
+        mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[1]->setBackend(name);
+        mInputs[2]->setBackend(name);
+        mInputs[3]->setBackend(name);
+        mInputs[4]->setBackend(name);
+    }
+
+    void setDatatype(const DataType &datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[1]->setDatatype(datatype);
+        mInputs[2]->setDatatype(datatype);
+        mInputs[3]->setDatatype(datatype);
+        mInputs[4]->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 5; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> BatchNorm(const float epsilon = 1.0e-5F,
+                                       const float momentum = 0.1F,
+                                       const char *name = nullptr) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by BatchNorm, not supported");
+    auto batchNorm = std::make_shared<Node>(std::make_shared<BatchNorm_Op<static_cast<DimIdx_t>(DIM)>>(epsilon, momentum), name);
+    addProducer(batchNorm, 1, std::array<DimSize_t,0>({}), "scale");
+    addProducer(batchNorm, 2, std::array<DimSize_t,0>({}), "shift");
+    addProducer(batchNorm, 3, std::array<DimSize_t,0>({}), "batch_mean");
+    addProducer(batchNorm, 4, std::array<DimSize_t,0>({}), "batch_variance");
+    return batchNorm;
+}
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::BatchNormParam>::data[] = { "Epsilon", "Momentum" };
+}
+
+#endif // __AIDGE_CORE_OPERATOR_BATCHNORM_H__
\ No newline at end of file
diff --git a/aidge/include/aidge/operator/Conv.hpp b/aidge/include/aidge/operator/Conv.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..babeac443dd8d51a8b9d3de5a2e96b8745636060
--- /dev/null
+++ b/aidge/include/aidge/operator/Conv.hpp
@@ -0,0 +1,200 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_OPERATOR_CONV_H__
+#define __AIDGE_CORE_OPERATOR_CONV_H__
+
+#include <array>
+#include <cmath>
+#include <numeric>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+// Conv hyper-parameters; the order must match the Parameterizable template
+// argument list of Conv_Op and the EnumStrings specialization at end of file.
+enum class ConvParam { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, PaddingDims };
+
+/**
+ * @brief N-dimensional convolution operator (DIM spatial dimensions).
+ * Input layout is <batch, channels, spatial...>.
+ */
+template <DimIdx_t DIM>
+class Conv_Op : public Operator,
+                public Registrable<Conv_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
+                public Parameterizable<ConvParam, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
+                                       DimSize_t, std::array<DimSize_t, DIM>, std::array<DimSize_t, (DIM<<1) >> {
+public:
+    // FIXME: change accessibility
+    // Inputs: data (0), weights (1), bias (2). Every slot always holds a valid Tensor.
+    std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
+                                                      std::make_shared<Tensor>()};
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+   public:
+    static constexpr const char *Type = "Conv";
+
+    Conv_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<ConvParam, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
+                                             DimSize_t, DimSize_t, std::array<DimSize_t, DIM>, std::array<DimSize_t, (DIM<<1) >>;
+    template <ConvParam e>
+    using param = typename Parameterizable_::template param<e>;
+
+    /// @brief Construct a convolution operator.
+    /// @param in_channels Number of input channels.
+    /// @param out_channels Number of output channels (filters).
+    /// @param kernel_dims Spatial kernel dimensions.
+    /// @param stride_dims Stride per spatial dimension (default: all 1).
+    /// @param padding_dims Begin/end padding per spatial dimension (default: all 0).
+    /// @param dilation_dims Dilation per spatial dimension (default: all 1).
+    // NOTE: not constexpr — it calls the non-constexpr setDatatype(), and virtual
+    // member functions may not be constexpr before C++20 anyway.
+    Conv_Op(DimSize_t in_channels,
+            DimSize_t out_channels,
+            const std::array<DimSize_t, DIM> &kernel_dims,
+            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
+            const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+        : Operator(Type),
+          Parameterizable_(param<ConvParam::StrideDims>(stride_dims),
+                           param<ConvParam::DilationDims>(dilation_dims),
+                           param<ConvParam::InChannels>(in_channels),
+                           param<ConvParam::OutChannels>(out_channels),
+                           param<ConvParam::KernelDims>(kernel_dims),
+                           param<ConvParam::PaddingDims>(padding_dims)) {
+        // mOutput is already allocated by its default member initializer.
+        setDatatype(DataType::Float32);
+    }
+
+    // Data operator[](const char* inputName) override final {
+    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
+    //         (strcmp(inputName, "weight") ? mInputs[1] :
+    //         (strcmp(inputName, "bias") ? mInputs[2] :
+    //         nullptr));
+    //     assert((in!=nullptr) && "No such parameter");
+    //     return *in;
+    // }
+
+    /// @brief Register an input Tensor. inputIdx: 0 = data, 1 = weights, 2 = bias.
+    // NOTE: inputIdx IS used (and asserted on), so it must not carry the
+    // `unused` attribute; `constexpr` removed — virtual overrides cannot be
+    // constexpr before C++20.
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 3 && "operators supports only 3 inputs");
+        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    /// @brief Compute the output dims from input #0 (layout: N, C, spatial...).
+    /// Does nothing while the data input is still empty.
+    void computeOutputDims() override final {
+        if (!mInputs[0]->empty()) {
+            std::array<DimSize_t, DIM + 2> outputDims = {};
+
+            for (std::size_t dim = 0; dim < this->template get<ConvParam::KernelDims>().size() ; ++dim) {
+                // Effective kernel span once dilation is applied.
+                const DimSize_t kernelExtent = this->template get<ConvParam::DilationDims>()[dim] *
+                                                       (this->template get<ConvParam::KernelDims>()[dim] - 1) +
+                                               1;
+
+                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                        std::floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent +
+                                                 this->template get<ConvParam::PaddingDims>()[dim] +
+                                                 this->template get<ConvParam::PaddingDims>()[dim+DIM]) /
+                              static_cast<float>(this->template get<ConvParam::StrideDims>()[dim])));
+            }
+
+            outputDims[1] = this->template get<ConvParam::OutChannels>();
+            outputDims[0] = mInputs[0]->dims()[0];
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 3 && "operators supports only 3 inputs");
+        return *(mInputs[inputIdx].get());
+    }
+    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 3 && "Conv Operators supports only 3 inputs");
+        return mInputs[inputIdx];
+    }
+    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Conv Operator has only 1 output");
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 3 && "operators supports only 3 inputs");
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    /// @brief Create the backend implementation and propagate the backend to
+    /// the output and parameter tensors.
+    void setBackend(const std::string &name) {
+        mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[1]->setBackend(name);
+        mInputs[2]->setBackend(name);
+    }
+
+    /// @brief Propagate the datatype to the output and every input tensor.
+    void setDatatype(const DataType &datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+        mInputs[1]->setDatatype(datatype);
+        mInputs[2]->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 3; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+/**
+ * @brief Build a Node wrapping a Conv_Op and attach producers for its weight
+ * (input #1) and bias (input #2) tensors.
+ * @param in_channels Number of input channels.
+ * @param out_channels Number of output channels (filters).
+ * @param kernel_dims Spatial kernel dimensions.
+ * @param name (optional) name of the Node.
+ * @param stride_dims Stride per spatial dimension (default: all 1).
+ * @param padding_dims Begin/end padding per spatial dimension (default: all 0).
+ * @param dilation_dims Dilation per spatial dimension (default: all 1).
+ * @return std::shared_ptr<Node> Node holding the Conv operator.
+ */
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> Conv(DimSize_t in_channels, 
+                                  DimSize_t out_channels,
+                                  const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const char *name = nullptr,
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+    // FIXME: properly handle default w&b initialization in every cases
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, padding_dims, dilation_dims), name);
+    // addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w");
+    // Weight layout: <out_channels, in_channels, kernel...>.
+    addProducer(conv, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
+    addProducer(conv, 2, {out_channels}, "b");
+    return conv;
+}
+
+/// @brief Overload of Conv() accepting the kernel dimensions as a C-style
+/// array literal, e.g. Conv(3, 32, {5, 5}); forwards to the std::array overload.
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> Conv(
+    DimSize_t in_channels,
+    DimSize_t out_channels,
+    DimSize_t const (&kernel_dims)[DIM],
+    const char *name = nullptr,
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
+    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
+    return Conv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+}
+}  // namespace Aidge
+
+namespace {
+// Human-readable names for ConvParam values; order must follow the enum.
+// NOTE(review): a specialization inside an anonymous namespace in a header is
+// duplicated per translation unit — consider a named/inline form; verify ODR impact.
+template <>
+const char *const EnumStrings<Aidge::ConvParam>::data[] = {"StrideDims", "DilationDims", "InChannels", "OutChannels",
+                                                          "KernelDims", "PaddingDims"};
+}
+
+#endif /* __AIDGE_CORE_OPERATOR_CONV_H__ */
diff --git a/aidge/include/aidge/operator/ConvDepthWise.hpp b/aidge/include/aidge/operator/ConvDepthWise.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..7cbc609798064e993c7744fdf08865d897518a89
--- /dev/null
+++ b/aidge/include/aidge/operator/ConvDepthWise.hpp
@@ -0,0 +1,196 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H__
+#define __AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H__
+
+#include <array>
+#include <cmath>
+#include <numeric>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+// Depthwise-conv hyper-parameters; order must match the Parameterizable argument
+// list of ConvDepthWise_Op. Channels starts at 0 and is deduced from the input.
+enum class ConvDepthWiseParam { StrideDims, DilationDims, Channels, KernelDims, PaddingDims };
+
+/**
+ * @brief N-dimensional depthwise convolution operator: one filter per input
+ * channel. Input layout is <batch, channels, spatial...>.
+ */
+template <DimIdx_t DIM>
+class ConvDepthWise_Op : public Operator,
+                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
+                public Parameterizable<ConvDepthWiseParam,
+                                       std::array<DimSize_t, DIM>,
+                                       std::array<DimSize_t, DIM>,
+                                       DimSize_t,
+                                       std::array<DimSize_t, DIM>,
+                                       std::array<DimSize_t, (DIM<<1) >> {
+   public:
+    // FIXME: change accessibility
+    // Inputs: data (0), weights (1), bias (2). Every slot always holds a valid Tensor.
+    std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
+                                                      std::make_shared<Tensor>()};
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+   public:
+    static constexpr const char *Type = "ConvDepthWise";
+
+    ConvDepthWise_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<ConvDepthWiseParam,
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, DIM>,
+                                             DimSize_t,
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, (DIM<<1) >>;
+    template <ConvDepthWiseParam e>
+    using param = typename Parameterizable_::template param<e>;
+
+    /// @brief Construct a depthwise convolution; Channels is initialized to 0
+    /// and deduced from the data input in computeOutputDims().
+    // NOTE: not constexpr — it calls the non-constexpr setDatatype(), and virtual
+    // member functions may not be constexpr before C++20 anyway.
+    ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+                     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                     const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
+                     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+        : Operator(Type),
+          Parameterizable_(param<ConvDepthWiseParam::StrideDims>(stride_dims),
+                           param<ConvDepthWiseParam::DilationDims>(dilation_dims),
+                           param<ConvDepthWiseParam::Channels>(0),
+                           param<ConvDepthWiseParam::KernelDims>(kernel_dims),
+                           param<ConvDepthWiseParam::PaddingDims>(padding_dims)) {
+        // mOutput is already allocated by its default member initializer.
+        setDatatype(DataType::Float32);
+    }
+
+    /// @brief Register an input Tensor. inputIdx: 0 = data, 1 = weights, 2 = bias.
+    // NOTE: inputIdx IS used (and asserted on), so it must not carry the
+    // `unused` attribute; `constexpr` removed — virtual overrides cannot be
+    // constexpr before C++20.
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 3 && "operators supports only 3 inputs");
+        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    /// @brief Compute the output dims from input #0 (layout: N, C, spatial...)
+    /// and record the deduced channel count. No-op while the input is empty.
+    void computeOutputDims() override final {
+        if (!mInputs[0]->empty()) {
+            std::array<DimSize_t, DIM + 2> outputDims = {};
+
+            for (std::size_t dim = 0; dim < this->template get<ConvDepthWiseParam::KernelDims>().size() ; ++dim) {
+                // Effective kernel span once dilation is applied.
+                const DimSize_t kernelExtent = this->template get<ConvDepthWiseParam::DilationDims>()[dim] *
+                                                       (this->template get<ConvDepthWiseParam::KernelDims>()[dim] - 1) +
+                                               1;
+
+                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                        std::floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent +
+                                                 this->template get<ConvDepthWiseParam::PaddingDims>()[dim] +
+                                                 this->template get<ConvDepthWiseParam::PaddingDims>()[dim+DIM]) /
+                              static_cast<float>(this->template get<ConvDepthWiseParam::StrideDims>()[dim])));
+            }
+            // Depthwise: output channel count equals input channel count.
+            this->template get<ConvDepthWiseParam::Channels>() = mInputs[0]->dims()[1];
+            // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template get<ConvDepthWiseParam::KernelDims>()));
+            // if (mInputs[1]->empty()) {
+            //     mInputs[1]->resize(weightDims);
+            // }
+            // if (mInputs[2]->empty()) {
+            //     mInputs[2]->resize({mInputs[0]->dims()[1]});
+            // }
+            outputDims[1] = mInputs[0]->dims()[1];
+            outputDims[0] = mInputs[0]->dims()[0];
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 3 && "operators supports only 3 inputs");
+        return *(mInputs[inputIdx].get());
+    }
+    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 3 && "ConvDepthWise Operators supports only 3 inputs");
+        return mInputs[inputIdx];
+    }
+    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "ConvDepthWise Operator has only 1 output");
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 3 && "operators supports only 3 inputs");
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    /// @brief Create the backend implementation and propagate the backend to
+    /// the output and parameter tensors.
+    void setBackend(const std::string &name) {
+        mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[1]->setBackend(name);
+        mInputs[2]->setBackend(name);
+    }
+
+    /// @brief Propagate the datatype to the output and every input tensor.
+    void setDatatype(const DataType &datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+        mInputs[1]->setDatatype(datatype);
+        mInputs[2]->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 3; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+/**
+ * @brief Build a Node wrapping a ConvDepthWise_Op and attach producers for its
+ * weight (input #1) and bias (input #2) tensors.
+ * @param kernel_dims Spatial kernel dimensions.
+ * @param name (optional) name of the Node.
+ * @param stride_dims Stride per spatial dimension (default: all 1).
+ * @param padding_dims Begin/end padding per spatial dimension (default: all 0).
+ * @param dilation_dims Dilation per spatial dimension (default: all 1).
+ * @return std::shared_ptr<Node> Node holding the ConvDepthWise operator.
+ */
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
+                                           const char *name = nullptr,
+                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                           const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
+                                           const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+    // FIXME: properly handle default w&b initialization in every cases
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims, dilation_dims), name);
+    // Producer tensors start empty; their dims are deduced once the input is known.
+    addProducer(convDW, 1, std::array<DimSize_t,0>({}), "w");
+    addProducer(convDW, 2, std::array<DimSize_t,0>({}), "b");
+    return convDW;
+}
+
+/// @brief Overload of ConvDepthWise() accepting the kernel dimensions as a
+/// C-style array literal, e.g. ConvDepthWise({3, 3}); forwards to the
+/// std::array overload.
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> ConvDepthWise(
+    DimSize_t const (&kernel_dims)[DIM],
+    const char *name = nullptr,
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
+    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
+    return ConvDepthWise(to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+}
+}  // namespace Aidge
+
+namespace {
+// Human-readable names for ConvDepthWiseParam values; order must follow the enum.
+// NOTE(review): a specialization inside an anonymous namespace in a header is
+// duplicated per translation unit — consider a named/inline form; verify ODR impact.
+template <>
+const char *const EnumStrings<Aidge::ConvDepthWiseParam>::data[] = {"StrideDims", "DilationDims", "Channels",
+                                                          "KernelDims", "PaddingDims"};
+}
+
+#endif /* __AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H__ */
diff --git a/aidge/include/aidge/operator/FC.hpp b/aidge/include/aidge/operator/FC.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..ebd3a8826dbca292b57f4d3cae749f4e1d7968c8
--- /dev/null
+++ b/aidge/include/aidge/operator/FC.hpp
@@ -0,0 +1,155 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_OPERATOR_FC_H__
+#define __AIDGE_CORE_OPERATOR_FC_H__
+
+#include <array>
+#include <cmath>
+#include <numeric>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Types.h"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+
+namespace Aidge {
+// FC hyper-parameters; order must match the Parameterizable argument list of FC_Op.
+enum class FCParam { OutChannels, NoBias };
+
+/**
+ * @brief Fully-connected (linear) operator.
+ * Data input is <batch, in_features>; a 1-D input is promoted to a batch of one.
+ */
+class FC_Op : public Operator,
+              public Registrable<FC_Op,
+                                 std::string,
+                                 std::unique_ptr<OperatorImpl>(const FC_Op &)>,
+              public Parameterizable<FCParam, DimSize_t, bool> {
+public:
+    // FIXME: change accessibility
+    // Inputs: data (0), weights (1), bias (2). Every slot always holds a valid Tensor.
+    std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(), std::make_shared<Tensor>()};
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "FC";
+
+    FC_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<FCParam, DimSize_t, bool>;
+    template <FCParam e> using param = typename Parameterizable_::template param<e>;
+
+    /// @brief Construct a fully-connected operator.
+    /// @param out_channels Number of output features.
+    /// @param noBias When true the bias input is expected to have size 0.
+    FC_Op(DimSize_t out_channels, bool noBias)
+            : Operator(Type),
+            Parameterizable_(
+                param<FCParam::OutChannels>(out_channels),
+                param<FCParam::NoBias>(noBias))
+    {
+        // mOutput is already allocated by its default member initializer.
+        setDatatype(DataType::Float32);
+    }
+
+    /// @brief Register an input Tensor. inputIdx: 0 = data, 1 = weights, 2 = bias.
+    /// A 1-D data input is reshaped to <1, size> (batch of one).
+    // NOTE: inputIdx IS used (and asserted on), so it must not carry the
+    // `unused` attribute.
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 3 && "operators supports only 3 inputs");
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        if (inputIdx == 2) {
+            // Bias must be 1-D with OutChannels elements, or empty when NoBias.
+            assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template get<FCParam::NoBias>()) == false ? static_cast<std::size_t>(this->template get<FCParam::OutChannels>()) : 0));
+            assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
+        }
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+        if (inputIdx == 0 && mInputs[0]->nbDims() == 1)
+            mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, mInputs[inputIdx]->size()}));
+    }
+
+    /// @brief Resize the weight and output tensors from the data input dims.
+    void computeOutputDims() override final {
+        if (!mInputs[0]->empty()) {
+            // Weights: <out_channels, in_features> (in_features taken from the input).
+            std::array<DimSize_t, 2> weightDims = {this->template get<FCParam::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
+            // Output: <batch, out_channels>.
+            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template get<FCParam::OutChannels>()};
+
+            mInputs[1]->resize(weightDims);
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 3 && "operators supports only 3 inputs");
+        return *(mInputs[inputIdx].get());
+    }
+    // NOTE: parameter renamed from the misleading `inputIdx` — it is an output index.
+    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 3 && "FC Operators supports only 3 inputs");
+        return mInputs[inputIdx];
+    }
+    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "FC Operator has only 1 output");
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 3 && "operators supports only 3 inputs");
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    /// @brief Create the backend implementation and propagate the backend to
+    /// the output and every input tensor.
+    void setBackend(const std::string& name) {
+        mImpl = Registrar<FC_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setBackend(name);
+        mInputs[1]->setBackend(name);
+        mInputs[2]->setBackend(name);
+    }
+
+    /// @brief Propagate the datatype to the output and every input tensor.
+    void setDatatype(const DataType& datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+        mInputs[1]->setDatatype(datatype);
+        mInputs[2]->setDatatype(datatype);
+    }
+
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 3; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+/**
+ * @brief Build a Node wrapping an FC_Op and attach producers for its weight
+ * (input #1) and bias (input #2) tensors.
+ * @param out_channels Number of output features.
+ * @param noBias When true the bias producer is created with size 0.
+ * @param name (optional) name of the Node.
+ * @return std::shared_ptr<Node> Node holding the FC operator.
+ */
+inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const char* name = nullptr) {
+    // FIXME: properly handle default w&b initialization in every cases
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name);
+    // Weight dims are placeholders; FC_Op::computeOutputDims() resizes them from the input.
+    addProducer(fc, 1, {out_channels, 1}, "w");
+    addProducer(fc, 2, {(noBias ? 0 : out_channels)}, "b"); // already sets bias dims
+    return fc;
+}
+} // namespace Aidge
+
+namespace {
+// Human-readable names for FCParam values; order must follow the enum.
+// NOTE(review): a specialization inside an anonymous namespace in a header is
+// duplicated per translation unit — consider a named/inline form; verify ODR impact.
+template <>
+const char *const EnumStrings<Aidge::FCParam>::data[] = {"OutChannels",
+                                                        "NoBias"};
+}
+
+#endif /* __AIDGE_CORE_OPERATOR_FC_H__ */
\ No newline at end of file
diff --git a/aidge/include/aidge/operator/GenericOperator.hpp b/aidge/include/aidge/operator/GenericOperator.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c8b162919cf381b9659b2d8788c4ad49ce57304e
--- /dev/null
+++ b/aidge/include/aidge/operator/GenericOperator.hpp
@@ -0,0 +1,171 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_OPERATOR_GENERICOPERATOR_H__
+#define __AIDGE_CORE_OPERATOR_GENERICOPERATOR_H__
+
+#include <memory>
+#include <vector>
+#include <string>
+#include <cassert>
+#include <iostream>
+#include <type_traits> // is_same<U, T>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/utils/CParameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+/**
+ * @brief Operator with a user-chosen type string, arbitrary input/output counts
+ * and free-form named parameters stored in a CParameter. It has no
+ * implementation: shape inference, forward and backward are stubbed out.
+ */
+class GenericOperator_Op
+    : public Operator,
+      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)> {
+   private:
+    CParameter mParams;  // type-erased named parameter store
+    IOIndex_t mNbDataIn; // number of data inputs
+    IOIndex_t mNbIn;     // total inputs: data + learnt parameters
+    IOIndex_t mNbOut;    // number of outputs
+    std::vector<std::shared_ptr<Tensor>> mInputs;
+    std::vector<std::shared_ptr<Tensor>> mOutputs;
+
+   public:
+    /// @brief Build a generic operator; every input/output slot is pre-filled
+    /// with an empty Tensor so accessors never return null.
+    GenericOperator_Op(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut)
+        : Operator(type), mNbDataIn(nbDataIn), mNbIn(nbIn), mNbOut(nbOut)
+    {
+        mInputs = std::vector<std::shared_ptr<Tensor>>(nbIn);
+        for (std::size_t i = 0; i < nbIn; ++i) {
+            mInputs[i] = std::make_shared<Tensor>();
+        }
+        mOutputs = std::vector<std::shared_ptr<Tensor>>(nbOut);
+        for (std::size_t i = 0; i < nbOut; ++i) {
+            mOutputs[i] = std::make_shared<Tensor>();
+        }
+    }
+
+    /**
+     * @brief Get the Parameter object identified by its name.
+     * @tparam T expected parameter type.
+     * @param key Parameter name.
+     * @details assert if T is not the actual parameter type, if the parameter
+     * does not exist or internal parameter position is invalid.
+     * @todo Returning a T const& ? But dangerous => may get an address within
+     * param buffer that will get invalid after the CParam death.
+     * @note at() throws if the parameter does not exist, using find to test
+     * for parameter existance
+     * @return template<class T> The parameter.
+     */
+    template <class T>
+    T getParameter(std::string const &key) const {
+        return mParams.Get<T>(key);
+    }
+
+    ///\brief Add a parameter value, identified by its name
+    ///\tparam T expected parameter type
+    ///\param i_ParamName Parameter name
+    ///\param i_Value Parameter value
+    ///\todo Pass i_Value by ref if large or not trivial
+    ///\bug If parameter already exists, its value is changed but written in the
+    /// internal buffer in a new location (previous value is still in memory at
+    /// its previous location)
+    template <class T>
+    void addParameter(std::string const &key, T value) {
+        mParams.Add<T>(key, std::move(value));
+    }
+
+    ///\brief Name of the declared type of a parameter (as stored in CParameter).
+    std::string getParameterType(std::string const &key) { return mParams.getParamType(key); }
+
+    ///\brief Names of every parameter added so far.
+    std::vector<std::string> getParametersName() { return mParams.getParametersName(); }
+
+    // Overridden virtual Operator methods. A generic operator has no semantics,
+    // so these only log their use or assert where no sensible answer exists.
+    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, __attribute__((unused)) std::shared_ptr<Data> data) override final {
+        printf("Info: using associateInput() on a GenericOperator.\n");
+    }
+
+    // No shape inference is possible without knowing the operator's semantics.
+    void computeOutputDims() override final { 
+        assert(false && "Cannot compute output dim of a GenericOperator");
+    }
+
+    bool outputDimsForwarded() const override final {
+        assert(false && "GenericOperator cannot forward dims");
+        return false;
+    }
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx < mNbIn) && "input index out of range for this instance of GenericOperator");
+        printf("Info: using getRawInput() on a GenericOperator.\n");
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx < mNbIn) && "input index out of range for this instance of GenericOperator");
+        printf("Info: using input() on a GenericOperator.\n");
+        return *mInputs[inputIdx];
+    }
+
+
+    std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx < mNbIn) && "input index out of range for this instance of GenericOperator");
+        printf("Info: using getInput() on a GenericOperator.\n");
+        return mInputs[inputIdx];
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final { 
+        assert((outputIdx < mNbOut) && "output index out of range for this instance of GenericOperator");
+        printf("Info: using getOutput() on a GenericOperator.\n");
+        return mOutputs[outputIdx];
+    }
+
+
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx < mNbOut) && "output index out of range for this instance of GenericOperator");
+        printf("Info: using getRawOutput() on a GenericOperator.\n");
+        return std::static_pointer_cast<Data>(mOutputs[outputIdx]);
+    }
+
+    Tensor& output(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx < mNbOut) && "output index out of range for this instance of GenericOperator");
+        printf("Info: using output() on a GenericOperator.\n");
+        return *mOutputs[outputIdx];
+    }
+
+    ~GenericOperator_Op() = default;
+
+    // Not implemented for a generic operator: these are no-op stubs that log.
+    void setBackend(const std::string & /*name*/) { printf("setBackend: not available yet.\n"); }
+    void setDatatype(const DataType & /*datatype*/) { printf("setDatatype: not available yet.\n"); }
+    void forward() override final { printf("forward: not available yet.\n"); }
+    void backward() override final { printf("backward: not available yet.\n"); }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return mNbIn; };
+    inline IOIndex_t nbDataInputs() const noexcept override final { return mNbDataIn; };
+    inline IOIndex_t nbOutputs() const noexcept override final { return mNbOut; };
+};
+
+/**
+ * @brief Fictive custom operator not associated with any implementation.
+ * Allows to import unknown operators and simulate new ones.
+ * @param type Type of the fictive operator.
+ * @param nbDataIn Number of input data.
+ * @param nbIn Total number of inputs: input data + learnt parameters.
+ * @param nbOut Number of output data.
+ * @param name (optional) name of the Operator.
+ * @return std::shared_ptr<Node> Node associated with the Generic Operator.
+ */
+inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut,
+                                             const char *name = nullptr) {
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbDataIn, nbIn, nbOut), name);
+}
+template <> void GenericOperator_Op::addParameter<std::vector<float>>(std::string const &key, std::vector<float> value);
+
+
+}  // namespace Aidge
+
+
+#endif /* __AIDGE_CORE_OPERATOR_GENERICOPERATOR_H__ */
diff --git a/aidge/include/aidge/operator/LeakyReLU.hpp b/aidge/include/aidge/operator/LeakyReLU.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..ed967001a23a6b9dd4cfe5db09ec4f1edd60e5ea
--- /dev/null
+++ b/aidge/include/aidge/operator/LeakyReLU.hpp
@@ -0,0 +1,127 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_OPERATOR_LEAKYRELU_H__
+#define __AIDGE_CORE_OPERATOR_LEAKYRELU_H__
+
+#include <vector>
+#include <memory>
+
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class LeakyReLUParam {
+    NegativeSlope
+};
+
+/// Element-wise LeakyReLU operator: one data input, one output of identical
+/// dimensions. The negative slope is stored as a float parameter.
+class LeakyReLU_Op : public Operator,
+    public Registrable<LeakyReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
+    public Parameterizable<LeakyReLUParam, float> {
+public:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "LeakyReLU";
+
+    LeakyReLU_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<LeakyReLUParam, float>;
+    template <LeakyReLUParam e> using param = typename Parameterizable_::template param<e>;
+
+    /// @param negativeSlope Slope applied to negative input values.
+    LeakyReLU_Op(float negativeSlope)
+            : Operator(Type),
+            Parameterizable_(
+                param<LeakyReLUParam::NegativeSlope>(negativeSlope))
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    /// Bind input Tensor 0; only Tensor data is accepted.
+    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    /// Output has the same dimensions as the input (element-wise op).
+    void computeOutputDims() override final {
+        if (!mInput->empty())
+            mOutput->resize(mInput->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final { return *(mInput.get()); }
+    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+        assert((inputIdx == 0) && "LeakyReLU Operator has only 1 input");
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "LeakyReLU Operator has only 1 output");
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        // explicit cast for consistency with the other operators' getRawOutput()
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    /// Instantiate the registered backend implementation and propagate the
+    /// backend name to the I/O tensors. `override final`: implements
+    /// Operator::setBackend (was silently non-override before).
+    void setBackend(const std::string& name) override final {
+        mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) override final {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+/// Build a LeakyReLU node; negativeSlope defaults to 0.0f (behaves like plain ReLU).
+inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const char* name = nullptr) {
+    // FIXME: properly handle default w&b initialization in every cases
+    return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
+}
+}
+
+namespace {
+template <>
+const char* const EnumStrings<Aidge::LeakyReLUParam>::data[]
+    = {"NegativeSlope"};
+}
+
+#endif /* __AIDGE_CORE_OPERATOR_LEAKYRELU_H__ */
diff --git a/aidge/include/aidge/operator/Matmul.hpp b/aidge/include/aidge/operator/Matmul.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a871fe516c95802fdb67e81ca3f58fb3be4dce25
--- /dev/null
+++ b/aidge/include/aidge/operator/Matmul.hpp
@@ -0,0 +1,143 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_OPERATOR_MATMUL_H__
+#define __AIDGE_CORE_OPERATOR_MATMUL_H__
+
+#include <array>
+#include <cmath>
+#include <numeric>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Types.h"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+
+namespace Aidge {
+enum class MatmulParam { OutChannels };
+
+/// Matrix-product operator with a learnt weight tensor (input #1).
+/// Input #0 is data; the weight is resized from the flattened input size.
+class Matmul_Op : public Operator,
+              public Registrable<Matmul_Op,
+                                 std::string,
+                                 std::unique_ptr<OperatorImpl>(const Matmul_Op &)>,
+              public Parameterizable<MatmulParam, DimSize_t> {
+public:
+    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Matmul";
+
+    Matmul_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<MatmulParam, DimSize_t>;
+    template <MatmulParam e> using param = typename Parameterizable_::template param<e>;
+
+    /// @param out_channels Number of output channels (weight's second dimension).
+    /// Note: the redundant mOutput mem-initializer was removed; the in-class
+    /// initializer already constructs the tensor.
+    Matmul_Op(DimSize_t out_channels)
+            : Operator(Type),
+            Parameterizable_(
+                param<MatmulParam::OutChannels>(out_channels))
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 2 && "operators supports only 2 inputs");
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    /// Resize the weight to <in_features, out_channels> (in_features = flattened
+    /// size of input #0) and the output to {out_channels}.
+    /// NOTE(review): the output is rank-1; no batch dimension is materialized
+    /// despite the original "<out_channels, batch>" note — confirm intended.
+    void computeOutputDims() override final {
+        if (!mInputs[0]->empty()) {
+            // weight: <in_features, out_channels>
+            std::array<DimSize_t, 2> weightDims = {static_cast<DimSize_t>(mInputs[0]->size()), this->template get<MatmulParam::OutChannels>()};
+            // output: <out_channels>
+            std::array<DimSize_t, 1> outputDims = {this->template get<MatmulParam::OutChannels>()};
+
+            mInputs[1]->resize(weightDims);
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 2 && "operators supports only 2 inputs");
+        return *(mInputs[inputIdx].get()); }
+    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 2 && "MatMul Operators has 2 inputs");
+        return mInputs[inputIdx];
+    }
+    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "MatMul Operators has 1 output");
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 2 && "operators supports only 2 inputs");
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    /// Instantiate the registered backend implementation. `override final`:
+    /// implements Operator::setBackend (was silently non-override before).
+    void setBackend(const std::string& name) override final {
+        mImpl = Registrar<Matmul_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setBackend(name);
+        mInputs[1]->setBackend(name);
+    }
+
+    void setDatatype(const DataType& datatype) override final {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+        mInputs[1]->setDatatype(datatype);
+    }
+
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+/// Build a Matmul node and attach a weight Producer on input #1.
+/// NOTE(review): the producer is created with dims {1, out_channels}, while
+/// computeOutputDims() later resizes the weight to {in_features, out_channels}
+/// — confirm the initial placeholder shape is intended.
+inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const char* name = nullptr) {
+    // FIXME: properly handle default w&b initialization in every cases
+    auto matmul = std::make_shared<Node>(std::make_shared<Matmul_Op>(out_channels), name);
+    addProducer(matmul, 1, {1, out_channels}, "w");
+    return matmul;
+}
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::MatmulParam>::data[] = {"OutChannels"};
+}
+
+#endif /* __AIDGE_CORE_OPERATOR_MATMUL_H__ */
diff --git a/aidge/include/aidge/operator/MetaOperator.hpp b/aidge/include/aidge/operator/MetaOperator.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..7fa1a20449d055da9cd25e6dc4f987757aca3f4a
--- /dev/null
+++ b/aidge/include/aidge/operator/MetaOperator.hpp
@@ -0,0 +1,28 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_OPERATOR_METAOPERATOR_H__
+#define __AIDGE_CORE_OPERATOR_METAOPERATOR_H__
+
+#include "aidge/operator/Operator.hpp"
+
+namespace Aidge {
+// Placeholder for operators composed of other operators.
+// NOTE(review): none of Operator's pure virtual methods are overridden here,
+// so MetaOperator is still abstract and cannot be instantiated yet.
+class MetaOperator : public Operator {
+public:
+    MetaOperator()
+        : Operator("MetaOp")
+    {
+    }
+    ~MetaOperator() = default;
+};
+}
+
+#endif /* __AIDGE_CORE_OPERATOR_METAOPERATOR_H__ */
diff --git a/aidge/include/aidge/operator/Operator.hpp b/aidge/include/aidge/operator/Operator.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9f24ce884863776f6856ee03fb4feb089e6323e2
--- /dev/null
+++ b/aidge/include/aidge/operator/Operator.hpp
@@ -0,0 +1,99 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_OPERATOR_OPERATOR_H__
+#define __AIDGE_CORE_OPERATOR_OPERATOR_H__
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/// Abstract base class of every computational operator in the graph.
+/// Concrete operators own their I/O tensors and an optional backend
+/// implementation (mImpl) that performs the actual computation.
+class Operator : public std::enable_shared_from_this<Operator> {
+protected:
+  std::unique_ptr<OperatorImpl> mImpl; // backend implementation of the operator (set by setBackend)
+
+private:
+  std::string mType; // operator type identifier, e.g. "Conv", "ReLU"
+
+public:
+  Operator() = delete;
+  Operator(const char* type) : mType(type) {}
+  virtual ~Operator();
+
+
+public:
+
+    /// Register `data` as input number `inputIdx` of this operator.
+    virtual void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) = 0;
+    /// Propagate dimensions from inputs to outputs (no-op while inputs are empty).
+    virtual void computeOutputDims() = 0;
+    /// True once computeOutputDims() has produced non-empty output dims.
+    virtual bool outputDimsForwarded() const = 0;
+    virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const = 0;
+    virtual std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const = 0;
+    virtual Tensor& input(const IOIndex_t /*inputIdx*/) const = 0;
+    virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0;
+    virtual std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const = 0;
+    virtual Tensor& output(const IOIndex_t /*outputIdx*/) const = 0;
+
+///////////////////////////////////////////////////////
+//        IMPLEMENTATION
+///////////////////////////////////////////////////////
+
+    /// Select the backend implementation by registrar name (e.g. "cpu").
+    virtual void setBackend(const std::string& name) = 0;
+    virtual void setDatatype(const DataType& datatype) = 0;
+
+    /**
+     * @brief Minimum amount of data from a specific input for one computation pass.
+     * @param inputIdx Index of the input analysed.
+     * @return NbElts_t
+     */
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const;
+
+    /**
+     * @brief Amount of data from a specific input actually used in one computation pass.
+     *
+     * @param inputIdx Index of the input analysed.
+     * @return NbElts_t
+     */
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const;
+
+    /**
+     * @brief Amount of data ready to be used on a specific output.
+     *
+     * @param outputIdx Index of the output analysed.
+     * @return NbElts_t
+     */
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const;
+
+    virtual void forward();
+
+    virtual void backward();
+
+///////////////////////////////////////////////////////
+//        INNER
+///////////////////////////////////////////////////////
+
+    /// Operator type identifier (the string passed at construction).
+    std::string type() const {
+        return mType;
+    }
+
+    virtual IOIndex_t nbInputs() const noexcept = 0;
+    virtual IOIndex_t nbDataInputs() const noexcept = 0;
+    virtual IOIndex_t nbOutputs() const noexcept = 0;
+};
+} // namespace Aidge
+
+#endif /* __AIDGE_CORE_OPERATOR_OPERATOR_H__ */
diff --git a/aidge/include/aidge/operator/Producer.hpp b/aidge/include/aidge/operator/Producer.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4d5461957826e9ebea4a39bb9a7618604e80797a
--- /dev/null
+++ b/aidge/include/aidge/operator/Producer.hpp
@@ -0,0 +1,144 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_OPERATOR_PRODUCER_H__
+#define __AIDGE_CORE_OPERATOR_PRODUCER_H__
+
+#include <array>
+#include <vector>
+
+#include "aidge/utils/Types.h"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+
+namespace Aidge {
+
+/// Source operator: holds a tensor and exposes it as its single output.
+/// It has no inputs; used for weights, biases and constant data.
+class Producer_Op
+    : public Operator,
+      public Registrable<Producer_Op, std::string, std::unique_ptr<OperatorImpl>(
+                                          const Producer_Op &)> {
+private:
+    std::shared_ptr<Tensor> mOutput;
+
+public:
+    static constexpr const char* Type = "Producer";
+
+    /// Create a producer owning a fresh Float32 tensor of the given dimensions.
+    template <std::size_t DIM>
+    Producer_Op(const std::array<DimSize_t, DIM>& dims)
+        : Operator(Type),
+          mOutput(std::make_shared<Tensor>())
+    {
+        //ctor
+        setDatatype(DataType::Float32);
+        mOutput->resize(dims);
+    }
+
+    /// Create a producer wrapping an existing tensor (shared, not copied).
+    Producer_Op(const std::shared_ptr<Tensor> tensor)
+        : Operator(Type),
+          mOutput(tensor)
+    {
+        setDatatype(tensor->dataType());
+    }
+
+    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, __attribute__((unused)) std::shared_ptr<Data> data) override final {
+        assert(false && "Producer operator takes no input");
+    }
+
+    // `constexpr` removed: constexpr virtual member functions are ill-formed
+    // before C++20 and add nothing here.
+    void computeOutputDims() override final {}
+
+    bool outputDimsForwarded() const override final {return true;}
+
+
+    [[noreturn]] inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+      assert(false);
+      exit(-1);
+    }
+    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+      assert(false && "Producer Operator has no input");
+      return nullptr;
+    }
+    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+      assert((outputIdx == 0) && "Producer Operator has only 1 output");
+      return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+        assert(false && "Producer operator takes no input");
+        return nullptr;
+    }
+
+    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+    /// Dimensions of the produced tensor.
+    inline const std::vector<DimSize_t> dims() const noexcept { return mOutput->dims(); }
+
+    /// `override final`: implements Operator::setBackend (was silently
+    /// non-override before).
+    void setBackend(const std::string& name) override final {
+        mImpl = Registrar<Producer_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) override final {
+        mOutput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 0; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 0; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+
+public:
+  void forward() override final {
+    printf("Basic Producer forward() function.\n");
+  }
+  void backward() override final {
+    printf("Basic Producer backward() function.\n");
+  }
+};
+
+/// Create a Producer node holding a fresh tensor of the given dimensions.
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const char *name = nullptr) {
+  static_assert(DIM<=MaxDim,"Too many tensor dimensions required by Producer, not supported");
+  return std::make_shared<Node>(std::make_shared<Producer_Op>(dims), name);
+}
+
+/// Convenience overload taking a C-array of dimensions, e.g. Producer({3, 224, 224}).
+template <std::size_t DIM>
+inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const char *name = nullptr) {
+  return Producer(to_array(dims), name);
+}
+
+/// Create a Producer node wrapping an existing tensor (shared, not copied).
+inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const char *name = nullptr) {
+  return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor), name);
+}
+
+/// Attach a new weight/bias Producer to `otherNode` input `inputIdx`.
+/// The producer is named "<node name>_<extension>" unless the node is unnamed.
+template <std::array<DimSize_t, 1>::size_type DIM>
+void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const char* extension) {
+    assert(inputIdx != gk_IODefaultIndex);
+    static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
+    // Keep the composed name alive for the whole call: the previous code took
+    // .c_str() of a temporary std::string destroyed at the end of the full
+    // expression, so Producer() received a dangling pointer.
+    const std::string prodName = otherNode->name().empty()
+        ? std::string()
+        : (otherNode->name() + std::string("_") + std::string(extension));
+    auto prod = Producer(dims, prodName.empty() ? nullptr : prodName.c_str());
+    prod->addChild(otherNode, 0, inputIdx);
+    otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
+}
+
+/// C-array dims convenience overload; forwards to the std::array version.
+template <std::size_t DIM>
+void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const char* extension) {
+    addProducer(otherNode, inputIdx, to_array(dims), extension);
+}
+} // namespace Aidge
+
+#endif /* __AIDGE_CORE_OPERATOR_PRODUCER_H__ */
\ No newline at end of file
diff --git a/aidge/include/aidge/operator/ReLU.hpp b/aidge/include/aidge/operator/ReLU.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..93bc9a74091c2893dc7b1f7fcc34c72828f34f27
--- /dev/null
+++ b/aidge/include/aidge/operator/ReLU.hpp
@@ -0,0 +1,110 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_OPERATOR_RELU_H__
+#define __AIDGE_CORE_OPERATOR_RELU_H__
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/// Element-wise ReLU operator: one data input, one output of identical
+/// dimensions. Defaults to Float32 tensors.
+class ReLU_Op : public Operator,
+    public Registrable<ReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const ReLU_Op&)> {
+public:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "ReLU";
+
+    ReLU_Op()
+            : Operator(Type)
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    /// Bind input Tensor 0; only Tensor data is accepted.
+    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    /// Output has the same dimensions as the input (element-wise op).
+    void computeOutputDims() override final {
+        if (!mInput->empty())
+            mOutput->resize(mInput->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final { return *(mInput.get()); }
+    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+        assert((inputIdx == 0) && "ReLU Operator has only 1 input");
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "ReLU Operator has only 1 output");
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    /// Instantiate the registered backend implementation. `override final`:
+    /// implements Operator::setBackend (was silently non-override before).
+    void setBackend(const std::string& name) override final {
+        mImpl = Registrar<ReLU_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) override final {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+/// Build a ReLU node wrapping a new ReLU_Op.
+inline std::shared_ptr<Node> ReLU(const char* name = nullptr) {
+    // FIXME: properly handle default w&b initialization in every cases
+    return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
+}
+}
+
+#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
diff --git a/aidge/include/aidge/operator/Softmax.hpp b/aidge/include/aidge/operator/Softmax.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9be2acde8570bdc250054e9bed7a1b0d5c3e52ff
--- /dev/null
+++ b/aidge/include/aidge/operator/Softmax.hpp
@@ -0,0 +1,110 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_OPERATOR_SOFTMAX_H__
+#define __AIDGE_CORE_OPERATOR_SOFTMAX_H__
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/// Softmax operator: one data input, one output of identical dimensions.
+/// Defaults to Float32 tensors.
+class Softmax_Op : public Operator,
+    public Registrable<Softmax_Op, std::string, std::unique_ptr<OperatorImpl>(const Softmax_Op&)> {
+public:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Softmax";
+
+    Softmax_Op()
+            : Operator(Type)
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    /// Bind input Tensor 0; only Tensor data is accepted.
+    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    /// Output has the same dimensions as the input.
+    void computeOutputDims() override final {
+        if (!mInput->empty())
+            mOutput->resize(mInput->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final { return *(mInput.get()); }
+    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+        assert((inputIdx == 0) && "Softmax Operator has only 1 input");
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Softmax Operator has only 1 output");
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    /// Instantiate the registered backend implementation. `override final`:
+    /// implements Operator::setBackend (was silently non-override before).
+    void setBackend(const std::string& name) override final {
+        mImpl = Registrar<Softmax_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) override final {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+/// Build a Softmax node wrapping a new Softmax_Op.
+inline std::shared_ptr<Node> Softmax(const char* name = nullptr) {
+    // FIXME: properly handle default w&b initialization in every cases
+    return std::make_shared<Node>(std::make_shared<Softmax_Op>(), name);
+}
+}
+
+#endif /* __AIDGE_CORE_OPERATOR_SOFTMAX_H__ */
diff --git a/aidge/include/aidge/scheduler/Scheduler.hpp b/aidge/include/aidge/scheduler/Scheduler.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..2abe90e111c0997928d270b149a6ab4a460eb3aa
--- /dev/null
+++ b/aidge/include/aidge/scheduler/Scheduler.hpp
@@ -0,0 +1,71 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_SCHEDULER_H__
+#define __AIDGE_SCHEDULER_H__
+
+#include <chrono>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+namespace Aidge {
+class Node;
+class GraphView;
+
+/// Executes the nodes of a GraphView one by one, recording per-node timing.
+class SequentialScheduler {
+public:
+    /// Record of one node execution: the node plus wall-clock start/end
+    /// timestamps, consumed by saveSchedulingDiagram().
+    struct SchedulingElement {
+        SchedulingElement(
+            std::shared_ptr<Node> node_,
+            std::chrono::time_point<std::chrono::high_resolution_clock> start_,
+            std::chrono::time_point<std::chrono::high_resolution_clock> end_)
+            : node(node_), start(start_), end(end_) {}
+
+        std::shared_ptr<Node> node;
+        std::chrono::time_point<std::chrono::high_resolution_clock> start;
+        std::chrono::time_point<std::chrono::high_resolution_clock> end;
+    };
+
+    SequentialScheduler(std::shared_ptr<GraphView> graphView)
+        : mGraphView(graphView)
+    {
+        // ctor
+    };
+    ~SequentialScheduler() = default;
+
+    /**
+     * @brief Run the provided Computational Graph with a batch of data
+     * @param forwardDims If true, propagate tensor dimensions before running.
+     * @param verbose If true, log execution progress.
+     */
+    void forward(bool forwardDims = true, bool verbose = false);
+
+    /**
+     * @brief Save in a Markdown file the order of layers execution.
+     * @param fileName Name of the generated file.
+     */
+    void saveSchedulingDiagram(const std::string& fileName) const;
+
+private:
+    /**
+     * @brief Set of layers receiving an input from currently processing layers
+     *
+     * @param producers Set of layers ready to run.
+     * @return std::set<std::shared_ptr<Node>>
+     */
+    std::set<std::shared_ptr<Node>> getConsumers(const std::set<std::shared_ptr<Node>>& producers) const;
+
+    std::shared_ptr<GraphView> mGraphView;      // graph whose nodes are scheduled
+    std::vector<SchedulingElement> mScheduling; // execution trace of the last forward()
+};
+} // namespace Aidge
+
+#endif /* __AIDGE_SCHEDULER_H__ */
\ No newline at end of file
diff --git a/aidge/include/aidge/utils/CParameter.hpp b/aidge/include/aidge/utils/CParameter.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..7f799c8eb7ef5b49644186f62b5828a03e395d1f
--- /dev/null
+++ b/aidge/include/aidge/utils/CParameter.hpp
@@ -0,0 +1,118 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CPARAMETER_H__
+#define __AIDGE_CPARAMETER_H__
+
+#include <assert.h>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <map>
+#include <string>
+#include <typeinfo>
+#include <vector>
+
+namespace Aidge {
+
+///\todo store also a fix-sized code that indicates the type
+///\todo managing complex types or excluding non-trivial, non-aggregate types
+class CParameter
+{
+public:
+    // not copyable, not movable: owns a raw grow-only byte buffer
+    CParameter(CParameter const &) = delete;
+    CParameter(CParameter &&) = delete;
+    CParameter &operator=(CParameter const &) = delete;
+    CParameter &operator=(CParameter &&) = delete;
+    CParameter() = default;
+
+    /**
+     * \brief Returning a parameter identified by its name
+     * \tparam T expected parameter type
+     * \param i_ParamName Parameter name
+     * \details assert if T is not the actual parameter type, if the parameter does not
+     *  exist or internal parameter position is invalid.
+     * \todo Returning a T const& ? But dangerous => the client may get an address within
+     *  param buffer that will get invalid after the CParam death.
+     * \note at() throws if the parameter does not exist, using find to test for parameter existence
+     */
+    template<class T> T Get(std::string const &i_ParamName) const
+    {
+        assert(m_Params.find(i_ParamName) != m_Params.end());
+        assert(m_Types.find(i_ParamName) != m_Types.end());
+        // The whole stored value must fit inside the buffer (offset alone is
+        // not enough: offset == m_Size would read past the end).
+        assert(m_Params.at(i_ParamName) + sizeof(T) <= m_Size);
+        assert(typeid(T).name() == m_Types.at(i_ParamName));
+        // memcpy instead of *reinterpret_cast<T*>: the stored offset is not
+        // guaranteed to satisfy T's alignment requirement.
+        T value;
+        std::memcpy(&value, m_Buffer + m_Params.at(i_ParamName), sizeof(T));
+        return value;
+    }
+
+    ///\brief Add a parameter value, identified by its name
+    ///\tparam T expected parameter type
+    ///\param i_ParamName Parameter name
+    ///\param i_Value Parameter value
+    ///\todo Pass i_Value by ref if large or not trivial
+    ///\bug If parameter already exists, its value is changed but written in the
+    /// internal buffer in a new location (previous value is still in memory at its previous location)
+    template<class T> void Add(const std::string &i_ParamName, T&& i_Value)
+    {
+        const std::size_t addedSize = sizeof(T);
+        // realloc preserves the existing bytes: replaces the former
+        // malloc + manual copy loop + free sequence.
+        std::uint8_t *newBuffer = static_cast<std::uint8_t *>(std::realloc(m_Buffer, m_Size + addedSize));
+        assert(newBuffer != nullptr && "CParameter: allocation failure");
+        m_Buffer = newBuffer;
+        std::memcpy(m_Buffer + m_Size, &i_Value, addedSize);
+
+        m_Params[i_ParamName] = m_Size; // Offset of the value inside m_Buffer
+        m_Size += addedSize; // Increment offset
+        m_Types[i_ParamName] = typeid(i_Value).name();
+    }
+
+    ///\brief Mangled (typeid) name of the stored parameter type.
+    std::string getParamType(std::string const &i_ParamName){
+        return m_Types[i_ParamName];
+    }
+
+    ///\brief Names of every stored parameter (map iteration order).
+    std::vector<std::string> getParametersName(){
+        std::vector<std::string> parametersName;
+        parametersName.reserve(m_Params.size());
+        for(auto const& it: m_Params)
+            parametersName.push_back(it.first);
+        return parametersName;
+    }
+
+
+    ~CParameter() {
+        std::free(m_Buffer); // free(nullptr) is a no-op
+    }
+
+private:
+    /// @brief Number of bytes used in m_Buffer
+    std::size_t m_Size = 0;
+
+    std::map<std::string, std::size_t> m_Params; // { Param name : offset }
+
+    ///\brief Map to check type error
+    /* Note : i tried this : `std::map<std::string, std::type_info const *> m_Types;`
+    but looks like the type_info object was destroyed.
+    I am not a huge fan of storing a string and making string comparison.
+    Maybe we can use a custom enum type (or is there a standard solution ?)
+    */
+    std::map<std::string, std::string> m_Types;
+
+    ///\brief All parameters values concatenated in raw binary form.
+    /// nullptr until the first Add(); realloc(nullptr, n) behaves like malloc.
+    std::uint8_t *m_Buffer = nullptr;
+};
+
+template<> void CParameter::Add<std::vector<float>>(const std::string &i_ParamName, std::vector<float>&& i_Value);
+
+}
+
+#endif /* __AIDGE_CPARAMETER_H__ */
diff --git a/aidge/include/aidge/utils/Parameter.hpp b/aidge/include/aidge/utils/Parameter.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6a8fcca41ff03951eeac80493cd9f86a2ea3586b
--- /dev/null
+++ b/aidge/include/aidge/utils/Parameter.hpp
@@ -0,0 +1,197 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_UTILS_PARAMETER_H__
+#define __AIDGE_CORE_UTILS_PARAMETER_H__
+
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string> // Add this include to print error
+#endif
+#include <array>
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <tuple>
+
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
+
+namespace {
+// This is the type that will hold all the strings. Each enumerate type will
+// declare its own specialization.
+// NOTE(review): anonymous namespace in a header gives every translation unit
+// its own distinct EnumStrings type — presumably intentional so each TU can
+// supply the specializations it needs; confirm there is no ODR surprise.
+template <typename T> struct EnumStrings {
+    static const char* const data[];
+};
+}
+
+namespace Aidge {
+/// @brief Compile-time element count of a built-in array (pre-C++17
+/// stand-in for std::size).
+template<class ElemT, std::size_t ArrayLen>
+constexpr std::size_t size(ElemT (&)[ArrayLen]) {
+    return ArrayLen;
+}
+
+#ifdef PYBIND
+/* This abstract class allows to avoid binding Parametrizable.
+*  Otherwise we would need to bind every template possible of Parametrizable.
+*  Every operators can access the methods of this class by inheriting from 
+*  PyAbstractParametrizable in the binding code.
+*/
+class PyAbstractParametrizable{ 
+    public:
+        // NOTE(review): no virtual destructor — safe only while instances are
+        // never deleted through a PyAbstractParametrizable*; confirm the
+        // pybind11 holder type used for the derived operators.
+        /* Bindable get function, does not require any templating.
+        *  This is thanks to py::object which allow the function to
+        *  be agnostic from its return type.
+        */
+        virtual py::object getPy(const char* /*name*/) = 0;
+}; 
+#endif
+
+/**
+ * @brief Tuple-backed parameter storage indexed by an enum.
+ *
+ * PARAM_ENUM enumerates the parameters; T... are their value types, in the
+ * same order as the enumerators. Access is either compile-time (get<Enum>())
+ * or runtime by enum value / by name, the names coming from the
+ * EnumStrings<PARAM_ENUM> specialization.
+ */
+template <class PARAM_ENUM, class ...T>
+class Parameterizable
+#ifdef PYBIND
+    : public PyAbstractParametrizable 
+#endif
+    {
+public:
+    using Parameters = std::tuple<T...>;
+
+    // Helper class to pass to the constructor: wraps one parameter value,
+    // tagged at compile time with the enumerator it initializes.
+    template <PARAM_ENUM paramEnum>
+    class param {
+    public:
+        constexpr param(const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& v) : value(v) {}
+        const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type value;
+    };
+
+/*
+    // Direct tuple initialization
+    Parameterizable(T... params) : mParams({params...}) {
+
+    }
+*/
+
+    // Constructor for parameters initialization.
+    // Compile-time guarantee that every parameter is initialized.
+    template <PARAM_ENUM ...paramEnum> // non-type parameter pack
+    constexpr Parameterizable(const param<paramEnum>&&... params) {
+        // Check number of params consistency
+        static_assert(sizeof...(params) == std::tuple_size<std::tuple<T...>>::value, "wrong number of parameters in constructor");
+        // static_assert(size(EnumStrings<PARAM_ENUM>::data) == std::tuple_size<std::tuple<T...>>::value, "wrong number of parameters in enum string");
+
+        // Check no duplicates
+        constexpr std::array<PARAM_ENUM, std::tuple_size<std::tuple<T...>>::value> pe = { paramEnum... };
+        static_assert(!hasDuplicates(pe), "duplicate parameter"); // requires C++14
+
+        // Init params with constructor arguments
+        // (comma-in-array trick: runs the assignments left to right; the
+        // array itself is discarded)
+        const std::array<PARAM_ENUM, std::tuple_size<std::tuple<T...>>::value> p = { ((void)(get<paramEnum>() = params.value), paramEnum) ... };
+        (void)p; // avoid unused warning
+    }
+
+    // Compile-time access with enum
+    template <PARAM_ENUM paramEnum>
+    constexpr typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() {
+        return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
+    }
+    
+    template <PARAM_ENUM paramEnum>
+    constexpr const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() const {
+        return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
+    }
+
+    // Runtime access with enum
+    template <typename R>
+    constexpr R& get(PARAM_ENUM paramEnum) {
+        return get<R>(static_cast<std::size_t>(paramEnum));
+    }
+
+    template <typename R>
+    constexpr const R& get(PARAM_ENUM paramEnum) const {
+        return get<R>(static_cast<std::size_t>(paramEnum));
+    }
+
+    // Runtime existence check with name
+    // NOTE(review): declared constexpr but strcmp is not constant-evaluable
+    // pre-C++23; usable at runtime only.
+    constexpr bool isParam(const char* name) const {
+        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
+            if (strcmp(EnumStrings<PARAM_ENUM>::data[i], name) == 0) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+
+    // Runtime access with name
+    // NOTE(review): falls off the end after assert(false) when the name is
+    // unknown — undefined behavior under NDEBUG; consider throwing instead.
+    template <typename R>
+    constexpr R& get(const char* name) {
+        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
+            if (strcmp(EnumStrings<PARAM_ENUM>::data[i], name) == 0) {
+                return get<R>(i);
+            }
+        }
+
+        assert(false && "parameter not found");
+    }
+
+    // Recursive runtime access by index: walks the tuple from the last
+    // element down to index i. R must exactly match the stored element type
+    // (checked at runtime by the is_same branch, then asserted).
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
+    constexpr typename std::enable_if<(SIZE > 0), R&>::type get(std::size_t i) {
+        if (i == SIZE) {
+            if (std::is_same<R, typename std::tuple_element<SIZE,std::tuple<T...>>::type>::value) {
+                return reinterpret_cast<R&>(std::get<SIZE>(mParams));
+            }
+            else {
+                assert(false && "wrong parameter type");
+            }
+        }
+        else {
+            return get<R, SIZE-1>(i);
+        }
+    }
+
+    // Recursion terminator: reached only with an out-of-range index.
+    // NOTE(review): same missing-return-after-assert issue as above.
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
+    constexpr typename std::enable_if<(SIZE <= 0), R&>::type get(std::size_t i) {
+        assert(false && "parameter not found");
+    }
+
+    constexpr const std::tuple<T...>& getParams() const {
+        return mParams;
+    }
+
+    #ifdef PYBIND
+    // Runtime access by name from Python; returns the value as a py::object.
+    py::object getPy(const char* name){
+        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
+            if (strcmp(EnumStrings<PARAM_ENUM>::data[i], name) == 0) {
+                // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
+                // Normal accessor would not work as we convert the tuple to a py::object which can be anything
+                return py::detail::accessor_policies::tuple_item::get(py::cast(mParams), static_cast<py::size_t>(i));
+            }
+        }
+        throw py::value_error("Parameter : " + std::string(name) + " does not exist." );
+    };
+    #endif
+
+private:
+    // True if the same enumerator appears twice in `array` (compile-time).
+    template <typename V, std::size_t N>
+    static constexpr bool hasDuplicates(const std::array<V, N>& array) {
+        for (std::size_t i = 1; i < N; i++) {
+            for (std::size_t j = 0; j < i; j++) {
+                if (array[i] == array[j]) {
+                    return true;
+                }
+            }
+        }
+
+        return false;
+    }
+
+    std::tuple<T...> mParams;
+};
+}
+
+#endif /* __AIDGE_CORE_UTILS_PARAMETER_H__ */
diff --git a/aidge/include/aidge/utils/Recipies.hpp b/aidge/include/aidge/utils/Recipies.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..d6104c56ce288d260ac78c5eb9d1e83d75ca34c8
--- /dev/null
+++ b/aidge/include/aidge/utils/Recipies.hpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_UTILS_RECIPIES_H__
+#define __AIDGE_CORE_UTILS_RECIPIES_H__
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+
+namespace Aidge{
+
+// Graph-transformation recipes applied to a set of matched nodes.
+// NOTE(review): semantics inferred from the names only — confirm against the
+// implementations in the recipes source file.
+
+/// @brief Presumably fuses a matched (MatMul/Mul, Add) node pair into one FC node.
+void fuseMulAdd(std::set<std::shared_ptr<Node>> nodes);
+/// @brief Presumably removes a redundant Flatten node from the matched set.
+void removeFlatten(std::set<std::shared_ptr<Node>> nodes);
+
+
+}
+
+
+#endif /* __AIDGE_CORE_UTILS_RECIPIES_H__ */
\ No newline at end of file
diff --git a/aidge/include/aidge/utils/Registrar.hpp b/aidge/include/aidge/utils/Registrar.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..8348eb98d3f3ab4da0873c8b3f4a476a9f8e1afc
--- /dev/null
+++ b/aidge/include/aidge/utils/Registrar.hpp
@@ -0,0 +1,75 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_UTILS_REGISTRAR_H__
+#define __AIDGE_CORE_UTILS_REGISTRAR_H__
+
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#endif
+
+#include <cassert>
+#include <cstdlib>
+#include <functional>
+#include <map>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+namespace Aidge {
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
+
+template <class DerivedClass, class Key, class Func> // curiously recurring template pattern
+class Registrable {
+public:
+    typedef Key registrar_key;
+    typedef std::function<Func> registrar_type;
+
+    /**
+     * @brief Key -> factory map shared by every Registrar<DerivedClass>.
+     * One map exists per (DerivedClass, Key, Func) instantiation.
+     */
+    static std::map<Key, std::function<Func>>& registry()
+    {
+        #ifdef PYBIND
+        // When the Python bindings are active, keep a single registry shared
+        // across all pybind11 modules via the interpreter's shared-data store,
+        // keyed by this instantiation's mangled type name.
+        if (std::getenv("AIDGE_CORE_WITH_PYBIND")){
+            std::string name = std::string("registrar_")+typeid(Registrable<DerivedClass, Key, Func>).name();
+            static auto shared_data = reinterpret_cast<std::map<Key, std::function<Func>> *>(py::get_shared_data(name));
+            if (!shared_data)
+                shared_data = static_cast<std::map<Key, std::function<Func>> *>(py::set_shared_data(name, new std::map<Key, std::function<Func>>()));
+            return *shared_data;
+        }
+        #endif // PYBIND
+        // Plain C++ build: one function-local static map per instantiation.
+        static std::map<Key, std::function<Func>> rMap;
+        return rMap;
+    }
+
+};
+
+/**
+ * @brief Helper to insert into / query a Registrable's registry.
+ * Constructing a Registrar<C> registers a factory under `key`; the static
+ * helpers look registered entries up again.
+ */
+template <class C>
+struct Registrar {
+    /// Register `func` under `key`. map::insert keeps the first entry, so a
+    /// duplicate key is silently ignored (assert kept disabled on purpose).
+    Registrar(const typename C::registrar_key& key, typename C::registrar_type func) {
+        //printf("REGISTRAR: %s\n", key.c_str());
+        bool newInsert;
+        std::tie(std::ignore, newInsert) = C::registry().insert(std::make_pair(key, std::move(func)));
+        (void)newInsert; // silences set-but-unused warning while the assert is disabled
+        //assert(newInsert && "registrar already exists");
+    }
+
+    /// @return the registered factory for `key`; asserts if absent.
+    static auto create(const typename C::registrar_key& key){
+        const auto it = C::registry().find(key);
+        assert(it != C::registry().end() && "invalid registrar key");
+
+        return (*it).second;
+    }
+
+    /// @return every registered key, in registry iteration order.
+    static std::vector<typename C::registrar_key> getKeys(){
+        std::vector<typename C::registrar_key> keys;
+        keys.reserve(C::registry().size());
+        for(const auto& keyValue : C::registry())
+            keys.push_back(keyValue.first);
+        return keys;
+    }
+};
+}
+
+#endif // __AIDGE_CORE_UTILS_REGISTRAR_H__
\ No newline at end of file
diff --git a/aidge/include/aidge/utils/Types.h b/aidge/include/aidge/utils/Types.h
new file mode 100644
index 0000000000000000000000000000000000000000..d05c64ead0e147a8d66c7f40dbd978283401683a
--- /dev/null
+++ b/aidge/include/aidge/utils/Types.h
@@ -0,0 +1,62 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+
+#ifndef __AIDGE_TYPES_H__
+#define __AIDGE_TYPES_H__
+
+#include <limits>
+#include <type_traits>
+#include <cstddef>
+#include <cstdint>
+
+namespace Aidge
+{
+//////////////////////////////////////
+///          Tensor
+//////////////////////////////////////
+
+/// @brief Number of elements used for scheduling
+using NbElts_t = std::size_t;
+/// @brief Largest representable element count (serves as "unbounded").
+constexpr NbElts_t MaxElts = std::numeric_limits<NbElts_t>::max();
+
+///\brief Signed dimension size for Tensor (allow for negative coordinates).
+using Coord_t = std::make_signed<std::size_t>::type;
+/// @brief Largest positive coordinate (no matching minimum is defined here).
+constexpr Coord_t MaxCoord = std::numeric_limits<Coord_t>::max();
+
+///\brief Unsigned value for the size of each dimension for a Tensor.
+using DimSize_t = std::size_t;
+constexpr DimSize_t MaxDimSize = std::numeric_limits<DimSize_t>::max();
+
+///\brief Unsigned index for a Tensor's number of dimension.
+using DimIdx_t = std::uint8_t;
+constexpr DimIdx_t MaxDim = std::numeric_limits<DimIdx_t>::max();
+
+//////////////////////////////////////
+///          Operator/Nodes
+//////////////////////////////////////
+
+///\brief Signed integral type to hold an IO index.
+///\details <0 values reserved
+///\todo Change it for an unsigned value with default to numeric_limit and max to numeric_limit-1
+/// NOTE(review): the alias below is actually unsigned (std::uint16_t), so the
+/// "<0 values reserved" note is outdated — the max value plays that role.
+using IOIndex_t = std::uint16_t;
+/// @brief Default for absence of connection
+constexpr IOIndex_t gk_IODefaultIndex = std::numeric_limits<IOIndex_t>::max();
+/// @brief Largest index usable for a real connection (max is reserved above).
+constexpr IOIndex_t gk_IOMaxIndex = std::numeric_limits<IOIndex_t>::max() - 1;
+
+// ///\brief Number of input/output connections for a Node/Operator
+// using IOIndex_t = std::uint16_t;
+// constexpr IOIndex_t gk_IOMaxNb = std::numeric_limits<IOIndex_t>::max();
+
+
+} // namespace Aidge
+
+#endif // __AIDGE_TYPES_H__
\ No newline at end of file
diff --git a/aidge/include/aidge/utilsParsing/AstNode.hpp b/aidge/include/aidge/utilsParsing/AstNode.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..1158ae148a22993476adb00ecbf8ebd24101830c
--- /dev/null
+++ b/aidge/include/aidge/utilsParsing/AstNode.hpp
@@ -0,0 +1,69 @@
+
+
+#ifndef _AIDGE_AST_NODE_H_
+#define _AIDGE_AST_NODE_H_
+
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <vector>
+#include "aidge/utilsParsing/ParsingToken.hpp"
+
+namespace Aidge{
+
+    /**
+     * @brief Immutable node of an abstract syntax tree built from parsing tokens.
+     * @tparam EnumType token-type enumeration shared with ParsingToken.
+     */
+    template <typename EnumType>
+    class AstNode: public std::enable_shared_from_this<AstNode<EnumType>>
+    // ^ the base must name the full specialization: in the base clause the
+    //   bare template name AstNode is not yet usable as a type.
+    {
+        static_assert(std::is_enum<EnumType>::value, "AstNode EnumType must be an enum type");
+        public:
+        /**
+         * @brief Build a node from its token and (optionally) its children.
+         * @param token token carried by this node
+         * @param child children of the node; empty for a leaf
+         */
+        AstNode(std::shared_ptr<ParsingToken<EnumType>> token,std::vector<std::shared_ptr<AstNode>> child ={}):mToken(token),mChild(child){}
+        /**
+         * @brief get the type of the token
+         * @return the type
+         */
+        EnumType getType() const{
+            return mToken->getType();
+        }
+
+        /**
+         * @brief get the lexeme of the token
+         * @return the lexeme
+         */
+        std::string getValue() const{
+            return mToken->getLexeme();
+        }
+        /**
+         * @brief get the child of the node
+         * @return child
+         */
+        const std::vector<std::shared_ptr<AstNode>>& getChilds() const {
+            return mChild;
+        }
+        /**
+         * @brief test if the node is a leaf in the tree
+         * @return true if a leaf
+         */
+        bool isLeaf() const {
+            return mChild.empty();
+        }
+
+        /**
+         * @brief get the number of child
+         * @return the number of child
+         */
+        std::size_t nbChild() const{
+            return mChild.size();
+        }
+        private:
+        /// token of the node
+        const std::shared_ptr<ParsingToken<EnumType>> mToken;
+        /// list of child nodes
+        const std::vector<std::shared_ptr<AstNode>> mChild;
+    };
+}
+
+#endif //_AIDGE_AST_NODE_H_
diff --git a/aidge/include/aidge/utilsParsing/ParsingToken.hpp b/aidge/include/aidge/utilsParsing/ParsingToken.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..78045cf3085a18bfd0565354fd34aef02ef395bd
--- /dev/null
+++ b/aidge/include/aidge/utilsParsing/ParsingToken.hpp
@@ -0,0 +1,66 @@
+
+#ifndef _AIDGE_PARSING_TOKEN_H_
+#define _AIDGE_PARSING_TOKEN_H_
+
+#include <memory>
+#include <sstream>
+#include <string>
+#include <type_traits>
+
+namespace Aidge{
+    /**
+     * @brief Immutable (type, lexeme) pair produced by the lexers.
+     * @tparam EnumType enumeration of the token types.
+     */
+    template <typename EnumType>
+    class ParsingToken: public std::enable_shared_from_this<ParsingToken<EnumType>>
+    // ^ the base must name the full specialization: in the base clause the
+    //   bare template name ParsingToken is not yet usable as a type.
+    {
+        static_assert(std::is_enum<EnumType>::value, "ParsingToken EnumType must be an enum type");
+        public:
+        /**
+         * @brief Token container
+         * @param type one of the token type
+         * @param lexeme String representing additional information of the token
+         */
+        ParsingToken(const EnumType type , const std::string lexeme ) : mLexeme(lexeme),mType(type){}
+        // ^ fixed: the member-initializer list was missing its leading ':'
+
+        /**
+         * @brief get the lexeme
+         * @return std::string 
+         */
+        const std::string getLexeme(void){
+            return mLexeme;
+        }
+
+        /**
+         * @brief get the token type
+         * 
+         * @return EnumType
+         */
+        const EnumType getType(void){
+            return mType;
+        }
+
+        /**
+         * @brief copy the token
+         * @return deep copy of the token
+         * @note declared only; the injected-class-name is used here (the
+         *  qualified Aidge::ParsingToken without template arguments does not
+         *  name a type).
+         */
+        std::shared_ptr<ParsingToken> copy();
+
+        /// @brief Debug helper: textual representation of the token.
+        ///\todo finalize the representation format
+        std::ostringstream rep(void){
+            std::ostringstream out;
+            out << " Token ("  << mLexeme <<")" << "\n";
+            return out;
+        }
+        private:
+
+        /**
+         * @brief additional information of the token
+         */
+        const std::string mLexeme;
+
+        /**
+         * @brief type of the token
+         * @see ConditionalTokenTypes
+         */
+        const EnumType mType;
+
+    };
+}
+
+#endif //_AIDGE_PARSING_TOKEN_H_
\ No newline at end of file