From 48a0df4a68afc6f88444562076d556a5ec6ada77 Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Thu, 13 Mar 2025 10:29:25 +0000
Subject: [PATCH] Move generic implementations to their own files

---
 .../backend/generic/operator/ConcatImpl.hpp   |   7 +-
 .../generic/operator/DepthToSpaceImpl.hpp     |   5 +
 .../backend/generic/operator/FlattenImpl.hpp  |  47 +++++
 .../backend/generic/operator/GatherImpl.hpp   |  42 ++++
 .../backend/generic/operator/IdentityImpl.hpp |  35 ++++
 .../backend/generic/operator/MemorizeImpl.hpp | 119 +++++++++++
 .../backend/generic/operator/MoveImpl.hpp     |  30 +++
 .../backend/generic/operator/PopImpl.hpp      |  92 ++++++++
 .../backend/generic/operator/ReshapeImpl.hpp  |  48 +++++
 .../backend/generic/operator/SelectImpl.hpp   |  45 ++++
 .../backend/generic/operator/ShapeImpl.hpp    |  46 ++++
 .../backend/generic/operator/SliceImpl.hpp    |  44 ++++
 .../backend/generic/operator/SplitImpl.hpp    |  44 ++++
 .../backend/generic/operator/SqueezeImpl.hpp  |  40 ++++
 .../backend/generic/operator/StackImpl.hpp    | 100 +++++++++
 .../backend/generic/operator/UnfoldImpl.hpp   |  47 +++++
 .../generic/operator/UnsqueezeImpl.hpp        |  39 ++++
 include/aidge/operator/Flatten.hpp            |  26 ---
 include/aidge/operator/Gather.hpp             |  21 --
 include/aidge/operator/Identity.hpp           |  10 -
 include/aidge/operator/Memorize.hpp           |  92 --------
 include/aidge/operator/Move.hpp               |   7 -
 include/aidge/operator/Pop.hpp                |  70 -------
 include/aidge/operator/Reshape.hpp            |  27 ---
 include/aidge/operator/Select.hpp             |  22 +-
 include/aidge/operator/Shape.hpp              |  24 ---
 include/aidge/operator/Slice.hpp              |  22 --
 include/aidge/operator/Split.hpp              |  22 --
 include/aidge/operator/Squeeze.hpp            |  86 +++-----
 include/aidge/operator/Stack.hpp              |  74 +------
 include/aidge/operator/Unfold.hpp             |  22 --
 include/aidge/operator/Unsqueeze.hpp          |  48 +----
 src/backend/generic/operator/FlattenImpl.cpp  |  31 +++
 src/backend/generic/operator/GatherImpl.cpp   |  52 +++++
 src/backend/generic/operator/IdentityImpl.cpp |  34 +++
 src/backend/generic/operator/MemorizeImpl.cpp |  79 +++++++
 src/backend/generic/operator/MoveImpl.cpp     |  26 +++
 src/backend/generic/operator/PopImpl.cpp      |  49 +++++
 src/backend/generic/operator/ReshapeImpl.cpp  |  41 ++++
 src/backend/generic/operator/SelectImpl.cpp   |  46 ++++
 src/backend/generic/operator/ShapeImpl.cpp    |  27 +++
 src/backend/generic/operator/SliceImpl.cpp    | 104 ++++++++++
 src/backend/generic/operator/SplitImpl.cpp    |  51 +++++
 src/backend/generic/operator/SqueezeImpl.cpp  |  36 ++++
 src/backend/generic/operator/StackImpl.cpp    |  72 +++++++
 src/backend/generic/operator/UnfoldImpl.cpp   |  67 ++++++
 .../generic/operator/UnsqueezeImpl.cpp        |  34 +++
 src/operator/Flatten.cpp                      |   8 +-
 src/operator/Gather.cpp                       |  29 +--
 src/operator/Identity.cpp                     |  13 +-
 src/operator/Memorize.cpp                     |  54 +----
 src/operator/Move.cpp                         |   9 +-
 src/operator/Pop.cpp                          |  30 +--
 src/operator/Reshape.cpp                      |  17 +-
 src/operator/Select.cpp                       |  26 +--
 src/operator/Shape.cpp                        |   9 +-
 src/operator/Slice.cpp                        |  82 +-------
 src/operator/Split.cpp                        |  34 +--
 src/operator/Squeeze.cpp                      | 196 +++++++++---------
 src/operator/Stack.cpp                        |  52 +----
 src/operator/Unfold.cpp                       |  46 +---
 src/operator/Unsqueeze.cpp                    |  37 +++-
 src/recipes/ExplicitCastMove.cpp              |   3 +-
 unit_tests/operator/Test_Squeeze_Op.cpp       |  12 +-
 64 files changed, 1760 insertions(+), 1049 deletions(-)
 create mode 100644 include/aidge/backend/generic/operator/FlattenImpl.hpp
 create mode 100644 include/aidge/backend/generic/operator/GatherImpl.hpp
 create mode 100644 include/aidge/backend/generic/operator/IdentityImpl.hpp
 create mode 100644 include/aidge/backend/generic/operator/MemorizeImpl.hpp
 create mode 100644 include/aidge/backend/generic/operator/MoveImpl.hpp
 create mode 100644 include/aidge/backend/generic/operator/PopImpl.hpp
 create mode 100644 include/aidge/backend/generic/operator/ReshapeImpl.hpp
 create mode 100644 include/aidge/backend/generic/operator/SelectImpl.hpp
 create mode 100644 include/aidge/backend/generic/operator/ShapeImpl.hpp
 create mode 100644 include/aidge/backend/generic/operator/SliceImpl.hpp
 create mode 100644 include/aidge/backend/generic/operator/SplitImpl.hpp
 create mode 100644 include/aidge/backend/generic/operator/SqueezeImpl.hpp
 create mode 100644 include/aidge/backend/generic/operator/StackImpl.hpp
 create mode 100644 include/aidge/backend/generic/operator/UnfoldImpl.hpp
 create mode 100644 include/aidge/backend/generic/operator/UnsqueezeImpl.hpp
 create mode 100644 src/backend/generic/operator/FlattenImpl.cpp
 create mode 100644 src/backend/generic/operator/GatherImpl.cpp
 create mode 100644 src/backend/generic/operator/IdentityImpl.cpp
 create mode 100644 src/backend/generic/operator/MemorizeImpl.cpp
 create mode 100644 src/backend/generic/operator/MoveImpl.cpp
 create mode 100644 src/backend/generic/operator/PopImpl.cpp
 create mode 100644 src/backend/generic/operator/ReshapeImpl.cpp
 create mode 100644 src/backend/generic/operator/SelectImpl.cpp
 create mode 100644 src/backend/generic/operator/ShapeImpl.cpp
 create mode 100644 src/backend/generic/operator/SliceImpl.cpp
 create mode 100644 src/backend/generic/operator/SplitImpl.cpp
 create mode 100644 src/backend/generic/operator/SqueezeImpl.cpp
 create mode 100644 src/backend/generic/operator/StackImpl.cpp
 create mode 100644 src/backend/generic/operator/UnfoldImpl.cpp
 create mode 100644 src/backend/generic/operator/UnsqueezeImpl.cpp

diff --git a/include/aidge/backend/generic/operator/ConcatImpl.hpp b/include/aidge/backend/generic/operator/ConcatImpl.hpp
index 5e2cccbf7..cbe0c4e29 100644
--- a/include/aidge/backend/generic/operator/ConcatImpl.hpp
+++ b/include/aidge/backend/generic/operator/ConcatImpl.hpp
@@ -9,6 +9,9 @@
  *
  ********************************************************************************/
 
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_CONCATIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_CONCATIMPL_H_
+
 #include <string>
 
 #include "aidge/backend/OperatorImpl.hpp"
@@ -38,4 +41,6 @@ public:
      */
     void forward() override;
 };
-}  // namespace Aidge
\ No newline at end of file
+}  // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_CONCATIMPL_H_
diff --git a/include/aidge/backend/generic/operator/DepthToSpaceImpl.hpp b/include/aidge/backend/generic/operator/DepthToSpaceImpl.hpp
index 3cb638c9d..9a518ed14 100644
--- a/include/aidge/backend/generic/operator/DepthToSpaceImpl.hpp
+++ b/include/aidge/backend/generic/operator/DepthToSpaceImpl.hpp
@@ -9,6 +9,9 @@
  *
  ********************************************************************************/
 
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_DEPTHTOSPACEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_DEPTHTOSPACEIMPL_H_
+
 #include <string>
 
 #include "aidge/backend/OperatorImpl.hpp"
@@ -35,3 +38,5 @@ public:
     void forward() override;
 };
 }  // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_DEPTHTOSPACEIMPL_H_
diff --git a/include/aidge/backend/generic/operator/FlattenImpl.hpp b/include/aidge/backend/generic/operator/FlattenImpl.hpp
new file mode 100644
index 000000000..710624e73
--- /dev/null
+++ b/include/aidge/backend/generic/operator/FlattenImpl.hpp
@@ -0,0 +1,47 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_FLATTENIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_FLATTENIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Implementation of the Flatten operation.
+ *
+ * Since Flatten operation is just backend-agnostic, its implementation is located in aidge_core.
+ */
+class Flatten_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructor for Flatten operator implementation.
+     * @param op Operator instance.
+     * @param backend Optional. Name of the backend.
+     */
+    Flatten_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Compute the forward pass of the Flatten operation.
+     */
+    void forward() override;
+
+    std::shared_ptr<ProdConso> getProdConso() const override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_FLATTENIMPL_H_
diff --git a/include/aidge/backend/generic/operator/GatherImpl.hpp b/include/aidge/backend/generic/operator/GatherImpl.hpp
new file mode 100644
index 000000000..622045fc9
--- /dev/null
+++ b/include/aidge/backend/generic/operator/GatherImpl.hpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_GATHERIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_GATHERIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace Aidge {
+
+/**
+ * @class Gather_OpImpl
+ * @brief Backend implementation for the Gather operation.
+ *
+ * The Gather operation selects elements from the input tensor based on specified indices
+ * and an axis, producing a tensor with a gathered shape.
+ */
+class Gather_OpImpl : public OperatorImpl {
+public:
+    Gather_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Execute the Gather operation.
+     */
+    void forward() override;
+};
+
+}  // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_GATHERIMPL_H_
diff --git a/include/aidge/backend/generic/operator/IdentityImpl.hpp b/include/aidge/backend/generic/operator/IdentityImpl.hpp
new file mode 100644
index 000000000..72af3e0c1
--- /dev/null
+++ b/include/aidge/backend/generic/operator/IdentityImpl.hpp
@@ -0,0 +1,35 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_IDENTITYIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_IDENTITYIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+class Identity_OpImpl : public OperatorImpl {
+public:
+    Identity_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend) {}
+
+    void forward() override;
+
+    std::shared_ptr<ProdConso> getProdConso() const override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_IDENTITYIMPL_H_
diff --git a/include/aidge/backend/generic/operator/MemorizeImpl.hpp b/include/aidge/backend/generic/operator/MemorizeImpl.hpp
new file mode 100644
index 000000000..2c12ae54c
--- /dev/null
+++ b/include/aidge/backend/generic/operator/MemorizeImpl.hpp
@@ -0,0 +1,119 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_MEMORIZEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_MEMORIZEIMPL_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Elts.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/**
+ * @class Memorize_ProdConso
+ * @brief Implements the producer-consumer principle for the `Memorize` operator.
+ *
+ * The `Memorize_ProdConso` class defines the logic for managing data dependencies during
+ * the forward process of the `Memorize` operator.
+ *
+ * This class ensures that:
+ * - All data produced by the `Memorize` operator is properly consumed.
+ * - All required outputs are correctly filled during the forward pass.
+ *
+ * It also calculates data and memory requirements specific to the `Memorize` operator.
+ */
+class Memorize_ProdConso : public ProdConso {
+public:
+    /**
+     * @brief Constructor for the `Memorize_ProdConso` class.
+     * @param[in] op The operator instance for which producer-consumer relationships are managed.
+     *
+     * @details:
+     * - The provided `Operator` instance is used to initialize the base `ProdConso` class.
+     * - This operator will determine the specific requirements for data production
+     *   and consumption during the forward process.
+     */
+    Memorize_ProdConso(const Operator& op): ProdConso(op) {}
+
+    /**
+     * @brief Get the number of data elements required from an input tensor for forward computation.
+     * @param[in] inputIdx The index of the input tensor.
+     * @return The number of required elements (`Elts_t`).
+     *
+     * @details:
+     * - For each input tensor identified by `inputIdx`, this method calculates the
+     *   minimum amount of data needed by the `Memorize` operator to perform its forward step.
+     */
+    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+
+    /**
+     * @brief Compute the memory requirements for an output tensor.
+     * @param[in] outputIdx The index of the output tensor.
+     * @param[in] inputsSize A vector containing the dimensions of the input tensors.
+     * @return The memory required (`Elts_t`) for the specified output tensor.
+     *
+     * @details:
+     * - This method evaluates how much memory is needed for the `outputIdx` tensor
+     *   based on the input tensor dimensions and the attributes of the `Memorize` operator.
+     * - Memory requirements are influenced by factors such as sequence length and
+     *   the forward step configuration.
+     */
+    Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+
+    /**
+     * @brief Update the producer-consumer relationships for the `Memorize` operator.
+     * @details:
+     * - This method ensures that all data produced by the `Memorize` operator is
+     *   appropriately consumed by downstream operators in the computational graph.
+     * - It also verifies that all required outputs are filled during the forward pass,
+     *   maintaining consistency in the data flow.
+     * - This step is crucial for ensuring correctness in recurrent computations and
+     *   maintaining dependencies in the graph.
+     */
+    void updateConsummerProducer() override;
+};
+
+/**
+ * @brief Implementation of the Memorize operation.
+ *
+ * Since Memorize operation is just backend-agnostic, its implementation is located in aidge_core.
+ */
+class Memorize_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructs a Memorize_OpImpl object.
+     * @param[in] op The operator to be implemented.
+     * @param[in] backend The backend used for execution.
+     */
+    Memorize_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Get the Producer Consumer object of the operator.
+     * @return A shared pointer to the ProdConso object.
+     */
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Memorize_ProdConso>(mOp); };
+
+    /**
+     * @brief Executes the forward pass for the Memorize operation.
+     */
+    void forward() override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_MEMORIZEIMPL_H_
diff --git a/include/aidge/backend/generic/operator/MoveImpl.hpp b/include/aidge/backend/generic/operator/MoveImpl.hpp
new file mode 100644
index 000000000..fc2747351
--- /dev/null
+++ b/include/aidge/backend/generic/operator/MoveImpl.hpp
@@ -0,0 +1,30 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_MOVEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_MOVEIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace Aidge {
+
+class Move_OpImpl : public OperatorImpl {
+public:
+    Move_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_MOVEIMPL_H_
diff --git a/include/aidge/backend/generic/operator/PopImpl.hpp b/include/aidge/backend/generic/operator/PopImpl.hpp
new file mode 100644
index 000000000..7718a0691
--- /dev/null
+++ b/include/aidge/backend/generic/operator/PopImpl.hpp
@@ -0,0 +1,92 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_POPIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_POPIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace Aidge {
+
+/**
+ * @class Pop_ProdConso
+ * @brief Implements the producer-consumer principle for the `Pop` operator.
+ *
+ * The `Pop_ProdConso` class defines the logic for managing data dependencies during
+ * the forward process of the `Pop` operator.
+ *
+ * This class ensures that:
+ * - All data consumed by the `Pop` operator is correctly handled.
+ * - The operator respects memory and data requirements during the forward computation.
+ */
+class Pop_ProdConso : public ProdConso {
+public:
+    /**
+     * @brief Constructor for the `Pop_ProdConso` class.
+     * @param[in] op The operator instance for which producer-consumer relationships are managed.
+     *
+     * @details:
+     * - The provided `Operator` instance is used to initialize the base `ProdConso` class.
+     * - This operator determines specific requirements for data consumption during the forward process.
+     */
+    Pop_ProdConso(const Operator& op): ProdConso(op) {}
+
+    /**
+     * @brief Get the number of data elements required from an input tensor for forward computation.
+     * @param[in] inputIdx The index of the input tensor.
+     * @return The number of required elements (`Elts_t`).
+     *
+     * @details:
+     * - For each input tensor identified by `inputIdx`, this method calculates the
+     *   minimum amount of data needed by the `Pop` operator to perform its forward step.
+     */
+    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
+};
+
+/**
+ * @class Pop_OpImpl
+ * @brief Implementation of the `Pop` operation.
+ *
+ * The `Pop_OpImpl` class defines the backend-agnostic logic for executing
+ * the forward pass of the `Pop` operator.
+ */
+class Pop_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructs a `Pop_OpImpl` object.
+     * @param[in] op The operator to be implemented.
+     * @param[in] backend The backend used for execution (optional).
+     */
+    Pop_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Get the Producer Consumer object of the operator.
+     * @return A shared pointer to the `Pop_ProdConso` object.
+     */
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Pop_ProdConso>(mOp); }
+
+    /**
+     * @brief Executes the forward pass for the `Pop` operation.
+     */
+    void forward() override;
+
+    /**
+     * @brief Executes the backward pass for the `Pop` operation.
+     */
+    void backward() override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_POPIMPL_H_
diff --git a/include/aidge/backend/generic/operator/ReshapeImpl.hpp b/include/aidge/backend/generic/operator/ReshapeImpl.hpp
new file mode 100644
index 000000000..1461f89eb
--- /dev/null
+++ b/include/aidge/backend/generic/operator/ReshapeImpl.hpp
@@ -0,0 +1,48 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_RESHAPEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_RESHAPEIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Implementation of the Reshape operator.
+ * @note This operator implementation is agnostic to the backend and is located here instead of in aidge_backend.
+ */
+class Reshape_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructor for Reshape_OpImpl.
+     * @param[in] op The Operator instance.
+     * @param[in] backend The backend name (optional).
+     */
+    Reshape_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Perform the forward operation for the reshape.
+     */
+    void forward() override;
+    void backward() override;
+
+    std::shared_ptr<ProdConso> getProdConso() const override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_RESHAPEIMPL_H_
diff --git a/include/aidge/backend/generic/operator/SelectImpl.hpp b/include/aidge/backend/generic/operator/SelectImpl.hpp
new file mode 100644
index 000000000..c8e3d3a68
--- /dev/null
+++ b/include/aidge/backend/generic/operator/SelectImpl.hpp
@@ -0,0 +1,45 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SELECTIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SELECTIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Implementation of the Select operator.
+ * @note This operator implementation is agnostic to the backend and is located here instead of in aidge_backend.
+ */
+class Select_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructor for Select_OpImpl.
+     * @param[in] op The Operator instance.
+     * @param[in] backend The backend name (optional).
+     */
+    Select_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Perform the forward operation for the reshape.
+     */
+    void forward() override;
+    void backward() override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SELECTIMPL_H_
diff --git a/include/aidge/backend/generic/operator/ShapeImpl.hpp b/include/aidge/backend/generic/operator/ShapeImpl.hpp
new file mode 100644
index 000000000..03ec05976
--- /dev/null
+++ b/include/aidge/backend/generic/operator/ShapeImpl.hpp
@@ -0,0 +1,46 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SHAPEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SHAPEIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace Aidge {
+
+/**
+ * @class Shape_OpImpl
+ * @brief Backend-agnostic implementation of the Shape operator.
+ *
+ * This implementation is responsible for extracting and returning the shape
+ * of the input tensor. Specific backend functionality can extend this.
+ */
+class Shape_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructor for the Shape_OpImpl class.
+     * @param[in] op The Operator instance.
+     * @param[in] backend The backend name (optional).
+     */
+    Shape_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Perform the forward operation to compute the shape of the tensor.
+     */
+    void forward() override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SHAPEIMPL_H_
diff --git a/include/aidge/backend/generic/operator/SliceImpl.hpp b/include/aidge/backend/generic/operator/SliceImpl.hpp
new file mode 100644
index 000000000..4f4d449d6
--- /dev/null
+++ b/include/aidge/backend/generic/operator/SliceImpl.hpp
@@ -0,0 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SLICEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SLICEIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Implementation of the Slice operation.
+ *
+ * Since Slice operation is just backend-agnostic, its implementation is located in aidge_core.
+ */
+class Slice_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructs a Slice_OpImpl object.
+     * @param[in] op The operator to be implemented.
+     * @param[in] backend The backend used for execution.
+     */
+    Slice_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Executes the forward pass for the Slice operation.
+     */
+    void forward() override;
+};
+
+}  // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SLICEIMPL_H_
diff --git a/include/aidge/backend/generic/operator/SplitImpl.hpp b/include/aidge/backend/generic/operator/SplitImpl.hpp
new file mode 100644
index 000000000..6a793ab95
--- /dev/null
+++ b/include/aidge/backend/generic/operator/SplitImpl.hpp
@@ -0,0 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_STACKIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_STACKIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace Aidge {
+
+/**
+ * @class  Implementation of the Split operation.
+ *
+ * Since Split operation is just backend-agnostic, its implementation is located in aidge_core.
+ */
+class Split_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructor for the Split operator implementation.
+     * @param[in] op Operator to be implemented.
+     * @param[in] backend Name of the backend.
+     */
+    Split_OpImpl(const Operator& op, const std::string& backend = "") : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Executes the forward pass for the Split operation.
+     */
+    void forward() override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_STACKIMPL_H_
diff --git a/include/aidge/backend/generic/operator/SqueezeImpl.hpp b/include/aidge/backend/generic/operator/SqueezeImpl.hpp
new file mode 100644
index 000000000..a5b5bf4e2
--- /dev/null
+++ b/include/aidge/backend/generic/operator/SqueezeImpl.hpp
@@ -0,0 +1,40 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SQUEEZEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SQUEEZEIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief implementation of the operator squeeze.
+ * @note Since this operator implementation is agnostic to the backend it is
+ * located here instead of in aidge_backend_cpu/cuda.
+ */
+class Squeeze_OpImpl : public OperatorImpl {
+public:
+    Squeeze_OpImpl(const Operator &op, const std::string &backend = "")
+        : OperatorImpl(op, backend) {}
+
+    void forward() override;
+
+    std::shared_ptr<ProdConso> getProdConso() const override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SQUEEZEIMPL_H_
diff --git a/include/aidge/backend/generic/operator/StackImpl.hpp b/include/aidge/backend/generic/operator/StackImpl.hpp
new file mode 100644
index 000000000..4d3ad6f4a
--- /dev/null
+++ b/include/aidge/backend/generic/operator/StackImpl.hpp
@@ -0,0 +1,100 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_STACKIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_STACKIMPL_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/**
+ * @class StackProdConso
+ * @brief Implements the producer-consumer principle for the `Stack` operator.
+ *
+ * The `StackProdConso` class defines the logic for managing data dependencies
+ * during the forward process of the `Stack` operator. It ensures proper allocation
+ * and consumption of resources required for stacking operations.
+ */
+class StackProdConso : public ProdConso {
+public:
+    /**
+     * @brief Constructor for the `StackProdConso` class.
+     * @param[in] op The operator instance for which producer-consumer relationships are managed.
+     */
+    StackProdConso(const Operator& op) : ProdConso(op) {}
+
+    /**
+     * @brief Compute the memory requirements for an output tensor.
+     * @param[in] outputIdx The index of the output tensor.
+     * @param[in] inputsSize A vector containing the dimensions of the input tensors.
+     * @return The memory required (`Elts_t`) for the specified output tensor.
+     *
+     * @details:
+     * - This method calculates how much memory is needed to store the stacked tensor.
+     * - Memory requirements depend on the number and size of the input tensors.
+     */
+    Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+
+    /**
+     * @brief Reset producer-consumer relationships for the `Stack` operator.
+     *
+     * @details:
+     * - This method clears and reinitializes the producer-consumer relationships,
+     *   ensuring proper data flow and allocation for the stacking operation.
+     */
+    void resetConsummerProducer() override;
+};
+
+/**
+ * @class StackOpImpl
+ * @brief Backend-specific implementation of the `Stack` operator.
+ *
+ * The `StackOpImpl` class handles the execution of the `Stack` operation, including
+ * forward computation and backend-specific optimizations.
+ */
+class StackOpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructs a StackOpImpl object.
+     * @param[in] op The operator to be implemented.
+     * @param[in] backend The backend used for execution.
+     */
+    StackOpImpl(const Operator& op, const std::string& backend = "") : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Get the Producer Consumer object of the operator.
+     * @return A shared pointer to the ProdConso object.
+     */
+    std::shared_ptr<ProdConso> getProdConso() const override {
+        return std::make_shared<StackProdConso>(mOp);
+    }
+
+    /**
+     * @brief Executes the forward pass for the Stack operation.
+     */
+    void forward() override;
+
+    /**
+     * @brief Executes the backward pass for the Stack operation.
+     */
+    void backward() override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_STACKIMPL_H_
diff --git a/include/aidge/backend/generic/operator/UnfoldImpl.hpp b/include/aidge/backend/generic/operator/UnfoldImpl.hpp
new file mode 100644
index 000000000..de8a4b5bd
--- /dev/null
+++ b/include/aidge/backend/generic/operator/UnfoldImpl.hpp
@@ -0,0 +1,47 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_UNFOLDIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_UNFOLDIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Implementation of the Unfold operator.
+ * @tparam DIM Number of dimensions in the operation.
+ */
+template <DimIdx_t DIM>
+class Unfold_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructor for Unfold_OpImpl.
+     * @param[in] op The Operator instance.
+     * @param[in] backend The backend name (optional).
+     */
+    Unfold_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Perform the forward operation for the unfold.
+     */
+    void forward() override;
+};
+
+extern template class Unfold_OpImpl<2>;
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_UNFOLDIMPL_H_
diff --git a/include/aidge/backend/generic/operator/UnsqueezeImpl.hpp b/include/aidge/backend/generic/operator/UnsqueezeImpl.hpp
new file mode 100644
index 000000000..a9f7b15b8
--- /dev/null
+++ b/include/aidge/backend/generic/operator/UnsqueezeImpl.hpp
@@ -0,0 +1,39 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_UNSQUEEZEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_UNSQUEEZEIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief implementation of the operator unsqueeze.
+ * @note Since this operator implementation is agnostic to the backend it is
+ * located here instead of in aidge_backend_cpu/cuda.
+ */
+class Unsqueeze_OpImpl : public OperatorImpl {
+public:
+    Unsqueeze_OpImpl(const Operator &op, const std::string &backend = "")
+        : OperatorImpl(op, backend) {}
+    void forward() override;
+
+    std::shared_ptr<ProdConso> getProdConso() const override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_UNSQUEEZEIMPL_H_
diff --git a/include/aidge/operator/Flatten.hpp b/include/aidge/operator/Flatten.hpp
index 7493b25d7..11775aafb 100644
--- a/include/aidge/operator/Flatten.hpp
+++ b/include/aidge/operator/Flatten.hpp
@@ -22,32 +22,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-
-/**
- * @brief Implementation of the Flatten operation.
- *
- * Since Flatten operation is just backend-agnostic, its implementation is located in aidge_core.
- */
-class Flatten_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructor for Flatten operator implementation.
-     * @param op Operator instance.
-     * @param backend Optional. Name of the backend.
-     */
-    Flatten_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-
-    std::shared_ptr<ProdConso> getProdConso() const override {
-        return std::make_shared<ProdConso>(mOp, true);  // Flatten is an in-place operation!
-    }
-
-    /**
-     * @brief Compute the forward pass of the Flatten operation.
-     */
-    void forward() override;
-};
-}  // namespace Aidge
 
 #define LIST_FLATTEN_ATTR(X)  \
     X(Axis, "axis", std::int64_t)
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 964e1b45d..8bd8239ec 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -24,27 +24,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-
-/**
- * @class Gather_OpImpl
- * @brief Backend implementation for the Gather operation.
- *
- * The Gather operation selects elements from the input tensor based on specified indices
- * and an axis, producing a tensor with a gathered shape.
- */
-class Gather_OpImpl : public OperatorImpl {
-public:
-    Gather_OpImpl(const Operator& op, const std::string& backend = "")
-        : OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Execute the Gather operation.
-     */
-    void forward() override;
-};
-} // namespace Aidge
-
 
 #define LIST_GATHER_ATTR(X)  \
     X(Axis, "axis", std::int8_t),  \
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index b1849cbc5..a0200db6f 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -26,16 +26,6 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
-class Identity_OpImpl : public OperatorImpl {
-public:
-    Identity_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-
-    std::shared_ptr<ProdConso> getProdConso() const override {
-        return std::make_shared<ProdConso>(mOp, true);  // Identity is an in-place operation!
-    }
-
-    void forward() override;
-};
 
 /**
  * @brief Indentity_Op is an helper operator made to ease the declaration of MetaNodes.
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index e1eea4a28..49a0091a6 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -17,104 +17,12 @@
 #include <vector>
 
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-/**
- * @class Memorize_ProdConso
- * @brief Implements the producer-consumer principle for the `Memorize` operator.
- *
- * The `Memorize_ProdConso` class defines the logic for managing data dependencies during
- * the forward process of the `Memorize` operator.
- *
- * This class ensures that:
- * - All data produced by the `Memorize` operator is properly consumed.
- * - All required outputs are correctly filled during the forward pass.
- *
- * It also calculates data and memory requirements specific to the `Memorize` operator.
- */
-class Memorize_ProdConso : public ProdConso {
-public:
-    /**
-     * @brief Constructor for the `Memorize_ProdConso` class.
-     * @param[in] op The operator instance for which producer-consumer relationships are managed.
-     *
-     * @details:
-     * - The provided `Operator` instance is used to initialize the base `ProdConso` class.
-     * - This operator will determine the specific requirements for data production
-     *   and consumption during the forward process.
-     */
-    Memorize_ProdConso(const Operator& op): ProdConso(op) {}
-
-    /**
-     * @brief Get the number of data elements required from an input tensor for forward computation.
-     * @param[in] inputIdx The index of the input tensor.
-     * @return The number of required elements (`Elts_t`).
-     *
-     * @details:
-     * - For each input tensor identified by `inputIdx`, this method calculates the
-     *   minimum amount of data needed by the `Memorize` operator to perform its forward step.
-     */
-    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
-
-    /**
-     * @brief Compute the memory requirements for an output tensor.
-     * @param[in] outputIdx The index of the output tensor.
-     * @param[in] inputsSize A vector containing the dimensions of the input tensors.
-     * @return The memory required (`Elts_t`) for the specified output tensor.
-     *
-     * @details:
-     * - This method evaluates how much memory is needed for the `outputIdx` tensor
-     *   based on the input tensor dimensions and the attributes of the `Memorize` operator.
-     * - Memory requirements are influenced by factors such as sequence length and
-     *   the forward step configuration.
-     */
-    Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
-
-    /**
-     * @brief Update the producer-consumer relationships for the `Memorize` operator.
-     * @details:
-     * - This method ensures that all data produced by the `Memorize` operator is
-     *   appropriately consumed by downstream operators in the computational graph.
-     * - It also verifies that all required outputs are filled during the forward pass,
-     *   maintaining consistency in the data flow.
-     * - This step is crucial for ensuring correctness in recurrent computations and
-     *   maintaining dependencies in the graph.
-     */
-    void updateConsummerProducer() override;
-};
-
-/**
- * @brief Implementation of the Memorize operation.
- *
- * Since Memorize operation is just backend-agnostic, its implementation is located in aidge_core.
- */
-class Memorize_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructs a Memorize_OpImpl object.
-     * @param[in] op The operator to be implemented.
-     * @param[in] backend The backend used for execution.
-     */
-    Memorize_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Get the Producer Consumer object of the operator.
-     * @return A shared pointer to the ProdConso object.
-     */
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Memorize_ProdConso>(mOp); };
-
-    /**
-     * @brief Executes the forward pass for the Memorize operation.
-     */
-    void forward() override;
-};
-} // namespace Aidge
 
 #define LIST_MEMORIZE_ATTR(X)                        \
     X(ScheduleStep, "schedule_step", std::uint32_t), \
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index caea7a646..b516ef549 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -12,23 +12,16 @@
 #ifndef AIDGE_CORE_OPERATOR_MOVE_H_
 #define AIDGE_CORE_OPERATOR_MOVE_H_
 
-#include <cassert>
 #include <memory>
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-class Move_OpImpl : public OperatorImpl {
-public:
-    Move_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-    void forward() override;
-};
 
 /**
  * @brief Description of a Move operation that copies the input Tensor to the output Tensor.
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 9790f05e9..e8d4269f2 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -23,76 +23,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-
-/**
- * @class Pop_ProdConso
- * @brief Implements the producer-consumer principle for the `Pop` operator.
- *
- * The `Pop_ProdConso` class defines the logic for managing data dependencies during
- * the forward process of the `Pop` operator.
- *
- * This class ensures that:
- * - All data consumed by the `Pop` operator is correctly handled.
- * - The operator respects memory and data requirements during the forward computation.
- */
-class Pop_ProdConso : public ProdConso {
-public:
-    /**
-     * @brief Constructor for the `Pop_ProdConso` class.
-     * @param[in] op The operator instance for which producer-consumer relationships are managed.
-     *
-     * @details:
-     * - The provided `Operator` instance is used to initialize the base `ProdConso` class.
-     * - This operator determines specific requirements for data consumption during the forward process.
-     */
-    Pop_ProdConso(const Operator& op): ProdConso(op) {}
-
-    /**
-     * @brief Get the number of data elements required from an input tensor for forward computation.
-     * @param[in] inputIdx The index of the input tensor.
-     * @return The number of required elements (`Elts_t`).
-     *
-     * @details:
-     * - For each input tensor identified by `inputIdx`, this method calculates the
-     *   minimum amount of data needed by the `Pop` operator to perform its forward step.
-     */
-    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
-};
-
-/**
- * @class Pop_OpImpl
- * @brief Implementation of the `Pop` operation.
- *
- * The `Pop_OpImpl` class defines the backend-agnostic logic for executing
- * the forward pass of the `Pop` operator.
- */
-class Pop_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructs a `Pop_OpImpl` object.
-     * @param[in] op The operator to be implemented.
-     * @param[in] backend The backend used for execution (optional).
-     */
-    Pop_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Get the Producer Consumer object of the operator.
-     * @return A shared pointer to the `Pop_ProdConso` object.
-     */
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Pop_ProdConso>(mOp); }
-
-    /**
-     * @brief Executes the forward pass for the `Pop` operation.
-     */
-    void forward() override;
-
-    /**
-     * @brief Executes the backward pass for the `Pop` operation.
-     */
-    void backward() override;
-};
-} //namespace Aidge
 
 #define LIST_POP_ATTR(X)  \
     X(ForwardStep, "forward_step", std::uint32_t),  \
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index c8ac87b6f..c93ef09c9 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -22,33 +22,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-/**
- * @brief Implementation of the Reshape operator.
- * @note This operator implementation is agnostic to the backend and is located here instead of in aidge_backend.
- */
-class Reshape_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructor for Reshape_OpImpl.
-     * @param[in] op The Operator instance.
-     * @param[in] backend The backend name (optional).
-     */
-    Reshape_OpImpl(const Operator& op, const std::string& backend = "")
-        : OperatorImpl(op, backend) {}
-
-    std::shared_ptr<ProdConso> getProdConso() const override {
-        return std::make_shared<ProdConso>(mOp, true);  // Reshape is an in-place operation!
-    }
-
-    /**
-     * @brief Perform the forward operation for the reshape.
-     */
-    void forward() override;
-    void backward() override;
-};
-} // namespace Aidge
-
 
 #define LIST_RESHAPE_ATTR(X)  \
     X(Shape, "shape", std::vector<std::int64_t>),  \
diff --git a/include/aidge/operator/Select.hpp b/include/aidge/operator/Select.hpp
index 4dcace84e..cd0a56bb9 100644
--- a/include/aidge/operator/Select.hpp
+++ b/include/aidge/operator/Select.hpp
@@ -23,29 +23,9 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-/**
- * @brief Implementation of the Select operator.
- * @note This operator implementation is agnostic to the backend and is located here instead of in aidge_backend.
- */
-class Select_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructor for Select_OpImpl.
-     * @param[in] op The Operator instance.
-     * @param[in] backend The backend name (optional).
-     */
-    Select_OpImpl(const Operator& op, const std::string& backend = "")
-        : OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Perform the forward operation for the reshape.
-     */
-    void forward() override;
-    void backward() override;
-};
 
 /**
- * @brief 
+ * @brief
  * @see OperatorTensor
  * @see Registrable
  */
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 290d95eef..4028c4041 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -24,30 +24,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-
-/**
- * @class Shape_OpImpl
- * @brief Backend-agnostic implementation of the Shape operator.
- *
- * This implementation is responsible for extracting and returning the shape
- * of the input tensor. Specific backend functionality can extend this.
- */
-class Shape_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructor for the Shape_OpImpl class.
-     * @param[in] op The Operator instance.
-     * @param[in] backend The backend name (optional).
-     */
-    Shape_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Perform the forward operation to compute the shape of the tensor.
-     */
-    void forward() override;
-};
-}
 
 #define LIST_SHAPE_ATTR(X) \
     X(Start, "start", std::int64_t), \
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index b425fe752..434fb8ab9 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -23,28 +23,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-
-/**
- * @brief Implementation of the Slice operation.
- *
- * Since Slice operation is just backend-agnostic, its implementation is located in aidge_core.
- */
-class Slice_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructs a Slice_OpImpl object.
-     * @param[in] op The operator to be implemented.
-     * @param[in] backend The backend used for execution.
-     */
-    Slice_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Executes the forward pass for the Slice operation.
-     */
-    void forward() override;
-};
-}  // namespace Aidge
 
 #define LIST_SLICE_ATTR(X) \
     X(Starts, "starts", std::vector<std::int64_t>), \
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 038879f05..e9e43a350 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -23,28 +23,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-
-/**
- * @class  Implementation of the Split operation.
- *
- * Since Split operation is just backend-agnostic, its implementation is located in aidge_core.
- */
-class Split_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructor for the Split operator implementation.
-     * @param[in] op Operator to be implemented.
-     * @param[in] backend Name of the backend.
-     */
-    Split_OpImpl(const Operator& op, const std::string& backend = "") : OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Executes the forward pass for the Split operation.
-     */
-    void forward() override;
-};
-} // naemspace Aidge
 
 #define LIST_SPLIT_ATTR(X) \
     X(Axis, "axis", std::int8_t), \
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index ed0f6d366..03db92a84 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -13,9 +13,7 @@
 #define AIDGE_CORE_OPERATOR_SQUEEZE_H_
 
 #include <cstdint>
-#include <cstdlib>
 #include <functional>
-#include <limits>
 #include <memory>
 #include <string>
 #include <vector>
@@ -23,29 +21,10 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-/**
- * @brief implementation of the operator squeeze.
- * @note Since this operator implementation is agnostic to the backend it is
- * located here instead of in aidge_backend_cpu/cuda.
- */
-class Squeeze_OpImpl : public OperatorImpl {
-public:
-  Squeeze_OpImpl(const Operator &op, const std::string &backend = "")
-      : OperatorImpl(op, backend) {}
-
-  std::shared_ptr<ProdConso> getProdConso() const override {
-      return std::make_shared<ProdConso>(mOp, true);  // Squeeze is an in-place operation!
-  }
-
-  void forward() override;
-};
-} // namespace Aidge
 
 #define LIST_SQUEEZE_ATTR(X) \
     X(Axes, "axes", std::vector<std::int8_t>)
@@ -99,7 +78,7 @@ public:
       Type; // name of the type of the operation (Here "Squeeze")
 
 private:
-  using Attributes_ = StaticAttributes<SqueezeAttr, std::vector<int8_t>>;
+  using Attributes_ = StaticAttributes<SqueezeAttr, std::vector<std::int8_t>>;
   template <SqueezeAttr e> using attr = typename Attributes_::template attr<e>;
   const std::shared_ptr<Attributes_> mAttributes;
 
@@ -108,13 +87,7 @@ public:
    * @brief constructor for Squeeze op
    * @param[in] axes around which perform the operation
    */
-  Squeeze_Op(const std::vector<int8_t> &axes = {})
-      : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData},
-                       1),
-        mAttributes(
-            std::make_shared<Attributes_>(attr<SqueezeAttr::Axes>(axes))) {
-    mImpl = std::make_shared<Squeeze_OpImpl>(*this);
-  }
+  Squeeze_Op(const std::vector<std::int8_t> &axes = {});
 
   /**
    * @brief Copy-constructor. Copy the operator attributes and its output
@@ -122,14 +95,7 @@ public:
    * associated).
    * @param op Operator to copy.
    */
-  Squeeze_Op(const Squeeze_Op &op)
-      : OperatorTensor(op), mAttributes(op.mAttributes) {
-    if (!op.backend().empty()) {
-      SET_IMPL_MACRO(Squeeze_Op, *this, op.backend());
-    } else {
-      mImpl = std::make_shared<Squeeze_OpImpl>(*this);
-    }
-  }
+  Squeeze_Op(const Squeeze_Op &op);
 
   /**
    * @brief Clone the operator using its copy-constructor.
@@ -153,36 +119,34 @@ public:
     return mAttributes;
   }
 
-  /**
-   * @brief axes to squeeze, if left empty all 1 sized
-   * dimensions will be removed.
-   */
-  inline std::vector<int8_t> &axes() const noexcept {
-    return mAttributes->template getAttr<SqueezeAttr::Axes>();
-  }
+    /**
+     * @brief axes to squeeze, if left empty all 1 sized
+     * dimensions will be removed.
+     */
+    inline std::vector<std::int8_t> &axes() const noexcept {
+        return mAttributes->template getAttr<SqueezeAttr::Axes>();
+    }
 
-  static const std::vector<std::string> getInputsName() {
-    return {"data_input", "axes_to_squeeze"};
-  }
-  static const std::vector<std::string> getOutputsName() {
-    return {"squeezed"};
-  }
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input", "axes_to_squeeze"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"squeezed"};
+    }
 
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::SqueezeAttr>::data;
-	}
+    /**
+     * @brief Retrieves the names of the attributes for the operator.
+     * @return A vector containing the attributes name.
+     */
+    static constexpr const char* const* attributesName(){
+        return EnumStrings<Aidge::SqueezeAttr>::data;
+    }
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
 // automatic template DIM deduction
-inline std::shared_ptr<Node> Squeeze(const std::vector<int8_t> axes = {},
-                                     const std::string &name = "") {
-  return std::make_shared<Node>(std::make_shared<Squeeze_Op>(axes), name);
-}
+std::shared_ptr<Node> Squeeze(const std::vector<std::int8_t> axes = {},
+                              const std::string &name = "");
 } // namespace Aidge
 
 #undef LIST_SQUEEZE_ATTR
diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp
index 843413756..e0d741226 100644
--- a/include/aidge/operator/Stack.hpp
+++ b/include/aidge/operator/Stack.hpp
@@ -23,78 +23,6 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-/**
- * @class StackProdConso
- * @brief Implements the producer-consumer principle for the `Stack` operator.
- *
- * The `StackProdConso` class defines the logic for managing data dependencies
- * during the forward process of the `Stack` operator. It ensures proper allocation
- * and consumption of resources required for stacking operations.
- */
-class StackProdConso : public ProdConso {
-public:
-    /**
-     * @brief Constructor for the `StackProdConso` class.
-     * @param[in] op The operator instance for which producer-consumer relationships are managed.
-     */
-    StackProdConso(const Operator& op) : ProdConso(op) {}
-
-    /**
-     * @brief Compute the memory requirements for an output tensor.
-     * @param[in] outputIdx The index of the output tensor.
-     * @param[in] inputsSize A vector containing the dimensions of the input tensors.
-     * @return The memory required (`Elts_t`) for the specified output tensor.
-     *
-     * @details:
-     * - This method calculates how much memory is needed to store the stacked tensor.
-     * - Memory requirements depend on the number and size of the input tensors.
-     */
-    Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
-
-    /**
-     * @brief Reset producer-consumer relationships for the `Stack` operator.
-     *
-     * @details:
-     * - This method clears and reinitializes the producer-consumer relationships,
-     *   ensuring proper data flow and allocation for the stacking operation.
-     */
-    void resetConsummerProducer() override;
-};
-
-/**
- * @class StackOpImpl
- * @brief Backend-specific implementation of the `Stack` operator.
- *
- * The `StackOpImpl` class handles the execution of the `Stack` operation, including
- * forward computation and backend-specific optimizations.
- */
-class StackOpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructs a StackOpImpl object.
-     * @param[in] op The operator to be implemented.
-     * @param[in] backend The backend used for execution.
-     */
-    StackOpImpl(const Operator& op, const std::string& backend = "") : OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Get the Producer Consumer object of the operator.
-     * @return A shared pointer to the ProdConso object.
-     */
-    std::shared_ptr<ProdConso> getProdConso() const override {
-        return std::make_shared<StackProdConso>(mOp);
-    }
-
-    /**
-     * @brief Executes the forward pass for the Stack operation.
-     */
-    void forward() override;
-
-    /**
-     * @brief Executes the backward pass for the Stack operation.
-     */
-    void backward() override;
-};
 
 #define LIST_STACK_ATTR(X)  \
     X(ForwardStep, "forward_step", std::uint32_t), \
@@ -151,7 +79,7 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    static const std::string s_type;
+    static const std::string Type;
 
     /**
      * @brief Constructs a new Stack Operator.
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index fe85f9d5e..664dafc93 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -27,28 +27,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-/**
- * @brief Implementation of the Unfold operator.
- * @tparam DIM Number of dimensions in the operation.
- */
-template <DimIdx_t DIM>
-class Unfold_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructor for Unfold_OpImpl.
-     * @param[in] op The Operator instance.
-     * @param[in] backend The backend name (optional).
-     */
-    Unfold_OpImpl(const Operator& op, const std::string& backend = "")
-        : OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Perform the forward operation for the unfold.
-     */
-    void forward() override;
-};
-} //namespace Aidge
 
 #define LIST_UNFOLD_ATTR(X)  \
     X(StrideDims, "stride_dims", sizeArr_t<DIM>),  \
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index 8d95f5cdd..27b3851fc 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -20,29 +20,10 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-/**
- * @brief implementation of the operator unsqueeze.
- * @note Since this operator implementation is agnostic to the backend it is
- * located here instead of in aidge_backend_cpu/cuda.
- */
-class Unsqueeze_OpImpl : public OperatorImpl {
-public:
-  Unsqueeze_OpImpl(const Operator &op, const std::string &backend = "")
-      : OperatorImpl(op, backend) {}
-
-  std::shared_ptr<ProdConso> getProdConso() const override {
-      return std::make_shared<ProdConso>(mOp, true);  // Unsqueeze is an in-place operation!
-  }
-
-  void forward() override;
-};
-}  // namespace Aidge
 
 #define LIST_UNSQUEEZE_ATTR(X)  \
     X(Axes, "axes", std::vector<std::int8_t>)
@@ -87,8 +68,7 @@ class Unsqueeze_Op
                          std::function<std::shared_ptr<OperatorImpl>(const Unsqueeze_Op &)>> {
 
 public:
-  static const std::string
-      Type; // name of the type of the operation (Here "Unsqueeze")
+  static const std::string Type;
 
 private:
   using Attributes_ = StaticAttributes<UnsqueezeAttr, GENERATE_LIST_ATTR_TYPE(LIST_UNSQUEEZE_ATTR)>;
@@ -97,20 +77,13 @@ private:
   const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-  Unsqueeze_Op() =
-      delete; // no default constructor since this class has attributes
+  Unsqueeze_Op() = delete;
 
   /**
    * @brief constructor for Unsqueeze op
    * @param[in] axis around which perform the operation
    */
-  Unsqueeze_Op(const std::vector<int8_t> &axes)
-      : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData},
-                       1),
-        mAttributes(
-            std::make_shared<Attributes_>(attr<UnsqueezeAttr::Axes>(axes))) {
-    mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
-  }
+  Unsqueeze_Op(const std::vector<int8_t> &axes);
 
   /**
    * @brief Copy-constructor. Copy the operator attributes and its output
@@ -118,14 +91,7 @@ public:
    * associated).
    * @param op Operator to copy.
    */
-  Unsqueeze_Op(const Unsqueeze_Op &op)
-      : OperatorTensor(op), mAttributes(op.mAttributes) {
-    if (!op.backend().empty()) {
-      SET_IMPL_MACRO(Unsqueeze_Op, *this, op.backend());
-    } else {
-      mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
-    }
-  }
+  Unsqueeze_Op(const Unsqueeze_Op &op);
 
   /**
    * @brief Clone the operator using its copy-constructor.
@@ -176,10 +142,8 @@ public:
 
 // helper with C-style array instead of std::array for kernel_dims to allow
 // automatic template DIM deduction
-inline std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes = {},
-                                       const std::string &name = "") {
-  return std::make_shared<Node>(std::make_shared<Unsqueeze_Op>(axes), name);
-}
+std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes = {},
+                                       const std::string &name = "");
 } // namespace Aidge
 
 #undef LIST_UNSQUEEZE_ATTR
diff --git a/src/backend/generic/operator/FlattenImpl.cpp b/src/backend/generic/operator/FlattenImpl.cpp
new file mode 100644
index 000000000..0ed74f85c
--- /dev/null
+++ b/src/backend/generic/operator/FlattenImpl.cpp
@@ -0,0 +1,31 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/FlattenImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Flatten.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+void Flatten_OpImpl::forward() {
+    const Flatten_Op& op = dynamic_cast<const Flatten_Op&>(mOp);
+    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+}
+
+std::shared_ptr<ProdConso> Flatten_OpImpl::getProdConso() const {
+    return std::make_shared<ProdConso>(mOp, true);  // Flatten is an in-place operation!
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/GatherImpl.cpp b/src/backend/generic/operator/GatherImpl.cpp
new file mode 100644
index 000000000..cbe9fdba5
--- /dev/null
+++ b/src/backend/generic/operator/GatherImpl.cpp
@@ -0,0 +1,52 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/GatherImpl.hpp"
+
+#include <array>
+#include <cstddef>  // std::size_t
+#include <vector>
+
+#include "aidge/operator/Gather.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+void Gather_OpImpl::forward() {
+    const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
+
+    const std::size_t axisIdx = static_cast<std::size_t>(op.axis()) + (op.axis() >= 0 ? 0 : op.getInput(0)->dims().size());
+
+    std::size_t postAxisElems = 1;
+    for (std::size_t i = axisIdx + 1; i < op.getInput(0)->dims().size(); ++i) {
+        postAxisElems *= op.getInput(0)->dims()[i];
+    }
+    std::size_t preAxisElems = 1;
+    for (std::size_t i = 0; i < axisIdx; ++i) {
+        preAxisElems *= op.getInput(0)->dims()[i];
+    }
+
+    std::size_t outputOffset = 0;
+    for (std::size_t i=0; i<preAxisElems; ++i)
+    {
+        for(std::size_t j = 0; j < op.indices().size(); ++j)
+        {
+            const std::size_t idx = op.indices()[j] >= 0 ?
+                                        static_cast<std::size_t>(op.indices()[j]) :
+                                        static_cast<std::size_t>(op.indices()[j] + static_cast<int>(op.getInput(0)->dims()[axisIdx]));
+            op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i * postAxisElems * op.getInput(0)->dims()[axisIdx] + idx * postAxisElems), postAxisElems, outputOffset);
+            outputOffset += postAxisElems;
+        }
+    }
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/IdentityImpl.cpp b/src/backend/generic/operator/IdentityImpl.cpp
new file mode 100644
index 000000000..d1be34799
--- /dev/null
+++ b/src/backend/generic/operator/IdentityImpl.cpp
@@ -0,0 +1,34 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/IdentityImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Identity.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+void Identity_OpImpl::forward() {
+    const Identity_Op& op = dynamic_cast<const Identity_Op&>(mOp);
+    op.getOutput(0)->setBackend(op.getInput(0)->backend(), op.getInput(0)->device());
+    op.getOutput(0)->setDataType(op.getInput(0)->dataType());
+    op.getOutput(0)->setDataFormat(op.getInput(0)->dataFormat());
+    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+}
+
+std::shared_ptr<ProdConso> Identity_OpImpl::getProdConso() const {
+    return std::make_shared<ProdConso>(mOp, true);  // Identity is an in-place operation!
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/MemorizeImpl.cpp b/src/backend/generic/operator/MemorizeImpl.cpp
new file mode 100644
index 000000000..be13f0f6b
--- /dev/null
+++ b/src/backend/generic/operator/MemorizeImpl.cpp
@@ -0,0 +1,79 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/MemorizeImpl.hpp"
+
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Elts.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Memorize.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+Aidge::Elts_t Aidge::Memorize_ProdConso::getNbRequiredData(
+    Aidge::IOIndex_t inputIdx) const
+{
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+
+    if (op.scheduleStep() == 0 && inputIdx == 0) {
+        // No data input is required for the initial step.
+        // Initialization data is required however.
+        return Elts_t::NoneElts();
+    }
+    else if (op.scheduleStep() > 0 && inputIdx == 1) {
+        // No initialization data is required after the initial step.
+        return Elts_t::NoneElts();
+    }
+    else {
+        return ProdConso::getNbRequiredData(inputIdx);
+    }
+}
+
+Aidge::Elts_t Aidge::Memorize_ProdConso::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                            const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
+    assert(mOp.getRawOutput(outputIdx) && "requires valid output");
+
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+
+    if ((op.endStep() > 0) && (outputIdx == 1) && (op.scheduleStep() >= op.endStep())) {
+        return Elts_t::NoneElts();
+    }
+    else {
+        return Elts_t::DataElts(op.getOutput(outputIdx)->size());
+    }
+}
+
+void Aidge::Memorize_ProdConso::updateConsummerProducer() {
+    ProdConso::updateConsummerProducer();
+
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+    AIDGE_ASSERT(op.endStep() == 0 || op.scheduleStep() <= op.endStep(), "cannot update consumer producer anymore, number of cycles exceeded");
+}
+
+void Aidge::Memorize_OpImpl::forward() {
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+
+    AIDGE_ASSERT((op.endStep() == 0) || (op.forwardStep() <= op.endStep()), "cannot forward anymore, number of cycles exceeded");
+
+    if (op.forwardStep() == 0) {
+        op.getOutput(0)->getImpl()->copy(op.getInput(1)->getImpl()->rawPtr(), op.getInput(1)->size());
+    }
+    else {
+        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+    }
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/MoveImpl.cpp b/src/backend/generic/operator/MoveImpl.cpp
new file mode 100644
index 000000000..2986d94b0
--- /dev/null
+++ b/src/backend/generic/operator/MoveImpl.cpp
@@ -0,0 +1,26 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/MoveImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Move.hpp"
+
+namespace Aidge {
+
+void Aidge::Move_OpImpl::forward() {
+    const Move_Op& op = dynamic_cast<const Move_Op&>(mOp);
+    op.getOutput(0)->copyFrom(*(op.getInput(0)));
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/PopImpl.cpp b/src/backend/generic/operator/PopImpl.cpp
new file mode 100644
index 000000000..2596b3dda
--- /dev/null
+++ b/src/backend/generic/operator/PopImpl.cpp
@@ -0,0 +1,49 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/PopImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Pop.hpp"
+
+namespace Aidge {
+
+Elts_t Aidge::Pop_ProdConso::getNbRequiredData(const IOIndex_t inputIdx) const {
+    assert(mOp.getRawInput(inputIdx) && "requires valid input");
+
+    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+    AIDGE_ASSERT(!op.getInput(inputIdx)->empty(), "Pop operator requires known, non-empty, input dims for scheduling. You might have an unresolved data dependency upstream in the computing graph.");
+    return Elts_t::DataElts(op.getInput(inputIdx)->size()
+        / op.getInput(inputIdx)->dims()[0]);
+}
+
+void Aidge::Pop_OpImpl::forward() {
+    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+
+    assert(op.getInput(0) && "missing input #0");
+    *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()}).clone();
+}
+
+void Aidge::Pop_OpImpl::backward() {
+    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+
+    auto outputGrad = op.getOutput(0)->grad();
+    auto inputGrad = op.getInput(0)->grad();
+
+    inputGrad->getImpl()->copy(
+        outputGrad->getImpl()->rawPtr(),
+        outputGrad->size(),
+        (op.backwardStep()-1) * outputGrad->size());
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/ReshapeImpl.cpp b/src/backend/generic/operator/ReshapeImpl.cpp
new file mode 100644
index 000000000..0ccef5253
--- /dev/null
+++ b/src/backend/generic/operator/ReshapeImpl.cpp
@@ -0,0 +1,41 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/ReshapeImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Reshape.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+namespace Aidge {
+
+void Reshape_OpImpl::forward() {
+    const Reshape_Op& op = dynamic_cast<const Reshape_Op&>(mOp);
+    AIDGE_ASSERT(op.getInput(0), "missing input#0");
+    // const auto& input = op.getInput(0)->refCastFrom(mInputFallback, *op.getOutput(0));
+    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+}
+
+void Reshape_OpImpl::backward() {
+    const Reshape_Op& op = dynamic_cast<const Reshape_Op&>(mOp);
+    AIDGE_ASSERT(op.getOutput(0)->grad(), "missing gradient for output#0");
+    // const auto& output_grad = op.getOutput(0)->grad()->refCastFrom(mOutputGradFallback, *op.getOutput(0)->grad());
+    op.getInput(0)->grad()->getImpl()->copy(op.getOutput(0)->grad()->getImpl()->rawPtr(), op.getOutput(0)->size());
+}
+
+std::shared_ptr<ProdConso> Reshape_OpImpl::getProdConso() const {
+    return std::make_shared<ProdConso>(mOp, true);  // Reshape is an in-place operation!
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/SelectImpl.cpp b/src/backend/generic/operator/SelectImpl.cpp
new file mode 100644
index 000000000..9746223d3
--- /dev/null
+++ b/src/backend/generic/operator/SelectImpl.cpp
@@ -0,0 +1,46 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/SelectImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Select.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+namespace Aidge {
+
+void Select_OpImpl::forward() {
+    const Select_Op& op = dynamic_cast<const Select_Op&>(mOp);
+    AIDGE_ASSERT(op.getInput(0)->size() > 0, "Select input is empty!");
+
+    std::shared_ptr<Tensor> selectFallback;
+    const auto& select = op.getInput(0)->refCastFrom(selectFallback, DataType::Int32, "cpu");
+    const auto selectVal = select.get<int32_t>(0);
+    AIDGE_ASSERT(selectVal >= 0 && selectVal < op.nbInputs() - 1, "Select input out of range. Expected value in range [0, {}], got {}", op.nbInputs() - 2, selectVal);
+
+    op.getOutput(0)->getImpl()->copy(op.getInput(selectVal + 1)->getImpl()->rawPtr(), op.getInput(selectVal + 1)->size());
+}
+
+void Select_OpImpl::backward() {
+    const Select_Op& op = dynamic_cast<const Select_Op&>(mOp);
+    AIDGE_ASSERT(op.getInput(0)->size() > 0, "Select input is empty!");
+
+    std::shared_ptr<Tensor> selectFallback;
+    const auto& select = op.getInput(0)->refCastFrom(selectFallback, DataType::Int32, "cpu");
+    const auto selectVal = select.get<int32_t>(0);
+    AIDGE_ASSERT(selectVal >= 0 && selectVal < op.nbInputs() - 1, "Select input out of range. Expected value in range [0, {}], got {}", op.nbInputs() - 2, selectVal);
+
+    op.getInput(selectVal + 1)->grad()->getImpl()->copy(op.getOutput(0)->grad()->getImpl()->rawPtr(), op.getOutput(0)->size());
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/ShapeImpl.cpp b/src/backend/generic/operator/ShapeImpl.cpp
new file mode 100644
index 000000000..b14c0abef
--- /dev/null
+++ b/src/backend/generic/operator/ShapeImpl.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/ShapeImpl.hpp"
+
+#include "aidge/operator/Shape.hpp"
+#include "aidge/data/Tensor.hpp"
+
+namespace Aidge {
+
+void Shape_OpImpl::forward() {
+    // Output is already valid after forwardDims()
+    // But it may be with the wrong device (default cpu)
+    // This can happen if forwardDims is called before setBackend
+    const Shape_Op& op = dynamic_cast<const Shape_Op&>(mOp);
+    op.getOutput(0)->setBackend(op.getInput(0)->backend(), op.getInput(0)->device());
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/SliceImpl.cpp b/src/backend/generic/operator/SliceImpl.cpp
new file mode 100644
index 000000000..4a4ea27ee
--- /dev/null
+++ b/src/backend/generic/operator/SliceImpl.cpp
@@ -0,0 +1,104 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/SliceImpl.hpp"
+
+#include <array>
+#include <cstddef>  // std::size_t
+#include <cstdint>  // std::int32_t
+#include <vector>
+
+#include "aidge/operator/Slice.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+// Helper function to calculate the linear index for multi-dimensional data
+static std::size_t getLinearIndex(const std::vector<std::size_t>& dims, const std::vector<std::size_t>& indices) {
+    size_t linearIndex = 0;
+    size_t stride = 1;
+    for (int i = dims.size() - 1; i >= 0; --i) {
+        linearIndex += indices[i] * stride;
+        stride *= dims[i];
+    }
+    return linearIndex;
+}
+
+void Slice_OpImpl::forward() {
+    const Slice_Op& op = dynamic_cast<const Slice_Op&>(mOp);
+
+    if (!op.getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", op.Type);
+    }
+    AIDGE_ASSERT((op.axes().size() == op.ends().size()) &&
+                    (op.axes().size() == op.starts().size()),
+                    "Starts, Ends and Axes arguments should be the same size.");
+
+    const std::vector<std::size_t> inputDims = op.getInput(0)->dims();
+    std::vector<std::size_t> indices(inputDims.size(), 0); // Initialize indices for each dimension
+
+    // Create an array of ranges for each axis
+    std::vector<std::vector<int>> ranges(inputDims.size());
+
+    // Generate ranges dynamically for each dimension
+    for (std::size_t axisIdx = 0; axisIdx < inputDims.size(); ++axisIdx) {
+        if (std::find(op.axes().begin(), op.axes().end(), axisIdx) != op.axes().end()) {
+            // This axis is being sliced
+            int start = op.starts()[axisIdx];
+            int end = op.ends()[axisIdx];
+            int step = op.steps()[axisIdx];
+
+            start = start >= 0 ? start: start + inputDims[axisIdx];
+            start = std::max(0, std::min(start, static_cast<int>(inputDims[axisIdx])));
+            end = end >= 0 ? end: end + inputDims[axisIdx];
+            end = std::max(0, std::min(end, static_cast<int>(inputDims[axisIdx])));
+            // Generate the range of indices for this axis
+            for (int idx = start; (step > 0) ? (idx < end) : (idx > end); idx += step) {
+                ranges[axisIdx].push_back(idx);
+            }
+        } else {
+            // This axis is not being sliced, keep its full range (just one index in the range)
+            ranges[axisIdx].push_back(0);
+        }
+    }
+
+    // Use iterative stack to handle all dimensions dynamically
+    std::vector<std::size_t> currentIndex(inputDims.size(), 0); // Track current index in each dimension
+    std::vector<std::size_t> stackPointer(inputDims.size(), 0); // Pointers to ranges for each dimension
+    std::size_t dim = 0; // Start at the first dimension
+    std::size_t offset = 0; // Offset in the output tensor
+
+    while (dim < inputDims.size()) {
+        if (stackPointer[dim] < ranges[dim].size()) {
+            // Set the current index for this dimension
+            currentIndex[dim] = ranges[dim][stackPointer[dim]];
+            stackPointer[dim]++;
+
+            if (dim == inputDims.size() - 1) {
+                // We've reached the last dimension, process this index combination
+                std::size_t linearIndex = getLinearIndex(inputDims, currentIndex);
+                op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(linearIndex), 1, offset);
+                offset++;
+            } else {
+                // Move to the next dimension
+                dim++;
+            }
+        } else {
+            // Reset this dimension and move back to the previous one
+            stackPointer[dim] = 0;
+            dim--;
+        }
+    }
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/SplitImpl.cpp b/src/backend/generic/operator/SplitImpl.cpp
new file mode 100644
index 000000000..4ed9d28d1
--- /dev/null
+++ b/src/backend/generic/operator/SplitImpl.cpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/SplitImpl.hpp"
+
+#include <functional>  // std::multiplies
+#include <memory>
+#include <numeric>     // std::accumulate
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Split.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+void Aidge::Split_OpImpl::forward() {
+    const Split_Op& op = dynamic_cast<const Split_Op&>(mOp);
+    const auto& axis = op.axis();
+    const auto& splits = op.split();
+    const auto& dims = op.getInput(0)->dims();
+
+    //Compute pre/post axis strides
+    const std::size_t stride_pre = std::accumulate(dims.cbegin(), dims.cbegin() + axis, 1, std::multiplies<std::size_t>());
+    const std::size_t stride_post = std::accumulate(dims.crbegin(), dims.crbegin() + dims.size() -1 - axis, 1, std::multiplies<std::size_t>());
+    for (auto i = 0; i < op.nbOutputs(); ++i)
+    {
+        DimSize_t chunkIdxOnAxis = std::accumulate(splits.cbegin(), splits.cbegin() + i, 0) * stride_post;
+        DimSize_t offset = 0;
+        for (std::size_t j = 0; j < stride_pre; ++j)
+        {
+            // Compute chunk position in input tensor
+            DimSize_t idx = j * stride_post * dims[axis] + chunkIdxOnAxis;
+            // Copy chunk in output
+            op.getOutput(i)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(idx),
+                                            splits[i] * stride_post, offset);
+            offset += splits[i] * stride_post;
+        }
+
+    }
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/SqueezeImpl.cpp b/src/backend/generic/operator/SqueezeImpl.cpp
new file mode 100644
index 000000000..da285366c
--- /dev/null
+++ b/src/backend/generic/operator/SqueezeImpl.cpp
@@ -0,0 +1,36 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/SqueezeImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Squeeze.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+void Aidge::Squeeze_OpImpl::forward() {
+    const Squeeze_Op &op_ = static_cast<const Squeeze_Op &>(mOp);
+    // Check if input is provided
+    AIDGE_ASSERT(op_.getInput(0), "Squeeze : missing input 0");
+
+    op_.getOutput(0)->getImpl()->copy(op_.getInput(0)->getImpl()->rawPtr(),
+                                        op_.getInput(0)->size());
+}
+
+
+std::shared_ptr<ProdConso> Squeeze_OpImpl::getProdConso() const {
+    return std::make_shared<ProdConso>(mOp, true);  // Squeeze is an in-place operation!
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/StackImpl.cpp b/src/backend/generic/operator/StackImpl.cpp
new file mode 100644
index 000000000..1d22a50dd
--- /dev/null
+++ b/src/backend/generic/operator/StackImpl.cpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/StackImpl.hpp"
+
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Elts.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Stack.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+// TODO: Check why getRequiredMemory is always called with empty vector as
+// inputSize
+Elts_t StackProdConso::getRequiredMemory(
+    const Aidge::IOIndex_t inputIdx,
+    const std::vector<DimSize_t> &/*inputsSize*/) const {
+    AIDGE_ASSERT(mOp.getRawInput(inputIdx), "requires valid input");
+
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    // The produced data after one forward pass is simply the input size,
+    // we do not produce the whole output tensor every time.
+    if (op.forwardStep() <= op.maxElements()) {
+        return Elts_t::DataElts(op.getInput(inputIdx)->size());
+    } else {
+        return Elts_t::NoneElts();
+    }
+}
+
+void StackProdConso::resetConsummerProducer() {
+    ProdConso::resetConsummerProducer();
+
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    op.forwardStep() = 0;
+}
+
+void StackOpImpl::forward() {
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    AIDGE_ASSERT(op.getInput(0), "missing input #0");
+    AIDGE_ASSERT((op.forwardStep() < op.maxElements()),
+                 "cannot forward anymore, maximum number of elements to stack "
+                 "exceeded");
+
+    op.getOutput(0)->getImpl()->copy(
+        op.getInput(0)->getImpl()->rawPtr(),
+        op.getInput(0)->size(),
+        op.forwardStep() * op.getInput(0)->size());
+}
+
+void StackOpImpl::backward() {
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    AIDGE_ASSERT(op.backwardStep() > 0, "Stack operator has not been run forward");
+
+    auto inputGrad = op.getInput(0)->grad();
+    auto outputGrad = op.getOutput(0)->grad();
+
+    *inputGrad = outputGrad->extract({op.backwardStep() -1 }).clone();
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/UnfoldImpl.cpp b/src/backend/generic/operator/UnfoldImpl.cpp
new file mode 100644
index 000000000..d17016bb3
--- /dev/null
+++ b/src/backend/generic/operator/UnfoldImpl.cpp
@@ -0,0 +1,67 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/UnfoldImpl.hpp"
+
+#include <cmath>  // std::floor
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Unfold.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+template <DimIdx_t DIM>
+void Unfold_OpImpl<DIM>::forward() {
+    const Unfold_Op<DIM>& op = dynamic_cast<const Unfold_Op<DIM>&>(mOp);
+    const auto kernelDims = op.kernelDims();
+    const auto dilationDims = op.dilationDims();
+    const auto strideDims = op.strideDims();
+    const DimSize_t inHeight = op.getInput(0)->dims()[2];
+    const DimSize_t inWidth = op.getInput(0)->dims()[3];
+    const DimSize_t inChannels = op.getInput(0)->dims()[1];
+
+    const DimSize_t kernelExtentHeight = op.dilationDims()[0] *
+                                            (op.kernelDims()[0] - 1) + 1;
+    const DimSize_t outHeight = 1 + static_cast<DimSize_t>(
+                    std::floor(static_cast<float>(inHeight - kernelExtentHeight) /
+                            static_cast<float>(op.strideDims()[0])));
+    const DimSize_t kernelExtentWidth = op.dilationDims()[1] *
+                                            (op.kernelDims()[1] - 1) + 1;
+    const DimSize_t outWidth = 1 + static_cast<DimSize_t>(
+                    std::floor(static_cast<float>(inWidth - kernelExtentWidth) /
+                            static_cast<float>(op.strideDims()[1])));
+    const DimSize_t outChannels = op.getOutput(0)->dims()[1];
+
+    for (DimSize_t n = 0; n < op.getOutput(0)->dims()[0]; ++n) {
+        for (DimSize_t outC = 0; outC < outChannels; ++outC) {
+            const auto inOffsetW = outC % kernelDims[1];
+            const auto inOffsetH = (outC / kernelDims[1]) % kernelDims[0];
+            const auto inC = outC / kernelDims[0] / kernelDims[1];
+
+            for (DimSize_t outH = 0; outH < outHeight; ++outH) {
+                const auto inH = outH * strideDims[0] + inOffsetH * dilationDims[0];
+
+                for (DimSize_t outW = 0; outW < outWidth; ++outW) {
+                    const auto inW = outW * strideDims[1] + inOffsetW * dilationDims[1];
+
+                    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(((n * inChannels + inC) * inHeight + inH) * inWidth + inW), 1,
+                        ((n * outChannels + outC) * outHeight + outH) * outWidth + outW);
+                }
+            }
+        }
+    }
+}
+
+template class Unfold_OpImpl<2>;
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/UnsqueezeImpl.cpp b/src/backend/generic/operator/UnsqueezeImpl.cpp
new file mode 100644
index 000000000..9e8a52774
--- /dev/null
+++ b/src/backend/generic/operator/UnsqueezeImpl.cpp
@@ -0,0 +1,34 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/UnsqueezeImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Unsqueeze.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+void Unsqueeze_OpImpl::forward() {
+    const Unsqueeze_Op &op_ = static_cast<const Unsqueeze_Op &>(mOp);
+    // Check if input is provided
+    AIDGE_ASSERT(op_.getInput(0), "Unsqueeze : missing input 0");
+    op_.getOutput(0)->getImpl()->copy(op_.getInput(0)->getImpl()->rawPtr(),
+                                        op_.getInput(0)->size());
+}
+
+std::shared_ptr<ProdConso> Unsqueeze_OpImpl::getProdConso() const {
+    return std::make_shared<ProdConso>(mOp, true);  // Unsqueeze is an in-place operation!
+}
+
+} // namespace Aidge
diff --git a/src/operator/Flatten.cpp b/src/operator/Flatten.cpp
index c77adc374..e02c7abe6 100644
--- a/src/operator/Flatten.cpp
+++ b/src/operator/Flatten.cpp
@@ -14,21 +14,15 @@
 #include <cstddef>    // std::size_t
 #include <cstdint>    // std::int64_t
 #include <memory>
-#include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
 
+#include "aidge/backend/generic/operator/FlattenImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-void Aidge::Flatten_OpImpl::forward() {
-    const Flatten_Op& op = dynamic_cast<const Flatten_Op&>(mOp);
-    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
-}
-
-//////////////////////////////////////////////////
 
 const std::string Aidge::Flatten_Op::Type = "Flatten";
 
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 10e20046f..a4cb4aab0 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -14,6 +14,7 @@
 #include <string>
 #include <vector>
 
+#include "aidge/backend/generic/operator/GatherImpl.hpp"
 #include "aidge/operator/Gather.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
@@ -50,34 +51,6 @@ std::shared_ptr<Aidge::Operator> Aidge::Gather_Op::clone() const {
     return std::make_shared<Gather_Op>(*this);
 }
 
-void Aidge::Gather_OpImpl::forward() {
-    const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
-
-    const std::size_t axisIdx = static_cast<std::size_t>(op.axis()) + (op.axis() >= 0 ? 0 : op.getInput(0)->dims().size());
-
-    std::size_t postAxisElems = 1;
-    for (std::size_t i = axisIdx + 1; i < op.getInput(0)->dims().size(); ++i) {
-        postAxisElems *= op.getInput(0)->dims()[i];
-    }
-    std::size_t preAxisElems = 1;
-    for (std::size_t i = 0; i < axisIdx; ++i) {
-        preAxisElems *= op.getInput(0)->dims()[i];
-    }
-
-    std::size_t outputOffset = 0;
-    for (std::size_t i=0; i<preAxisElems; ++i)
-    {
-        for(std::size_t j = 0; j < op.indices().size(); ++j)
-        {
-            const std::size_t idx = op.indices()[j] >= 0 ?
-                                        static_cast<std::size_t>(op.indices()[j]) :
-                                        static_cast<std::size_t>(op.indices()[j] + static_cast<int>(op.getInput(0)->dims()[axisIdx]));
-            op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i * postAxisElems * op.getInput(0)->dims()[axisIdx] + idx * postAxisElems), postAxisElems, outputOffset);
-            outputOffset += postAxisElems;
-        }
-    }
-}
-
 bool Aidge::Gather_Op::dimsForwarded() const {
     if (getInput(1) && !getInput(1)->undefined()) {
         // output dims are data dependent
diff --git a/src/operator/Identity.cpp b/src/operator/Identity.cpp
index d01d57678..25bb5a5b6 100644
--- a/src/operator/Identity.cpp
+++ b/src/operator/Identity.cpp
@@ -9,19 +9,12 @@
  *
  ********************************************************************************/
 
-#include <string>
-
 #include "aidge/operator/Identity.hpp"
 
-void Aidge::Identity_OpImpl::forward() {
-    const Identity_Op& op = dynamic_cast<const Identity_Op&>(mOp);
-    op.getOutput(0)->setBackend(op.getInput(0)->backend(), op.getInput(0)->device());
-    op.getOutput(0)->setDataType(op.getInput(0)->dataType());
-    op.getOutput(0)->setDataFormat(op.getInput(0)->dataFormat());
-    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
-}
+#include <string>
+
+#include "aidge/backend/generic/operator/IdentityImpl.hpp"
 
-//////////////////////////////////////////////////
 
 const std::string Aidge::Identity_Op::Type = "Identity";
 
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index 05815f929..9301b0af2 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -15,63 +15,11 @@
 #include <string>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/generic/operator/MemorizeImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::Memorize_ProdConso::getNbRequiredData(
-    Aidge::IOIndex_t inputIdx) const
-{
-    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-
-    if (op.scheduleStep() == 0 && inputIdx == 0) {
-        // No data input is required for the initial step.
-        // Initialization data is required however.
-        return Elts_t::NoneElts();
-    }
-    else if (op.scheduleStep() > 0 && inputIdx == 1) {
-        // No initialization data is required after the initial step.
-        return Elts_t::NoneElts();
-    }
-    else {
-        return ProdConso::getNbRequiredData(inputIdx);
-    }
-}
-
-Aidge::Elts_t Aidge::Memorize_ProdConso::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    assert(mOp.getRawOutput(outputIdx) && "requires valid output");
-
-    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-
-    if ((op.endStep() > 0) && (outputIdx == 1) && (op.scheduleStep() >= op.endStep())) {
-        return Elts_t::NoneElts();
-    }
-    else {
-        return Elts_t::DataElts(op.getOutput(outputIdx)->size());
-    }
-}
-
-void Aidge::Memorize_ProdConso::updateConsummerProducer() {
-    ProdConso::updateConsummerProducer();
-
-    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    AIDGE_ASSERT(op.endStep() == 0 || op.scheduleStep() <= op.endStep(), "cannot update consumer producer anymore, number of cycles exceeded");
-}
-
-void Aidge::Memorize_OpImpl::forward() {
-    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-
-    AIDGE_ASSERT((op.endStep() == 0) || (op.forwardStep() <= op.endStep()), "cannot forward anymore, number of cycles exceeded");
-
-    if (op.forwardStep() == 0) {
-        op.getOutput(0)->getImpl()->copy(op.getInput(1)->getImpl()->rawPtr(), op.getInput(1)->size());
-    }
-    else {
-        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
-    }
-}
 
 const std::string Aidge::Memorize_Op::Type = "Memorize";
 
diff --git a/src/operator/Move.cpp b/src/operator/Move.cpp
index adabcd0d3..a637f8331 100644
--- a/src/operator/Move.cpp
+++ b/src/operator/Move.cpp
@@ -9,13 +9,12 @@
  *
  ********************************************************************************/
 
-#include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Move.hpp"
 
-void Aidge::Move_OpImpl::forward() {
-    const Move_Op& op = dynamic_cast<const Move_Op&>(mOp);
-    op.getOutput(0)->copyFrom(*(op.getInput(0)));
-}
+#include <string>
+
+#include "aidge/backend/generic/operator/MoveImpl.hpp"
+#include "aidge/data/Tensor.hpp"
 
 const std::string Aidge::Move_Op::Type = "Move";
 
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index c93078ed1..e3a41bc7a 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -15,41 +15,13 @@
 #include <stdexcept>
 #include <string>
 
+#include "aidge/backend/generic/operator/PopImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::Pop_ProdConso::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    assert(mOp.getRawInput(inputIdx) && "requires valid input");
-
-    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
-    AIDGE_ASSERT(!op.getInput(inputIdx)->empty(), "Pop operator requires known, non-empty, input dims for scheduling. You might have an unresolved data dependency upstream in the computing graph.");
-    return Elts_t::DataElts(op.getInput(inputIdx)->size()
-        / op.getInput(inputIdx)->dims()[0]);
-}
-
-void Aidge::Pop_OpImpl::forward() {
-    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
-
-    assert(op.getInput(0) && "missing input #0");
-    *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()}).clone();
-}
-
-void Aidge::Pop_OpImpl::backward() {
-    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
-
-    auto outputGrad = op.getOutput(0)->grad();
-    auto inputGrad = op.getInput(0)->grad();
-
-    inputGrad->getImpl()->copy(
-        outputGrad->getImpl()->rawPtr(),
-        outputGrad->size(),
-        (op.backwardStep()-1) * outputGrad->size());
-}
-
-//////////////////////////////////////////////////////////
 
 const std::string Aidge::Pop_Op::Type = "Pop";
 
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 8b42cb514..b12fd486d 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -14,30 +14,15 @@
 #include <cstddef>    // std::size_t
 #include <cstdint>    // std::int64_t
 #include <memory>
-#include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
 
+#include "aidge/backend/generic/operator/ReshapeImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-void Aidge::Reshape_OpImpl::forward() {
-    const Reshape_Op& op = dynamic_cast<const Reshape_Op&>(mOp);
-    AIDGE_ASSERT(op.getInput(0), "missing input#0");
-    // const auto& input = op.getInput(0)->refCastFrom(mInputFallback, *op.getOutput(0));
-    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
-}
-
-void Aidge::Reshape_OpImpl::backward() {
-    const Reshape_Op& op = dynamic_cast<const Reshape_Op&>(mOp);
-    AIDGE_ASSERT(op.getOutput(0)->grad(), "missing gradient for output#0");
-    // const auto& output_grad = op.getOutput(0)->grad()->refCastFrom(mOutputGradFallback, *op.getOutput(0)->grad());
-    op.getInput(0)->grad()->getImpl()->copy(op.getOutput(0)->grad()->getImpl()->rawPtr(), op.getOutput(0)->size());
-}
-
-//////////////////////////////////////////////////
 
 const std::string Aidge::Reshape_Op::Type = "Reshape";
 
diff --git a/src/operator/Select.cpp b/src/operator/Select.cpp
index 67e792cd0..6e686ecc4 100644
--- a/src/operator/Select.cpp
+++ b/src/operator/Select.cpp
@@ -14,37 +14,13 @@
 #include <string>
 #include <vector>
 
+#include "aidge/backend/generic/operator/SelectImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Select.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 
-void Aidge::Select_OpImpl::forward() {
-    const Select_Op& op = dynamic_cast<const Select_Op&>(mOp);
-    AIDGE_ASSERT(op.getInput(0)->size() > 0, "Select input is empty!");
-
-    std::shared_ptr<Tensor> selectFallback;
-    const auto& select = op.getInput(0)->refCastFrom(selectFallback, DataType::Int32, "cpu");
-    const auto selectVal = select.get<int32_t>(0);
-    AIDGE_ASSERT(selectVal >= 0 && selectVal < op.nbInputs() - 1, "Select input out of range. Expected value in range [0, {}], got {}", op.nbInputs() - 2, selectVal);
-
-    op.getOutput(0)->getImpl()->copy(op.getInput(selectVal + 1)->getImpl()->rawPtr(), op.getInput(selectVal + 1)->size());
-}
-
-void Aidge::Select_OpImpl::backward() {
-    const Select_Op& op = dynamic_cast<const Select_Op&>(mOp);
-    AIDGE_ASSERT(op.getInput(0)->size() > 0, "Select input is empty!");
-
-    std::shared_ptr<Tensor> selectFallback;
-    const auto& select = op.getInput(0)->refCastFrom(selectFallback, DataType::Int32, "cpu");
-    const auto selectVal = select.get<int32_t>(0);
-    AIDGE_ASSERT(selectVal >= 0 && selectVal < op.nbInputs() - 1, "Select input out of range. Expected value in range [0, {}], got {}", op.nbInputs() - 2, selectVal);
-
-    op.getInput(selectVal + 1)->grad()->getImpl()->copy(op.getOutput(0)->grad()->getImpl()->rawPtr(), op.getOutput(0)->size());
-}
-
-//////////////////////////////////////////////////
 
 const std::string Aidge::Select_Op::Type = "Select";
 
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index c38f52d76..4db470473 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -14,20 +14,13 @@
 #include <string>
 #include <vector>
 
+#include "aidge/backend/generic/operator/ShapeImpl.hpp"
 #include "aidge/operator/Shape.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/Log.hpp"
 
-void Aidge::Shape_OpImpl::forward() {
-    // Output is already valid after forwardDims()
-    // But it may be with the wrong device (default cpu)
-    // This can happen if forwardDims is called before setBackend
-    const Shape_Op& op = dynamic_cast<const Shape_Op&>(mOp);
-    op.getOutput(0)->setBackend(op.getInput(0)->backend(), op.getInput(0)->device());
-}
-
 ///////////////////////////////////////////////
 
 const std::string Aidge::Shape_Op::Type = "Shape";
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 7945200aa..de08ae6e9 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -19,12 +19,10 @@
 
 #include <fmt/format.h>
 
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Data.hpp"
+#include "aidge/backend/generic/operator/SliceImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/data/Data.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 
@@ -65,84 +63,6 @@ std::shared_ptr<Aidge::Operator> Aidge::Slice_Op::clone() const {
     return std::make_shared<Slice_Op>(*this);
 }
 
-// Helper function to calculate the linear index for multi-dimensional data
-size_t getLinearIndex(const std::vector<size_t>& dims, const std::vector<size_t>& indices) {
-    size_t linearIndex = 0;
-    size_t stride = 1;
-    for (int i = dims.size() - 1; i >= 0; --i) {
-        linearIndex += indices[i] * stride;
-        stride *= dims[i];
-    }
-    return linearIndex;
-}
-
-void Aidge::Slice_OpImpl::forward() {
-    const Slice_Op& op = dynamic_cast<const Slice_Op&>(mOp);
-
-    if (!op.getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", op.Type);
-    }
-    AIDGE_ASSERT((op.axes().size() == op.ends().size()) &&
-                 (op.axes().size() == op.starts().size()),
-                 "Starts, Ends and Axes arguments should be the same size.");
-
-    const std::vector<size_t> inputDims = op.getInput(0)->dims();
-    std::vector<size_t> indices(inputDims.size(), 0); // Initialize indices for each dimension
-
-    // Create an array of ranges for each axis
-    std::vector<std::vector<int>> ranges(inputDims.size());
-
-    // Generate ranges dynamically for each dimension
-    for (size_t axisIdx = 0; axisIdx < inputDims.size(); ++axisIdx) {
-        if (std::find(op.axes().begin(), op.axes().end(), axisIdx) != op.axes().end()) {
-            // This axis is being sliced
-            int start = op.starts()[axisIdx];
-            int end = op.ends()[axisIdx];
-            int step = op.steps()[axisIdx];
-
-            start = start >= 0 ? start: start + inputDims[axisIdx];
-            start = std::max(0, std::min(start, static_cast<int>(inputDims[axisIdx])));
-            end = end >= 0 ? end: end + inputDims[axisIdx];
-            end = std::max(0, std::min(end, static_cast<int>(inputDims[axisIdx])));
-            // Generate the range of indices for this axis
-            for (int idx = start; (step > 0) ? (idx < end) : (idx > end); idx += step) {
-                ranges[axisIdx].push_back(idx);
-            }
-        } else {
-            // This axis is not being sliced, keep its full range (just one index in the range)
-            ranges[axisIdx].push_back(0);
-        }
-    }
-
-    // Use iterative stack to handle all dimensions dynamically
-    std::vector<size_t> currentIndex(inputDims.size(), 0); // Track current index in each dimension
-    std::vector<size_t> stackPointer(inputDims.size(), 0); // Pointers to ranges for each dimension
-    size_t dim = 0; // Start at the first dimension
-    size_t offset = 0; // Offset in the output tensor
-
-    while (dim < inputDims.size()) {
-        if (stackPointer[dim] < ranges[dim].size()) {
-            // Set the current index for this dimension
-            currentIndex[dim] = ranges[dim][stackPointer[dim]];
-            stackPointer[dim]++;
-
-            if (dim == inputDims.size() - 1) {
-                // We've reached the last dimension, process this index combination
-                size_t linearIndex = getLinearIndex(inputDims, currentIndex);
-                op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(linearIndex), 1, offset);
-                offset++;
-            } else {
-                // Move to the next dimension
-                dim++;
-            }
-        } else {
-            // Reset this dimension and move back to the previous one
-            stackPointer[dim] = 0;
-            dim--;
-        }
-    }
-}
-
 bool Aidge::Slice_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined())
         || (getInput(2) && !getInput(2)->undefined())
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index 09aad0674..f93a36606 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -11,48 +11,16 @@
 
 #include "aidge/operator/Split.hpp"
 
-#include <cassert>
 #include <cstddef>
 #include <cstdint>
 #include <string>
-#include <utility>
 #include <vector>
 
-#include <fmt/format.h>
-
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Data.hpp"
+#include "aidge/backend/generic/operator/SplitImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-void Aidge::Split_OpImpl::forward() {
-    const Split_Op& op = dynamic_cast<const Split_Op&>(mOp);
-    const auto axis = op.axis();
-    const auto splits = op.split();
-    const auto dims = op.getInput(0)->dims();
-
-    //Compute pre/post axis strides
-    const std::size_t stride_pre = std::accumulate(dims.cbegin(), dims.cbegin() + axis, 1, std::multiplies<std::size_t>());
-    const std::size_t stride_post = std::accumulate(dims.crbegin(), dims.crbegin() + dims.size() -1 - axis, 1, std::multiplies<std::size_t>());
-    for (auto i = 0; i < op.nbOutputs(); ++i)
-    {
-        DimSize_t chunkIdxOnAxis = std::accumulate(splits.cbegin(), splits.cbegin() + i, 0) * stride_post;
-        DimSize_t offset = 0;
-        for (std::size_t j = 0; j < stride_pre; ++j)
-        {
-            // Compute chunk position in input tensor
-            DimSize_t idx = j * stride_post * dims[axis] + chunkIdxOnAxis;
-            // Copy chunk in output
-            op.getOutput(i)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(idx),
-                                            splits[i] * stride_post, offset);
-            offset += splits[i] * stride_post;
-        }
-
-    }
-}
-
-/////////////////////////////////////////////////////
 
 const std::string Aidge::Split_Op::Type = "Split";
 
diff --git a/src/operator/Squeeze.cpp b/src/operator/Squeeze.cpp
index a44146366..ea3452878 100644
--- a/src/operator/Squeeze.cpp
+++ b/src/operator/Squeeze.cpp
@@ -12,18 +12,13 @@
 #include "aidge/operator/Squeeze.hpp"
 
 #include <algorithm>
-#include <bitset>
+#include <cstddef>
 #include <cstdint>
-#include <fmt/core.h>
-#include <functional>
-#include <iterator>
-#include <limits>
 #include <memory>
-#include <stdexcept>
 #include <string>
 #include <vector>
 
-#include "aidge/data/Data.hpp"
+#include "aidge/backend/generic/operator/SqueezeImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Log.hpp"
@@ -33,6 +28,28 @@
 namespace Aidge {
 const std::string Squeeze_Op::Type = "Squeeze";
 
+Squeeze_Op::Squeeze_Op(const std::vector<std::int8_t> &axes)
+    : OperatorTensor(
+        Type,
+        {InputCategory::Data, InputCategory::OptionalData},
+        1),
+    mAttributes(
+        std::make_shared<Attributes_>(attr<SqueezeAttr::Axes>(axes)))
+{
+    mImpl = std::make_shared<Squeeze_OpImpl>(*this);
+}
+
+Squeeze_Op::Squeeze_Op(const Squeeze_Op &op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Squeeze_Op, *this, op.backend());
+    } else {
+        mImpl = std::make_shared<Squeeze_OpImpl>(*this);
+    }
+}
+
 bool Squeeze_Op::dimsForwarded() const {
   if ((getInput(1) && !getInput(1)->undefined())) {
     // output dims are data dependent
@@ -43,103 +60,80 @@ bool Squeeze_Op::dimsForwarded() const {
 }
 
 bool Squeeze_Op::forwardDims(bool allowDataDependency) {
-  // error checking
-  if (!inputsAssociated(false) || getInput(0)->undefined()) {
-    return false;
-  }
-
-  std::shared_ptr<Tensor> fallback;
-  // Input 1 is axes to squeeze (can also be given via attribute)
-  if (getInput(1)) {
-    if (!this->axes().empty()) {
-      Log::notice("{} : ignoring non-empty axes attribute because input#1 "
-                  "takes precedence",
-                  type());
+    // error checking
+    if (!inputsAssociated(false) || getInput(0)->undefined()) {
+        return false;
     }
 
-    if (!allowDataDependency) {
-      Log::warn("{} : unable to forwardDims() because output dims are data "
-                "dependent on input#1",
-                type());
-      return false;
+    std::shared_ptr<Tensor> fallback;
+    // Input 1 is axes to squeeze (can also be given via attribute)
+    if (getInput(1)) {
+        if (!this->axes().empty()) {
+            Log::warn("{} : ignoring non-empty axes attribute because input#1 "
+                        "takes precedence",
+                        type());
+        }
+
+        if (!allowDataDependency) {
+            Log::error("{} : unable to forwardDims() because output dims are data "
+                        "dependent on input#1",
+                        type());
+            return false;
+        }
+
+        this->axes().clear(); // If both are provided input would override attrs
+        this->axes().reserve(getInput(1)->size());
+        const auto &axes =
+                    getInput(1)->refCastFrom(fallback, NativeType_v<std::int8_t>, "cpu");
+        if (axes.nbDims() == 0) {
+            this->axes().clear();
+        } else {
+            AIDGE_ASSERT(
+                axes.nbDims() == 1,
+                "Axes input tensor should be of size 1. Received {} dimensions : {}",
+                axes.nbDims(), axes.dims());
+            std::copy_n(static_cast<std::int8_t *>(axes.getImpl()->hostPtr()), axes.size(),
+                        std::back_inserter(this->axes()));
+        }
     }
-
-    this->axes().clear(); // If both are provided input would override attrs
-    this->axes().reserve(getInput(1)->size());
-    const auto &axes =
-        getInput(1)->refCastFrom(fallback, NativeType_v<int8_t>, "cpu");
-    if (axes.nbDims() == 0) {
-      this->axes().clear();
+    std::vector<DimSize_t> inputDims = getInput(0)->dims();
+    std::size_t newSize = inputDims.size();
+
+    if (this->axes().size() == 0) {
+        for (std::size_t i = 0; i < inputDims.size(); ++i) {
+            if (inputDims[i] == 1) {
+                --newSize;
+                inputDims[i] = 0;
+            }
+        }
     } else {
-      AIDGE_ASSERT(
-          axes.nbDims() == 1,
-          "Axes input tensor should be of size 1. Received {} dimensions : {}",
-          axes.nbDims(), axes.dims());
-      std::copy_n(static_cast<int8_t *>(axes.getImpl()->hostPtr()), axes.size(),
-                  std::back_inserter(this->axes()));
+        for (std::int8_t axis : this->axes()) {
+            axis = axis >= 0 ? axis : axis + static_cast<std::int8_t>(inputDims.size());
+            if ((axis < 0) || (axis >= static_cast<std::int8_t>(inputDims.size()))) {
+                Log::error("{} : Axis index OutOfBounds error, expected value "
+                    "within size limits of input tensor : "
+                    "[-{},{}], got {}.",
+                    type(), inputDims.size(), inputDims.size() - 1, axis);
+                return false;
+            }
+            if (inputDims[axis] > 1) {
+                Log::error("Cannot squeeze dimensions with shape greater than 1");
+                return false;
+            }
+            newSize -= inputDims[axis];
+            inputDims[axis] = 0;
+        }
     }
-  }
 
-  std::vector<DimSize_t> input_dims = getInput(0)->dims();
-  std::vector<DimSize_t> output_dims;
-  output_dims.reserve(input_dims.size());
-  std::vector<DimIdx_t> axes_rectified_idx;
-  axes_rectified_idx.reserve(input_dims.size());
-
-  if (this->axes().size() == 0) { // squeeze() => squeeze all 1 sized dimensions
-    Log::debug("this->axes() is empty, all 1 sized dim will be squeezed. If "
-               "this is an error ensure that the values are properly set via "
-               "attribute or data input#1.");
-    std::copy_if(input_dims.begin(), input_dims.end(),
-                 std::back_inserter(output_dims),
-                 [](DimSize_t dim) { return dim != 1; });
-  } else { // squeeze({N,.....}) => squeeze all specified dimensions that are of
-           // size 1.
-    /////// ensure indexes validity and set pythonic negative indexes to their
-    // positive value
-    for (const int8_t &axis : this->axes()) {
-      AIDGE_ASSERT(axis >= static_cast<int8_t>(-input_dims.size()) &&
-                       axis < static_cast<int8_t>(input_dims.size()),
-                   "{} : Axis index OutOfBounds error, expected value "
-                   "within size limits of input tensor : "
-                   "[-{},{}], got {}.",
-                   type(), input_dims.size(), input_dims.size() - 1, axis);
-      auto temp =
-          static_cast<DimIdx_t>(axis >= 0 ? axis : axis + input_dims.size());
-      if (axes_rectified_idx.end() == std::find(axes_rectified_idx.begin(),
-                                                axes_rectified_idx.end(),
-                                                temp)) {
-        axes_rectified_idx.push_back(temp);
-      }
+    std::vector<DimSize_t> outputDims;
+    outputDims.reserve(newSize);
+    for (const auto& dim : inputDims) {
+        if (dim > 0)
+            outputDims.push_back(dim);
     }
 
-    // Create output_dims
-    // speeds up binary search
-    std::sort(axes_rectified_idx.begin(), axes_rectified_idx.end());
-    DimSize_t i = 0;
-    std::copy_if(
-        input_dims.begin(), input_dims.end(), std::back_inserter(output_dims),
-        [&axes_rectified_idx, &i, &input_dims](DimSize_t dim) {
-          // if current dim index is found in axes to squeeze
-          // we ensure that this axis is 1 sized, otherwise an error is thrown
-          bool ok = true;
-          if (std::binary_search(axes_rectified_idx.begin(),
-                                 axes_rectified_idx.end(), i)) {
-            AIDGE_ASSERT(dim == 1,
-                         "{} : Tried to squeeze axis nb {} of a tensor of dim "
-                         "{}. Dim to squeeze has to be 1-sized, got size {}."
-                         "Axes to squeeze : {}",
-                         __func__, i, input_dims, input_dims[i],
-                         axes_rectified_idx);
-            ok = false;
-          }
-          i++; // Incrementing counter since there is no enumerate
-               // fctn (until C++23)
-          return ok;
-        });
-  }
-  mOutputs[0]->resize(output_dims);
-  return true;
+    mOutputs[0]->resize(outputDims);
+    return true;
 }
 
 void Squeeze_Op::setBackend(const std::string &name,
@@ -156,13 +150,11 @@ std::set<std::string> Aidge::Squeeze_Op::getAvailableBackends() const {
   return Registrar<Squeeze_Op>::getKeys();
 }
 
-void Aidge::Squeeze_OpImpl::forward() {
-  const Squeeze_Op &op_ = static_cast<const Squeeze_Op &>(mOp);
-  // Check if input is provided
-  AIDGE_ASSERT(op_.getInput(0), "Squeeze : missing input 0");
+////////////////////////////////////////////////////////////////////////////////
 
-  op_.getOutput(0)->getImpl()->copy(op_.getInput(0)->getImpl()->rawPtr(),
-                                    op_.getInput(0)->size());
+std::shared_ptr<Node> Squeeze(const std::vector<std::int8_t> axes,
+    const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Squeeze_Op>(axes), name);
 }
 
 } // namespace Aidge
diff --git a/src/operator/Stack.cpp b/src/operator/Stack.cpp
index 9f8cd1639..b884baaa4 100644
--- a/src/operator/Stack.cpp
+++ b/src/operator/Stack.cpp
@@ -14,6 +14,7 @@
 #include <memory>
 #include <string>
 
+#include "aidge/backend/generic/operator/StackImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -22,57 +23,10 @@
 
 namespace Aidge {
 
-// TODO: Check why getRequiredMemory is always called with empty vector as
-// inputSize
-Elts_t StackProdConso::getRequiredMemory(
-    const Aidge::IOIndex_t inputIdx,
-    const std::vector<DimSize_t> &/*inputsSize*/) const {
-    assert(mOp.getRawInput(inputIdx) && "requires valid input");
-
-    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
-    // The produced data after one forward pass is simply the input size,
-    // we do not produce the whole output tensor every time.
-    if (op.forwardStep() <= op.maxElements()) {
-        return Elts_t::DataElts(op.getInput(inputIdx)->size());
-    } else {
-        return Elts_t::NoneElts();
-    }
-}
-
-void StackProdConso::resetConsummerProducer() {
-    ProdConso::resetConsummerProducer();
-
-    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
-    op.forwardStep() = 0;
-}
-
-const std::string StackOp::s_type = "Stack";
-
-void StackOpImpl::forward() {
-    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
-    AIDGE_ASSERT(op.getInput(0), "missing input #0");
-    AIDGE_ASSERT((op.forwardStep() < op.maxElements()),
-                 "cannot forward anymore, maximum number of elements to stack "
-                 "exceeded");
-
-    op.getOutput(0)->getImpl()->copy(
-        op.getInput(0)->getImpl()->rawPtr(),
-        op.getInput(0)->size(),
-        op.forwardStep() * op.getInput(0)->size());
-}
-
-void StackOpImpl::backward() {
-    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
-    AIDGE_ASSERT(op.backwardStep() > 0, "Stack operator has not been run forward");
-
-    auto inputGrad = op.getInput(0)->grad();
-    auto outputGrad = op.getOutput(0)->grad();
-
-    *inputGrad = outputGrad->extract({op.backwardStep() -1 }).clone();
-}
+const std::string StackOp::Type = "Stack";
 
 StackOp::StackOp(std::uint32_t maxElements)
-    : OperatorTensor(s_type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
       mAttributes(std::make_shared<Attributes_>(
           attr<StackAttr::MaxElements>(maxElements),
           attr<StackAttr::BackwardStep>(0),
diff --git a/src/operator/Unfold.cpp b/src/operator/Unfold.cpp
index 53b8bd544..888109240 100644
--- a/src/operator/Unfold.cpp
+++ b/src/operator/Unfold.cpp
@@ -18,56 +18,12 @@
 #include <utility>    // std::pair
 #include <vector>
 
+#include "aidge/backend/generic/operator/UnfoldImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::Unfold_OpImpl<DIM>::forward() {
-    const Unfold_Op<DIM>& op = dynamic_cast<const Unfold_Op<DIM>&>(mOp);
-    const auto kernelDims = op.kernelDims();
-    const auto dilationDims = op.dilationDims();
-    const auto strideDims = op.strideDims();
-    const DimSize_t inHeight = op.getInput(0)->dims()[2];
-    const DimSize_t inWidth = op.getInput(0)->dims()[3];
-    const DimSize_t inChannels = op.getInput(0)->dims()[1];
-
-    const DimSize_t kernelExtentHeight = op.dilationDims()[0] *
-                                            (op.kernelDims()[0] - 1) + 1;
-    const DimSize_t outHeight = 1 + static_cast<DimSize_t>(
-                    floor(static_cast<float>(inHeight - kernelExtentHeight) /
-                            static_cast<float>(op.strideDims()[0])));
-    const DimSize_t kernelExtentWidth = op.dilationDims()[1] *
-                                            (op.kernelDims()[1] - 1) + 1;
-    const DimSize_t outWidth = 1 + static_cast<DimSize_t>(
-                    floor(static_cast<float>(inWidth - kernelExtentWidth) /
-                            static_cast<float>(op.strideDims()[1])));
-    const DimSize_t outChannels = op.getOutput(0)->dims()[1];
-
-    for (DimSize_t n = 0; n < op.getOutput(0)->dims()[0]; ++n) {
-        for (DimSize_t outC = 0; outC < outChannels; ++outC) {
-            const auto inOffsetW = outC % kernelDims[1];
-            const auto inOffsetH = (outC / kernelDims[1]) % kernelDims[0];
-            const auto inC = outC / kernelDims[0] / kernelDims[1];
-
-            for (DimSize_t outH = 0; outH < outHeight; ++outH) {
-                const auto inH = outH * strideDims[0] + inOffsetH * dilationDims[0];
-
-                for (DimSize_t outW = 0; outW < outWidth; ++outW) {
-                    const auto inW = outW * strideDims[1] + inOffsetW * dilationDims[1];
-
-                    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(((n * inChannels + inC) * inHeight + inH) * inWidth + inW), 1,
-                        ((n * outChannels + outC) * outHeight + outH) * outWidth + outW);
-                }
-            }
-        }
-    }
-}
-
-template class Aidge::Unfold_OpImpl<2>;
-
-/////////////////////////////////////////////////////////////
 
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::Unfold_Op<DIM>::Type = "Unfold";
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
index 414afc10f..679b420ec 100644
--- a/src/operator/Unsqueeze.cpp
+++ b/src/operator/Unsqueeze.cpp
@@ -12,22 +12,40 @@
 #include "aidge/operator/Unsqueeze.hpp"
 
 #include <cstdint>
-#include <fmt/core.h>
-#include <functional>
 #include <memory>
 #include <string>
 #include <vector>
 
-#include "aidge/data/Data.hpp"
+#include "aidge/backend/generic/operator/UnsqueezeImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Log.hpp"
-#include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 const std::string Unsqueeze_Op::Type = "Unsqueeze";
 
+
+Unsqueeze_Op::Unsqueeze_Op(const std::vector<int8_t> &axes)
+    : OperatorTensor(Type,
+                    {InputCategory::Data, InputCategory::OptionalData},
+                    1),
+      mAttributes(std::make_shared<Attributes_>(attr<UnsqueezeAttr::Axes>(axes)))
+{
+    mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
+}
+
+Unsqueeze_Op::Unsqueeze_Op(const Unsqueeze_Op &op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Unsqueeze_Op, *this, op.backend());
+    } else {
+        mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
+    }
+}
+
 bool Aidge::Unsqueeze_Op::dimsForwarded() const {
   if ((getInput(1) && !getInput(1)->undefined())) {
     // output dims are data dependent
@@ -120,12 +138,11 @@ std::set<std::string> Aidge::Unsqueeze_Op::getAvailableBackends() const {
   return Registrar<Unsqueeze_Op>::getKeys();
 }
 
-void Aidge::Unsqueeze_OpImpl::forward() {
-  const Unsqueeze_Op &op_ = static_cast<const Unsqueeze_Op &>(mOp);
-  // Check if input is provided
-  AIDGE_ASSERT(op_.getInput(0), "Unsqueeze : missing input 0");
-  op_.getOutput(0)->getImpl()->copy(op_.getInput(0)->getImpl()->rawPtr(),
-                                    op_.getInput(0)->size());
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes,
+    const std::string &name) {
+return std::make_shared<Node>(std::make_shared<Unsqueeze_Op>(axes), name);
 }
 
 } // namespace Aidge
diff --git a/src/recipes/ExplicitCastMove.cpp b/src/recipes/ExplicitCastMove.cpp
index c860b9e8a..43eb75797 100644
--- a/src/recipes/ExplicitCastMove.cpp
+++ b/src/recipes/ExplicitCastMove.cpp
@@ -9,10 +9,11 @@
  *
  ********************************************************************************/
 
-#include "aidge/recipes/Recipes.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Cast.hpp"
 #include "aidge/operator/Move.hpp"
+#include "aidge/recipes/Recipes.hpp"
 
 void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {
     // First, remove existing Cast and Move operators, if not needed anymore
diff --git a/unit_tests/operator/Test_Squeeze_Op.cpp b/unit_tests/operator/Test_Squeeze_Op.cpp
index 41822742c..b6b4a7712 100644
--- a/unit_tests/operator/Test_Squeeze_Op.cpp
+++ b/unit_tests/operator/Test_Squeeze_Op.cpp
@@ -80,7 +80,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
                   [&gen, &idx_dims_to_squeeze_dist]() {
                     return idx_dims_to_squeeze_dist(gen);
                   });
-    Log::error("dims_to_sqeeze = {}", dims_to_squeeze);
+    Log::error("dims_to_sqeeze = {}", dims_to_squeeze);
 
     std::shared_ptr<Node> squeeze_node = Squeeze(dims_to_squeeze);
     auto op =
@@ -97,7 +97,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
     // Test
     input_T->resize(dims_in);
     op->setInput(0, input_T);
-    REQUIRE_THROWS(op->forwardDims());
+    REQUIRE(false == op->forwardDims());
   }
   SECTION("Compare with reference output") {
     SECTION("axes is given via attribute") {
@@ -140,7 +140,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
         std::vector<DimSize_t> dims_in{1, 2, 3, 4};
         input_T->resize(dims_in);
 
-        REQUIRE_THROWS(op->forwardDims());
+        REQUIRE(false == (op->forwardDims()));
       }
       SECTION("Squeeze multiple non-sized-axes") {
         std::shared_ptr<Node> squeeze_node =
@@ -152,7 +152,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
         std::array<DimSize_t, 3> dims_in{2, 3, 4};
         input_T->resize(dims_in);
 
-        REQUIRE_THROWS((op->forwardDims()));
+        REQUIRE(false == (op->forwardDims()));
       }
     }
     SECTION("axes is given via tensor") {
@@ -287,7 +287,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
 
       if (nb_dims_tensor > max_nb_dims || not_in_bounds ||
           dim_to_squeeze_not_1_sized) {
-        REQUIRE_THROWS(op->forwardDims());
+        REQUIRE(false == (op->forwardDims()));
       } else {
         // output tensor
         int i = 0;
@@ -381,7 +381,7 @@ TEST_CASE("[core/operator] Squeeze(forward)", "[Squeeze][forward]") {
     }
     if (nb_dims_tensor > max_nb_dims || not_in_bounds ||
         dim_to_squeeze_not_1_sized) {
-      REQUIRE_THROWS(op->forwardDims());
+      REQUIRE(false == (op->forwardDims()));
     } else {
       // output tensor
       int i = 0;
-- 
GitLab