diff --git a/include/aidge/backend/generic/operator/ConcatImpl.hpp b/include/aidge/backend/generic/operator/ConcatImpl.hpp
index 5e2cccbf78066b1e30b6ac15b6cb4fe620087320..cbe0c4e295393d4170102b158c1e939aa1a38317 100644
--- a/include/aidge/backend/generic/operator/ConcatImpl.hpp
+++ b/include/aidge/backend/generic/operator/ConcatImpl.hpp
@@ -9,6 +9,9 @@
  *
  ********************************************************************************/
 
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_CONCATIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_CONCATIMPL_H_
+
 #include <string>
 
 #include "aidge/backend/OperatorImpl.hpp"
@@ -38,4 +41,6 @@ public:
      */
     void forward() override;
 };
-}  // namespace Aidge
\ No newline at end of file
+}  // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_CONCATIMPL_H_
diff --git a/include/aidge/backend/generic/operator/DepthToSpaceImpl.hpp b/include/aidge/backend/generic/operator/DepthToSpaceImpl.hpp
index 3cb638c9d9ebeb067cb3dec771ad7c82954659ad..9a518ed14893f28c145f80a19fb93dae41d42d6d 100644
--- a/include/aidge/backend/generic/operator/DepthToSpaceImpl.hpp
+++ b/include/aidge/backend/generic/operator/DepthToSpaceImpl.hpp
@@ -9,6 +9,9 @@
  *
  ********************************************************************************/
 
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_DEPTHTOSPACEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_DEPTHTOSPACEIMPL_H_
+
 #include <string>
 
 #include "aidge/backend/OperatorImpl.hpp"
@@ -35,3 +38,5 @@ public:
     void forward() override;
 };
 }  // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_DEPTHTOSPACEIMPL_H_
diff --git a/include/aidge/backend/generic/operator/FlattenImpl.hpp b/include/aidge/backend/generic/operator/FlattenImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..710624e73a4f3004f3e1444d3f1fae62b31d49e6
--- /dev/null
+++ b/include/aidge/backend/generic/operator/FlattenImpl.hpp
@@ -0,0 +1,47 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_FLATTENIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_FLATTENIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Implementation of the Flatten operation.
+ *
+ * Since Flatten operation is just backend-agnostic, its implementation is located in aidge_core.
+ */
+class Flatten_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructor for Flatten operator implementation.
+     * @param op Operator instance.
+     * @param backend Optional. Name of the backend.
+     */
+    Flatten_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Compute the forward pass of the Flatten operation.
+     */
+    void forward() override;
+
+    std::shared_ptr<ProdConso> getProdConso() const override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_FLATTENIMPL_H_
diff --git a/include/aidge/backend/generic/operator/GatherImpl.hpp b/include/aidge/backend/generic/operator/GatherImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..622045fc960e5b6f07ac3a26e75387f9467cc188
--- /dev/null
+++ b/include/aidge/backend/generic/operator/GatherImpl.hpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_GATHERIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_GATHERIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace Aidge {
+
+/**
+ * @class Gather_OpImpl
+ * @brief Backend implementation for the Gather operation.
+ *
+ * The Gather operation selects elements from the input tensor based on specified indices
+ * and an axis, producing a tensor with a gathered shape.
+ */
+class Gather_OpImpl : public OperatorImpl {
+public:
+    Gather_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Execute the Gather operation.
+     */
+    void forward() override;
+};
+
+}  // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_GATHERIMPL_H_
diff --git a/include/aidge/backend/generic/operator/IdentityImpl.hpp b/include/aidge/backend/generic/operator/IdentityImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..72af3e0c1e44b25735a1a6387d35a94f85b94f10
--- /dev/null
+++ b/include/aidge/backend/generic/operator/IdentityImpl.hpp
@@ -0,0 +1,35 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_IDENTITYIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_IDENTITYIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+class Identity_OpImpl : public OperatorImpl {
+public:
+    Identity_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend) {}
+
+    void forward() override;
+
+    std::shared_ptr<ProdConso> getProdConso() const override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_IDENTITYIMPL_H_
diff --git a/include/aidge/backend/generic/operator/MemorizeImpl.hpp b/include/aidge/backend/generic/operator/MemorizeImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..2c12ae54cef6846d73d444873d6a428148f3ed9b
--- /dev/null
+++ b/include/aidge/backend/generic/operator/MemorizeImpl.hpp
@@ -0,0 +1,119 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_MEMORIZEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_MEMORIZEIMPL_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Elts.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/**
+ * @class Memorize_ProdConso
+ * @brief Implements the producer-consumer principle for the `Memorize` operator.
+ *
+ * The `Memorize_ProdConso` class defines the logic for managing data dependencies during
+ * the forward process of the `Memorize` operator.
+ *
+ * This class ensures that:
+ * - All data produced by the `Memorize` operator is properly consumed.
+ * - All required outputs are correctly filled during the forward pass.
+ *
+ * It also calculates data and memory requirements specific to the `Memorize` operator.
+ */
+class Memorize_ProdConso : public ProdConso {
+public:
+    /**
+     * @brief Constructor for the `Memorize_ProdConso` class.
+     * @param[in] op The operator instance for which producer-consumer relationships are managed.
+     *
+     * @details:
+     * - The provided `Operator` instance is used to initialize the base `ProdConso` class.
+     * - This operator will determine the specific requirements for data production
+     *   and consumption during the forward process.
+     */
+    Memorize_ProdConso(const Operator& op): ProdConso(op) {}
+
+    /**
+     * @brief Get the number of data elements required from an input tensor for forward computation.
+     * @param[in] inputIdx The index of the input tensor.
+     * @return The number of required elements (`Elts_t`).
+     *
+     * @details:
+     * - For each input tensor identified by `inputIdx`, this method calculates the
+     *   minimum amount of data needed by the `Memorize` operator to perform its forward step.
+     */
+    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+
+    /**
+     * @brief Compute the memory requirements for an output tensor.
+     * @param[in] outputIdx The index of the output tensor.
+     * @param[in] inputsSize A vector containing the dimensions of the input tensors.
+     * @return The memory required (`Elts_t`) for the specified output tensor.
+     *
+     * @details:
+     * - This method evaluates how much memory is needed for the `outputIdx` tensor
+     *   based on the input tensor dimensions and the attributes of the `Memorize` operator.
+     * - Memory requirements are influenced by factors such as sequence length and
+     *   the forward step configuration.
+     */
+    Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+
+    /**
+     * @brief Update the producer-consumer relationships for the `Memorize` operator.
+     * @details:
+     * - This method ensures that all data produced by the `Memorize` operator is
+     *   appropriately consumed by downstream operators in the computational graph.
+     * - It also verifies that all required outputs are filled during the forward pass,
+     *   maintaining consistency in the data flow.
+     * - This step is crucial for ensuring correctness in recurrent computations and
+     *   maintaining dependencies in the graph.
+     */
+    void updateConsummerProducer() override;
+};
+
+/**
+ * @brief Implementation of the Memorize operation.
+ *
+ * Since Memorize operation is just backend-agnostic, its implementation is located in aidge_core.
+ */
+class Memorize_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructs a Memorize_OpImpl object.
+     * @param[in] op The operator to be implemented.
+     * @param[in] backend The backend used for execution.
+     */
+    Memorize_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Get the Producer Consumer object of the operator.
+     * @return A shared pointer to the ProdConso object.
+     */
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Memorize_ProdConso>(mOp); };
+
+    /**
+     * @brief Executes the forward pass for the Memorize operation.
+     */
+    void forward() override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_MEMORIZEIMPL_H_
diff --git a/include/aidge/backend/generic/operator/MoveImpl.hpp b/include/aidge/backend/generic/operator/MoveImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..fc2747351875d72cef6f436dcac7a79960e64c24
--- /dev/null
+++ b/include/aidge/backend/generic/operator/MoveImpl.hpp
@@ -0,0 +1,30 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_MOVEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_MOVEIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace Aidge {
+
+class Move_OpImpl : public OperatorImpl {
+public:
+    Move_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_MOVEIMPL_H_
diff --git a/include/aidge/backend/generic/operator/PopImpl.hpp b/include/aidge/backend/generic/operator/PopImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..7718a0691df6c6f956fd4a8ebaa1c848e74b0496
--- /dev/null
+++ b/include/aidge/backend/generic/operator/PopImpl.hpp
@@ -0,0 +1,96 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_POPIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_POPIMPL_H_
+
+#include <memory>
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Elts.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/**
+ * @class Pop_ProdConso
+ * @brief Implements the producer-consumer principle for the `Pop` operator.
+ *
+ * The `Pop_ProdConso` class defines the logic for managing data dependencies during
+ * the forward process of the `Pop` operator.
+ *
+ * This class ensures that:
+ * - All data consumed by the `Pop` operator is correctly handled.
+ * - The operator respects memory and data requirements during the forward computation.
+ */
+class Pop_ProdConso : public ProdConso {
+public:
+    /**
+     * @brief Constructor for the `Pop_ProdConso` class.
+     * @param[in] op The operator instance for which producer-consumer relationships are managed.
+     *
+     * @details:
+     * - The provided `Operator` instance is used to initialize the base `ProdConso` class.
+     * - This operator determines specific requirements for data consumption during the forward process.
+     */
+    Pop_ProdConso(const Operator& op): ProdConso(op) {}
+
+    /**
+     * @brief Get the number of data elements required from an input tensor for forward computation.
+     * @param[in] inputIdx The index of the input tensor.
+     * @return The number of required elements (`Elts_t`).
+     *
+     * @details:
+     * - For each input tensor identified by `inputIdx`, this method calculates the
+     *   minimum amount of data needed by the `Pop` operator to perform its forward step.
+     */
+    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
+};
+
+/**
+ * @class Pop_OpImpl
+ * @brief Implementation of the `Pop` operation.
+ *
+ * The `Pop_OpImpl` class defines the backend-agnostic logic for executing
+ * the forward pass of the `Pop` operator.
+ */
+class Pop_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructs a `Pop_OpImpl` object.
+     * @param[in] op The operator to be implemented.
+     * @param[in] backend The backend used for execution (optional).
+     */
+    Pop_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Get the Producer Consumer object of the operator.
+     * @return A shared pointer to the `Pop_ProdConso` object.
+     */
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Pop_ProdConso>(mOp); }
+
+    /**
+     * @brief Executes the forward pass for the `Pop` operation.
+     */
+    void forward() override;
+
+    /**
+     * @brief Executes the backward pass for the `Pop` operation.
+     */
+    void backward() override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_POPIMPL_H_
diff --git a/include/aidge/backend/generic/operator/ReshapeImpl.hpp b/include/aidge/backend/generic/operator/ReshapeImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..1461f89ebdca858caa86f2bf93aa2b785e5857f6
--- /dev/null
+++ b/include/aidge/backend/generic/operator/ReshapeImpl.hpp
@@ -0,0 +1,48 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_RESHAPEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_RESHAPEIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Implementation of the Reshape operator.
+ * @note This operator implementation is agnostic to the backend and is located here instead of in aidge_backend.
+ */
+class Reshape_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructor for Reshape_OpImpl.
+     * @param[in] op The Operator instance.
+     * @param[in] backend The backend name (optional).
+     */
+    Reshape_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Perform the forward operation for the reshape.
+     */
+    void forward() override;
+    void backward() override;
+
+    std::shared_ptr<ProdConso> getProdConso() const override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_RESHAPEIMPL_H_
diff --git a/include/aidge/backend/generic/operator/SelectImpl.hpp b/include/aidge/backend/generic/operator/SelectImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c8e3d3a689de2f8cd2c312ecc3100d8a471b8e79
--- /dev/null
+++ b/include/aidge/backend/generic/operator/SelectImpl.hpp
@@ -0,0 +1,45 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SELECTIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SELECTIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Implementation of the Select operator.
+ * @note This operator implementation is agnostic to the backend and is located here instead of in aidge_backend.
+ */
+class Select_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructor for Select_OpImpl.
+     * @param[in] op The Operator instance.
+     * @param[in] backend The backend name (optional).
+     */
+    Select_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Perform the forward operation for the select.
+     */
+    void forward() override;
+    void backward() override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SELECTIMPL_H_
diff --git a/include/aidge/backend/generic/operator/ShapeImpl.hpp b/include/aidge/backend/generic/operator/ShapeImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..03ec05976fb3186a89d239ad0286736531e681aa
--- /dev/null
+++ b/include/aidge/backend/generic/operator/ShapeImpl.hpp
@@ -0,0 +1,46 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SHAPEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SHAPEIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace Aidge {
+
+/**
+ * @class Shape_OpImpl
+ * @brief Backend-agnostic implementation of the Shape operator.
+ *
+ * This implementation is responsible for extracting and returning the shape
+ * of the input tensor. Specific backend functionality can extend this.
+ */
+class Shape_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructor for the Shape_OpImpl class.
+     * @param[in] op The Operator instance.
+     * @param[in] backend The backend name (optional).
+     */
+    Shape_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Perform the forward operation to compute the shape of the tensor.
+     */
+    void forward() override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SHAPEIMPL_H_
diff --git a/include/aidge/backend/generic/operator/SliceImpl.hpp b/include/aidge/backend/generic/operator/SliceImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4f4d449d66ac7dad651350e25b115abfe9bb2804
--- /dev/null
+++ b/include/aidge/backend/generic/operator/SliceImpl.hpp
@@ -0,0 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SLICEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SLICEIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Implementation of the Slice operation.
+ *
+ * Since Slice operation is just backend-agnostic, its implementation is located in aidge_core.
+ */
+class Slice_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructs a Slice_OpImpl object.
+     * @param[in] op The operator to be implemented.
+     * @param[in] backend The backend used for execution.
+     */
+    Slice_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Executes the forward pass for the Slice operation.
+     */
+    void forward() override;
+};
+
+}  // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SLICEIMPL_H_
diff --git a/include/aidge/backend/generic/operator/SplitImpl.hpp b/include/aidge/backend/generic/operator/SplitImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6a793ab9586803ebc506df5ff727189d2ea33958
--- /dev/null
+++ b/include/aidge/backend/generic/operator/SplitImpl.hpp
@@ -0,0 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SPLITIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SPLITIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Implementation of the Split operation.
+ *
+ * Since Split operation is just backend-agnostic, its implementation is located in aidge_core.
+ */
+class Split_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructor for the Split operator implementation.
+     * @param[in] op Operator to be implemented.
+     * @param[in] backend Name of the backend.
+     */
+    Split_OpImpl(const Operator& op, const std::string& backend = "") : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Executes the forward pass for the Split operation.
+     */
+    void forward() override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SPLITIMPL_H_
diff --git a/include/aidge/backend/generic/operator/SqueezeImpl.hpp b/include/aidge/backend/generic/operator/SqueezeImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a5b5bf4e29d2eb0e2b962b8a6e19b63f3fea48e0
--- /dev/null
+++ b/include/aidge/backend/generic/operator/SqueezeImpl.hpp
@@ -0,0 +1,40 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SQUEEZEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SQUEEZEIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief implementation of the operator squeeze.
+ * @note Since this operator implementation is agnostic to the backend it is
+ * located here instead of in aidge_backend_cpu/cuda.
+ */
+class Squeeze_OpImpl : public OperatorImpl {
+public:
+    Squeeze_OpImpl(const Operator &op, const std::string &backend = "")
+        : OperatorImpl(op, backend) {}
+
+    void forward() override;
+
+    std::shared_ptr<ProdConso> getProdConso() const override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_SQUEEZEIMPL_H_
diff --git a/include/aidge/backend/generic/operator/StackImpl.hpp b/include/aidge/backend/generic/operator/StackImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4d3ad6f4ad7979477fff70a805363dbe14e8ca54
--- /dev/null
+++ b/include/aidge/backend/generic/operator/StackImpl.hpp
@@ -0,0 +1,102 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_STACKIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_STACKIMPL_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Elts.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/**
+ * @class StackProdConso
+ * @brief Implements the producer-consumer principle for the `Stack` operator.
+ *
+ * The `StackProdConso` class defines the logic for managing data dependencies
+ * during the forward process of the `Stack` operator. It ensures proper allocation
+ * and consumption of resources required for stacking operations.
+ */
+class StackProdConso : public ProdConso {
+public:
+    /**
+     * @brief Constructor for the `StackProdConso` class.
+     * @param[in] op The operator instance for which producer-consumer relationships are managed.
+     */
+    StackProdConso(const Operator& op) : ProdConso(op) {}
+
+    /**
+     * @brief Compute the memory requirements for an output tensor.
+     * @param[in] outputIdx The index of the output tensor.
+     * @param[in] inputsSize A vector containing the dimensions of the input tensors.
+     * @return The memory required (`Elts_t`) for the specified output tensor.
+     *
+     * @details:
+     * - This method calculates how much memory is needed to store the stacked tensor.
+     * - Memory requirements depend on the number and size of the input tensors.
+     */
+    Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+
+    /**
+     * @brief Reset producer-consumer relationships for the `Stack` operator.
+     *
+     * @details:
+     * - This method clears and reinitializes the producer-consumer relationships,
+     *   ensuring proper data flow and allocation for the stacking operation.
+     */
+    void resetConsummerProducer() override;
+};
+
+/**
+ * @class StackOpImpl
+ * @brief Backend-specific implementation of the `Stack` operator.
+ *
+ * The `StackOpImpl` class handles the execution of the `Stack` operation, including
+ * forward computation and backend-specific optimizations.
+ */
+class StackOpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructs a StackOpImpl object.
+     * @param[in] op The operator to be implemented.
+     * @param[in] backend The backend used for execution.
+     */
+    StackOpImpl(const Operator& op, const std::string& backend = "") : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Get the Producer Consumer object of the operator.
+     * @return A shared pointer to the ProdConso object.
+     */
+    std::shared_ptr<ProdConso> getProdConso() const override {
+        return std::make_shared<StackProdConso>(mOp);
+    }
+
+    /**
+     * @brief Executes the forward pass for the Stack operation.
+     */
+    void forward() override;
+
+    /**
+     * @brief Executes the backward pass for the Stack operation.
+     */
+    void backward() override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_STACKIMPL_H_
diff --git a/include/aidge/backend/generic/operator/UnfoldImpl.hpp b/include/aidge/backend/generic/operator/UnfoldImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..de8a4b5bdb387cd7fb8b99b38fc8a667e5a862a3
--- /dev/null
+++ b/include/aidge/backend/generic/operator/UnfoldImpl.hpp
@@ -0,0 +1,48 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_UNFOLDIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_UNFOLDIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/**
+ * @brief Implementation of the Unfold operator.
+ * @tparam DIM Number of dimensions in the operation.
+ */
+template <DimIdx_t DIM>
+class Unfold_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructor for Unfold_OpImpl.
+     * @param[in] op The Operator instance.
+     * @param[in] backend The backend name (optional).
+     */
+    Unfold_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Perform the forward operation for the unfold.
+     */
+    void forward() override;
+};
+
+extern template class Unfold_OpImpl<2>;
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_UNFOLDIMPL_H_
diff --git a/include/aidge/backend/generic/operator/UnsqueezeImpl.hpp b/include/aidge/backend/generic/operator/UnsqueezeImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a9f7b15b806df0a43017434146db0eece218df18
--- /dev/null
+++ b/include/aidge/backend/generic/operator/UnsqueezeImpl.hpp
@@ -0,0 +1,39 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_UNSQUEEZEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_UNSQUEEZEIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Implementation of the operator unsqueeze.
+ * @note Since this operator implementation is agnostic to the backend it is
+ * located here instead of in aidge_backend_cpu/cuda.
+ */
+class Unsqueeze_OpImpl : public OperatorImpl {
+public:
+    Unsqueeze_OpImpl(const Operator &op, const std::string &backend = "")
+        : OperatorImpl(op, backend) {}
+    void forward() override;
+
+    std::shared_ptr<ProdConso> getProdConso() const override;
+};
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_UNSQUEEZEIMPL_H_
diff --git a/include/aidge/operator/Flatten.hpp b/include/aidge/operator/Flatten.hpp
index 7493b25d76c0960996dc8ca15147351a06259c5a..11775aafbbb987ee3c6f922472992ec0789893bf 100644
--- a/include/aidge/operator/Flatten.hpp
+++ b/include/aidge/operator/Flatten.hpp
@@ -22,32 +22,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-
-/**
- * @brief Implementation of the Flatten operation.
- *
- * Since Flatten operation is just backend-agnostic, its implementation is located in aidge_core.
- */
-class Flatten_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructor for Flatten operator implementation.
-     * @param op Operator instance.
-     * @param backend Optional. Name of the backend.
-     */
-    Flatten_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-
-    std::shared_ptr<ProdConso> getProdConso() const override {
-        return std::make_shared<ProdConso>(mOp, true);  // Flatten is an in-place operation!
-    }
-
-    /**
-     * @brief Compute the forward pass of the Flatten operation.
-     */
-    void forward() override;
-};
-}  // namespace Aidge
 
 #define LIST_FLATTEN_ATTR(X)  \
     X(Axis, "axis", std::int64_t)
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 964e1b45de9e0690ea2109bd9bb0d42c82a073e9..8bd8239ec664a7bcb9d520c3dc37488f932437bb 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -24,27 +24,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-
-/**
- * @class Gather_OpImpl
- * @brief Backend implementation for the Gather operation.
- *
- * The Gather operation selects elements from the input tensor based on specified indices
- * and an axis, producing a tensor with a gathered shape.
- */
-class Gather_OpImpl : public OperatorImpl {
-public:
-    Gather_OpImpl(const Operator& op, const std::string& backend = "")
-        : OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Execute the Gather operation.
-     */
-    void forward() override;
-};
-} // namespace Aidge
-
 
 #define LIST_GATHER_ATTR(X)  \
     X(Axis, "axis", std::int8_t),  \
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index b1849cbc5385687d554537eb9a46e0437241e65a..a0200db6f6001c23dda644e2513253687432463d 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -26,16 +26,6 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
-class Identity_OpImpl : public OperatorImpl {
-public:
-    Identity_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-
-    std::shared_ptr<ProdConso> getProdConso() const override {
-        return std::make_shared<ProdConso>(mOp, true);  // Identity is an in-place operation!
-    }
-
-    void forward() override;
-};
 
 /**
  * @brief Indentity_Op is an helper operator made to ease the declaration of MetaNodes.
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index e1eea4a284f494553708fa56f99477162eab93ab..49a0091a6360348ef8b32815cc90a0ce2b2f6ef3 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -17,104 +17,12 @@
 #include <vector>
 
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-/**
- * @class Memorize_ProdConso
- * @brief Implements the producer-consumer principle for the `Memorize` operator.
- *
- * The `Memorize_ProdConso` class defines the logic for managing data dependencies during
- * the forward process of the `Memorize` operator.
- *
- * This class ensures that:
- * - All data produced by the `Memorize` operator is properly consumed.
- * - All required outputs are correctly filled during the forward pass.
- *
- * It also calculates data and memory requirements specific to the `Memorize` operator.
- */
-class Memorize_ProdConso : public ProdConso {
-public:
-    /**
-     * @brief Constructor for the `Memorize_ProdConso` class.
-     * @param[in] op The operator instance for which producer-consumer relationships are managed.
-     *
-     * @details:
-     * - The provided `Operator` instance is used to initialize the base `ProdConso` class.
-     * - This operator will determine the specific requirements for data production
-     *   and consumption during the forward process.
-     */
-    Memorize_ProdConso(const Operator& op): ProdConso(op) {}
-
-    /**
-     * @brief Get the number of data elements required from an input tensor for forward computation.
-     * @param[in] inputIdx The index of the input tensor.
-     * @return The number of required elements (`Elts_t`).
-     *
-     * @details:
-     * - For each input tensor identified by `inputIdx`, this method calculates the
-     *   minimum amount of data needed by the `Memorize` operator to perform its forward step.
-     */
-    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
-
-    /**
-     * @brief Compute the memory requirements for an output tensor.
-     * @param[in] outputIdx The index of the output tensor.
-     * @param[in] inputsSize A vector containing the dimensions of the input tensors.
-     * @return The memory required (`Elts_t`) for the specified output tensor.
-     *
-     * @details:
-     * - This method evaluates how much memory is needed for the `outputIdx` tensor
-     *   based on the input tensor dimensions and the attributes of the `Memorize` operator.
-     * - Memory requirements are influenced by factors such as sequence length and
-     *   the forward step configuration.
-     */
-    Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
-
-    /**
-     * @brief Update the producer-consumer relationships for the `Memorize` operator.
-     * @details:
-     * - This method ensures that all data produced by the `Memorize` operator is
-     *   appropriately consumed by downstream operators in the computational graph.
-     * - It also verifies that all required outputs are filled during the forward pass,
-     *   maintaining consistency in the data flow.
-     * - This step is crucial for ensuring correctness in recurrent computations and
-     *   maintaining dependencies in the graph.
-     */
-    void updateConsummerProducer() override;
-};
-
-/**
- * @brief Implementation of the Memorize operation.
- *
- * Since Memorize operation is just backend-agnostic, its implementation is located in aidge_core.
- */
-class Memorize_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructs a Memorize_OpImpl object.
-     * @param[in] op The operator to be implemented.
-     * @param[in] backend The backend used for execution.
-     */
-    Memorize_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Get the Producer Consumer object of the operator.
-     * @return A shared pointer to the ProdConso object.
-     */
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Memorize_ProdConso>(mOp); };
-
-    /**
-     * @brief Executes the forward pass for the Memorize operation.
-     */
-    void forward() override;
-};
-} // namespace Aidge
 
 #define LIST_MEMORIZE_ATTR(X)                        \
     X(ScheduleStep, "schedule_step", std::uint32_t), \
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index caea7a646f9ec013c29ad42e82f3f368caf553aa..b516ef54959c1ae22f607fddea0de94a7436b365 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -12,23 +12,16 @@
 #ifndef AIDGE_CORE_OPERATOR_MOVE_H_
 #define AIDGE_CORE_OPERATOR_MOVE_H_
 
-#include <cassert>
 #include <memory>
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-class Move_OpImpl : public OperatorImpl {
-public:
-    Move_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-    void forward() override;
-};
 
 /**
  * @brief Description of a Move operation that copies the input Tensor to the output Tensor.
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 9790f05e9375435f7adf2dfbf3fe0460487416fc..e8d4269f2f25c7020aae6ed7306fd338ab95770a 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -23,76 +23,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-
-/**
- * @class Pop_ProdConso
- * @brief Implements the producer-consumer principle for the `Pop` operator.
- *
- * The `Pop_ProdConso` class defines the logic for managing data dependencies during
- * the forward process of the `Pop` operator.
- *
- * This class ensures that:
- * - All data consumed by the `Pop` operator is correctly handled.
- * - The operator respects memory and data requirements during the forward computation.
- */
-class Pop_ProdConso : public ProdConso {
-public:
-    /**
-     * @brief Constructor for the `Pop_ProdConso` class.
-     * @param[in] op The operator instance for which producer-consumer relationships are managed.
-     *
-     * @details:
-     * - The provided `Operator` instance is used to initialize the base `ProdConso` class.
-     * - This operator determines specific requirements for data consumption during the forward process.
-     */
-    Pop_ProdConso(const Operator& op): ProdConso(op) {}
-
-    /**
-     * @brief Get the number of data elements required from an input tensor for forward computation.
-     * @param[in] inputIdx The index of the input tensor.
-     * @return The number of required elements (`Elts_t`).
-     *
-     * @details:
-     * - For each input tensor identified by `inputIdx`, this method calculates the
-     *   minimum amount of data needed by the `Pop` operator to perform its forward step.
-     */
-    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
-};
-
-/**
- * @class Pop_OpImpl
- * @brief Implementation of the `Pop` operation.
- *
- * The `Pop_OpImpl` class defines the backend-agnostic logic for executing
- * the forward pass of the `Pop` operator.
- */
-class Pop_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructs a `Pop_OpImpl` object.
-     * @param[in] op The operator to be implemented.
-     * @param[in] backend The backend used for execution (optional).
-     */
-    Pop_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Get the Producer Consumer object of the operator.
-     * @return A shared pointer to the `Pop_ProdConso` object.
-     */
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Pop_ProdConso>(mOp); }
-
-    /**
-     * @brief Executes the forward pass for the `Pop` operation.
-     */
-    void forward() override;
-
-    /**
-     * @brief Executes the backward pass for the `Pop` operation.
-     */
-    void backward() override;
-};
-} //namespace Aidge
 
 #define LIST_POP_ATTR(X)  \
     X(ForwardStep, "forward_step", std::uint32_t),  \
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index c8ac87b6f28120d9a99b05212532b1ee25c14b6c..c93ef09c9dd35ca887b0b491bd8c1177dbbb35e1 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -22,33 +22,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-/**
- * @brief Implementation of the Reshape operator.
- * @note This operator implementation is agnostic to the backend and is located here instead of in aidge_backend.
- */
-class Reshape_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructor for Reshape_OpImpl.
-     * @param[in] op The Operator instance.
-     * @param[in] backend The backend name (optional).
-     */
-    Reshape_OpImpl(const Operator& op, const std::string& backend = "")
-        : OperatorImpl(op, backend) {}
-
-    std::shared_ptr<ProdConso> getProdConso() const override {
-        return std::make_shared<ProdConso>(mOp, true);  // Reshape is an in-place operation!
-    }
-
-    /**
-     * @brief Perform the forward operation for the reshape.
-     */
-    void forward() override;
-    void backward() override;
-};
-} // namespace Aidge
-
 
 #define LIST_RESHAPE_ATTR(X)  \
     X(Shape, "shape", std::vector<std::int64_t>),  \
diff --git a/include/aidge/operator/Select.hpp b/include/aidge/operator/Select.hpp
index 4dcace84edcbdde92138f4a65aa3e44b4b0a9a54..cd0a56bb905ff8f2a626f0b735cde73c266c7738 100644
--- a/include/aidge/operator/Select.hpp
+++ b/include/aidge/operator/Select.hpp
@@ -23,29 +23,9 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-/**
- * @brief Implementation of the Select operator.
- * @note This operator implementation is agnostic to the backend and is located here instead of in aidge_backend.
- */
-class Select_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructor for Select_OpImpl.
-     * @param[in] op The Operator instance.
-     * @param[in] backend The backend name (optional).
-     */
-    Select_OpImpl(const Operator& op, const std::string& backend = "")
-        : OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Perform the forward operation for the reshape.
-     */
-    void forward() override;
-    void backward() override;
-};
 
 /**
- * @brief 
+ * @brief
  * @see OperatorTensor
  * @see Registrable
  */
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 290d95eefd7972dad3d0ed05a01eb7105f5f9a62..4028c4041584833f14a4fa4db0f944dca2c2f035 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -24,30 +24,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-
-/**
- * @class Shape_OpImpl
- * @brief Backend-agnostic implementation of the Shape operator.
- *
- * This implementation is responsible for extracting and returning the shape
- * of the input tensor. Specific backend functionality can extend this.
- */
-class Shape_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructor for the Shape_OpImpl class.
-     * @param[in] op The Operator instance.
-     * @param[in] backend The backend name (optional).
-     */
-    Shape_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Perform the forward operation to compute the shape of the tensor.
-     */
-    void forward() override;
-};
-}
 
 #define LIST_SHAPE_ATTR(X) \
     X(Start, "start", std::int64_t), \
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index b425fe75208b37105ce6baadd4f2ff63f94f2f3c..434fb8ab96bc6bb681aa27c44b7ff3f4c63e273d 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -23,28 +23,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-
-/**
- * @brief Implementation of the Slice operation.
- *
- * Since Slice operation is just backend-agnostic, its implementation is located in aidge_core.
- */
-class Slice_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructs a Slice_OpImpl object.
-     * @param[in] op The operator to be implemented.
-     * @param[in] backend The backend used for execution.
-     */
-    Slice_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Executes the forward pass for the Slice operation.
-     */
-    void forward() override;
-};
-}  // namespace Aidge
 
 #define LIST_SLICE_ATTR(X) \
     X(Starts, "starts", std::vector<std::int64_t>), \
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 038879f05dfc57f6451d0c490ec52e8283a1b93f..e9e43a350eadac3bd15bf2afdcec4370b697e55a 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -23,28 +23,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-
-/**
- * @class  Implementation of the Split operation.
- *
- * Since Split operation is just backend-agnostic, its implementation is located in aidge_core.
- */
-class Split_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructor for the Split operator implementation.
-     * @param[in] op Operator to be implemented.
-     * @param[in] backend Name of the backend.
-     */
-    Split_OpImpl(const Operator& op, const std::string& backend = "") : OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Executes the forward pass for the Split operation.
-     */
-    void forward() override;
-};
-} // naemspace Aidge
 
 #define LIST_SPLIT_ATTR(X) \
     X(Axis, "axis", std::int8_t), \
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index ed0f6d36674300da200db958b418ee6ea4762b05..03db92a844594bc45af65cc3c77e7956a38e2cad 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -13,9 +13,7 @@
 #define AIDGE_CORE_OPERATOR_SQUEEZE_H_
 
 #include <cstdint>
-#include <cstdlib>
 #include <functional>
-#include <limits>
 #include <memory>
 #include <string>
 #include <vector>
@@ -23,29 +21,10 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-/**
- * @brief implementation of the operator squeeze.
- * @note Since this operator implementation is agnostic to the backend it is
- * located here instead of in aidge_backend_cpu/cuda.
- */
-class Squeeze_OpImpl : public OperatorImpl {
-public:
-  Squeeze_OpImpl(const Operator &op, const std::string &backend = "")
-      : OperatorImpl(op, backend) {}
-
-  std::shared_ptr<ProdConso> getProdConso() const override {
-      return std::make_shared<ProdConso>(mOp, true);  // Squeeze is an in-place operation!
-  }
-
-  void forward() override;
-};
-} // namespace Aidge
 
 #define LIST_SQUEEZE_ATTR(X) \
     X(Axes, "axes", std::vector<std::int8_t>)
@@ -99,7 +78,7 @@ public:
       Type; // name of the type of the operation (Here "Squeeze")
 
 private:
-  using Attributes_ = StaticAttributes<SqueezeAttr, std::vector<int8_t>>;
+  using Attributes_ = StaticAttributes<SqueezeAttr, std::vector<std::int8_t>>;
   template <SqueezeAttr e> using attr = typename Attributes_::template attr<e>;
   const std::shared_ptr<Attributes_> mAttributes;
 
@@ -108,13 +87,7 @@ public:
    * @brief constructor for Squeeze op
    * @param[in] axes around which perform the operation
    */
-  Squeeze_Op(const std::vector<int8_t> &axes = {})
-      : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData},
-                       1),
-        mAttributes(
-            std::make_shared<Attributes_>(attr<SqueezeAttr::Axes>(axes))) {
-    mImpl = std::make_shared<Squeeze_OpImpl>(*this);
-  }
+  Squeeze_Op(const std::vector<std::int8_t> &axes = {});
 
   /**
    * @brief Copy-constructor. Copy the operator attributes and its output
@@ -122,14 +95,7 @@ public:
    * associated).
    * @param op Operator to copy.
    */
-  Squeeze_Op(const Squeeze_Op &op)
-      : OperatorTensor(op), mAttributes(op.mAttributes) {
-    if (!op.backend().empty()) {
-      SET_IMPL_MACRO(Squeeze_Op, *this, op.backend());
-    } else {
-      mImpl = std::make_shared<Squeeze_OpImpl>(*this);
-    }
-  }
+  Squeeze_Op(const Squeeze_Op &op);
 
   /**
    * @brief Clone the operator using its copy-constructor.
@@ -153,36 +119,34 @@ public:
     return mAttributes;
   }
 
-  /**
-   * @brief axes to squeeze, if left empty all 1 sized
-   * dimensions will be removed.
-   */
-  inline std::vector<int8_t> &axes() const noexcept {
-    return mAttributes->template getAttr<SqueezeAttr::Axes>();
-  }
+    /**
+     * @brief axes to squeeze, if left empty all 1 sized
+     * dimensions will be removed.
+     */
+    inline std::vector<std::int8_t> &axes() const noexcept {
+        return mAttributes->template getAttr<SqueezeAttr::Axes>();
+    }
 
-  static const std::vector<std::string> getInputsName() {
-    return {"data_input", "axes_to_squeeze"};
-  }
-  static const std::vector<std::string> getOutputsName() {
-    return {"squeezed"};
-  }
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input", "axes_to_squeeze"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"squeezed"};
+    }
 
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::SqueezeAttr>::data;
-	}
+    /**
+     * @brief Retrieves the names of the attributes for the operator.
+     * @return A vector containing the attributes name.
+     */
+    static constexpr const char* const* attributesName(){
+        return EnumStrings<Aidge::SqueezeAttr>::data;
+    }
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
 // automatic template DIM deduction
-inline std::shared_ptr<Node> Squeeze(const std::vector<int8_t> axes = {},
-                                     const std::string &name = "") {
-  return std::make_shared<Node>(std::make_shared<Squeeze_Op>(axes), name);
-}
+std::shared_ptr<Node> Squeeze(const std::vector<std::int8_t> axes = {},
+                              const std::string &name = "");
 } // namespace Aidge
 
 #undef LIST_SQUEEZE_ATTR
diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp
index 84341375649e6d8d4948283971e86042cc003fd4..e0d741226af66c0ed85323fe85ff5bf437003a5d 100644
--- a/include/aidge/operator/Stack.hpp
+++ b/include/aidge/operator/Stack.hpp
@@ -23,78 +23,6 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-/**
- * @class StackProdConso
- * @brief Implements the producer-consumer principle for the `Stack` operator.
- *
- * The `StackProdConso` class defines the logic for managing data dependencies
- * during the forward process of the `Stack` operator. It ensures proper allocation
- * and consumption of resources required for stacking operations.
- */
-class StackProdConso : public ProdConso {
-public:
-    /**
-     * @brief Constructor for the `StackProdConso` class.
-     * @param[in] op The operator instance for which producer-consumer relationships are managed.
-     */
-    StackProdConso(const Operator& op) : ProdConso(op) {}
-
-    /**
-     * @brief Compute the memory requirements for an output tensor.
-     * @param[in] outputIdx The index of the output tensor.
-     * @param[in] inputsSize A vector containing the dimensions of the input tensors.
-     * @return The memory required (`Elts_t`) for the specified output tensor.
-     *
-     * @details:
-     * - This method calculates how much memory is needed to store the stacked tensor.
-     * - Memory requirements depend on the number and size of the input tensors.
-     */
-    Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
-
-    /**
-     * @brief Reset producer-consumer relationships for the `Stack` operator.
-     *
-     * @details:
-     * - This method clears and reinitializes the producer-consumer relationships,
-     *   ensuring proper data flow and allocation for the stacking operation.
-     */
-    void resetConsummerProducer() override;
-};
-
-/**
- * @class StackOpImpl
- * @brief Backend-specific implementation of the `Stack` operator.
- *
- * The `StackOpImpl` class handles the execution of the `Stack` operation, including
- * forward computation and backend-specific optimizations.
- */
-class StackOpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructs a StackOpImpl object.
-     * @param[in] op The operator to be implemented.
-     * @param[in] backend The backend used for execution.
-     */
-    StackOpImpl(const Operator& op, const std::string& backend = "") : OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Get the Producer Consumer object of the operator.
-     * @return A shared pointer to the ProdConso object.
-     */
-    std::shared_ptr<ProdConso> getProdConso() const override {
-        return std::make_shared<StackProdConso>(mOp);
-    }
-
-    /**
-     * @brief Executes the forward pass for the Stack operation.
-     */
-    void forward() override;
-
-    /**
-     * @brief Executes the backward pass for the Stack operation.
-     */
-    void backward() override;
-};
 
 #define LIST_STACK_ATTR(X)  \
     X(ForwardStep, "forward_step", std::uint32_t), \
@@ -151,7 +79,7 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    static const std::string s_type;
+    static const std::string Type;
 
     /**
      * @brief Constructs a new Stack Operator.
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index fe85f9d5e999ab2e6b6a0ae65f3d8ef43cdea0b3..664dafc93b570baf4b24216a4818e6716065330c 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -27,28 +27,6 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-/**
- * @brief Implementation of the Unfold operator.
- * @tparam DIM Number of dimensions in the operation.
- */
-template <DimIdx_t DIM>
-class Unfold_OpImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructor for Unfold_OpImpl.
-     * @param[in] op The Operator instance.
-     * @param[in] backend The backend name (optional).
-     */
-    Unfold_OpImpl(const Operator& op, const std::string& backend = "")
-        : OperatorImpl(op, backend) {}
-
-    /**
-     * @brief Perform the forward operation for the unfold.
-     */
-    void forward() override;
-};
-} //namespace Aidge
 
 #define LIST_UNFOLD_ATTR(X)  \
     X(StrideDims, "stride_dims", sizeArr_t<DIM>),  \
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index 8d95f5cddc57a152a0fd6539f59aefddc546a483..27b3851fc7b741955889f7119bdf2b829918950a 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -20,29 +20,10 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-/**
- * @brief implementation of the operator unsqueeze.
- * @note Since this operator implementation is agnostic to the backend it is
- * located here instead of in aidge_backend_cpu/cuda.
- */
-class Unsqueeze_OpImpl : public OperatorImpl {
-public:
-  Unsqueeze_OpImpl(const Operator &op, const std::string &backend = "")
-      : OperatorImpl(op, backend) {}
-
-  std::shared_ptr<ProdConso> getProdConso() const override {
-      return std::make_shared<ProdConso>(mOp, true);  // Unsqueeze is an in-place operation!
-  }
-
-  void forward() override;
-};
-}  // namespace Aidge
 
 #define LIST_UNSQUEEZE_ATTR(X)  \
     X(Axes, "axes", std::vector<std::int8_t>)
@@ -87,8 +68,7 @@ class Unsqueeze_Op
                          std::function<std::shared_ptr<OperatorImpl>(const Unsqueeze_Op &)>> {
 
 public:
-  static const std::string
-      Type; // name of the type of the operation (Here "Unsqueeze")
+  static const std::string Type;
 
 private:
   using Attributes_ = StaticAttributes<UnsqueezeAttr, GENERATE_LIST_ATTR_TYPE(LIST_UNSQUEEZE_ATTR)>;
@@ -97,20 +77,13 @@ private:
   const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-  Unsqueeze_Op() =
-      delete; // no default constructor since this class has attributes
+  Unsqueeze_Op() = delete;
 
   /**
    * @brief constructor for Unsqueeze op
    * @param[in] axis around which perform the operation
    */
-  Unsqueeze_Op(const std::vector<int8_t> &axes)
-      : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData},
-                       1),
-        mAttributes(
-            std::make_shared<Attributes_>(attr<UnsqueezeAttr::Axes>(axes))) {
-    mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
-  }
+  Unsqueeze_Op(const std::vector<int8_t> &axes);
 
   /**
    * @brief Copy-constructor. Copy the operator attributes and its output
@@ -118,14 +91,7 @@ public:
    * associated).
    * @param op Operator to copy.
    */
-  Unsqueeze_Op(const Unsqueeze_Op &op)
-      : OperatorTensor(op), mAttributes(op.mAttributes) {
-    if (!op.backend().empty()) {
-      SET_IMPL_MACRO(Unsqueeze_Op, *this, op.backend());
-    } else {
-      mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
-    }
-  }
+  Unsqueeze_Op(const Unsqueeze_Op &op);
 
   /**
    * @brief Clone the operator using its copy-constructor.
@@ -176,10 +142,8 @@ public:
 
 // helper with C-style array instead of std::array for kernel_dims to allow
 // automatic template DIM deduction
-inline std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes = {},
-                                       const std::string &name = "") {
-  return std::make_shared<Node>(std::make_shared<Unsqueeze_Op>(axes), name);
-}
+std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes = {},
+                                const std::string &name = "");
 } // namespace Aidge
 
 #undef LIST_UNSQUEEZE_ATTR
diff --git a/src/backend/generic/operator/FlattenImpl.cpp b/src/backend/generic/operator/FlattenImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0ed74f85c789f35bf7f70686a53313ebf2d70fc0
--- /dev/null
+++ b/src/backend/generic/operator/FlattenImpl.cpp
@@ -0,0 +1,31 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/FlattenImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Flatten.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+void Flatten_OpImpl::forward() {
+    const Flatten_Op& op = dynamic_cast<const Flatten_Op&>(mOp);
+    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+}
+
+std::shared_ptr<ProdConso> Flatten_OpImpl::getProdConso() const {
+    return std::make_shared<ProdConso>(mOp, true);  // Flatten is an in-place operation!
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/GatherImpl.cpp b/src/backend/generic/operator/GatherImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cbe9fdba5f2827ca70effd423f37dbce2e7b93ff
--- /dev/null
+++ b/src/backend/generic/operator/GatherImpl.cpp
@@ -0,0 +1,52 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/GatherImpl.hpp"
+
+#include <array>
+#include <cstddef>  // std::size_t
+#include <vector>
+
+#include "aidge/operator/Gather.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+void Gather_OpImpl::forward() {
+    const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
+
+    const std::size_t axisIdx = static_cast<std::size_t>(op.axis()) + (op.axis() >= 0 ? 0 : op.getInput(0)->dims().size());
+
+    std::size_t postAxisElems = 1;
+    for (std::size_t i = axisIdx + 1; i < op.getInput(0)->dims().size(); ++i) {
+        postAxisElems *= op.getInput(0)->dims()[i];
+    }
+    std::size_t preAxisElems = 1;
+    for (std::size_t i = 0; i < axisIdx; ++i) {
+        preAxisElems *= op.getInput(0)->dims()[i];
+    }
+
+    std::size_t outputOffset = 0;
+    for (std::size_t i=0; i<preAxisElems; ++i)
+    {
+        for(std::size_t j = 0; j < op.indices().size(); ++j)
+        {
+            const std::size_t idx = op.indices()[j] >= 0 ?
+                                        static_cast<std::size_t>(op.indices()[j]) :
+                                        static_cast<std::size_t>(op.indices()[j] + static_cast<int>(op.getInput(0)->dims()[axisIdx]));
+            op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i * postAxisElems * op.getInput(0)->dims()[axisIdx] + idx * postAxisElems), postAxisElems, outputOffset);
+            outputOffset += postAxisElems;
+        }
+    }
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/IdentityImpl.cpp b/src/backend/generic/operator/IdentityImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d1be34799744f2962bbfbc2e5a581b6592d1cf4c
--- /dev/null
+++ b/src/backend/generic/operator/IdentityImpl.cpp
@@ -0,0 +1,34 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/IdentityImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Identity.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+void Identity_OpImpl::forward() {
+    const Identity_Op& op = dynamic_cast<const Identity_Op&>(mOp);
+    op.getOutput(0)->setBackend(op.getInput(0)->backend(), op.getInput(0)->device());
+    op.getOutput(0)->setDataType(op.getInput(0)->dataType());
+    op.getOutput(0)->setDataFormat(op.getInput(0)->dataFormat());
+    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+}
+
+std::shared_ptr<ProdConso> Identity_OpImpl::getProdConso() const {
+    return std::make_shared<ProdConso>(mOp, true);  // Identity is an in-place operation!
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/MemorizeImpl.cpp b/src/backend/generic/operator/MemorizeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..be13f0f6b8dfd9852f10e5260955e7b1f48fb144
--- /dev/null
+++ b/src/backend/generic/operator/MemorizeImpl.cpp
@@ -0,0 +1,79 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/MemorizeImpl.hpp"
+
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Elts.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Memorize.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+Aidge::Elts_t Aidge::Memorize_ProdConso::getNbRequiredData(
+    Aidge::IOIndex_t inputIdx) const
+{
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+
+    if (op.scheduleStep() == 0 && inputIdx == 0) {
+        // No data input is required for the initial step.
+        // Initialization data is required however.
+        return Elts_t::NoneElts();
+    }
+    else if (op.scheduleStep() > 0 && inputIdx == 1) {
+        // No initialization data is required after the initial step.
+        return Elts_t::NoneElts();
+    }
+    else {
+        return ProdConso::getNbRequiredData(inputIdx);
+    }
+}
+
+Aidge::Elts_t Aidge::Memorize_ProdConso::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                            const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
+    assert(mOp.getRawOutput(outputIdx) && "requires valid output");
+
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+
+    if ((op.endStep() > 0) && (outputIdx == 1) && (op.scheduleStep() >= op.endStep())) {
+        return Elts_t::NoneElts();
+    }
+    else {
+        return Elts_t::DataElts(op.getOutput(outputIdx)->size());
+    }
+}
+
+void Aidge::Memorize_ProdConso::updateConsummerProducer() {
+    ProdConso::updateConsummerProducer();
+
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+    AIDGE_ASSERT(op.endStep() == 0 || op.scheduleStep() <= op.endStep(), "cannot update consumer producer anymore, number of cycles exceeded");
+}
+
+void Aidge::Memorize_OpImpl::forward() {
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+
+    AIDGE_ASSERT((op.endStep() == 0) || (op.forwardStep() <= op.endStep()), "cannot forward anymore, number of cycles exceeded");
+
+    if (op.forwardStep() == 0) {
+        op.getOutput(0)->getImpl()->copy(op.getInput(1)->getImpl()->rawPtr(), op.getInput(1)->size());
+    }
+    else {
+        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+    }
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/MoveImpl.cpp b/src/backend/generic/operator/MoveImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2986d94b0bd9cb7856e2d55e665ac6c0a4ee00ae
--- /dev/null
+++ b/src/backend/generic/operator/MoveImpl.cpp
@@ -0,0 +1,26 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/MoveImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Move.hpp"
+
+namespace Aidge {
+
+void Aidge::Move_OpImpl::forward() {
+    const Move_Op& op = dynamic_cast<const Move_Op&>(mOp);
+    op.getOutput(0)->copyFrom(*(op.getInput(0)));
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/PopImpl.cpp b/src/backend/generic/operator/PopImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2596b3dda97218ece8561b4d373de8c1c300a9e1
--- /dev/null
+++ b/src/backend/generic/operator/PopImpl.cpp
@@ -0,0 +1,49 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/PopImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Pop.hpp"
+
+namespace Aidge {
+
+Elts_t Aidge::Pop_ProdConso::getNbRequiredData(const IOIndex_t inputIdx) const {
+    assert(mOp.getRawInput(inputIdx) && "requires valid input");
+
+    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+    AIDGE_ASSERT(!op.getInput(inputIdx)->empty(), "Pop operator requires known, non-empty, input dims for scheduling. You might have an unresolved data dependency upstream in the computing graph.");
+    return Elts_t::DataElts(op.getInput(inputIdx)->size()
+        / op.getInput(inputIdx)->dims()[0]);
+}
+
+void Aidge::Pop_OpImpl::forward() {
+    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+
+    assert(op.getInput(0) && "missing input #0");
+    *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()}).clone();
+}
+
+void Aidge::Pop_OpImpl::backward() {
+    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+
+    auto outputGrad = op.getOutput(0)->grad();
+    auto inputGrad = op.getInput(0)->grad();
+
+    inputGrad->getImpl()->copy(
+        outputGrad->getImpl()->rawPtr(),
+        outputGrad->size(),
+        (op.backwardStep()-1) * outputGrad->size());
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/ReshapeImpl.cpp b/src/backend/generic/operator/ReshapeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0ccef525389108e55f047614c0e2c073a923a086
--- /dev/null
+++ b/src/backend/generic/operator/ReshapeImpl.cpp
@@ -0,0 +1,41 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/ReshapeImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Reshape.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+namespace Aidge {
+
+void Reshape_OpImpl::forward() {
+    const Reshape_Op& op = dynamic_cast<const Reshape_Op&>(mOp);
+    AIDGE_ASSERT(op.getInput(0), "missing input#0");
+    // const auto& input = op.getInput(0)->refCastFrom(mInputFallback, *op.getOutput(0));
+    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+}
+
+void Reshape_OpImpl::backward() {
+    const Reshape_Op& op = dynamic_cast<const Reshape_Op&>(mOp);
+    AIDGE_ASSERT(op.getOutput(0)->grad(), "missing gradient for output#0");
+    // const auto& output_grad = op.getOutput(0)->grad()->refCastFrom(mOutputGradFallback, *op.getOutput(0)->grad());
+    op.getInput(0)->grad()->getImpl()->copy(op.getOutput(0)->grad()->getImpl()->rawPtr(), op.getOutput(0)->size());
+}
+
+std::shared_ptr<ProdConso> Reshape_OpImpl::getProdConso() const {
+    return std::make_shared<ProdConso>(mOp, true);  // Reshape is an in-place operation!
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/SelectImpl.cpp b/src/backend/generic/operator/SelectImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9746223d390a0f29b719f5f19b4877aadff5eef6
--- /dev/null
+++ b/src/backend/generic/operator/SelectImpl.cpp
@@ -0,0 +1,46 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/SelectImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Select.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+namespace Aidge {
+
+void Select_OpImpl::forward() {
+    const Select_Op& op = dynamic_cast<const Select_Op&>(mOp);
+    AIDGE_ASSERT(op.getInput(0)->size() > 0, "Select input is empty!");
+
+    std::shared_ptr<Tensor> selectFallback;
+    const auto& select = op.getInput(0)->refCastFrom(selectFallback, DataType::Int32, "cpu");
+    const auto selectVal = select.get<int32_t>(0);
+    AIDGE_ASSERT(selectVal >= 0 && selectVal < op.nbInputs() - 1, "Select input out of range. Expected value in range [0, {}], got {}", op.nbInputs() - 2, selectVal);
+
+    op.getOutput(0)->getImpl()->copy(op.getInput(selectVal + 1)->getImpl()->rawPtr(), op.getInput(selectVal + 1)->size());
+}
+
+void Select_OpImpl::backward() {
+    const Select_Op& op = dynamic_cast<const Select_Op&>(mOp);
+    AIDGE_ASSERT(op.getInput(0)->size() > 0, "Select input is empty!");
+
+    std::shared_ptr<Tensor> selectFallback;
+    const auto& select = op.getInput(0)->refCastFrom(selectFallback, DataType::Int32, "cpu");
+    const auto selectVal = select.get<int32_t>(0);
+    AIDGE_ASSERT(selectVal >= 0 && selectVal < op.nbInputs() - 1, "Select input out of range. Expected value in range [0, {}], got {}", op.nbInputs() - 2, selectVal);
+
+    op.getInput(selectVal + 1)->grad()->getImpl()->copy(op.getOutput(0)->grad()->getImpl()->rawPtr(), op.getOutput(0)->size());
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/ShapeImpl.cpp b/src/backend/generic/operator/ShapeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b14c0abef27493b950231f8d3789cfe99c2c603d
--- /dev/null
+++ b/src/backend/generic/operator/ShapeImpl.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/ShapeImpl.hpp"
+
+#include "aidge/operator/Shape.hpp"
+#include "aidge/data/Tensor.hpp"
+
+namespace Aidge {
+
+void Shape_OpImpl::forward() {
+    // Output is already valid after forwardDims()
+    // But it may be with the wrong device (default cpu)
+    // This can happen if forwardDims is called before setBackend
+    const Shape_Op& op = dynamic_cast<const Shape_Op&>(mOp);
+    op.getOutput(0)->setBackend(op.getInput(0)->backend(), op.getInput(0)->device());
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/SliceImpl.cpp b/src/backend/generic/operator/SliceImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4a4ea27eeb61237554aa18078a5158426b89beb0
--- /dev/null
+++ b/src/backend/generic/operator/SliceImpl.cpp
@@ -0,0 +1,104 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/SliceImpl.hpp"
+
+#include <array>
+#include <cstddef>  // std::size_t
+#include <cstdint>  // std::int32_t
+#include <vector>
+
+#include "aidge/operator/Slice.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+// Helper function to calculate the linear index for multi-dimensional data
+static std::size_t getLinearIndex(const std::vector<std::size_t>& dims, const std::vector<std::size_t>& indices) {
+    size_t linearIndex = 0;
+    size_t stride = 1;
+    for (int i = dims.size() - 1; i >= 0; --i) {
+        linearIndex += indices[i] * stride;
+        stride *= dims[i];
+    }
+    return linearIndex;
+}
+
+void Slice_OpImpl::forward() {
+    const Slice_Op& op = dynamic_cast<const Slice_Op&>(mOp);
+
+    if (!op.getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", op.Type);
+    }
+    AIDGE_ASSERT((op.axes().size() == op.ends().size()) &&
+                    (op.axes().size() == op.starts().size()),
+                    "Starts, Ends and Axes arguments should be the same size.");
+
+    const std::vector<std::size_t> inputDims = op.getInput(0)->dims();
+    std::vector<std::size_t> indices(inputDims.size(), 0); // Initialize indices for each dimension
+
+    // Create an array of ranges for each axis
+    std::vector<std::vector<int>> ranges(inputDims.size());
+
+    // Generate ranges dynamically for each dimension
+    for (std::size_t axisIdx = 0; axisIdx < inputDims.size(); ++axisIdx) {
+        if (std::find(op.axes().begin(), op.axes().end(), axisIdx) != op.axes().end()) {
+            // This axis is being sliced
+            int start = op.starts()[axisIdx];
+            int end = op.ends()[axisIdx];
+            int step = op.steps()[axisIdx];
+
+            start = start >= 0 ? start: start + inputDims[axisIdx];
+            start = std::max(0, std::min(start, static_cast<int>(inputDims[axisIdx])));
+            end = end >= 0 ? end: end + inputDims[axisIdx];
+            end = std::max(0, std::min(end, static_cast<int>(inputDims[axisIdx])));
+            // Generate the range of indices for this axis
+            for (int idx = start; (step > 0) ? (idx < end) : (idx > end); idx += step) {
+                ranges[axisIdx].push_back(idx);
+            }
+        } else {
+            // This axis is not being sliced, keep its full range (just one index in the range)
+            ranges[axisIdx].push_back(0);
+        }
+    }
+
+    // Use iterative stack to handle all dimensions dynamically
+    std::vector<std::size_t> currentIndex(inputDims.size(), 0); // Track current index in each dimension
+    std::vector<std::size_t> stackPointer(inputDims.size(), 0); // Pointers to ranges for each dimension
+    std::size_t dim = 0; // Start at the first dimension
+    std::size_t offset = 0; // Offset in the output tensor
+
+    while (dim < inputDims.size()) {
+        if (stackPointer[dim] < ranges[dim].size()) {
+            // Set the current index for this dimension
+            currentIndex[dim] = ranges[dim][stackPointer[dim]];
+            stackPointer[dim]++;
+
+            if (dim == inputDims.size() - 1) {
+                // We've reached the last dimension, process this index combination
+                std::size_t linearIndex = getLinearIndex(inputDims, currentIndex);
+                op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(linearIndex), 1, offset);
+                offset++;
+            } else {
+                // Move to the next dimension
+                dim++;
+            }
+        } else {
+            // Reset this dimension and move back to the previous one
+            stackPointer[dim] = 0;
+            dim--;
+        }
+    }
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/SplitImpl.cpp b/src/backend/generic/operator/SplitImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4ed9d28d125fba5a2df36da6deec0bf05e07f675
--- /dev/null
+++ b/src/backend/generic/operator/SplitImpl.cpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/SplitImpl.hpp"
+
+#include <functional>  // std::multiplies
+#include <memory>
+#include <numeric>     // std::accumulate
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Split.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+void Aidge::Split_OpImpl::forward() {
+    const Split_Op& op = dynamic_cast<const Split_Op&>(mOp);
+    const auto& axis = op.axis();
+    const auto& splits = op.split();
+    const auto& dims = op.getInput(0)->dims();
+
+    //Compute pre/post axis strides
+    const std::size_t stride_pre = std::accumulate(dims.cbegin(), dims.cbegin() + axis, 1, std::multiplies<std::size_t>());
+    const std::size_t stride_post = std::accumulate(dims.crbegin(), dims.crbegin() + dims.size() -1 - axis, 1, std::multiplies<std::size_t>());
+    for (auto i = 0; i < op.nbOutputs(); ++i)
+    {
+        DimSize_t chunkIdxOnAxis = std::accumulate(splits.cbegin(), splits.cbegin() + i, 0) * stride_post;
+        DimSize_t offset = 0;
+        for (std::size_t j = 0; j < stride_pre; ++j)
+        {
+            // Compute chunk position in input tensor
+            DimSize_t idx = j * stride_post * dims[axis] + chunkIdxOnAxis;
+            // Copy chunk in output
+            op.getOutput(i)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(idx),
+                                            splits[i] * stride_post, offset);
+            offset += splits[i] * stride_post;
+        }
+
+    }
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/SqueezeImpl.cpp b/src/backend/generic/operator/SqueezeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..da285366cfe06fac479169f67018ebf1aef5354f
--- /dev/null
+++ b/src/backend/generic/operator/SqueezeImpl.cpp
@@ -0,0 +1,36 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/SqueezeImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Squeeze.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+void Aidge::Squeeze_OpImpl::forward() {
+    const Squeeze_Op &op_ = static_cast<const Squeeze_Op &>(mOp);
+    // Check if input is provided
+    AIDGE_ASSERT(op_.getInput(0), "Squeeze : missing input 0");
+
+    op_.getOutput(0)->getImpl()->copy(op_.getInput(0)->getImpl()->rawPtr(),
+                                        op_.getInput(0)->size());
+}
+
+
+std::shared_ptr<ProdConso> Squeeze_OpImpl::getProdConso() const {
+    return std::make_shared<ProdConso>(mOp, true);  // Squeeze is an in-place operation!
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/StackImpl.cpp b/src/backend/generic/operator/StackImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1d22a50ddf7f9773370ea4ba86c720d040c81bb0
--- /dev/null
+++ b/src/backend/generic/operator/StackImpl.cpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/StackImpl.hpp"
+
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Elts.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Stack.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+// TODO: Check why getRequiredMemory is always called with empty vector as
+// inputSize
+Elts_t StackProdConso::getRequiredMemory(
+    const Aidge::IOIndex_t inputIdx,
+    const std::vector<DimSize_t> &/*inputsSize*/) const {
+    AIDGE_ASSERT(mOp.getRawInput(inputIdx), "requires valid input");
+
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    // The produced data after one forward pass is simply the input size,
+    // we do not produce the whole output tensor every time.
+    if (op.forwardStep() <= op.maxElements()) {
+        return Elts_t::DataElts(op.getInput(inputIdx)->size());
+    } else {
+        return Elts_t::NoneElts();
+    }
+}
+
+void StackProdConso::resetConsummerProducer() {
+    ProdConso::resetConsummerProducer();
+
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    op.forwardStep() = 0;
+}
+
+void StackOpImpl::forward() {
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    AIDGE_ASSERT(op.getInput(0), "missing input #0");
+    AIDGE_ASSERT((op.forwardStep() < op.maxElements()),
+                 "cannot forward anymore, maximum number of elements to stack "
+                 "exceeded");
+
+    op.getOutput(0)->getImpl()->copy(
+        op.getInput(0)->getImpl()->rawPtr(),
+        op.getInput(0)->size(),
+        op.forwardStep() * op.getInput(0)->size());
+}
+
+void StackOpImpl::backward() {
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    AIDGE_ASSERT(op.backwardStep() > 0, "Stack operator has not been run forward");
+
+    auto inputGrad = op.getInput(0)->grad();
+    auto outputGrad = op.getOutput(0)->grad();
+
+    *inputGrad = outputGrad->extract({op.backwardStep() -1 }).clone();
+}
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/UnfoldImpl.cpp b/src/backend/generic/operator/UnfoldImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d17016bb32ef3e42e8c19299f70d48221eee69a0
--- /dev/null
+++ b/src/backend/generic/operator/UnfoldImpl.cpp
@@ -0,0 +1,67 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/UnfoldImpl.hpp"
+
+#include <cmath>  // std::floor
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Unfold.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+template <DimIdx_t DIM>
+void Unfold_OpImpl<DIM>::forward() {
+    const Unfold_Op<DIM>& op = dynamic_cast<const Unfold_Op<DIM>&>(mOp);
+    const auto kernelDims = op.kernelDims();
+    const auto dilationDims = op.dilationDims();
+    const auto strideDims = op.strideDims();
+    const DimSize_t inHeight = op.getInput(0)->dims()[2];
+    const DimSize_t inWidth = op.getInput(0)->dims()[3];
+    const DimSize_t inChannels = op.getInput(0)->dims()[1];
+
+    const DimSize_t kernelExtentHeight = op.dilationDims()[0] *
+                                            (op.kernelDims()[0] - 1) + 1;
+    const DimSize_t outHeight = 1 + static_cast<DimSize_t>(
+                    std::floor(static_cast<float>(inHeight - kernelExtentHeight) /
+                            static_cast<float>(op.strideDims()[0])));
+    const DimSize_t kernelExtentWidth = op.dilationDims()[1] *
+                                            (op.kernelDims()[1] - 1) + 1;
+    const DimSize_t outWidth = 1 + static_cast<DimSize_t>(
+                    std::floor(static_cast<float>(inWidth - kernelExtentWidth) /
+                            static_cast<float>(op.strideDims()[1])));
+    const DimSize_t outChannels = op.getOutput(0)->dims()[1];
+
+    for (DimSize_t n = 0; n < op.getOutput(0)->dims()[0]; ++n) {
+        for (DimSize_t outC = 0; outC < outChannels; ++outC) {
+            const auto inOffsetW = outC % kernelDims[1];
+            const auto inOffsetH = (outC / kernelDims[1]) % kernelDims[0];
+            const auto inC = outC / kernelDims[0] / kernelDims[1];
+
+            for (DimSize_t outH = 0; outH < outHeight; ++outH) {
+                const auto inH = outH * strideDims[0] + inOffsetH * dilationDims[0];
+
+                for (DimSize_t outW = 0; outW < outWidth; ++outW) {
+                    const auto inW = outW * strideDims[1] + inOffsetW * dilationDims[1];
+
+                    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(((n * inChannels + inC) * inHeight + inH) * inWidth + inW), 1,
+                        ((n * outChannels + outC) * outHeight + outH) * outWidth + outW);
+                }
+            }
+        }
+    }
+}
+
+template class Unfold_OpImpl<2>;
+
+} // namespace Aidge
diff --git a/src/backend/generic/operator/UnsqueezeImpl.cpp b/src/backend/generic/operator/UnsqueezeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9e8a5277429d2c9575cf30ce09c3977eba8fb7c0
--- /dev/null
+++ b/src/backend/generic/operator/UnsqueezeImpl.cpp
@@ -0,0 +1,34 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/UnsqueezeImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Unsqueeze.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+void Unsqueeze_OpImpl::forward() {
+    const Unsqueeze_Op &op_ = static_cast<const Unsqueeze_Op &>(mOp);
+    // Check if input is provided
+    AIDGE_ASSERT(op_.getInput(0), "Unsqueeze : missing input 0");
+    op_.getOutput(0)->getImpl()->copy(op_.getInput(0)->getImpl()->rawPtr(),
+                                        op_.getInput(0)->size());
+}
+
+std::shared_ptr<ProdConso> Unsqueeze_OpImpl::getProdConso() const {
+    return std::make_shared<ProdConso>(mOp, true);  // Unsqueeze is an in-place operation!
+}
+
+} // namespace Aidge
diff --git a/src/operator/Flatten.cpp b/src/operator/Flatten.cpp
index c77adc37400b0f14faade331965f75ac76c8c7b9..e02c7abe6557469763e042044609fe8b59115a45 100644
--- a/src/operator/Flatten.cpp
+++ b/src/operator/Flatten.cpp
@@ -14,21 +14,15 @@
 #include <cstddef>    // std::size_t
 #include <cstdint>    // std::int64_t
 #include <memory>
-#include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
 
+#include "aidge/backend/generic/operator/FlattenImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-void Aidge::Flatten_OpImpl::forward() {
-    const Flatten_Op& op = dynamic_cast<const Flatten_Op&>(mOp);
-    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
-}
-
-//////////////////////////////////////////////////
 
 const std::string Aidge::Flatten_Op::Type = "Flatten";
 
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 10e20046f0565d098275141e90e920ce78725e0f..a4cb4aab0f10cbb3b197e743a5b40208b4a0da94 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -14,6 +14,7 @@
 #include <string>
 #include <vector>
 
+#include "aidge/backend/generic/operator/GatherImpl.hpp"
 #include "aidge/operator/Gather.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
@@ -50,34 +51,6 @@ std::shared_ptr<Aidge::Operator> Aidge::Gather_Op::clone() const {
     return std::make_shared<Gather_Op>(*this);
 }
 
-void Aidge::Gather_OpImpl::forward() {
-    const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
-
-    const std::size_t axisIdx = static_cast<std::size_t>(op.axis()) + (op.axis() >= 0 ? 0 : op.getInput(0)->dims().size());
-
-    std::size_t postAxisElems = 1;
-    for (std::size_t i = axisIdx + 1; i < op.getInput(0)->dims().size(); ++i) {
-        postAxisElems *= op.getInput(0)->dims()[i];
-    }
-    std::size_t preAxisElems = 1;
-    for (std::size_t i = 0; i < axisIdx; ++i) {
-        preAxisElems *= op.getInput(0)->dims()[i];
-    }
-
-    std::size_t outputOffset = 0;
-    for (std::size_t i=0; i<preAxisElems; ++i)
-    {
-        for(std::size_t j = 0; j < op.indices().size(); ++j)
-        {
-            const std::size_t idx = op.indices()[j] >= 0 ?
-                                        static_cast<std::size_t>(op.indices()[j]) :
-                                        static_cast<std::size_t>(op.indices()[j] + static_cast<int>(op.getInput(0)->dims()[axisIdx]));
-            op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i * postAxisElems * op.getInput(0)->dims()[axisIdx] + idx * postAxisElems), postAxisElems, outputOffset);
-            outputOffset += postAxisElems;
-        }
-    }
-}
-
 bool Aidge::Gather_Op::dimsForwarded() const {
     if (getInput(1) && !getInput(1)->undefined()) {
         // output dims are data dependent
diff --git a/src/operator/Identity.cpp b/src/operator/Identity.cpp
index d01d576782d43a45c3810ab9e0d6d4bd42030662..25bb5a5b63d76fba4effa8d8532a029fbc2d8cbc 100644
--- a/src/operator/Identity.cpp
+++ b/src/operator/Identity.cpp
@@ -9,19 +9,12 @@
  *
  ********************************************************************************/
 
-#include <string>
-
 #include "aidge/operator/Identity.hpp"
 
-void Aidge::Identity_OpImpl::forward() {
-    const Identity_Op& op = dynamic_cast<const Identity_Op&>(mOp);
-    op.getOutput(0)->setBackend(op.getInput(0)->backend(), op.getInput(0)->device());
-    op.getOutput(0)->setDataType(op.getInput(0)->dataType());
-    op.getOutput(0)->setDataFormat(op.getInput(0)->dataFormat());
-    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
-}
+#include <string>
+
+#include "aidge/backend/generic/operator/IdentityImpl.hpp"
 
-//////////////////////////////////////////////////
 
 const std::string Aidge::Identity_Op::Type = "Identity";
 
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index 05815f9291cb2673545e745bd558095cc1b9be41..9301b0af2b9649cc57329799f89412db94cbf338 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -15,63 +15,11 @@
 #include <string>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/generic/operator/MemorizeImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::Memorize_ProdConso::getNbRequiredData(
-    Aidge::IOIndex_t inputIdx) const
-{
-    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-
-    if (op.scheduleStep() == 0 && inputIdx == 0) {
-        // No data input is required for the initial step.
-        // Initialization data is required however.
-        return Elts_t::NoneElts();
-    }
-    else if (op.scheduleStep() > 0 && inputIdx == 1) {
-        // No initialization data is required after the initial step.
-        return Elts_t::NoneElts();
-    }
-    else {
-        return ProdConso::getNbRequiredData(inputIdx);
-    }
-}
-
-Aidge::Elts_t Aidge::Memorize_ProdConso::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    assert(mOp.getRawOutput(outputIdx) && "requires valid output");
-
-    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-
-    if ((op.endStep() > 0) && (outputIdx == 1) && (op.scheduleStep() >= op.endStep())) {
-        return Elts_t::NoneElts();
-    }
-    else {
-        return Elts_t::DataElts(op.getOutput(outputIdx)->size());
-    }
-}
-
-void Aidge::Memorize_ProdConso::updateConsummerProducer() {
-    ProdConso::updateConsummerProducer();
-
-    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    AIDGE_ASSERT(op.endStep() == 0 || op.scheduleStep() <= op.endStep(), "cannot update consumer producer anymore, number of cycles exceeded");
-}
-
-void Aidge::Memorize_OpImpl::forward() {
-    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-
-    AIDGE_ASSERT((op.endStep() == 0) || (op.forwardStep() <= op.endStep()), "cannot forward anymore, number of cycles exceeded");
-
-    if (op.forwardStep() == 0) {
-        op.getOutput(0)->getImpl()->copy(op.getInput(1)->getImpl()->rawPtr(), op.getInput(1)->size());
-    }
-    else {
-        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
-    }
-}
 
 const std::string Aidge::Memorize_Op::Type = "Memorize";
 
diff --git a/src/operator/Move.cpp b/src/operator/Move.cpp
index adabcd0d359927693965cec1987d2fad083328b9..a637f8331190b07e317706197c6a199cdc491e9a 100644
--- a/src/operator/Move.cpp
+++ b/src/operator/Move.cpp
@@ -9,13 +9,12 @@
  *
  ********************************************************************************/
 
-#include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Move.hpp"
 
-void Aidge::Move_OpImpl::forward() {
-    const Move_Op& op = dynamic_cast<const Move_Op&>(mOp);
-    op.getOutput(0)->copyFrom(*(op.getInput(0)));
-}
+#include <string>
+
+#include "aidge/backend/generic/operator/MoveImpl.hpp"
+#include "aidge/data/Tensor.hpp"
 
 const std::string Aidge::Move_Op::Type = "Move";
 
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index c93078ed159257ee52602dc2fdf675b24af05155..e3a41bc7aabb44eea3668ef301cc008ed02e40bb 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -15,41 +15,13 @@
 #include <stdexcept>
 #include <string>
 
+#include "aidge/backend/generic/operator/PopImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::Pop_ProdConso::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    assert(mOp.getRawInput(inputIdx) && "requires valid input");
-
-    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
-    AIDGE_ASSERT(!op.getInput(inputIdx)->empty(), "Pop operator requires known, non-empty, input dims for scheduling. You might have an unresolved data dependency upstream in the computing graph.");
-    return Elts_t::DataElts(op.getInput(inputIdx)->size()
-        / op.getInput(inputIdx)->dims()[0]);
-}
-
-void Aidge::Pop_OpImpl::forward() {
-    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
-
-    assert(op.getInput(0) && "missing input #0");
-    *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()}).clone();
-}
-
-void Aidge::Pop_OpImpl::backward() {
-    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
-
-    auto outputGrad = op.getOutput(0)->grad();
-    auto inputGrad = op.getInput(0)->grad();
-
-    inputGrad->getImpl()->copy(
-        outputGrad->getImpl()->rawPtr(),
-        outputGrad->size(),
-        (op.backwardStep()-1) * outputGrad->size());
-}
-
-//////////////////////////////////////////////////////////
 
 const std::string Aidge::Pop_Op::Type = "Pop";
 
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 8b42cb51440cbc61bf8d4dbf69524adb15dbeb44..b12fd486d16beb0a676e38cfdf808fa71996a5ba 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -14,30 +14,15 @@
 #include <cstddef>    // std::size_t
 #include <cstdint>    // std::int64_t
 #include <memory>
-#include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
 
+#include "aidge/backend/generic/operator/ReshapeImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-void Aidge::Reshape_OpImpl::forward() {
-    const Reshape_Op& op = dynamic_cast<const Reshape_Op&>(mOp);
-    AIDGE_ASSERT(op.getInput(0), "missing input#0");
-    // const auto& input = op.getInput(0)->refCastFrom(mInputFallback, *op.getOutput(0));
-    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
-}
-
-void Aidge::Reshape_OpImpl::backward() {
-    const Reshape_Op& op = dynamic_cast<const Reshape_Op&>(mOp);
-    AIDGE_ASSERT(op.getOutput(0)->grad(), "missing gradient for output#0");
-    // const auto& output_grad = op.getOutput(0)->grad()->refCastFrom(mOutputGradFallback, *op.getOutput(0)->grad());
-    op.getInput(0)->grad()->getImpl()->copy(op.getOutput(0)->grad()->getImpl()->rawPtr(), op.getOutput(0)->size());
-}
-
-//////////////////////////////////////////////////
 
 const std::string Aidge::Reshape_Op::Type = "Reshape";
 
diff --git a/src/operator/Select.cpp b/src/operator/Select.cpp
index 67e792cd014282c486be70842de3ea51efdacc6b..6e686ecc4ea097a480f281cb0e9ff24315334e15 100644
--- a/src/operator/Select.cpp
+++ b/src/operator/Select.cpp
@@ -14,37 +14,13 @@
 #include <string>
 #include <vector>
 
+#include "aidge/backend/generic/operator/SelectImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Select.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 
-void Aidge::Select_OpImpl::forward() {
-    const Select_Op& op = dynamic_cast<const Select_Op&>(mOp);
-    AIDGE_ASSERT(op.getInput(0)->size() > 0, "Select input is empty!");
-
-    std::shared_ptr<Tensor> selectFallback;
-    const auto& select = op.getInput(0)->refCastFrom(selectFallback, DataType::Int32, "cpu");
-    const auto selectVal = select.get<int32_t>(0);
-    AIDGE_ASSERT(selectVal >= 0 && selectVal < op.nbInputs() - 1, "Select input out of range. Expected value in range [0, {}], got {}", op.nbInputs() - 2, selectVal);
-
-    op.getOutput(0)->getImpl()->copy(op.getInput(selectVal + 1)->getImpl()->rawPtr(), op.getInput(selectVal + 1)->size());
-}
-
-void Aidge::Select_OpImpl::backward() {
-    const Select_Op& op = dynamic_cast<const Select_Op&>(mOp);
-    AIDGE_ASSERT(op.getInput(0)->size() > 0, "Select input is empty!");
-
-    std::shared_ptr<Tensor> selectFallback;
-    const auto& select = op.getInput(0)->refCastFrom(selectFallback, DataType::Int32, "cpu");
-    const auto selectVal = select.get<int32_t>(0);
-    AIDGE_ASSERT(selectVal >= 0 && selectVal < op.nbInputs() - 1, "Select input out of range. Expected value in range [0, {}], got {}", op.nbInputs() - 2, selectVal);
-
-    op.getInput(selectVal + 1)->grad()->getImpl()->copy(op.getOutput(0)->grad()->getImpl()->rawPtr(), op.getOutput(0)->size());
-}
-
-//////////////////////////////////////////////////
 
 const std::string Aidge::Select_Op::Type = "Select";
 
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index c38f52d765eb994553e5ef8fedf66d79be64808c..4db4704739b362426adb1831c1c95b3796aa918a 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -14,20 +14,13 @@
 #include <string>
 #include <vector>
 
+#include "aidge/backend/generic/operator/ShapeImpl.hpp"
 #include "aidge/operator/Shape.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/Log.hpp"
 
-void Aidge::Shape_OpImpl::forward() {
-    // Output is already valid after forwardDims()
-    // But it may be with the wrong device (default cpu)
-    // This can happen if forwardDims is called before setBackend
-    const Shape_Op& op = dynamic_cast<const Shape_Op&>(mOp);
-    op.getOutput(0)->setBackend(op.getInput(0)->backend(), op.getInput(0)->device());
-}
-
 ///////////////////////////////////////////////
 
 const std::string Aidge::Shape_Op::Type = "Shape";
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 7945200aabbae23abce7d1698b5ddbe8f7ec0882..de08ae6e9a4c6955de262c98d817a154a2d6b045 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -19,12 +19,10 @@
 
 #include <fmt/format.h>
 
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Data.hpp"
+#include "aidge/backend/generic/operator/SliceImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/data/Data.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 
@@ -65,84 +63,6 @@ std::shared_ptr<Aidge::Operator> Aidge::Slice_Op::clone() const {
     return std::make_shared<Slice_Op>(*this);
 }
 
-// Helper function to calculate the linear index for multi-dimensional data
-size_t getLinearIndex(const std::vector<size_t>& dims, const std::vector<size_t>& indices) {
-    size_t linearIndex = 0;
-    size_t stride = 1;
-    for (int i = dims.size() - 1; i >= 0; --i) {
-        linearIndex += indices[i] * stride;
-        stride *= dims[i];
-    }
-    return linearIndex;
-}
-
-void Aidge::Slice_OpImpl::forward() {
-    const Slice_Op& op = dynamic_cast<const Slice_Op&>(mOp);
-
-    if (!op.getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", op.Type);
-    }
-    AIDGE_ASSERT((op.axes().size() == op.ends().size()) &&
-                 (op.axes().size() == op.starts().size()),
-                 "Starts, Ends and Axes arguments should be the same size.");
-
-    const std::vector<size_t> inputDims = op.getInput(0)->dims();
-    std::vector<size_t> indices(inputDims.size(), 0); // Initialize indices for each dimension
-
-    // Create an array of ranges for each axis
-    std::vector<std::vector<int>> ranges(inputDims.size());
-
-    // Generate ranges dynamically for each dimension
-    for (size_t axisIdx = 0; axisIdx < inputDims.size(); ++axisIdx) {
-        if (std::find(op.axes().begin(), op.axes().end(), axisIdx) != op.axes().end()) {
-            // This axis is being sliced
-            int start = op.starts()[axisIdx];
-            int end = op.ends()[axisIdx];
-            int step = op.steps()[axisIdx];
-
-            start = start >= 0 ? start: start + inputDims[axisIdx];
-            start = std::max(0, std::min(start, static_cast<int>(inputDims[axisIdx])));
-            end = end >= 0 ? end: end + inputDims[axisIdx];
-            end = std::max(0, std::min(end, static_cast<int>(inputDims[axisIdx])));
-            // Generate the range of indices for this axis
-            for (int idx = start; (step > 0) ? (idx < end) : (idx > end); idx += step) {
-                ranges[axisIdx].push_back(idx);
-            }
-        } else {
-            // This axis is not being sliced, keep its full range (just one index in the range)
-            ranges[axisIdx].push_back(0);
-        }
-    }
-
-    // Use iterative stack to handle all dimensions dynamically
-    std::vector<size_t> currentIndex(inputDims.size(), 0); // Track current index in each dimension
-    std::vector<size_t> stackPointer(inputDims.size(), 0); // Pointers to ranges for each dimension
-    size_t dim = 0; // Start at the first dimension
-    size_t offset = 0; // Offset in the output tensor
-
-    while (dim < inputDims.size()) {
-        if (stackPointer[dim] < ranges[dim].size()) {
-            // Set the current index for this dimension
-            currentIndex[dim] = ranges[dim][stackPointer[dim]];
-            stackPointer[dim]++;
-
-            if (dim == inputDims.size() - 1) {
-                // We've reached the last dimension, process this index combination
-                size_t linearIndex = getLinearIndex(inputDims, currentIndex);
-                op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(linearIndex), 1, offset);
-                offset++;
-            } else {
-                // Move to the next dimension
-                dim++;
-            }
-        } else {
-            // Reset this dimension and move back to the previous one
-            stackPointer[dim] = 0;
-            dim--;
-        }
-    }
-}
-
 bool Aidge::Slice_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined())
         || (getInput(2) && !getInput(2)->undefined())
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index 09aad0674bc424f50483c064cb7201bc20499faa..f93a36606b61a067ba058f4780db741f7f281fb4 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -11,48 +11,16 @@
 
 #include "aidge/operator/Split.hpp"
 
-#include <cassert>
 #include <cstddef>
 #include <cstdint>
 #include <string>
-#include <utility>
 #include <vector>
 
-#include <fmt/format.h>
-
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Data.hpp"
+#include "aidge/backend/generic/operator/SplitImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-void Aidge::Split_OpImpl::forward() {
-    const Split_Op& op = dynamic_cast<const Split_Op&>(mOp);
-    const auto axis = op.axis();
-    const auto splits = op.split();
-    const auto dims = op.getInput(0)->dims();
-
-    //Compute pre/post axis strides
-    const std::size_t stride_pre = std::accumulate(dims.cbegin(), dims.cbegin() + axis, 1, std::multiplies<std::size_t>());
-    const std::size_t stride_post = std::accumulate(dims.crbegin(), dims.crbegin() + dims.size() -1 - axis, 1, std::multiplies<std::size_t>());
-    for (auto i = 0; i < op.nbOutputs(); ++i)
-    {
-        DimSize_t chunkIdxOnAxis = std::accumulate(splits.cbegin(), splits.cbegin() + i, 0) * stride_post;
-        DimSize_t offset = 0;
-        for (std::size_t j = 0; j < stride_pre; ++j)
-        {
-            // Compute chunk position in input tensor
-            DimSize_t idx = j * stride_post * dims[axis] + chunkIdxOnAxis;
-            // Copy chunk in output
-            op.getOutput(i)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(idx),
-                                            splits[i] * stride_post, offset);
-            offset += splits[i] * stride_post;
-        }
-
-    }
-}
-
-/////////////////////////////////////////////////////
 
 const std::string Aidge::Split_Op::Type = "Split";
 
diff --git a/src/operator/Squeeze.cpp b/src/operator/Squeeze.cpp
index a44146366d5466768d937261729325697ce24f6e..ea34528787f8a8f0e9f8032d97302b1df21532f7 100644
--- a/src/operator/Squeeze.cpp
+++ b/src/operator/Squeeze.cpp
@@ -12,18 +12,13 @@
 #include "aidge/operator/Squeeze.hpp"
 
 #include <algorithm>
-#include <bitset>
+#include <cstddef>
 #include <cstdint>
-#include <fmt/core.h>
-#include <functional>
-#include <iterator>
-#include <limits>
 #include <memory>
-#include <stdexcept>
 #include <string>
 #include <vector>
 
-#include "aidge/data/Data.hpp"
+#include "aidge/backend/generic/operator/SqueezeImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Log.hpp"
@@ -33,6 +28,28 @@
 namespace Aidge {
 const std::string Squeeze_Op::Type = "Squeeze";
 
+Squeeze_Op::Squeeze_Op(const std::vector<std::int8_t> &axes)
+    : OperatorTensor(
+        Type,
+        {InputCategory::Data, InputCategory::OptionalData},
+        1),
+    mAttributes(
+        std::make_shared<Attributes_>(attr<SqueezeAttr::Axes>(axes)))
+{
+    mImpl = std::make_shared<Squeeze_OpImpl>(*this);
+}
+
+Squeeze_Op::Squeeze_Op(const Squeeze_Op &op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Squeeze_Op, *this, op.backend());
+    } else {
+        mImpl = std::make_shared<Squeeze_OpImpl>(*this);
+    }
+}
+
 bool Squeeze_Op::dimsForwarded() const {
   if ((getInput(1) && !getInput(1)->undefined())) {
     // output dims are data dependent
@@ -43,103 +60,80 @@ bool Squeeze_Op::dimsForwarded() const {
 }
 
 bool Squeeze_Op::forwardDims(bool allowDataDependency) {
-  // error checking
-  if (!inputsAssociated(false) || getInput(0)->undefined()) {
-    return false;
-  }
-
-  std::shared_ptr<Tensor> fallback;
-  // Input 1 is axes to squeeze (can also be given via attribute)
-  if (getInput(1)) {
-    if (!this->axes().empty()) {
-      Log::notice("{} : ignoring non-empty axes attribute because input#1 "
-                  "takes precedence",
-                  type());
+    // error checking
+    if (!inputsAssociated(false) || getInput(0)->undefined()) {
+        return false;
     }
 
-    if (!allowDataDependency) {
-      Log::warn("{} : unable to forwardDims() because output dims are data "
-                "dependent on input#1",
-                type());
-      return false;
+    std::shared_ptr<Tensor> fallback;
+    // Input 1 is axes to squeeze (can also be given via attribute)
+    if (getInput(1)) {
+        if (!this->axes().empty()) {
+            Log::warn("{} : ignoring non-empty axes attribute because input#1 "
+                        "takes precedence",
+                        type());
+        }
+
+        if (!allowDataDependency) {
+            Log::error("{} : unable to forwardDims() because output dims are data "
+                        "dependent on input#1",
+                        type());
+            return false;
+        }
+
+        this->axes().clear(); // If both are provided input would override attrs
+        this->axes().reserve(getInput(1)->size());
+        const auto &axes =
+                    getInput(1)->refCastFrom(fallback, NativeType_v<std::int8_t>, "cpu");
+        if (axes.nbDims() == 0) {
+            this->axes().clear();
+        } else {
+            AIDGE_ASSERT(
+                axes.nbDims() == 1,
+                "Axes input tensor should be of size 1. Received {} dimensions : {}",
+                axes.nbDims(), axes.dims());
+            std::copy_n(static_cast<std::int8_t *>(axes.getImpl()->hostPtr()), axes.size(),
+                        std::back_inserter(this->axes()));
+        }
     }
-
-    this->axes().clear(); // If both are provided input would override attrs
-    this->axes().reserve(getInput(1)->size());
-    const auto &axes =
-        getInput(1)->refCastFrom(fallback, NativeType_v<int8_t>, "cpu");
-    if (axes.nbDims() == 0) {
-      this->axes().clear();
+    std::vector<DimSize_t> inputDims = getInput(0)->dims();
+    std::size_t newSize = inputDims.size();
+
+    if (this->axes().size() == 0) {
+        for (std::size_t i = 0; i < inputDims.size(); ++i) {
+            if (inputDims[i] == 1) {
+                --newSize;
+                inputDims[i] = 0;
+            }
+        }
     } else {
-      AIDGE_ASSERT(
-          axes.nbDims() == 1,
-          "Axes input tensor should be of size 1. Received {} dimensions : {}",
-          axes.nbDims(), axes.dims());
-      std::copy_n(static_cast<int8_t *>(axes.getImpl()->hostPtr()), axes.size(),
-                  std::back_inserter(this->axes()));
+        for (std::int8_t axis : this->axes()) {
+            axis = axis >= 0 ? axis : axis + static_cast<std::int8_t>(inputDims.size());
+            if ((axis < 0) || (axis >= static_cast<std::int8_t>(inputDims.size()))) {
+                Log::error("{} : Axis index OutOfBounds error, expected value "
+                    "within size limits of input tensor : "
+                    "[-{},{}], got {}.",
+                    type(), inputDims.size(), inputDims.size() - 1, axis);
+                return false;
+            }
+            if (inputDims[axis] > 1) {
+                Log::error("Cannot squeeze dimensions with shape greater than 1");
+                return false;
+            }
+            newSize -= inputDims[axis];
+            inputDims[axis] = 0;
+        }
     }
-  }
 
-  std::vector<DimSize_t> input_dims = getInput(0)->dims();
-  std::vector<DimSize_t> output_dims;
-  output_dims.reserve(input_dims.size());
-  std::vector<DimIdx_t> axes_rectified_idx;
-  axes_rectified_idx.reserve(input_dims.size());
-
-  if (this->axes().size() == 0) { // squeeze() => squeeze all 1 sized dimensions
-    Log::debug("this->axes() is empty, all 1 sized dim will be squeezed. If "
-               "this is an error ensure that the values are properly set via "
-               "attribute or data input#1.");
-    std::copy_if(input_dims.begin(), input_dims.end(),
-                 std::back_inserter(output_dims),
-                 [](DimSize_t dim) { return dim != 1; });
-  } else { // squeeze({N,.....}) => squeeze all specified dimensions that are of
-           // size 1.
-    /////// ensure indexes validity and set pythonic negative indexes to their
-    // positive value
-    for (const int8_t &axis : this->axes()) {
-      AIDGE_ASSERT(axis >= static_cast<int8_t>(-input_dims.size()) &&
-                       axis < static_cast<int8_t>(input_dims.size()),
-                   "{} : Axis index OutOfBounds error, expected value "
-                   "within size limits of input tensor : "
-                   "[-{},{}], got {}.",
-                   type(), input_dims.size(), input_dims.size() - 1, axis);
-      auto temp =
-          static_cast<DimIdx_t>(axis >= 0 ? axis : axis + input_dims.size());
-      if (axes_rectified_idx.end() == std::find(axes_rectified_idx.begin(),
-                                                axes_rectified_idx.end(),
-                                                temp)) {
-        axes_rectified_idx.push_back(temp);
-      }
+    std::vector<DimSize_t> outputDims;
+    outputDims.reserve(newSize);
+    for (const auto& dim : inputDims) {
+        if (dim > 0)
+            outputDims.push_back(dim);
     }
 
-    // Create output_dims
-    // speeds up binary search
-    std::sort(axes_rectified_idx.begin(), axes_rectified_idx.end());
-    DimSize_t i = 0;
-    std::copy_if(
-        input_dims.begin(), input_dims.end(), std::back_inserter(output_dims),
-        [&axes_rectified_idx, &i, &input_dims](DimSize_t dim) {
-          // if current dim index is found in axes to squeeze
-          // we ensure that this axis is 1 sized, otherwise an error is thrown
-          bool ok = true;
-          if (std::binary_search(axes_rectified_idx.begin(),
-                                 axes_rectified_idx.end(), i)) {
-            AIDGE_ASSERT(dim == 1,
-                         "{} : Tried to squeeze axis nb {} of a tensor of dim "
-                         "{}. Dim to squeeze has to be 1-sized, got size {}."
-                         "Axes to squeeze : {}",
-                         __func__, i, input_dims, input_dims[i],
-                         axes_rectified_idx);
-            ok = false;
-          }
-          i++; // Incrementing counter since there is no enumerate
-               // fctn (until C++23)
-          return ok;
-        });
-  }
-  mOutputs[0]->resize(output_dims);
-  return true;
+    mOutputs[0]->resize(outputDims);
+    return true;
 }
 
 void Squeeze_Op::setBackend(const std::string &name,
@@ -156,13 +150,11 @@ std::set<std::string> Aidge::Squeeze_Op::getAvailableBackends() const {
   return Registrar<Squeeze_Op>::getKeys();
 }
 
-void Aidge::Squeeze_OpImpl::forward() {
-  const Squeeze_Op &op_ = static_cast<const Squeeze_Op &>(mOp);
-  // Check if input is provided
-  AIDGE_ASSERT(op_.getInput(0), "Squeeze : missing input 0");
+////////////////////////////////////////////////////////////////////////////////
 
-  op_.getOutput(0)->getImpl()->copy(op_.getInput(0)->getImpl()->rawPtr(),
-                                    op_.getInput(0)->size());
+std::shared_ptr<Node> Squeeze(const std::vector<std::int8_t> axes,
+    const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Squeeze_Op>(axes), name);
 }
 
 } // namespace Aidge
diff --git a/src/operator/Stack.cpp b/src/operator/Stack.cpp
index 9f8cd163922d5dec22614d213a6f4145b14b9aa1..b884baaa4ec94e57c17eb7c3c48c0919f8a587d3 100644
--- a/src/operator/Stack.cpp
+++ b/src/operator/Stack.cpp
@@ -14,6 +14,7 @@
 #include <memory>
 #include <string>
 
+#include "aidge/backend/generic/operator/StackImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -22,57 +23,10 @@
 
 namespace Aidge {
 
-// TODO: Check why getRequiredMemory is always called with empty vector as
-// inputSize
-Elts_t StackProdConso::getRequiredMemory(
-    const Aidge::IOIndex_t inputIdx,
-    const std::vector<DimSize_t> &/*inputsSize*/) const {
-    assert(mOp.getRawInput(inputIdx) && "requires valid input");
-
-    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
-    // The produced data after one forward pass is simply the input size,
-    // we do not produce the whole output tensor every time.
-    if (op.forwardStep() <= op.maxElements()) {
-        return Elts_t::DataElts(op.getInput(inputIdx)->size());
-    } else {
-        return Elts_t::NoneElts();
-    }
-}
-
-void StackProdConso::resetConsummerProducer() {
-    ProdConso::resetConsummerProducer();
-
-    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
-    op.forwardStep() = 0;
-}
-
-const std::string StackOp::s_type = "Stack";
-
-void StackOpImpl::forward() {
-    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
-    AIDGE_ASSERT(op.getInput(0), "missing input #0");
-    AIDGE_ASSERT((op.forwardStep() < op.maxElements()),
-                 "cannot forward anymore, maximum number of elements to stack "
-                 "exceeded");
-
-    op.getOutput(0)->getImpl()->copy(
-        op.getInput(0)->getImpl()->rawPtr(),
-        op.getInput(0)->size(),
-        op.forwardStep() * op.getInput(0)->size());
-}
-
-void StackOpImpl::backward() {
-    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
-    AIDGE_ASSERT(op.backwardStep() > 0, "Stack operator has not been run forward");
-
-    auto inputGrad = op.getInput(0)->grad();
-    auto outputGrad = op.getOutput(0)->grad();
-
-    *inputGrad = outputGrad->extract({op.backwardStep() -1 }).clone();
-}
+const std::string StackOp::Type = "Stack";
 
 StackOp::StackOp(std::uint32_t maxElements)
-    : OperatorTensor(s_type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
       mAttributes(std::make_shared<Attributes_>(
           attr<StackAttr::MaxElements>(maxElements),
           attr<StackAttr::BackwardStep>(0),
diff --git a/src/operator/Unfold.cpp b/src/operator/Unfold.cpp
index 53b8bd5442081e601a55853115f44067ae17fc2b..888109240ce35841da82aac2600151d3632ffd2b 100644
--- a/src/operator/Unfold.cpp
+++ b/src/operator/Unfold.cpp
@@ -18,56 +18,12 @@
 #include <utility>    // std::pair
 #include <vector>
 
+#include "aidge/backend/generic/operator/UnfoldImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::Unfold_OpImpl<DIM>::forward() {
-    const Unfold_Op<DIM>& op = dynamic_cast<const Unfold_Op<DIM>&>(mOp);
-    const auto kernelDims = op.kernelDims();
-    const auto dilationDims = op.dilationDims();
-    const auto strideDims = op.strideDims();
-    const DimSize_t inHeight = op.getInput(0)->dims()[2];
-    const DimSize_t inWidth = op.getInput(0)->dims()[3];
-    const DimSize_t inChannels = op.getInput(0)->dims()[1];
-
-    const DimSize_t kernelExtentHeight = op.dilationDims()[0] *
-                                            (op.kernelDims()[0] - 1) + 1;
-    const DimSize_t outHeight = 1 + static_cast<DimSize_t>(
-                    floor(static_cast<float>(inHeight - kernelExtentHeight) /
-                            static_cast<float>(op.strideDims()[0])));
-    const DimSize_t kernelExtentWidth = op.dilationDims()[1] *
-                                            (op.kernelDims()[1] - 1) + 1;
-    const DimSize_t outWidth = 1 + static_cast<DimSize_t>(
-                    floor(static_cast<float>(inWidth - kernelExtentWidth) /
-                            static_cast<float>(op.strideDims()[1])));
-    const DimSize_t outChannels = op.getOutput(0)->dims()[1];
-
-    for (DimSize_t n = 0; n < op.getOutput(0)->dims()[0]; ++n) {
-        for (DimSize_t outC = 0; outC < outChannels; ++outC) {
-            const auto inOffsetW = outC % kernelDims[1];
-            const auto inOffsetH = (outC / kernelDims[1]) % kernelDims[0];
-            const auto inC = outC / kernelDims[0] / kernelDims[1];
-
-            for (DimSize_t outH = 0; outH < outHeight; ++outH) {
-                const auto inH = outH * strideDims[0] + inOffsetH * dilationDims[0];
-
-                for (DimSize_t outW = 0; outW < outWidth; ++outW) {
-                    const auto inW = outW * strideDims[1] + inOffsetW * dilationDims[1];
-
-                    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(((n * inChannels + inC) * inHeight + inH) * inWidth + inW), 1,
-                        ((n * outChannels + outC) * outHeight + outH) * outWidth + outW);
-                }
-            }
-        }
-    }
-}
-
-template class Aidge::Unfold_OpImpl<2>;
-
-/////////////////////////////////////////////////////////////
 
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::Unfold_Op<DIM>::Type = "Unfold";
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
index 414afc10f5ea091ba9f30c327ccfbcfe6b3fd558..679b420ec3d794f7efbbe730dd0d75fde4553dea 100644
--- a/src/operator/Unsqueeze.cpp
+++ b/src/operator/Unsqueeze.cpp
@@ -12,22 +12,40 @@
 #include "aidge/operator/Unsqueeze.hpp"
 
 #include <cstdint>
-#include <fmt/core.h>
-#include <functional>
 #include <memory>
 #include <string>
 #include <vector>
 
-#include "aidge/data/Data.hpp"
+#include "aidge/backend/generic/operator/UnsqueezeImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Log.hpp"
-#include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 const std::string Unsqueeze_Op::Type = "Unsqueeze";
 
+
+Unsqueeze_Op::Unsqueeze_Op(const std::vector<int8_t> &axes)
+    : OperatorTensor(Type,
+                    {InputCategory::Data, InputCategory::OptionalData},
+                    1),
+      mAttributes(std::make_shared<Attributes_>(attr<UnsqueezeAttr::Axes>(axes)))
+{
+    mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
+}
+
+Unsqueeze_Op::Unsqueeze_Op(const Unsqueeze_Op &op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Unsqueeze_Op, *this, op.backend());
+    } else {
+        mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
+    }
+}
+
 bool Aidge::Unsqueeze_Op::dimsForwarded() const {
   if ((getInput(1) && !getInput(1)->undefined())) {
     // output dims are data dependent
@@ -120,12 +138,11 @@ std::set<std::string> Aidge::Unsqueeze_Op::getAvailableBackends() const {
   return Registrar<Unsqueeze_Op>::getKeys();
 }
 
-void Aidge::Unsqueeze_OpImpl::forward() {
-  const Unsqueeze_Op &op_ = static_cast<const Unsqueeze_Op &>(mOp);
-  // Check if input is provided
-  AIDGE_ASSERT(op_.getInput(0), "Unsqueeze : missing input 0");
-  op_.getOutput(0)->getImpl()->copy(op_.getInput(0)->getImpl()->rawPtr(),
-                                    op_.getInput(0)->size());
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes,
+    const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Unsqueeze_Op>(axes), name);
 }
 
 } // namespace Aidge
diff --git a/src/recipes/ExplicitCastMove.cpp b/src/recipes/ExplicitCastMove.cpp
index c860b9e8a0e1fcbf467eb13e1366f371d731a47d..43eb75797b2444d5c5b4e62af2765006761ff963 100644
--- a/src/recipes/ExplicitCastMove.cpp
+++ b/src/recipes/ExplicitCastMove.cpp
@@ -9,10 +9,11 @@
  *
  ********************************************************************************/
 
-#include "aidge/recipes/Recipes.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Cast.hpp"
 #include "aidge/operator/Move.hpp"
+#include "aidge/recipes/Recipes.hpp"
 
 void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {
     // First, remove existing Cast and Move operators, if not needed anymore
diff --git a/unit_tests/operator/Test_Squeeze_Op.cpp b/unit_tests/operator/Test_Squeeze_Op.cpp
index 41822742c36bfcbb62c4be3784b9028b39ab1cb2..b6b4a77129ae0d91639f85e0e5b8b41dfb6c16e2 100644
--- a/unit_tests/operator/Test_Squeeze_Op.cpp
+++ b/unit_tests/operator/Test_Squeeze_Op.cpp
@@ -80,7 +80,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
                   [&gen, &idx_dims_to_squeeze_dist]() {
                     return idx_dims_to_squeeze_dist(gen);
                   });
-    Log::error("dims_to_sqeeze = {}", dims_to_squeeze);
+    Log::error("dims_to_squeeze = {}", dims_to_squeeze);
 
     std::shared_ptr<Node> squeeze_node = Squeeze(dims_to_squeeze);
     auto op =
@@ -97,7 +97,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
     // Test
     input_T->resize(dims_in);
     op->setInput(0, input_T);
-    REQUIRE_THROWS(op->forwardDims());
+    REQUIRE(false == op->forwardDims());
   }
   SECTION("Compare with reference output") {
     SECTION("axes is given via attribute") {
@@ -140,7 +140,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
         std::vector<DimSize_t> dims_in{1, 2, 3, 4};
         input_T->resize(dims_in);
 
-        REQUIRE_THROWS(op->forwardDims());
+        REQUIRE(false == (op->forwardDims()));
       }
       SECTION("Squeeze multiple non-sized-axes") {
         std::shared_ptr<Node> squeeze_node =
@@ -152,7 +152,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
         std::array<DimSize_t, 3> dims_in{2, 3, 4};
         input_T->resize(dims_in);
 
-        REQUIRE_THROWS((op->forwardDims()));
+        REQUIRE(false == (op->forwardDims()));
       }
     }
     SECTION("axes is given via tensor") {
@@ -287,7 +287,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
 
       if (nb_dims_tensor > max_nb_dims || not_in_bounds ||
           dim_to_squeeze_not_1_sized) {
-        REQUIRE_THROWS(op->forwardDims());
+        REQUIRE(false == (op->forwardDims()));
       } else {
         // output tensor
         int i = 0;
@@ -381,7 +381,7 @@ TEST_CASE("[core/operator] Squeeze(forward)", "[Squeeze][forward]") {
     }
     if (nb_dims_tensor > max_nb_dims || not_in_bounds ||
         dim_to_squeeze_not_1_sized) {
-      REQUIRE_THROWS(op->forwardDims());
+      REQUIRE(false == (op->forwardDims()));
     } else {
       // output tensor
       int i = 0;