diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9644620d71276c5e35fc9daaf634f4d4cdb28405
--- /dev/null
+++ b/include/aidge/operator/Stack.hpp
@@ -0,0 +1,106 @@
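+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+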
+#ifndef AIDGE_CORE_OPERATOR_STACK_H_
+#define AIDGE_CORE_OPERATOR_STACK_H_
+
+#include <memory>
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
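+/**
+ * @brief Producer-consumer model of the Stack operator: each forward pass
+ * produces a single slice of the output (one input tensor worth of data)
+ * instead of the whole output tensor.
+ */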
+class StackProdConso : public ProdConso {
+  public:
+    StackProdConso(const Operator &op) : ProdConso(op) {}
+    Elts_t getRequiredMemory(
+        const IOIndex_t outputIdx,
+        const std::vector<DimSize_t> &inputsSize) const override final;
+    void resetConsummerProducer() override;
+};
+
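+/**
+ * @brief Backend-agnostic implementation of the Stack operator: copies the
+ * current input into the output slice selected by the forward step counter.
+ */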
+class StackOpImpl : public OperatorImpl {
+  public:
+    StackOpImpl(const Operator &op, const std::string &backend = "")
+        : OperatorImpl(op, backend) {}
+
+    std::shared_ptr<ProdConso> getProdConso() const override {
+        return std::make_shared<StackProdConso>(mOp);
+    }
+    void forward() override;
+};
+
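+/// Stack attributes: ForwardStep counts completed forward passes,
+/// MaxElements is the total number of tensors to stack.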
+enum class StackAttr { ForwardStep, MaxElements };
+
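+/**
+ * @brief Operator that stacks its successive input tensors along a new
+ * leading axis. The output has dimensions {maxElements, <input dims>} and
+ * is filled one slice per forward pass; forwarding more than maxElements
+ * times is an error.
+ */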
+class StackOp
+    : public OperatorTensor,
+      public Registrable<
+          StackOp,
+          std::string,
+          std::function<std::unique_ptr<OperatorImpl>(const StackOp &)>> {
+
+  private:
+    using Attributes_ =
+        StaticAttributes<StackAttr, std::uint32_t, std::uint32_t>;
+    template <StackAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+  public:
+    static const std::string s_type;
+
+    StackOp(std::uint32_t maxElements);
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output
+     * tensor(s), but not its input tensors (the new operator has no input
+     * associated).
+     * @param op Operator to copy.
+     */
+    StackOp(const StackOp &op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::StackOp
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    void setBackend(const std::string &name,
+                    DeviceIdx_t device = 0) override final;
+
+    std::set<std::string> getAvailableBackends() const override;
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+    void forward() override;
+
+    inline std::shared_ptr<Attributes> attributes() const override {
+        return mAttributes;
+    }
+
+    inline std::uint32_t &maxElements() const {
+        return mAttributes->template getAttr<StackAttr::MaxElements>();
+    }
+
+    inline std::uint32_t &forwardStep() const {
+        return mAttributes->template getAttr<StackAttr::ForwardStep>();
+    }
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
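+/**
+ * @brief Create a node holding a Stack operator.
+ * @param maxElements Maximum number of tensors to stack (must be > 0).
+ * @param name Optional name of the node.
+ *
+ * Usage sketch: with maxElements = 3 and inputs of dims {d0, d1}, three
+ * forward passes fill an output of dims {3, d0, d1}, one slice per pass.
+ */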
+std::shared_ptr<Node> stack(std::uint32_t maxElements,
+                            const std::string &name = "");
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::StackAttr>::data[] = {"forward_step",
+                                                           "max_elements"};
+}
+
+#endif
diff --git a/python_binding/operator/pybind_Stack.cpp b/python_binding/operator/pybind_Stack.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2328892108d724438a39cc37eaf97b856caa3a8a
--- /dev/null
+++ b/python_binding/operator/pybind_Stack.cpp
@@ -0,0 +1,41 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Stack.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+void init_Stack(py::module &m) {
+    py::class_<StackOp, std::shared_ptr<StackOp>, OperatorTensor>(
+        m,
+        "StackOp",
+        py::multiple_inheritance(),
+        R"mydelimiter(Initialize a Stack operator.)mydelimiter")
+        .def(py::init<const std::uint32_t>(), py::arg("max_elements"))
+        .def_static("get_inputs_name", &StackOp::getInputsName)
+        .def_static("get_outputs_name", &StackOp::getOutputsName)
+        .def_readonly_static("Type", &StackOp::s_type);
+
+    m.def("Stack",
+          &stack,
+          py::arg("max_elements"),
+          py::arg("name") = "",
+          R"mydelimiter(
+        Initialize a node containing a Stack operator.
+            :param max_elements : the maximum number of tensors to be stacked.
+			:param name: name of the node.
+		)mydelimiter");
+}
+} // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 7a88f61f518777e9b2e359f9c89eb1f3ebcd53d0..4f7ffea5fefe299a2670fd7bcb816c86070bf315 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -77,6 +77,7 @@ void init_Softmax(py::module&);
 void init_Split(py::module&);
 void init_Sqrt(py::module&);
 void init_Squeeze(py::module&);
+void init_Stack(py::module&);
 void init_Sub(py::module&);
 void init_Tanh(py::module&);
 void init_Transpose(py::module&);
@@ -169,6 +170,7 @@ void init_Aidge(py::module& m) {
     init_Split(m);
     init_Sqrt(m);
     init_Squeeze(m);
+    init_Stack(m);
     init_Sub(m);
     init_Tanh(m);
     init_Transpose(m);
diff --git a/src/operator/Stack.cpp b/src/operator/Stack.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..efe6296a351f69ef3a11d4e1bc04bd0b52d46a06
--- /dev/null
+++ b/src/operator/Stack.cpp
@@ -0,0 +1,124 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Stack.hpp"
+
+#include <cassert>
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+// TODO: check why getRequiredMemory is always called with an empty vector
+// as inputsSize.
+Elts_t StackProdConso::getRequiredMemory(
+    const Aidge::IOIndex_t outputIdx,
+    const std::vector<DimSize_t> &inputsSize) const {
+    assert(mOp.getRawOutput(outputIdx) && "requires valid output");
+
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    // The data produced by one forward pass is the size of a single input
+    // tensor; the whole output tensor is not produced every time.
+    if (op.forwardStep() <= op.maxElements()) {
+        return Elts_t::DataElts(op.getInput(0)->size());
+    } else {
+        return Elts_t::NoneElts();
+    }
+}
+
+void StackProdConso::resetConsummerProducer() {
+    ProdConso::resetConsummerProducer();
+
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    op.forwardStep() = 0;
+}
+
+const std::string StackOp::s_type = "Stack";
+
+void StackOpImpl::forward() {
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    AIDGE_ASSERT(op.getInput(0), "missing input #0");
+    AIDGE_ASSERT((op.forwardStep() < op.maxElements()),
+                 "cannot forward anymore, maximum number of elements to stack "
+                 "exceeded");
+
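+    // Copy the input into the output slice selected by the current step.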
+    op.getOutput(0)->getImpl()->copy(
+        op.getInput(0)->getImpl()->rawPtr(),
+        op.getInput(0)->size(),
+        op.forwardStep() * op.getInput(0)->size());
+}
+
+StackOp::StackOp(std::uint32_t maxElements)
+    : OperatorTensor(s_type, {InputCategory::Data}, 1),
+      mAttributes(std::make_shared<Attributes_>(
+          attr<StackAttr::MaxElements>(maxElements),
+          attr<StackAttr::ForwardStep>(0))) {
+    if (maxElements == 0) {
+        AIDGE_THROW_OR_ABORT(
+            std::invalid_argument,
+            "StackOp creation failed: maxElements must be greater than 0.");
+    }
+    mImpl = std::make_shared<StackOpImpl>(*this);
+}
+
+StackOp::StackOp(const Aidge::StackOp &op)
+    : OperatorTensor(op), mAttributes(op.mAttributes) {
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(StackOp, *this, op.backend());
+    } else {
+        mImpl = std::make_shared<StackOpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::StackOp::clone() const {
+    return std::make_shared<StackOp>(*this);
+}
+
+bool Aidge::StackOp::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
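+        // The output stacks maxElements() inputs: prepend that count to the
+        // input dimensions.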
+        auto inputDims = getInput(0)->dims();
+        inputDims.insert(inputDims.begin(), maxElements());
+        getOutput(0)->resize(inputDims);
+        return true;
+    }
+
+    return false;
+}
+
+void StackOp::setBackend(const std::string &name, DeviceIdx_t device) {
+    if (Registrar<StackOp>::exists({name})) {
+        SET_IMPL_MACRO(StackOp, *this, name);
+    } else {
+        mImpl = std::make_shared<StackOpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> StackOp::getAvailableBackends() const {
+    return Registrar<StackOp>::getKeys();
+}
+
+void StackOp::forward() {
+    Operator::forward();
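+    // One more tensor has been stacked: advance the step counter.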
+    ++forwardStep();
+}
+
+std::shared_ptr<Node> stack(std::uint32_t maxElements,
+                            const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<StackOp>(maxElements),
+                                  name);
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_PopImpl.cpp b/unit_tests/operator/Test_PopImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f46131ed8c324f38874eb433f97d13977b4253a4
--- /dev/null
+++ b/unit_tests/operator/Test_PopImpl.cpp
@@ -0,0 +1,36 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Pop.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+
+using Aidge::Tensor;
+using Aidge::Pop;
+
+TEST_CASE("[cpu/operator] Pop(forward)", "[Pop][CPU]") {
+    std::shared_ptr<Tensor> expectedOutput1 = std::make_shared<Tensor>(Aidge::Array1D<int,3>{{1,2,3}});
+    std::shared_ptr<Tensor> expectedOutput2 = std::make_shared<Tensor>(Aidge::Array1D<int,3>{{4,5,6}});
+    std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Aidge::Array2D<int,2,3>{{{1,2,3}, {4,5,6}}});
+
+    auto pop = Aidge::Pop("pop");
+    pop->getOperator()->associateInput(0, input);
+    pop->getOperator()->setBackend("cpu");
+    pop->getOperator()->setDataType(Aidge::DataType::Int32);
+
+    REQUIRE_NOTHROW(pop->forward());
+    REQUIRE(*std::static_pointer_cast<Aidge::OperatorTensor>(pop->getOperator())->getOutput(0) == *expectedOutput1);
+    REQUIRE_NOTHROW(pop->forward());
+    REQUIRE(*std::static_pointer_cast<Aidge::OperatorTensor>(pop->getOperator())->getOutput(0) == *expectedOutput2);
+}
diff --git a/unit_tests/operator/Test_StackImpl.cpp b/unit_tests/operator/Test_StackImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d853a1ba27fea0a071c1c2373bbd7ef7f4eacd11
--- /dev/null
+++ b/unit_tests/operator/Test_StackImpl.cpp
@@ -0,0 +1,172 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+#include <catch2/matchers/catch_matchers_string.hpp>
+#include <algorithm>
+#include <cstddef>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <stdexcept>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Stack.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+
+using Catch::Matchers::Equals;
+
+namespace Aidge {
+
+TEST_CASE("[core/operator] Stack(forward)", "[Stack]") {
+    constexpr auto nbTrials = 10;
+    std::mt19937 gen(Catch::Generators::Detail::getSeed());
+    std::uniform_int_distribution<std::size_t> nbDist(1, 100);
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(2, 5);
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f);
+
+    const std::size_t nbDimsTensor = nbDimsDist(gen);
+    std::vector<std::size_t> dimsIn(nbDimsTensor);
+    std::shared_ptr<Tensor> t1 = std::make_shared<Tensor>();
+
+    SECTION("Constructors") {
+        // Valid arguments
+        for (auto i = 0; i < nbTrials; ++i) {
+            auto maxElements = nbDist(gen);
+            REQUIRE_NOTHROW(StackOp(maxElements));
+
+            auto op1 = StackOp(maxElements);
+            REQUIRE(op1.maxElements() == maxElements);
+            REQUIRE(op1.forwardStep() == 0);
+
+            // Copy Constructor
+            auto op2 = op1;
+            REQUIRE(op2.maxElements() == maxElements);
+            REQUIRE(op2.forwardStep() == 0);
+        }
+
+        // Invalid arguments
+        REQUIRE_THROWS_AS(StackOp(0), std::invalid_argument);
+    }
+
+    SECTION("forwardDims") {
+        for (auto i = 0; i < nbTrials; ++i) {
+            auto maxElements = nbDist(gen);
+            auto op = StackOp(maxElements);
+
+            const std::size_t nbDims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nbDims);
+            for (std::size_t j = 0; j < nbDims; ++j) {
+                dims[j] = dimsDist(gen);
+            }
+            t1->resize(dims);
+
+            REQUIRE_THROWS_WITH(
+                op.forwardDims(),
+                Equals("Stack: input #0 should be associated with a Tensor"));
+            op.associateInput(0, t1);
+            REQUIRE_NOTHROW(op.forwardDims());
+            REQUIRE(op.getOutput(0)->dims()[0] == maxElements);
+        }
+    }
+
+    SECTION("forward") {
+
+        std::generate(dimsIn.begin(), dimsIn.end(), [&gen, &dimsDist]() {
+            return dimsDist(gen);
+        });
+        const std::size_t nbElems =
+            std::accumulate(dimsIn.cbegin(),
+                            dimsIn.cend(),
+                            std::size_t(1),
+                            std::multiplies<std::size_t>());
+
+        std::uniform_int_distribution<std::size_t> numTensorsDist(2, 10);
+        const std::size_t numTensors = numTensorsDist(gen);
+
+        std::vector<std::shared_ptr<Tensor>> tensors(numTensors);
+        std::vector<float *> arrays(numTensors);
+
+        for (std::size_t i = 0; i < numTensors; ++i) {
+            arrays[i] = new float[nbElems];
+            for (std::size_t j = 0; j < nbElems; ++j) {
+                arrays[i][j] = valueDist(gen);
+            }
+            tensors[i] = std::make_shared<Tensor>();
+            tensors[i]->resize(dimsIn);
+            tensors[i]->setBackend("cpu");
+            tensors[i]->setDataType(DataType::Float32);
+            tensors[i]->getImpl()->setRawPtr(arrays[i], nbElems);
+        }
+
+        auto myStack = stack(numTensors);
+        myStack->getOperator()->setBackend("cpu");
+        myStack->getOperator()->setDataType(DataType::Float32);
+        auto op =
+            std::static_pointer_cast<OperatorTensor>(myStack->getOperator());
+
+        op->associateInput(0, tensors[0]);
+        op->forwardDims();
+        op->getOutput(0)->zeros();
+
+        // Perform forward passes for each tensor
+        for (std::size_t i = 0; i < numTensors; ++i) {
+            if (i > 0) {
+                op->associateInput(0, tensors[i]);
+            }
+            op->forward();
+        }
+
+        auto output = op->getOutput(0);
+
+        std::vector<DimSize_t> expectedDims = dimsIn;
+        expectedDims.insert(expectedDims.begin(), numTensors);
+
+        REQUIRE(output->dims() == expectedDims);
+
+        // Compare output slices with input tensors
+        for (std::size_t i = 0; i < numTensors; ++i) {
+            Tensor outputTensor = output->extract({i});
+            Tensor inputTensor(DataType::Float32);
+            inputTensor.resize(dimsIn);
+            inputTensor.setBackend("cpu");
+            inputTensor.getImpl()->setRawPtr(arrays[i], nbElems);
+
+            REQUIRE(approxEq<float>(outputTensor, inputTensor));
+        }
+
+        // Attempt to exceed maxElements
+        std::shared_ptr<Tensor> extraTensor = std::make_shared<Tensor>();
+        extraTensor->resize(dimsIn);
+        extraTensor->setBackend("cpu");
+        extraTensor->setDataType(DataType::Float32);
+        float *extraArray = new float[nbElems];
+        for (std::size_t j = 0; j < nbElems; ++j) {
+            extraArray[j] = valueDist(gen);
+        }
+        extraTensor->getImpl()->setRawPtr(extraArray, nbElems);
+
+        REQUIRE_THROWS_AS((op->associateInput(0, extraTensor), op->forward()),
+                          std::runtime_error);
+
+        // Clean up
+        delete[] extraArray;
+        for (std::size_t i = 0; i < numTensors; ++i) {
+            delete[] arrays[i];
+        }
+    }
+}
+} // namespace Aidge