diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 6ef7441f8d62471fcc7b2425a8b465184778b752..d77e6693b27c08da5c60f5410406a08e4863f1c4 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -36,6 +36,8 @@
 #include "aidge/nodeTester/ConditionalInterpreter.hpp"
 
 #include "aidge/operator/Add.hpp"
+#include "aidge/operator/And.hpp"
+#include "aidge/operator/ArgMax.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/Concat.hpp"
@@ -58,6 +60,7 @@
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/Pow.hpp"
 #include "aidge/operator/ReduceMean.hpp"
+#include "aidge/operator/ReduceSum.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Reshape.hpp"
 #include "aidge/operator/Resize.hpp"
diff --git a/include/aidge/data/DataProvider.hpp b/include/aidge/data/DataProvider.hpp
index 62d10a6983e8cf5fd8e2730d3203bed97284e336..6c19b5355e406454a2e20bc8994d0ab04d53576a 100644
--- a/include/aidge/data/DataProvider.hpp
+++ b/include/aidge/data/DataProvider.hpp
@@ -35,6 +35,9 @@ private:
     // Desired size of the produced batches
     const std::size_t mBatchSize;
 
+    // The backend for data tensors
+    std::string mBackend;
+
     // Enable random shuffling for learning
     const bool mShuffle;
 
@@ -67,7 +70,7 @@ public:
      * @param database database from which to load the data.
      * @param batchSize number of data samples per batch.
      */
-    DataProvider(const Database& database, const std::size_t batchSize, const bool shuffle = false, const bool dropLast = false);
+    DataProvider(const Database& database, const std::size_t batchSize, const std::string& backend = "cpu", const bool shuffle = false, const bool dropLast = false);
 
 public:
     /**
diff --git a/include/aidge/operator/And.hpp b/include/aidge/operator/And.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..04a2fab1ed3569da161049ecece85a6e906e1cd8
--- /dev/null
+++ b/include/aidge/operator/And.hpp
@@ -0,0 +1,81 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_AND_H_
+#define AIDGE_CORE_OPERATOR_AND_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/**
+ * @brief Tensor element-wise logical and operation.
+ */
+class And_Op : public OperatorTensor,
+    public Registrable<And_Op, std::string, std::shared_ptr<OperatorImpl>(const And_Op&)> {
+public:
+    static const std::string Type;
+
+    /**
+     * @brief Compute element-wise and operation on two given inputs.
+     * @details supports broadcasting of both operands.
+     */
+    And_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    And_Op(const And_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl) {
+            SET_IMPL_MACRO(And_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::And_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<And_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input_1", "data_input_2"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> And(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<And_Op>(), name);
+}
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_AND_H_ */
diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..1b11e211d23563d75bf943a96fa26bc84a3aa4b8
--- /dev/null
+++ b/include/aidge/operator/ArgMax.hpp
@@ -0,0 +1,135 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_ARGMAX_H_
+#define AIDGE_CORE_OPERATOR_ARGMAX_H_
+
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class ArgMaxAttr { Axis, KeepDims, SelectLastIndex };
+
+/**
+ * @brief This operator has as purpose to reduce given dimension by replacing with the Max value's index.
+*/
+class ArgMax_Op : public OperatorTensor,
+                public Registrable<ArgMax_Op, std::string, std::shared_ptr<OperatorImpl>(const ArgMax_Op &)> {
+
+public:
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<ArgMaxAttr,
+                                        std::int32_t,
+                                        bool,
+                                        bool>;
+    template <ArgMaxAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    ArgMax_Op() = delete;
+
+    /**
+     * @brief constructor for ArgMax op
+     * @param[in] axis axis along which to perform the reduction
+     * @param[in] keep_dims if true, a dimension of size 1 is kept in place of the reduced axis;
+     * if false, the dimension is removed completely
+     * @param[in] select_last_index if there are several maxima, if true the last index is returned,
+     * if false the first index is returned.
+     */
+    ArgMax_Op(std::int32_t axis, bool keep_dims, bool select_last_index)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ArgMaxAttr::Axis>(axis),
+            attr<ArgMaxAttr::KeepDims>(keep_dims),
+            attr<ArgMaxAttr::SelectLastIndex>(select_last_index)))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ArgMax_Op(const ArgMax_Op& op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ArgMax_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ArgMax_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ArgMax_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::Axis>(); }
+    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::KeepDims>(); }
+    inline bool& selectLastIndex() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::SelectLastIndex>(); }
+
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+/**
+ * @brief Compute the max value of a Tensor over the provided axes. Dimensions
+ * may be reduced by erasing the provided axis or not.
+ *
+ * @param axis Dimension over which data max should be computed.
+ * @param keep_dims Whether or not reduced dimensions are to be erased.
+ * @param select_last_index Whether to select the last index of max elements in case there are many maximums.
+ * By default, the first max element index is used.
+ * @param name Name of the Operator.
+ * @return std::shared_ptr<Node> Node containing the Operator.
+ */
+inline std::shared_ptr<Node> ArgMax(std::int32_t axis=0,
+                                    bool keep_dims=true,
+                                    bool select_last_index=false,
+                                    const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<ArgMax_Op>(axis, keep_dims, select_last_index), name);
+
+}
+
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_ARGMAX_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 07beb0a39a88254f0aecdda35cd63f5d338af532..43b121be2654c1dd63116075be397e421823b9b5 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -26,8 +26,11 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ReduceMeanAttr { Axes, KeepDims };
+enum class ReduceMeanAttr { Axes, KeepDims, NoopWithEmptyAxes };
 
+/**
+ * @brief This operator has as purpose to reduce given axes by replacing with the mean value.
+*/
 class ReduceMean_Op : public OperatorTensor,
                 public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)> {
 
@@ -37,7 +40,8 @@ public:
 private:
     using Attributes_ = StaticAttributes<ReduceMeanAttr,
                                             std::vector<std::int32_t>,
-                                            DimSize_t>;
+                                            bool,
+                                            bool>;
     template <ReduceMeanAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
@@ -45,7 +49,15 @@ private:
 public:
     ReduceMean_Op() = delete;
 
-    ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims);
+    /**
+     * @brief constructor for ReduceMean op
+     * @param[in] axes dimensions along which to perform the reduction
+     * @param[in] keep_dims if true, a dimension of size 1 is kept in place of each reduced axis;
+     * if false, the dimensions are removed completely
+     * @param[in] noop_with_empty_axes used when no axes are provided, if set to true, the operator does nothing
+     * and if false, we reduce on all axes
+     */
+    ReduceMean_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -65,7 +77,8 @@ public:
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::Axes>(); }
-    inline DimSize_t& keepDims() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::KeepDims>(); }
+    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::KeepDims>(); }
+    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::NoopWithEmptyAxes>(); }
 
 
     static const std::vector<std::string> getInputsName() {
@@ -85,15 +98,30 @@ public:
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> Node containing the Operator.
  */
+
+
+// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+// template <DimSize_t DIM>
+// inline std::shared_ptr<Node> ReduceMean(
+//     std::int32_t const (&axes)[DIM],
+//     DimSize_t keep_dims = 1,
+//     const std::string& name = "") {
+//     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
+//     return ReduceMean(to_array(axes), keep_dims, name);
+// }
+
+// template <DimIdx_t DIM>
+// const std::string ReduceMean_Op::Type = "ReduceMean";
 std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
-                                        DimSize_t keep_dims=1,
+                                        bool keep_dims=true,
+                                        bool noop_with_empty_axes=false,
                                         const std::string& name = "");
 
 }  // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"axes", "keep_dims"};
+const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9d1220b6b2e7c1e8029ebe20b03d5501d90ae0f6
--- /dev/null
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -0,0 +1,135 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_REDUCESUM_H_
+#define AIDGE_CORE_OPERATOR_REDUCESUM_H_
+
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class ReduceSumAttr { Axes, KeepDims, NoopWithEmptyAxes };
+
+
+/**
+ * @brief This operator has as purpose to reduce given axes by replacing with the sum value.
+*/
+class ReduceSum_Op : public OperatorTensor,
+                public Registrable<ReduceSum_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceSum_Op &)> {
+
+public:
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<ReduceSumAttr,
+                                            std::vector<std::int32_t>,
+                                            bool,
+                                            bool>;
+    template <ReduceSumAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    ReduceSum_Op() = delete;
+
+    /**
+     * @brief constructor for ReduceSum op
+     * @param[in] axes dimensions along which to perform the reduction
+     * @param[in] keep_dims if true, a dimension of size 1 is kept in place of each reduced axis;
+     * if false, the dimensions are removed completely
+     * @param[in] noop_with_empty_axes used when no axes are provided, if set to true, the operator does nothing
+     * and if false, we reduce on all axes
+     */
+    ReduceSum_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ReduceSumAttr::Axes>(axes),
+            attr<ReduceSumAttr::KeepDims>(keep_dims),
+            attr<ReduceSumAttr::NoopWithEmptyAxes>(noop_with_empty_axes)))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ReduceSum_Op(const ReduceSum_Op& op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ReduceSum_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ReduceSum_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ReduceSum_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::Axes>(); }
+    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::KeepDims>(); }
+    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::NoopWithEmptyAxes>(); }
+
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+/**
+ * @brief Compute the sum value of a Tensor over the provided axes. Dimensions
+ * may be reduced by erasing the provided axes or not.
+ *
+ * @param axes Dimensions over which data sum should be computed.
+ * @param keep_dims Whether or not reduced dimensions are to be erased.
+ * @param name Name of the Operator.
+ * @return std::shared_ptr<Node> Node containing the Operator.
+ */
+inline std::shared_ptr<Node> ReduceSum(const std::vector<std::int32_t> &axes={},
+                                        bool keep_dims=true,
+                                        bool noop_with_empty_axes=false,
+                                        const std::string& name = "") {
+    // FIXME: properly handle default w&b initialization in every cases
+    AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceSum, not supported");
+    return std::make_shared<Node>(std::make_shared<ReduceSum_Op>(axes, keep_dims, noop_with_empty_axes), name);
+
+}
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ReduceSumAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_REDUCESUM_H_ */
diff --git a/python_binding/data/pybind_DataProvider.cpp b/python_binding/data/pybind_DataProvider.cpp
index 2f652aff5008f8008952ffb1bb6fb1738021b436..c0b7218cdfd69d2ad0d8493a99833b80785c9d39 100644
--- a/python_binding/data/pybind_DataProvider.cpp
+++ b/python_binding/data/pybind_DataProvider.cpp
@@ -27,7 +27,7 @@ std::vector<std::shared_ptr<Aidge::Tensor>> DataProvider::next() {
 void init_DataProvider(py::module& m){
 
     py::class_<DataProvider, std::shared_ptr<DataProvider>>(m, "DataProvider")
-        .def(py::init<Database&, std::size_t, bool, bool>(), py::arg("database"), py::arg("batch_size"), py::arg("shuffle"), py::arg("drop_last"))
+        .def(py::init<Database&, std::size_t, std::string, bool, bool>(), py::arg("database"), py::arg("batch_size"), py::arg("backend"), py::arg("shuffle"), py::arg("drop_last"))
         .def("__iter__", &DataProvider::iter)
         .def("__next__", &DataProvider::next)
         .def("__len__", &DataProvider::getNbBatch);
diff --git a/python_binding/operator/pybind_And.cpp b/python_binding/operator/pybind_And.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..08dddfc8168bb77086a3dd72aca45b110a4cbce9
--- /dev/null
+++ b/python_binding/operator/pybind_And.cpp
@@ -0,0 +1,34 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/And.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_And(py::module& m) {
+    py::class_<And_Op, std::shared_ptr<And_Op>, OperatorTensor>(m, "AndOp", py::multiple_inheritance(),
+          R"mydelimiter( Initialize an And operator.)mydelimiter")
+    .def(py::init<>())
+    .def_static("get_inputs_name", &And_Op::getInputsName)
+    .def_static("get_outputs_name", &And_Op::getOutputsName);
+    declare_registrable<And_Op>(m, "AndOp");
+    m.def("And", &And, py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing an And operator.
+			:param name : name of the node.
+		)mydelimiter");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_ArgMax.cpp b/python_binding/operator/pybind_ArgMax.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3de54afd7a669347cc2b272cff9b87cf152be09a
--- /dev/null
+++ b/python_binding/operator/pybind_ArgMax.cpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <array>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/ArgMax.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_ArgMax(py::module &m) {
+  const std::string pyClassName("ArgMaxOp");
+  py::class_<ArgMax_Op, std::shared_ptr<ArgMax_Op>, OperatorTensor>(
+    m, pyClassName.c_str(), py::multiple_inheritance(),
+      R"mydelimiter(
+		Initialize an ArgMax operator.
+			:param axis: The axis along which to compute the max element. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axis: int
+			:param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+							the reduced dimensions are removed.
+			:type keep_dims: bool
+			:param select_last_index: If True, selects the last index if there are multiple occurrences 
+									of the max value. If False (default), selects the first occurrence.
+			:type select_last_index: bool
+		)mydelimiter")
+    .def(py::init<std::int32_t, bool, bool>(), py::arg("axis"), py::arg("keep_dims"), py::arg("select_last_index"))
+    .def_static("get_inputs_name", &ArgMax_Op::getInputsName)
+    .def_static("get_outputs_name", &ArgMax_Op::getOutputsName)
+    ;
+  declare_registrable<ArgMax_Op>(m, pyClassName);
+
+  m.def("ArgMax", [](std::int32_t axis,
+                    bool keepDims,
+                    bool selectLastIndex,
+                    const std::string& name) {
+        return ArgMax(axis, keepDims, selectLastIndex, name);
+    }, py::arg("axis") = 0,
+       py::arg("keep_dims") = true,
+       py::arg("select_last_index") = false,
+       py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing an ArgMax operator.
+			:param axis: The axis along which to compute the max element. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axis: int
+			:param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+							the reduced dimensions are removed.
+			:type keep_dims: bool
+			:param select_last_index: If True, selects the last index if there are multiple occurrences 
+									of the max value. If False (default), selects the first occurrence.
+			:type select_last_index: bool
+			:param name : name of the node.
+		)mydelimiter");
+}
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 3023c077e2f3695902ca76dfa21831749f0ca82e..31f88d149d4d654c464b37f3b49c2839c13ea64d 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -27,22 +27,49 @@ namespace Aidge {
 void declare_ReduceMeanOp(py::module &m) {
   const std::string pyClassName("ReduceMeanOp");
   py::class_<ReduceMean_Op, std::shared_ptr<ReduceMean_Op>, OperatorTensor>(
-    m, pyClassName.c_str(), py::multiple_inheritance())
-    .def(py::init<std::vector<std::int32_t>, DimSize_t>(), py::arg("axes"), py::arg("keep_dims"))
+    m, pyClassName.c_str(), py::multiple_inheritance(),
+      R"mydelimiter(
+		Initialize a ReduceMean operator.
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axes: List[int]
+			:param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+							the reduced dimensions are removed.
+			:type keep_dims: bool
+			:param noop_with_empty_axes: If True and no axes are given, the operator just copies the input;
+							if False, the operator reduces over all dimensions.
+			:type noop_with_empty_axes: bool
+		)mydelimiter")
+    .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes"), py::arg("keep_dims"), py::arg("noop_with_empty_axes"))
     .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
     ;
   declare_registrable<ReduceMean_Op>(m, pyClassName);
 
   m.def("ReduceMean", [](const std::vector<int>& axes,
-                                                                DimSize_t keepDims,
-                                                                const std::string& name) {
+                          bool keepDims,
+                          bool noopWithEmptyAxes,
+                          const std::string& name) {
         // AIDGE_ASSERT(axes.size() == DIM, "axes size [{}] does not match DIM [{}]", axes.size(), DIM);
 
-        return ReduceMean(axes, keepDims, name);
-    }, py::arg("axes"),
-       py::arg("keep_dims") = 1,
-       py::arg("name") = "");
+        return ReduceMean(axes, keepDims, noopWithEmptyAxes, name);
+    }, py::arg("axes") = std::vector<std::int32_t>(),
+       py::arg("keep_dims") = true,
+       py::arg("noop_with_empty_axes") = false,
+       py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing a ReduceMean operator.
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axes: List[int]
+			:param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+							the reduced dimensions are removed.
+			:type keep_dims: bool
+			:param noop_with_empty_axes: If True and no axes are given, the operator just copies the input;
+							if False, the operator reduces over all dimensions.
+			:type noop_with_empty_axes: bool
+			:param name : name of the node.
+		)mydelimiter");
 }
 
 
diff --git a/python_binding/operator/pybind_ReduceSum.cpp b/python_binding/operator/pybind_ReduceSum.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..eaa57ef1c663a03cfd59ce02c13c3c7028b69e01
--- /dev/null
+++ b/python_binding/operator/pybind_ReduceSum.cpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <array>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/ReduceSum.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_ReduceSum(py::module &m) {
+  const std::string pyClassName("ReduceSumOp");
+  py::class_<ReduceSum_Op, std::shared_ptr<ReduceSum_Op>, OperatorTensor>(
+    m, pyClassName.c_str(), py::multiple_inheritance(),
+      R"mydelimiter(
+		Initialize a ReduceSum operator.
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1],
+						where r is the rank of the input tensor.
+			:type axes: List[int]
+			:param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+							the reduced dimensions are removed.
+			:type keep_dims: bool
+			:param noop_with_empty_axes: If True and no axes are given, the operator just copies the input;
+							if False, the operator reduces over all dimensions.
+			:type noop_with_empty_axes: bool
+		)mydelimiter")
+    .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes"), py::arg("keep_dims"), py::arg("noop_with_empty_axes"))
+    .def_static("get_inputs_name", &ReduceSum_Op::getInputsName)
+    .def_static("get_outputs_name", &ReduceSum_Op::getOutputsName)
+    ;
+  declare_registrable<ReduceSum_Op>(m, pyClassName);
+
+  m.def("ReduceSum", [](const std::vector<int>& axes,
+                        bool keepDims,
+                        bool noopWithEmptyAxes,
+                        const std::string& name) {
+        return ReduceSum(axes, keepDims, noopWithEmptyAxes, name);
+    }, py::arg("axes") = std::vector<std::int32_t>(),
+       py::arg("keep_dims") = true,
+       py::arg("noop_with_empty_axes") = false,
+       py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing a ReduceSum operator.
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1],
+						where r is the rank of the input tensor.
+			:type axes: List[int]
+			:param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+							the reduced dimensions are removed.
+			:type keep_dims: bool
+			:param noop_with_empty_axes: If True and no axes are given, the operator just copies the input;
+							if False, the operator reduces over all dimensions.
+			:type noop_with_empty_axes: bool
+			:param name : name of the node.
+		)mydelimiter");
+}
+} // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 918143213f3dd490ef0e448f086c09135b05f6af..a9efa0538951087cd2e846b5b74017c36dc72f04 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -28,6 +28,8 @@ void init_Operator(py::module&);
 void init_OperatorTensor(py::module&);
 
 void init_Add(py::module&);
+void init_And(py::module&);
+void init_ArgMax(py::module&);
 void init_AvgPooling(py::module&);
 void init_BatchNorm(py::module&);
 void init_Concat(py::module&);
@@ -50,6 +52,7 @@ void init_Pad(py::module&);
 void init_Pop(py::module&);
 void init_Pow(py::module&);
 void init_ReduceMean(py::module&);
+void init_ReduceSum(py::module&);
 void init_ReLU(py::module&);
 void init_Reshape(py::module&);
 void init_Resize(py::module&);
@@ -100,6 +103,8 @@ void init_Aidge(py::module& m) {
     init_Operator(m);
     init_OperatorTensor(m);
     init_Add(m);
+    init_And(m);
+    init_ArgMax(m);
     init_AvgPooling(m);
     init_BatchNorm(m);
     init_Concat(m);
@@ -122,6 +127,7 @@ void init_Aidge(py::module& m) {
     init_Pop(m);
     init_Pow(m);
     init_ReduceMean(m);
+    init_ReduceSum(m);
     init_ReLU(m);
     init_Reshape(m);
     init_Resize(m);
diff --git a/src/data/DataProvider.cpp b/src/data/DataProvider.cpp
index fc6b842edef17c80a4ef80667fc814bf85df25a4..7f4eb71aa1f1e05c42aef8090988d0ea05aa6cb2 100644
--- a/src/data/DataProvider.cpp
+++ b/src/data/DataProvider.cpp
@@ -23,9 +23,10 @@
 #include "aidge/utils/Random.hpp"
 
 
-Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::size_t batchSize, const bool shuffle, const bool dropLast)
+Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::size_t batchSize, const std::string& backend, const bool shuffle, const bool dropLast)
     : mDatabase(database),
       mBatchSize(batchSize),
+      mBackend(backend),
       mShuffle(shuffle),
       mDropLast(dropLast),
       mNumberModality(database.getItem(0).size()),
@@ -63,7 +64,7 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch() con
         dataBatchDims[i].insert(dataBatchDims[i].begin(), current_batch_size);
         auto batchData = std::make_shared<Tensor>();
         batchData->resize(dataBatchDims[i]);
-        batchData->setBackend("cpu");
+        batchData->setBackend(mBackend);
         batchData->setDataType(mDataTypes[i]);
         batchTensors.push_back(batchData);
     }
@@ -78,6 +79,8 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch() con
 
         // Browse each modularity in the database item
         for (std::size_t j = 0; j < mNumberModality; ++j) {
+
+            dataItem[j]->setBackend(mBackend);
             auto dataSample = dataItem[j];
 
             // Assert tensor sizes
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index e382fe2aca4d6e27a00e4e96233e08b50a92418d..abfc91c6cdf9fd4f6eb46100074b22083514d82e 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -125,7 +125,7 @@ Aidge::Tensor Aidge::Tensor::mean() const {
     // No need to specify the list of all axes!
     std::vector<std::int32_t> axes(nbDims());
     std::iota(std::begin(axes), std::end(axes), 0);
-    auto mean_ = ReduceMean_Op(axes, 0);
+    auto mean_ = ReduceMean_Op(axes, false, false);
     mean_.associateInput(0, std::make_shared<Tensor>(*this));
     mean_.setDataType(dataType());
     mean_.setDataFormat(dataFormat());
diff --git a/src/operator/And.cpp b/src/operator/And.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..43aeebe24ef0e6d0e0b820d1459f25d64e7054a7
--- /dev/null
+++ b/src/operator/And.cpp
@@ -0,0 +1,58 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>    // std::size_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/And.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::And_Op::Type = "And";
+
+bool Aidge::And_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for And Operation: {} for input#0 vs {} for input#1",
+                    inputsDims0, inputsDims1);
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
+        return true;
+    }
+
+    return false;
+}
+
+void Aidge::And_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(And_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/ArgMax.cpp b/src/operator/ArgMax.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..58ade4754a013a65af80e5b754d0d44ad3b18189
--- /dev/null
+++ b/src/operator/ArgMax.cpp
@@ -0,0 +1,53 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ArgMax.hpp"
+
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ArgMax_Op::Type = "ArgMax";
+
+bool Aidge::ArgMax_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        // make Axis attribute positive
+        std::int32_t axis = mAttributes->template getAttr<ArgMaxAttr::Axis>();
+        axis = axis >= 0 ? axis: axis+static_cast<std::int32_t>(getInput(0)->nbDims());
+
+        // build output dimensions
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        if (mAttributes->template getAttr<ArgMaxAttr::KeepDims>()) {
+            outDims[axis] = 1;
+        }
+        else {
+            outDims.erase(outDims.begin() + static_cast<std::size_t>(axis));
+        }
+
+        // TODO: change {1} for {} when scalar Tensors are better handled.
+        mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
+        return true;
+    }
+    return false;
+}
+
+void Aidge::ArgMax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(ArgMax_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
\ No newline at end of file
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index d80525adc68f9692a042fdca2ce6869ac0600f5a..2a215d897884e936aa9265e5ae16b1774d94bae6 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -15,6 +15,7 @@
 #include <cstddef>    // std::size_t
 #include <cstdint>    // std::int32_t
 #include <memory>
+#include <numeric> // For std::iota
 #include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
@@ -26,11 +27,12 @@
 
 const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
 
-Aidge::ReduceMean_Op::ReduceMean_Op(const std::vector<std::int32_t>& axes, Aidge::DimSize_t keep_dims)
+Aidge::ReduceMean_Op::ReduceMean_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes)
     : OperatorTensor(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<ReduceMeanAttr::Axes>(axes),
-        attr<ReduceMeanAttr::KeepDims>(keep_dims)))
+        attr<ReduceMeanAttr::KeepDims>(keep_dims),
+        attr<ReduceMeanAttr::NoopWithEmptyAxes>(noop_with_empty_axes)))
 {}
 
 Aidge::ReduceMean_Op::ReduceMean_Op(const Aidge::ReduceMean_Op& op)
@@ -60,6 +62,18 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
 
         // build output dimensions
         std::vector<DimSize_t> outDims = getInput(0)->dims();
+
+        if (axes.empty())
+        {
+            if(mAttributes->template getAttr<ReduceMeanAttr::NoopWithEmptyAxes>()) {
+                mOutputs[0]->resize(outDims);
+                return true;
+            }
+            // if no axes are provided and NoopWithEmptyAxes is false, reduce on all axes
+            axes.resize(getInput(0)->nbDims());
+            std::iota(axes.begin(), axes.end(), 0);
+        }
+
         if (mAttributes->template getAttr<ReduceMeanAttr::KeepDims>()) {
             std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
         }
@@ -83,8 +97,9 @@ void Aidge::ReduceMean_Op::setBackend(const std::string& name, Aidge::DeviceIdx_
 ////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ReduceMean(const std::vector<std::int32_t> &axes,
-                                        Aidge::DimSize_t keep_dims,
+                                        bool keep_dims,
+                                        bool noop_with_empty_axes,
                                         const std::string& name) {
     AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported");
-    return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims), name);
+    return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims, noop_with_empty_axes), name);
 }
\ No newline at end of file
diff --git a/src/operator/ReduceSum.cpp b/src/operator/ReduceSum.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..aa8271f4c1696d46274e536e14d255525d848f80
--- /dev/null
+++ b/src/operator/ReduceSum.cpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ReduceSum.hpp"
+
+#include <algorithm>  // std::for_each, std::sort
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <numeric> // For std::iota
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ReduceSum_Op::Type = "ReduceSum";
+
+bool Aidge::ReduceSum_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        // make Axes attribute positive
+        std::vector<std::int32_t>& axes = mAttributes->template getAttr<ReduceSumAttr::Axes>();
+        std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
+            if (val < 0)
+                val+=static_cast<std::int32_t>(getInput(0)->nbDims());
+        });
+        std::sort(axes.begin(), axes.end());
+
+        // build output dimensions
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+
+        if (axes.empty())
+        {
+            if(mAttributes->template getAttr<ReduceSumAttr::NoopWithEmptyAxes>()) {
+                mOutputs[0]->resize(outDims);
+                return true;
+            }
+            // if no axes are provided and NoopWithEmptyAxes is false, reduce on all axes
+            axes.resize(getInput(0)->nbDims());
+            std::iota(axes.begin(), axes.end(), 0);
+        }
+
+        if (mAttributes->template getAttr<ReduceSumAttr::KeepDims>()) {
+            std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
+        }
+        else {
+            for (auto it = axes.crbegin(); it != axes.crend(); ++it)
+                outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
+        }
+
+        // TODO: change {1} for {} when scalar Tensors are better handled.
+        mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
+        return true;
+    }
+    return false;
+}
+
+void Aidge::ReduceSum_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(ReduceSum_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
\ No newline at end of file