diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index 825ca6100382116443699a00bcff27b9bbca028a..fb7ed0587fb074858e9f3766d5de0d43b39d1ef5 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -125,6 +125,23 @@ class test_operator_binding(unittest.TestCase):
         generic_op.forward() # Increment idx
         self.assertEqual(customImpl.idx, 1)
 
+    def test_magic_meth(self):
+        myVar = 2
+        myBool = True
+        # Test dynamic attribute set
+        gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", myVar=myVar).get_operator()
+        gop.myBool = myBool
+        # Test variable set by kwargs
+        self.assertEqual(gop.myVar, myVar)
+        # Test set attr
+        self.assertEqual(gop.myBool, myBool)
+
+        # Test static attribute set
+        prod = aidge_core.Producer([1]).get_operator()
+        self.assertEqual(prod.Constant, False) # Constant is False by default
+        prod.Constant = True
+        self.assertEqual(prod.Constant, True)
+
 
 
 if __name__ == '__main__':
diff --git a/aidge_core/unit_tests/test_tensor.py b/aidge_core/unit_tests/test_tensor.py
index a214a0e354c64b515d0a7ac24d81c85e116938ca..d479c98b20534daa804f6019b63d528883c2b568 100644
--- a/aidge_core/unit_tests/test_tensor.py
+++ b/aidge_core/unit_tests/test_tensor.py
@@ -10,16 +10,16 @@ SPDX-License-Identifier: EPL-2.0
 
 import unittest
 import aidge_core
-
 from functools import reduce
+
 import numpy as np
 
+
 class test_tensor(unittest.TestCase):
-    """
+    """Test tensor binding
     """
     def setUp(self):
         pass
-
     def tearDown(self):
         pass
 
@@ -35,10 +35,60 @@ class test_tensor(unittest.TestCase):
             idx = t.get_idx(coord)
             self.assertEqual(idx, i)
 
-if __name__ == '__main__':
-    unittest.main()
+    def test_get_available_backends(self):
+        self.assertTrue("cpu" in aidge_core.Tensor.get_available_backends())
+
+    def test_numpy_int_to_tensor(self):
+        np_array = np.arange(9).reshape(1, 1, 3, 3).astype(np.int32)
+        # Numpy -> Tensor
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.DataType.Int32)
+        for i_t, i_n in zip(t, np_array.flatten()):
+            self.assertTrue(i_t == i_n)
+        for i, j in zip(t.dims(), np_array.shape):
+            self.assertEqual(i, j)
+
+    def test_tensor_int_to_numpy(self):
+        np_array = np.arange(9).reshape(1, 1, 3, 3)
+        # Numpy -> Tensor
+        t = aidge_core.Tensor(np_array)
+        # Tensor -> Numpy
+        nnarray = np.array(t)
+        for i_nn, i_n in zip(nnarray.flatten(), np_array.flatten()):
+            self.assertTrue(i_nn == i_n)
+        for i, j in zip(t.dims(), nnarray.shape):
+            self.assertEqual(i, j)
 
+    def test_numpy_int64_to_tensor(self):
+        np_array = np.arange(9).reshape(1, 1, 3, 3).astype(np.int64)
+        # Numpy -> Tensor
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.DataType.Int64)
+        for i_t, i_n in zip(t, np_array.flatten()):
+            self.assertTrue(i_t == i_n)
+        for i, j in zip(t.dims(), np_array.shape):
+            self.assertEqual(i, j)
 
+    def test_numpy_float_to_tensor(self):
+        np_array = np.random.rand(1, 1, 3, 3).astype(np.float32)
+        # Numpy -> Tensor
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.DataType.Float32)
+        for i_t, i_n in zip(t, np_array.flatten()):
+            self.assertTrue(i_t == i_n) # TODO: compare with a tolerance instead of strict equality
+        for i, j in zip(t.dims(), np_array.shape):
+            self.assertEqual(i, j)
 
+    def test_get_set(self):
+        dims = [2, 2, 2]
 
+        np_array = np.arange(8).reshape(dims).astype(np.int32)
+        # Numpy -> Tensor
+        t = aidge_core.Tensor(np_array)
+        for i in range(8):
+            self.assertEqual(t[i], i)
+            t[i] = 5
+            self.assertEqual(t[i], 5)
 
+if __name__ == '__main__':
+    unittest.main()
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index e6a6f13348337f6d52eea7ed8818c2e31fd95396..9e0e457b49fe40b2a6e9e3ce5c5e4b77bee1d93e 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -15,6 +15,10 @@
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/backend/TensorImpl.hpp"
 #include "aidge/backend/StimulusImpl.hpp"
+
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Database.hpp"
diff --git a/include/aidge/backend/cpu/data/GetCPUPtr.h b/include/aidge/backend/cpu/data/GetCPUPtr.h
new file mode 100644
index 0000000000000000000000000000000000000000..47e3b07e8fa08cdcd714745a9a49bb03e30f79f5
--- /dev/null
+++ b/include/aidge/backend/cpu/data/GetCPUPtr.h
@@ -0,0 +1,24 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_DATA_GETCPUPTR_H_
+#define AIDGE_CPU_DATA_GETCPUPTR_H_
+
+#include "aidge/data/Tensor.hpp"
+
+namespace Aidge {
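+// Return the raw host pointer of a Tensor passed as Data.
+// Usage sketch, assuming ``t`` is a std::shared_ptr<Tensor> of floats with a
+// CPU implementation already set:
+//   float* ptr = static_cast<float*>(getCPUPtr(t));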
+inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) {
+  const auto tensor = std::static_pointer_cast<Tensor>(data);
+  return tensor->getImpl()->hostPtr(tensor->getImplOffset());
+}
+} // namespace Aidge
+
+#endif // AIDGE_CPU_DATA_GETCPUPTR_H_
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..46dfae3d53b4b201507290bd538ea13737919c3e
--- /dev/null
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -0,0 +1,193 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_DATA_TENSORIMPL_H_
+#define AIDGE_CPU_DATA_TENSORIMPL_H_
+
+#include "aidge/backend/TensorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/half.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/future_std/span.hpp"
+
+namespace Aidge {
+
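+/// CPU implementation of TensorImpl: stores elements of type T in host memory,
+/// either owned (through mDataOwner) or borrowed via setRawPtr (mData only).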
+template <class T>
+class TensorImpl_cpu : public TensorImpl {
+private:
+    /// Pointer to the data and its capacity
+    future_std::span<T> mData;
+    /// If this instance owns the data, the std::unique_ptr manages it
+    std::unique_ptr<T[]> mDataOwner;
+
+public:
+    static constexpr const char *Backend = "cpu";
+
+    TensorImpl_cpu(DeviceIdx_t device, NbElts_t length) : TensorImpl(Backend, device, length) {}
+
+    bool operator==(const TensorImpl &otherImpl) const override final {
+        const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
+        AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts);
+
+        std::size_t i = 0;
+        for (; i < mNbElts &&
+               *(mData.data()+i) == *static_cast<const T*>(typedOtherImpl.rawPtr(i));
+               ++i) {
+        }
+        return i == mNbElts;
+    }
+
+    static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, NbElts_t length) {
+        return std::make_shared<TensorImpl_cpu<T>>(device, length);
+    }
+
+    inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
+
+    void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
+        const T* srcT = static_cast<const T *>(src);
+        T* dstT = static_cast<T *>(rawPtr(offset));
+
+        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
+        AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported");
+        std::copy(srcT, srcT + length, dstT);
+    }
+
+    void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final {
+        if (length == 0) {
+            return;
+        }
+
+        T* dstT = static_cast<T *>(rawPtr(offset));
+        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
+        switch (srcDt)
+        {
+            case DataType::Float64:
+                std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Float32:
+                std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Float16:
+                std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Int64:
+                std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::UInt64:
+                std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Int32:
+                std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::UInt32:
+                std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Int16:
+                std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::UInt16:
+                std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Int8:
+                std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::UInt8:
+                std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
+                        dstT);
+                break;
+            default:
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
+                break;
+        }
+    }
+
+    void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
+        AIDGE_ASSERT(device.first == Backend, "backend must match");
+        AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
+        copy(src, length, offset);
+    }
+
+    inline void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
+        copy(src, length, offset);
+    }
+
+    void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
+        const T* src = static_cast<const T*>(rawPtr(offset));
+        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
+        std::copy(src, src + length, static_cast<T *>(dst));
+    }
+
+    void *rawPtr(NbElts_t offset = 0) override final {
+        lazyInit();
+        return (mData.data() + offset);
+    };
+
+    const void *rawPtr(NbElts_t offset = 0) const override final {
+        AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const rawPtr");
+        return (mData.data() + offset);
+    };
+
+    void *hostPtr(NbElts_t offset = 0) override final {
+        lazyInit();
+        return (mData.data() + offset);
+    };
+
+    const void *hostPtr(NbElts_t offset = 0) const override final {
+        AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const hostPtr");
+        return (mData.data() + offset);
+    };
+
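+    // Note: setRawPtr does not take ownership of ``ptr``; the caller must keep
+    // the buffer alive for as long as this implementation uses it.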
+    void setRawPtr(void *ptr, NbElts_t length) override final {
+        AIDGE_ASSERT(length >= mNbElts, "trying to set raw pointer of insufficient capacity");
+        mData = future_std::span<T>(static_cast<T *>(ptr), length);
+        mDataOwner.reset();
+    };
+
+    virtual ~TensorImpl_cpu() = default;
+
+private:
+    void lazyInit() {
+        if (mData.size() < mNbElts) {
+            // Need more data, a re-allocation will occur
+            AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");
+            mDataOwner.reset(new T[mNbElts]);
+            mData = future_std::span<T>(mDataOwner.get(), mNbElts);
+        }
+    }
+};
+
+namespace {
+static Registrar<Tensor> registrarTensorImpl_cpu_Float64(
+        {"cpu", DataType::Float64}, Aidge::TensorImpl_cpu<double>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Float32(
+        {"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Float16(
+        {"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
+        {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<std::int32_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int64(
+        {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<std::int64_t>::create);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_DATA_TENSORIMPL_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 52d0118743373c23a4afe4a51d3f22adbe9e6848..5f07cddfa667e7e494defe38a5667332744c3e20 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -12,8 +12,10 @@
 #ifndef AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
 #define AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
 
+#include <algorithm>  // std::for_each
 #include <array>
 #include <cmath>
+#include <cstdint>    // std::int32_t
 #include <numeric>
 #include <vector>
 
@@ -31,18 +33,18 @@ enum class ReduceMeanAttr { Axes, KeepDims };
 template <DimIdx_t DIM>
 class ReduceMean_Op : public OperatorTensor,
                 public Registrable<ReduceMean_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
-                public StaticAttributes<ReduceMeanAttr, std::array<int, DIM>, DimSize_t> {
+                public StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t> {
 
    public:
     static const std::string Type;
 
     ReduceMean_Op() = delete;
 
-    using Attributes_ = StaticAttributes<ReduceMeanAttr, std::array<int, DIM>, DimSize_t>;
+    using Attributes_ = StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t>;
     template <ReduceMeanAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    constexpr ReduceMean_Op(const std::array<int, DIM> &axes, DimSize_t keep_dims)
+    constexpr ReduceMean_Op(const std::array<std::int32_t, DIM> &axes, DimSize_t keep_dims)
         : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<ReduceMeanAttr::Axes>(axes),
                       attr<ReduceMeanAttr::KeepDims>(keep_dims)) {}
@@ -67,29 +69,28 @@ class ReduceMean_Op : public OperatorTensor,
     }
 
     void computeOutputDims() override final {
+        if (!getInput(0)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
         if (!getInput(0)->empty()) {
-            std::vector<DimSize_t> outDims;
-            for(std::size_t d=0; d<getInput(0)->dims().size(); ++d)
-            {
-                bool reducedDim =  false;
-                for(std::size_t i=0; i<DIM; ++i)
-                {
-                    int axis_ = this->template getAttr<ReduceMeanAttr::Axes>()[i];
-                    std::size_t axis= axis_>=0? axis_: axis_ + getInput(0)->nbDims();
-                    if(axis == d)
-                    {
-                        reducedDim = true;
-                        break;
-                    }
-                }
-                if(reducedDim)
-                {
-                    if(this->template getAttr<ReduceMeanAttr::KeepDims>())
-                        outDims.push_back(1);
-                }
-                else
-                    outDims.push_back(getInput(0)->dims()[d]);
+            // normalize the Axes attribute: wrap negative values, then sort
+            std::array<std::int32_t, DIM>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
+            std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
+                if (val < 0)
+                    val+=static_cast<std::int32_t>(getInput(0)->nbDims());
+            });
+            std::sort(axes.begin(), axes.end());
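+            // e.g. input dims {2,3,4} with Axes {-1, 0}: axes become {0, 2};
+            // KeepDims yields {1,3,1}, otherwise the reduced dims are erased -> {3}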
+
+            // build output dimensions
+            std::vector<DimSize_t> outDims = getInput(0)->dims();
+            if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
+                std::for_each(axes.begin(), axes.end(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
+            }
+            else {
+                for (auto it = axes.crbegin(); it != axes.crend(); ++it)
+                    outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
             }
+
             if(outDims.size()>0)
                 mOutputs[0]->resize(outDims);
             else
@@ -111,7 +112,7 @@ class ReduceMean_Op : public OperatorTensor,
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> ReduceMean(const std::array<int, DIM> &axes,
+inline std::shared_ptr<Node> ReduceMean(const std::array<std::int32_t, DIM> &axes,
                                         DimSize_t keep_dims=1,
                                         const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
@@ -123,7 +124,7 @@ inline std::shared_ptr<Node> ReduceMean(const std::array<int, DIM> &axes,
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> ReduceMean(
-    int const (&axes)[DIM],
+    std::int32_t const (&axes)[DIM],
     DimSize_t keep_dims = 1,
     const std::string& name = "") {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
index d3444000191022b575adaf1430319479daa5d4fc..927686cfd5cca910c5ffb25364ae4bc971ad18bf 100644
--- a/include/aidge/utils/Attributes.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -69,6 +69,11 @@ public:
     *  be agnostic from its return type.
     */
     virtual py::object getAttrPy(const std::string& name) const = 0;
+    /* Bindable set function, does not require any templating.
+    *  This is thanks to py::object which allows the function to
+    *  be agnostic to the type of ``value``.
+    */
+    virtual void setAttrPy(const std::string& name, py::object&& value) = 0;
 #endif
     virtual ~Attributes() {}
 };
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 2af8f47e9420f266cc6eca21f167944c761db7ea..44c3b1f5e8df833344fa9b7fe72bdb4ef1e0ec12 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -135,7 +135,7 @@ public:
         assert(res.second && "attribute already exists");
     }
 
-    void setAttrPy(const std::string& name, py::object&& value)
+    void setAttrPy(const std::string& name, py::object&& value) override final
     {
         auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
         if (!resPy.second)
@@ -204,7 +204,7 @@ private:
     // Stores C++ attributes (copy) and Python-only attributes
     // Code should be compiled with -fvisibility=hidden
     // See https://pybind11.readthedocs.io/en/stable/faq.html:
-    // “‘SomeClass’ declared with greater visibility than the type of its 
+    // “‘SomeClass’ declared with greater visibility than the type of its
     // field ‘SomeClass::member’ [-Wattributes]”
     // This map will only be populated if Python interpreter is running
     std::map<std::string, py::object> mAttrsPy;
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index a90a08b01915c461bc8951c08ee2dbd979b957de..be00932e47a93cc4349d39f6cad542cec506c38a 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -202,6 +202,22 @@ public:
     }
 
     #ifdef PYBIND
+    /**
+     * @brief Return the set of defined attribute names.
+     * This method is used to automatically list attributes in the documentation.
+     * It duplicates ``getAttrsName``, but as a static method.
+     *
+     * @return std::set<std::string>
+     */
+    static std::set<std::string> staticGetAttrsName() {
+        std::set<std::string> attrsName;
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            attrsName.insert(EnumStrings<ATTRS_ENUM>::data[i]);
+        }
+        return attrsName;
+    }
+
     py::object getAttrPy(const std::string& name) const override {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
             if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
@@ -212,7 +228,22 @@ public:
         }
 
         AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"%s\" not found", name.c_str());
-    };
+    }
+
+    void setAttrPy(const std::string& name, py::object&& value) override final {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+                // Cannot update the attribute through a reference, as that would
+                // require templating; instead, cast the attribute tuple to a
+                // py::object, update the item, then cast it back.
+                auto tmpAttr = py::cast(mAttrs);
+                py::detail::accessor_policies::tuple_item::set(tmpAttr, static_cast<py::size_t>(i), value);
+                mAttrs = py::cast<std::tuple<T...>>(tmpAttr);
+                return;
+            }
+        }
+        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"%s\" not found", name.c_str());
+    }
     #endif
 
 private:
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index b09570792b0737376c7d477fa7addd477a212bd8..e07f70eaa7de8dc4daa489ec93c8fd9273559ff2 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -31,24 +31,26 @@ void addCtor(py::class_<Tensor,
                         Registrable<Tensor,
                                     std::tuple<std::string, DataType>,
                                     std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>>& mTensor){
-    mTensor.def(py::init([]( py::array_t<T, py::array::c_style | py::array::forcecast> b) {
+    mTensor.def(py::init([](
+        py::array_t<T, py::array::c_style | py::array::forcecast> b,
+        std::string backend = "cpu") {
         /* Request a buffer descriptor from Python */
         py::buffer_info info = b.request();
         Tensor* newTensor = new Tensor();
         newTensor->setDataType(NativeType<T>::type);
         const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end());
         newTensor->resize(dims);
-        // TODO : Find a better way to choose backend
+
         std::set<std::string> availableBackends = Tensor::getAvailableBackends();
-        if (availableBackends.find("cpu") != availableBackends.end()){
-            newTensor->setBackend("cpu");
+        if (availableBackends.find(backend) != availableBackends.end()){
+            newTensor->setBackend(backend);
             newTensor->getImpl()->copyFromHost(static_cast<T*>(info.ptr), newTensor->size());
         }else{
-            printf("Warning : Could not use aidge_cpu backend, verify you have `import aidge_cpu`\n");
+            AIDGE_THROW_OR_ABORT(py::value_error, "Could not find backend %s, verify you have `import aidge_backend_%s`.\n", backend.c_str(), backend.c_str());
         }
 
         return newTensor;
-    }))
+    }), py::arg("array"), py::arg("backend")="cpu")
     .def("__setitem__", (void (Tensor::*)(std::size_t, T)) &Tensor::set)
     .def("__setitem__", (void (Tensor::*)(std::vector<std::size_t>, T)) &Tensor::set)
     ;
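+    // Python usage sketch: aidge_core.Tensor(np.arange(9), backend="cpu")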
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index f87cd5dd66f44535ff895f73b160fc5988e1009a..dc586b7d947c6d8433fabe2fbfaa0990de5c132a 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
-  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, OperatorTensor, Attributes>(
+  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
@@ -34,7 +34,8 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
         py::arg("kernel_dims"),
         py::arg("stride_dims"))
   .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName);
+  .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
+  .def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName);
 
   m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 411a2e1b6ae78065a79b92f25c23dac13e341997..c81c7ade4de50e6879fd32c59f6574b14c473398 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -21,9 +21,10 @@ namespace Aidge {
 
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, OperatorTensor, Attributes>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
-    .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName);
+    .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
+    .def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
 
     m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nbFeatures"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 2b7e5d6b99194e914e48dc6263d0bdcd6a4a8a2f..8cdd138b8cde2a582e9f569a17ae33811637092c 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -19,9 +19,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Concat(py::module& m) {
-    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, OperatorTensor, Attributes>(m, "ConcatOp", py::multiple_inheritance())
+    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, Attributes, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance())
     .def("get_inputs_name", &Concat_Op::getInputsName)
-    .def("get_outputs_name", &Concat_Op::getOutputsName);
+    .def("get_outputs_name", &Concat_Op::getOutputsName)
+    .def("attributes_name", &Concat_Op::staticGetAttrsName);
 
     m.def("Concat", &Concat, py::arg("nbIn"), py::arg("axis"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 2200cd3fec1450011d6e0b5197f8b99b4dfeb4c3..455ea4024438b97b7ac6f07e5fc6722658b42ea4 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -24,7 +24,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
-  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, OperatorTensor, Attributes>(
+  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("ConvOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<DimSize_t,
@@ -39,6 +39,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
         py::arg("dilation_dims"))
     .def("get_inputs_name", &Conv_Op<DIM>::getInputsName)
     .def("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
+    .def("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
     ;
 
   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 15f2c1c8acb4a1b59cfb0f35ebb78cb611647d3b..d858336b6578b580378778f64984ba565e28f941 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
-  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor, Attributes>(
+  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const DimSize_t,
@@ -38,7 +38,8 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         py::arg("stride_dims"),
         py::arg("dilation_dims"))
   .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName);
+  .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
+  .def("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName);
 
   m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
                                                                   const std::vector<DimSize_t>& kernel_dims,
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 606b9ae948847f98d5a1129c08db21e073311879..ad589d73d0aea94d96e62e8065b70bd517633f88 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -20,9 +20,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor, Attributes>(m, "FCOp", py::multiple_inheritance())
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, Attributes, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
   .def("get_inputs_name", &FC_Op::getInputsName)
-  .def("get_outputs_name", &FC_Op::getOutputsName);
+  .def("get_outputs_name", &FC_Op::getOutputsName)
+  .def("attributes_name", &FC_Op::staticGetAttrsName);
 
   m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index 4369d4d22b205a40140cf5160d999743b2e9b4c1..f0d55e2f40bd89269c96564cea6b5a002b477b8b 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -19,9 +19,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Gather(py::module& m) {
-    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, OperatorTensor, Attributes>(m, "GatherOp", py::multiple_inheritance())
+    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, Attributes, OperatorTensor>(m, "GatherOp", py::multiple_inheritance())
     .def("get_inputs_name", &Gather_Op::getInputsName)
-    .def("get_outputs_name", &Gather_Op::getOutputsName);
+    .def("get_outputs_name", &Gather_Op::getOutputsName)
+    .def("attributes_name", &Gather_Op::staticGetAttrsName);
 
     m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 154fdfa64f279d8d6bb40ea7077acdb4c0fd51b9..6be4f31acde5bac14595d06570d7a3158d398db8 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -21,13 +21,36 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_GenericOperator(py::module& m) {
-    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, OperatorTensor, DynamicAttributes>(m, "GenericOperatorOp",
+    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, DynamicAttributes, OperatorTensor>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance())
     .def_readonly_static("identity", &GenericOperator_Op::Identity)
     .def("compute_output_dims", &GenericOperator_Op::computeOutputDims)
     .def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function"));
 
-    m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"),
-          py::arg("name") = "");
+    // Wrap the GenericOperator factory so that extra Python kwargs are
+    // forwarded to the operator as dynamic attributes.
+    m.def("GenericOperator",
+        []( const std::string& type,
+            IOIndex_t nbData,
+            IOIndex_t nbParam,
+            IOIndex_t nbOut,
+            const std::string& name,
+            const py::kwargs kwargs){
+            std::shared_ptr<Node> genericNode = GenericOperator(
+                type,
+                nbData,
+                nbParam,
+                nbOut,
+                name
+            );
+            if (kwargs){
+                std::shared_ptr<GenericOperator_Op> gop = std::static_pointer_cast<GenericOperator_Op>(genericNode->getOperator());
+                for (auto item : kwargs) {
+                    std::string key = py::cast<std::string>(item.first);
+                    py::object value = py::reinterpret_borrow<py::object>(item.second);
+                    gop->setAttrPy(key, std::move(value));
+                }
+            }
+            return genericNode;
+        }, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"), py::arg("name") = "");
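+    // Python usage sketch (kwargs become dynamic attributes):
+    //   node = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", myVar=2)
+    //   node.get_operator().myVar  # -> 2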
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 07300633ad1fb8163d4456afd744c4eb5d7b0ed1..3e9acb831eb3334bd126d3b360f3b5aa39d83731 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -18,9 +18,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, OperatorTensor, Attributes>(m, "LeakyReLUOp", py::multiple_inheritance())
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Attributes, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
     .def("get_inputs_name", &LeakyReLU_Op::getInputsName)
-    .def("get_outputs_name", &LeakyReLU_Op::getOutputsName);
+    .def("get_outputs_name", &LeakyReLU_Op::getOutputsName)
+    .def("attributes_name", &LeakyReLU_Op::staticGetAttrsName);
 
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index 242bf2c451723677e1b9063edfc3098d4159e5a4..72bc0f817fd911f1ba0801fc841df05166388b84 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -20,9 +20,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void declare_MatMul(py::module &m) {
-  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor, Attributes>(m, "MatMulOp", py::multiple_inheritance())
+  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Attributes, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
   .def("get_inputs_name", &MatMul_Op::getInputsName)
-  .def("get_outputs_name", &MatMul_Op::getOutputsName);
+  .def("get_outputs_name", &MatMul_Op::getOutputsName)
+  .def("attributes_name", &MatMul_Op::staticGetAttrsName);
 
   m.def("MatMul", &MatMul, py::arg("in_channels"), py::arg("out_channels"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 0ee3d9df80d7ea7b7be2b8d5c456d5d739506882..485e0eaf6e6e68367ae9037fd922da07433a76e3 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
-  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor, Attributes>(
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
@@ -36,7 +36,8 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         py::arg("stride_dims"),
         py::arg("ceil_mode"))
   .def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName);
+  .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
+  .def("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName);
 
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 0956d6260e50d3be2418b1cf4089df87e442e54a..df3fdc297ce44cf96ff26bffb4cd96fa1fe8fe22 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -25,7 +25,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
-  py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Operator, Attributes>(
+  py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>(
     m, ("PadOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, 2*DIM> &,
@@ -36,6 +36,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
         py::arg("borderValue") = 0.0)
     .def("get_inputs_name", &Pad_Op<DIM>::getInputsName)
     .def("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
+    .def("attributes_name", &Pad_Op<DIM>::staticGetAttrsName)
     ;
 
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 78d9ce3489a8309c42cc90189e588a448fd9649a..3caa438d18b3919dbedcf66e4ba53b92b84a50b5 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -30,13 +30,14 @@ void declare_Producer(py::module &m) {
 
 
 void init_Producer(py::module &m) {
-    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, OperatorTensor, Attributes>(
+    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, Attributes, OperatorTensor>(
         m,
         "ProducerOp",
         py::multiple_inheritance())
     .def("dims", &Producer_Op::dims)
     .def("get_inputs_name", &Producer_Op::getInputsName)
-    .def("get_outputs_name", &Producer_Op::getOutputsName);
+    .def("get_outputs_name", &Producer_Op::getOutputsName)
+    .def("attributes_name", &Producer_Op::staticGetAttrsName);
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&, bool)>(&Producer), py::arg("tensor"), py::arg("name") = "", py::arg("constant") = false);
 
     declare_Producer<1>(m);
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index e5de98b69adde5133dde302f7306bc8a5c471eef..1a50edba03f62e6c43ff60320fe4c3d5caa65f41 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -24,10 +24,11 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) {
-  py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, OperatorTensor, Attributes>(
+  py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("ReduceMeanOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName)
     .def("get_outputs_name", &ReduceMean_Op<DIM>::getOutputsName)
+    .def("attributes_name", &ReduceMean_Op<DIM>::staticGetAttrsName)
     ;
 
   m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), [](const std::vector<int>& axes,
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 04e92d39971a731931397e943aba6e296a81a14d..780cffdef695b71dbc2781ba30936b3b45657cbb 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -19,9 +19,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Softmax(py::module& m) {
-    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor, Attributes>(m, "SoftmaxOp", py::multiple_inheritance())
+    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Attributes, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
     .def("get_inputs_name", &Softmax_Op::getInputsName)
-    .def("get_outputs_name", &Softmax_Op::getOutputsName);
+    .def("get_outputs_name", &Softmax_Op::getOutputsName)
+    .def("attributes_name", &Softmax_Op::staticGetAttrsName);
 
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index e92e9c2aaafe2d20220da053a2b9d799fbe8466d..d535a2c932c8d61c0395f03ffc0978caf7ad692f 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -25,12 +25,13 @@
 namespace py = pybind11;
 namespace Aidge {
 
-template <DimIdx_t DIM> 
+template <DimIdx_t DIM>
 void declare_Transpose(py::module &m) {
-  py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, OperatorTensor, Attributes>(
+  py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
   .def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName);
+  .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName)
+  .def("attributes_name", &Transpose_Op<DIM>::staticGetAttrsName);
 
   m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order,
                                                                   const std::string& name) {
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index c3d84ba3986c4dca6047b669f3ef509aa7336eee..ebf73e85583d3300ce68078dc8236001a4db1c96 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -11,6 +11,9 @@
 
 #include <pybind11/pybind11.h>
 
+#include "aidge/backend/cpu/data/TensorImpl.hpp" // This include add Tensor
+
+
 namespace py = pybind11;
 
 namespace Aidge {
diff --git a/python_binding/utils/pybind_Parameter.cpp b/python_binding/utils/pybind_Attributes.cpp
similarity index 79%
rename from python_binding/utils/pybind_Parameter.cpp
rename to python_binding/utils/pybind_Attributes.cpp
index 2957876f31ad0781a36905cef3a5ae88934b6a8a..bfce891176822a3b1c07b1ded0c46c9c94a43c0a 100644
--- a/python_binding/utils/pybind_Parameter.cpp
+++ b/python_binding/utils/pybind_Attributes.cpp
@@ -1,6 +1,7 @@
 #include <pybind11/pybind11.h>
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
@@ -21,11 +22,13 @@ void init_Attributes(py::module& m){
     .def("has_attr", &Attributes::hasAttr, py::arg("name"))
     .def("get_attr_type", &Attributes::getAttrType, py::arg("name"))
     .def("get_attrs_name", &Attributes::getAttrsName)
-    .def("get_attr", &Attributes::getAttrPy, py::arg("name"));
+    .def("get_attr", &Attributes::getAttrPy, py::arg("name"))
+    .def("__getattr__", &Attributes::getAttrPy, py::arg("name"))
+    .def("set_attr", &Attributes::setAttrPy, py::arg("name"), py::arg("value"))
+    .def("__setattr__", &Attributes::setAttrPy, py::arg("name"), py::arg("value"));
 
     py::class_<DynamicAttributes, std::shared_ptr<DynamicAttributes>, Attributes>(m, "DynamicAttributes")
     .def("add_attr", &DynamicAttributes::addAttrPy, py::arg("name"), py::arg("value"))
-    .def("set_attr", &DynamicAttributes::setAttrPy, py::arg("name"), py::arg("value"))
     .def("del_attr", &DynamicAttributes::delAttr, py::arg("name"));
 
     m.def("test_DynamicAttributes_binding", &test_DynamicAttributes_binding);
diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cfcfb45e3735538c1650cfd990ea85e2333916ad
--- /dev/null
+++ b/unit_tests/data/Test_TensorImpl.cpp
@@ -0,0 +1,100 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <array>
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("Tensor creation") {
+  SECTION("from const array") {
+    Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+
+    Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+
+    Tensor xFloat =
+        Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
+
+    SECTION("Tensor features") {
+      REQUIRE(x.nbDims() == 3);
+      REQUIRE(x.dims()[0] == 2);
+      REQUIRE(x.dims()[1] == 2);
+      REQUIRE(x.dims()[2] == 2);
+      REQUIRE(x.size() == 8);
+    }
+
+    SECTION("Access to array") {
+      REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
+      REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
+    }
+
+    SECTION("get function") {
+      REQUIRE(x.get<int>({0, 0, 0}) == 1);
+      REQUIRE(x.get<int>({0, 0, 1}) == 2);
+      REQUIRE(x.get<int>({0, 1, 1}) == 4);
+      REQUIRE(x.get<int>({1, 1, 0}) == 7);
+      x.set<int>({1, 1, 1}, 36);
+      REQUIRE(x.get<int>({1, 1, 1}) == 36);
+    }
+
+    SECTION("Pretty printing for debug") { REQUIRE_NOTHROW(x.print()); }
+
+    SECTION("Tensor (in)equality") {
+      REQUIRE(x == xCopy);
+      REQUIRE_FALSE(x == xFloat);
+    }
+  }
+}
+
+TEST_CASE("Tensor methods") {
+  Tensor x = Array3D<int, 2, 2, 2>{{
+    {{1, 2},
+     {3, 4}},
+    {{5, 6},
+     {7, 8}}
+  }};
+
+  Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+
+  Tensor xFloat =
+      Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
+
+  SECTION("Tensor sharing") {
+    Tensor xCopyCtor(x);
+    REQUIRE(xCopyCtor.getImpl() == x.getImpl());
+
+    Tensor xEqOp = x;
+    REQUIRE(xEqOp.getImpl() == x.getImpl());
+
+    Tensor xCloned = x.clone();
+    REQUIRE(xCloned.getImpl() != x.getImpl());
+    REQUIRE(xCloned == x);
+  }
+
+  SECTION("Tensor extract") {
+    Tensor y = x.extract({0, 1});
+    REQUIRE(y.getImpl() == x.getImpl());
+    REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
+    REQUIRE(y.isContiguous());
+
+    Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1});
+    REQUIRE(y2.getImpl() == x.getImpl());
+    REQUIRE(!y2.isContiguous());
+    Tensor y3 = y2.clone();
+    REQUIRE(y3.isContiguous());
+    REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
+  }
+}