diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml
index b9fdd937b358b714fd83a36d8417ad2b417d0385..18963ced1084c56c1e4c04dceec735126bba962a 100644
--- a/.gitlab/ci/build.gitlab-ci.yml
+++ b/.gitlab/ci/build.gitlab-ci.yml
@@ -1,3 +1,6 @@
+include:
+  - remote: 'https://gitlab.eclipse.org/eclipse/aidge/gitlab_shared_files/-/raw/main/.gitlab/ci/shared_script.gitlab-ci.yml'
+
 build:ubuntu_cpp:
   stage: build
   needs: []
@@ -6,9 +9,9 @@ build:ubuntu_cpp:
   script:
     # Download dependencies
     # aidge_core
-    - 'curl --location --output build_artifacts.zip "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:ubuntu_cpp"'
-    - unzip -o build_artifacts.zip -d .
-    - rm -rf build_cpp
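+    # DEPENDENCY_NAME and DEPENDENCY_JOB are read by the shared .download_dependency script included above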
+    - DEPENDENCY_NAME="aidge_core"
+    - DEPENDENCY_JOB="build:ubuntu_cpp"
+    - !reference [.download_dependency, script]
 
     # Build current module
     - export CMAKE_PREFIX_PATH=../install_cpp
@@ -32,9 +35,9 @@ build:ubuntu_cpp_g++10:
   script:
     # Download dependencies
     # aidge_core
-    - 'curl --location --output build_artifacts.zip "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:ubuntu_cpp"'
-    - unzip -o build_artifacts.zip -d .
-    - rm -rf build_cpp
+    - DEPENDENCY_NAME="aidge_core"
+    - DEPENDENCY_JOB="build:ubuntu_cpp"
+    - !reference [.download_dependency, script]
 
     # Build current module
     - export CMAKE_PREFIX_PATH=../install_cpp
@@ -55,9 +58,9 @@ build:ubuntu_cpp_g++12:
   script:
     # Download dependencies
     # aidge_core
-    - 'curl --location --output build_artifacts.zip "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:ubuntu_cpp"'
-    - unzip -o build_artifacts.zip -d .
-    - rm -rf build_cpp
+    - DEPENDENCY_NAME="aidge_core"
+    - DEPENDENCY_JOB="build:ubuntu_cpp"
+    - !reference [.download_dependency, script]
 
     # Build current module
     - export CMAKE_PREFIX_PATH=../install_cpp
@@ -78,9 +81,9 @@ build:ubuntu_cpp_clang12:
   script:
     # Download dependencies
     # aidge_core
-    - 'curl --location --output build_artifacts.zip "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:ubuntu_cpp"'
-    - unzip -o build_artifacts.zip -d .
-    - rm -rf build_cpp
+    - DEPENDENCY_NAME="aidge_core"
+    - DEPENDENCY_JOB="build:ubuntu_cpp"
+    - !reference [.download_dependency, script]
 
     # Build current module
     - export CMAKE_PREFIX_PATH=../install_cpp
@@ -101,9 +104,9 @@ build:ubuntu_cpp_clang15:
   script:
     # Download dependencies
     # aidge_core
-    - 'curl --location --output build_artifacts.zip "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:ubuntu_cpp"'
-    - unzip -o build_artifacts.zip -d .
-    - rm -rf build_cpp
+    - DEPENDENCY_NAME="aidge_core"
+    - DEPENDENCY_JOB="build:ubuntu_cpp"
+    - !reference [.download_dependency, script]
 
     # Build current module
     - export CMAKE_PREFIX_PATH=../install_cpp
@@ -120,86 +123,92 @@ build:ubuntu_python:
   needs: []
   tags:
     - docker
+
   script:
     # Download dependencies
     # aidge_core (Python)
-    - 'curl --location --output build_artifacts.zip "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:ubuntu_python"'
-    - unzip -o build_artifacts.zip -d .
+    - DEPENDENCY_NAME="aidge_core"
+    - DEPENDENCY_JOB="build:ubuntu_python"
+    - !reference [.download_dependency, script]
 
     - python3 -m pip install virtualenv
     - virtualenv venv
     - source venv/bin/activate
     - python3 -m pip install -r requirements.txt
     - python3 -m pip install .
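+    # test:ubuntu_python reuses this venv/ artifact, so its test dependencies are installed here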
+    - python3 -m pip install numpy unittest-xml-reporting
+    - python3 -m pip list
   artifacts:
     expire_in: 1 week
     paths:
       - venv/
 
-# build:windows_cpp:
-#   stage: build
-#   needs: []
-#   tags:
-#     - windows
-
-#   image: buildtools
-#   before_script:
-#     # Install Chocolatey
-#     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-#     # Install dependencies
-#     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-#     - choco install git -Y
-#     - choco install python -Y
-#     # Update PATH
-#     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-#   script:
-#     # Download dependencies
-#     # aidge_core
-#     - 'curl "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:windows_cpp" -o build_artifacts.zip'
-#     - Expand-Archive -Path .\build_artifacts.zip -DestinationPath . -Force
-#     - Remove-Item .\build_cpp\ -Recurse
-
-#     - $env:CMAKE_PREFIX_PATH = '../install_cpp'
-#     - mkdir -p build_cpp
-#     - cd build_cpp
-#     - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug ..
-#     - cmake --build . -j2
-#     - cmake --install . --config Debug
-
-#   artifacts:
-#     expire_in: 1 week
-#     paths:
-#       - build_cpp/
-#       - install_cpp/
-
-# build:windows_python:
-#   stage: build
-#   needs: []
-#   tags:
-#     - windows
-
-#   image: buildtools
-#   before_script:
-#     # Install Chocolatey
-#     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-#     # Install dependencies
-#     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-#     - choco install git -Y
-#     - choco install python -Y
-#     # Update PATH
-#     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-#   script:
-#     # Download dependencies
-#     # aidge_core (Python)
-#     - 'curl "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:windows_python" -o build_artifacts.zip'
-#     - Expand-Archive -Path .\build_artifacts.zip -DestinationPath . -Force
-
-#     - python -m pip install virtualenv
-#     - virtualenv venv
-#     - venv\Scripts\Activate.ps1
-#     - python -m pip install -r requirements.txt
-#     - python -m pip install .
-#   artifacts:
-#     expire_in: 1 week
-#     paths:
-#       - venv/
+build:windows_cpp:
+  stage: build
+  needs: []
+  tags:
+    - windows
+
+  image: buildtools
+  before_script:
+    # Install Chocolatey
+    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+    # Install dependencies
+    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    - choco install git -Y
+    - choco install python -Y
+    # Update PATH
+    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+  script:
+    # Download dependencies
+    # aidge_core
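+    # PowerShell variant: dependency variables use a $ prefix and are consumed by the shared .download_dependency_windows script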
+    - $DEPENDENCY_NAME="aidge_core"
+    - $DEPENDENCY_JOB="build:windows_cpp"
+    - !reference [.download_dependency_windows, script]
+    - Remove-Item .\build_cpp\ -Recurse -Force -ErrorAction Ignore
+
+    - $env:CMAKE_PREFIX_PATH = '../install_cpp'
+    - mkdir -p build_cpp
+    - cd build_cpp
+    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug ..
+    - cmake --build . -j2
+    - cmake --install . --config Debug
+
+  artifacts:
+    expire_in: 1 week
+    paths:
+      - build_cpp/
+      - install_cpp/
+
+build:windows_python:
+  stage: build
+  needs: []
+  tags:
+    - windows
+
+  image: buildtools
+  before_script:
+    # Install Chocolatey
+    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+    # Install dependencies
+    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    - choco install git -Y
+    - choco install python -Y
+    # Update PATH
+    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+  script:
+    # Download dependencies
+    # aidge_core (Python)
+    - $DEPENDENCY_NAME="aidge_core"
+    - $DEPENDENCY_JOB="build:windows_python"
+    - !reference [.download_dependency_windows, script]
+
+    - python -m pip install virtualenv
+    - virtualenv venv
+    - venv\Scripts\Activate.ps1
+    - python -m pip install -r requirements.txt
+    - python -m pip install .
+  artifacts:
+    expire_in: 1 week
+    paths:
+      - venv/
diff --git a/.gitlab/ci/test.gitlab-ci.yml b/.gitlab/ci/test.gitlab-ci.yml
index 8f6b1e54109c4c2dcfa026fd477a93b6c0a1c641..3cada635eb25b3eb87e8318eb6e26723f7a27dd6 100644
--- a/.gitlab/ci/test.gitlab-ci.yml
+++ b/.gitlab/ci/test.gitlab-ci.yml
@@ -18,32 +18,31 @@ test:ubuntu_python:
   script:
     - source venv/bin/activate
     - cd ${CI_PROJECT_NAME}
-    - python3 -m pip install numpy unittest-xml-reporting
-    - python3 -m pip list
-    # Run on discovery all tests located in core/unit_tests/python and discard the stdout 
+
+    # Discover and run all the tests located in unit_tests/ and discard the stdout
     # only to show the errors/warnings and the results of the tests
     - python3 -m xmlrunner discover -s unit_tests/ -v -b --output-file xmlrunner-results.xml
   artifacts:
     reports:
       junit: ${CI_PROJECT_NAME}/xmlrunner-results.xml
 
-# test:windows_cpp:
-#   stage: test
-#   needs: ["build:windows_cpp"]
-#   tags:
-#     - windows
-#   image: buildtools
-#   before_script:
-#     # Install Chocolatey
-#     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-#     # Install dependencies
-#     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-#     - choco install python -Y
-#     # Update PATH
-#     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-#   script:
-#     - cd build_cpp
-#     - ctest --output-junit ctest-results.xml --output-on-failure
-#   artifacts:
-#     reports:
-#       junit: build_cpp/ctest-results.xml
+test:windows_cpp:
+  stage: test
+  needs: ["build:windows_cpp"]
+  tags:
+    - windows
+  image: buildtools
+  before_script:
+    # Install Chocolatey
+    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+    # Install dependencies
+    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    - choco install python -Y
+    # Update PATH
+    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+  script:
+    - cd build_cpp
+    - ctest --output-junit ctest-results.xml --output-on-failure
+  artifacts:
+    reports:
+      junit: build_cpp/ctest-results.xml
diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp
index f78598057cafe0b5b02d268bd5a73ede5a2981d8..2020c9dbcd1b0ed690e499bca44bbb70c49f7e45 100644
--- a/include/aidge/backend/cpu.hpp
+++ b/include/aidge/backend/cpu.hpp
@@ -21,18 +21,23 @@
 #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
 #include "aidge/backend/cpu/operator/ConvImpl.hpp"
 #include "aidge/backend/cpu/operator/DivImpl.hpp"
+#include "aidge/backend/cpu/operator/ErfImpl.hpp"
 #include "aidge/backend/cpu/operator/FCImpl.hpp"
+#include "aidge/backend/cpu/operator/GatherImpl.hpp"
 #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
 #include "aidge/backend/cpu/operator/MulImpl.hpp"
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
 #include "aidge/backend/cpu/operator/PowImpl.hpp"
 #include "aidge/backend/cpu/operator/ProducerImpl.hpp"
+#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
+#include "aidge/backend/cpu/operator/ReshapeImpl.hpp"
 #include "aidge/backend/cpu/operator/ScalingImpl.hpp"
 #include "aidge/backend/cpu/operator/SliceImpl.hpp"
 #include "aidge/backend/cpu/operator/SqrtImpl.hpp"
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
+#include "aidge/backend/cpu/operator/TransposeImpl.hpp"
 
 #endif /* AIDGE_CPU_IMPORTS_H_ */
\ No newline at end of file
diff --git a/include/aidge/backend/cpu/data/GetCPUPtr.h b/include/aidge/backend/cpu/data/GetCPUPtr.h
index 38ea848afc29fa4c23ff500f97e0c57954695021..47e3b07e8fa08cdcd714745a9a49bb03e30f79f5 100644
--- a/include/aidge/backend/cpu/data/GetCPUPtr.h
+++ b/include/aidge/backend/cpu/data/GetCPUPtr.h
@@ -16,7 +16,8 @@
 
 namespace Aidge {
 inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) {
-  return std::static_pointer_cast<Tensor>(data)->getImpl()->rawPtr();
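+  // Return the host pointer shifted by the tensor's offset within its implementation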
+  const auto tensor = std::static_pointer_cast<Tensor>(data);
+  return tensor->getImpl()->hostPtr(tensor->getImplOffset());
 }
 } // namespace Aidge
 
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index c451b4a5beccacb7980c834d56b979c1b76cdd3f..46dfae3d53b4b201507290bd538ea13737919c3e 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -25,8 +25,6 @@ namespace Aidge {
 template <class T>
 class TensorImpl_cpu : public TensorImpl {
 private:
-    const Tensor &mTensor;  // Impl needs to access Tensor information, but is not
-                            // supposed to change it!
     /// Pointer to the data and its capacity
     future_std::span<T> mData;
     /// If this instance own the data, std::unique_ptr manages it
@@ -35,88 +33,87 @@ private:
 public:
     static constexpr const char *Backend = "cpu";
 
-    TensorImpl_cpu(const Tensor &tensor) : TensorImpl(Backend), mTensor(tensor) {}
+    TensorImpl_cpu(DeviceIdx_t device, NbElts_t length) : TensorImpl(Backend, device, length) {}
 
     bool operator==(const TensorImpl &otherImpl) const override final {
         const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
-        AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mTensor.size());
+        AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts);
 
         std::size_t i = 0;
-        for (; i < mTensor.size() &&
+        for (; i < mNbElts &&
                *(mData.data()+i) == *static_cast<const T*>(typedOtherImpl.rawPtr(i));
                ++i) {
         }
-        return i == mTensor.size();
+        return i == mNbElts;
     }
 
-    static std::unique_ptr<TensorImpl_cpu> create(const Tensor &tensor) {
-        return std::make_unique<TensorImpl_cpu<T>>(tensor);
+    static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, NbElts_t length) {
+        return std::make_shared<TensorImpl_cpu<T>>(device, length);
     }
 
-    inline std::size_t size() const noexcept override final { return mData.size(); }
     inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
 
-    void setDevice(DeviceIdx_t device) override final {
-        AIDGE_ASSERT(device == 0, "device cannot be != 0 for CPU backend");
-    }
-
     void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
-        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
-        std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
-                  static_cast<T *>(rawPtr()) + offset);
+        const T* srcT = static_cast<const T *>(src);
+        T* dstT = static_cast<T *>(rawPtr(offset));
+
+        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
+        AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported");
+        std::copy(srcT, srcT + length, dstT);
     }
 
-    void copyCast(const void *src, NbElts_t length, const DataType srcDt) override final {
+    void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final {
         if (length == 0) {
             return;
         }
 
-        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
+        T* dstT = static_cast<T *>(rawPtr(offset));
+        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
         switch (srcDt)
         {
             case DataType::Float64:
                 std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::Float32:
                 std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::Float16:
                 std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::Int64:
                 std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::UInt64:
                 std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::Int32:
                 std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::UInt32:
                 std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::Int16:
                 std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::UInt16:
                 std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::Int8:
                 std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::UInt8:
                 std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             default:
                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
@@ -124,21 +121,20 @@ public:
         }
     }
 
-    void copyFromDevice(const void *src, NbElts_t length, const std::pair<std::string, DeviceIdx_t>& device) override final {
+    void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
         AIDGE_ASSERT(device.first == Backend, "backend must match");
         AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
-        copy(src, length);
+        copy(src, length, offset);
     }
 
-    inline void copyFromHost(const void *src, NbElts_t length) override final {
-        copy(src, length);
+    inline void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
+        copy(src, length, offset);
     }
 
-    void copyToHost(void *dst, NbElts_t length) const override final {
-        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
-        const T* src = static_cast<const T*>(rawPtr());
-        std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
-                  static_cast<T *>(dst));
+    void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
+        const T* src = static_cast<const T*>(rawPtr(offset));
+        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
+        std::copy(src, src + length, static_cast<T *>(dst));
     }
 
     void *rawPtr(NbElts_t offset = 0) override final {
@@ -147,7 +143,7 @@ public:
     };
 
     const void *rawPtr(NbElts_t offset = 0) const override final {
-        AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const rawPtr");
+        AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const rawPtr");
         return (mData.data() + offset);
     };
 
@@ -157,12 +153,12 @@ public:
     };
 
     const void *hostPtr(NbElts_t offset = 0) const override final {
-        AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const hostPtr");
+        AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const hostPtr");
         return (mData.data() + offset);
     };
 
     void setRawPtr(void *ptr, NbElts_t length) override final {
-        AIDGE_ASSERT(length >= mTensor.size(), "trying to set raw pointer of insufficient capacity");
+        AIDGE_ASSERT(length >= mNbElts, "trying to set raw pointer of insufficient capacity");
         mData = future_std::span<T>(static_cast<T *>(ptr), length);
         mDataOwner.reset();
     };
@@ -171,11 +167,11 @@ public:
 
 private:
     void lazyInit() {
-        if (mData.size() < mTensor.size()) {
+        if (mData.size() < mNbElts) {
             // Need more data, a re-allocation will occur
             AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");
-            mDataOwner.reset(new T[mTensor.size()]);
-            mData = future_std::span<T>(mDataOwner.get(), mTensor.size());
+            mDataOwner.reset(new T[mNbElts]);
+            mData = future_std::span<T>(mDataOwner.get(), mNbElts);
         }
     }
 };
diff --git a/include/aidge/backend/cpu/operator/ErfImpl.hpp b/include/aidge/backend/cpu/operator/ErfImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..5c0a6fd49f4e2d435eed8e8baa979f59dbd84e68
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/ErfImpl.hpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_ERFIMPL_H_
+#define AIDGE_CPU_OPERATOR_ERFIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Erf.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// class Erf_Op;
+
+// compute kernel registry for forward and backward
+class ErfImplForward_cpu
+    : public Registrable<ErfImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+};
+class ErfImplBackward_cpu
+    : public Registrable<ErfImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+};
+
+class ErfImpl_cpu : public OperatorImpl {
+public:
+    ErfImpl_cpu(const Erf_Op& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<ErfImpl_cpu> create(const Erf_Op& op) {
+        return std::make_unique<ErfImpl_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+namespace {
+static Registrar<Erf_Op> registrarErfImpl_cpu("cpu", Aidge::ErfImpl_cpu::create);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_ERFIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..bb92401b6e72b1528d0342474bf394a7c29a4042
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp
@@ -0,0 +1,45 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_ERFIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_ERFIMPL_FORWARD_KERNEL_H_
+
+#include <cmath>
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/ErfImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void ErfImpl_cpu_forward_kernel(std::size_t inputLength,
+                                     const void* input_,
+                                     void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
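+    // Apply the Gauss error function element-wise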
+    for (std::size_t i = 0; i < inputLength; ++i) {
+        output[i] = std::erf(input[i]);
+    }
+}
+
+namespace {
+static Registrar<ErfImplForward_cpu> registrarErfImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::ErfImpl_cpu_forward_kernel<float, float>);
+static Registrar<ErfImplForward_cpu> registrarErfImplForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::ErfImpl_cpu_forward_kernel<int, int>);
+static Registrar<ErfImplForward_cpu> registrarErfImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::ErfImpl_cpu_forward_kernel<double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_ERFIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/GatherImpl.hpp b/include/aidge/backend/cpu/operator/GatherImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..1d235ff14ca01955c268a7b061e6ecb7b2bbbb2a
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/GatherImpl.hpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_GATHERIMPL_H_
+#define AIDGE_CPU_OPERATOR_GATHERIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Gather.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// class Gather_Op;
+
+// compute kernel registry for forward and backward
+class GatherImplForward_cpu
+    : public Registrable<GatherImplForward_cpu, std::tuple<DataType, DataType>, void(const typename Gather_Op::Attrs&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+class GatherImplBackward_cpu
+    : public Registrable<GatherImplBackward_cpu, std::tuple<DataType, DataType>, void(const typename Gather_Op::Attrs&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+
+class GatherImpl_cpu : public OperatorImpl {
+public:
+    GatherImpl_cpu(const Gather_Op& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<GatherImpl_cpu> create(const Gather_Op& op) {
+        return std::make_unique<GatherImpl_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+namespace {
+static Registrar<Gather_Op> registrarGatherImpl_cpu("cpu", Aidge::GatherImpl_cpu::create);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_GATHERIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0d312e3c143720c7d920128c8d484d4c68439a24
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp
@@ -0,0 +1,66 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_GATHERIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_GATHERIMPL_FORWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int64_t
+#include <algorithm>  // std::copy_n
+#include <iterator>   // std::next
+#include "aidge/data/Data.hpp"
+#include "aidge/utils/Types.h"
+
+#include "aidge/backend/cpu/operator/GatherImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void GatherImpl_cpu_forward_kernel(const typename Gather_Op::Attrs& attrs, const std::vector<DimSize_t>& inputDims, const void* input_, void* output_)
+{
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
+    const std::size_t axisIdx = std::get<2>(attrs)>=0 ?
+                                std::get<2>(attrs) :
+                                static_cast<std::size_t>(std::get<2>(attrs)) + inputDims.size();
+
+    std::size_t postAxisElems = 1;
+    for (std::size_t i = axisIdx + 1; i < inputDims.size(); ++i) {
+        postAxisElems *= inputDims[i];
+    }
+    std::size_t preAxisElems = 1;
+    for (std::size_t i = 0; i < axisIdx; ++i) {
+        preAxisElems *= inputDims[i];
+    }
+
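+    // The input is viewed as [preAxisElems, inputDims[axisIdx], postAxisElems]: for each outer
+    // slice, the rows selected by the (possibly negative) indices are copied contiguously.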
+    const std::vector<std::int64_t> indices = std::get<0>(attrs);
+    for (std::size_t i=0; i<preAxisElems; ++i)
+    {
+        for(std::size_t j=0; j<indices.size(); ++j)
+        {
+            const std::size_t idx = indices[j] >= 0 ? indices[j] : static_cast<std::size_t>(indices[j]) + inputDims[axisIdx];
+            const I* startPtr = std::next(input, i * postAxisElems * inputDims[axisIdx] + idx * postAxisElems);
+            std::copy_n(startPtr, postAxisElems, output);
+            output += postAxisElems;
+        }
+    }
+}
+
+namespace {
+static Registrar<GatherImplForward_cpu> registrarGatherImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::GatherImpl_cpu_forward_kernel<float, float>);
+static Registrar<GatherImplForward_cpu> registrarGatherImplForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::GatherImpl_cpu_forward_kernel<int, int>);
+static Registrar<GatherImplForward_cpu> registrarGatherImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::GatherImpl_cpu_forward_kernel<double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_GATHERIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp
index 761b9579c3c3dc187e4b0fac24812fa77f916e65..d10b32e18ee983fc1270bc4a7cce35e18f601071 100644
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp
@@ -25,7 +25,7 @@ void LeakyReLUImpl_cpu_forward_kernel(const LeakyReLU_Op::Attrs& attrs,
 
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
-    I negativeSlope = static_cast<I>(std::get<0>(attrs));
+    const I negativeSlope = static_cast<I>(std::get<0>(attrs));
 
     for (std::size_t i = 0; i < inputLenght; ++i) {
         output[i] = input[i] >= 0 ? input[i] : input[i] * negativeSlope;
diff --git a/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9b85eb812caffca3820a711d46775e1134db863f
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
@@ -0,0 +1,104 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_H_
+#define AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_H_
+
+#include <array>
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/ReduceMean.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+// class ReduceMean_Op;
+
+// compute kernel registry for forward and backward
+// DIM 1
+class ReduceMeanImpl1DForward_cpu
+    : public Registrable<ReduceMeanImpl1DForward_cpu,
+                         std::tuple<DataType, DataType>,
+                         void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+class ReduceMeanImpl1DBackward_cpu
+    : public Registrable<ReduceMeanImpl1DBackward_cpu,
+                         std::tuple<DataType, DataType>,
+                         void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
+
+// DIM 2
+class ReduceMeanImpl2DForward_cpu
+    : public Registrable<ReduceMeanImpl2DForward_cpu,
+                         std::tuple<DataType, DataType>,
+                         void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+class ReduceMeanImpl2DBackward_cpu
+    : public Registrable<ReduceMeanImpl2DBackward_cpu,
+                         std::tuple<DataType, DataType>,
+                         void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
+// DIM 3
+class ReduceMeanImpl3DForward_cpu
+    : public Registrable<ReduceMeanImpl3DForward_cpu,
+                         std::tuple<DataType, DataType>,
+                         void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+class ReduceMeanImpl3DBackward_cpu
+    : public Registrable<ReduceMeanImpl3DBackward_cpu,
+                         std::tuple<DataType, DataType>,
+                         void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+
+class ReduceMeanImpl1D_cpu : public OperatorImpl {
+   public:
+    ReduceMeanImpl1D_cpu(const ReduceMean_Op<1>& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<ReduceMeanImpl1D_cpu> create(const ReduceMean_Op<1> &op) {
+        return std::make_unique<ReduceMeanImpl1D_cpu>(op);
+    }
+
+   public:
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+class ReduceMeanImpl2D_cpu : public OperatorImpl {
+   public:
+    ReduceMeanImpl2D_cpu(const ReduceMean_Op<2>& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<ReduceMeanImpl2D_cpu> create(const ReduceMean_Op<2> &op) {
+        return std::make_unique<ReduceMeanImpl2D_cpu>(op);
+    }
+
+   public:
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+class ReduceMeanImpl3D_cpu : public OperatorImpl {
+   public:
+    ReduceMeanImpl3D_cpu(const ReduceMean_Op<3>& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<ReduceMeanImpl3D_cpu> create(const ReduceMean_Op<3> &op) {
+        return std::make_unique<ReduceMeanImpl3D_cpu>(op);
+    }
+
+   public:
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+namespace {
+// add cpu backend to ReduceMean_Op<1,2,3> implementation registry
+static Registrar<ReduceMean_Op<1>> registrarReduceMeanImpl1D_cpu("cpu", Aidge::ReduceMeanImpl1D_cpu::create);
+static Registrar<ReduceMean_Op<2>> registrarReduceMeanImpl2D_cpu("cpu", Aidge::ReduceMeanImpl2D_cpu::create);
+static Registrar<ReduceMean_Op<3>> registrarReduceMeanImpl3D_cpu("cpu", Aidge::ReduceMeanImpl3D_cpu::create);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..46eb61f2f03acd47d74725ade1425a92f028690c
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp
@@ -0,0 +1,132 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_FORWARD_KERNEL_H_
+
+#include <cstddef>
+#include <algorithm>   // std::for_each, std::transform
+#include <numeric>     //std::accumulate
+#include <functional>  //std::multiplies
+
+#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/operator/ReduceMean.hpp"
+#include "aidge/utils/Registrar.hpp"
+
+namespace Aidge {
+template <class I, class O, DimSize_t DIM>
+void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs& attrs,
+                                     const std::vector<DimSize_t>& inputDims,
+                                     const void* input_,
+                                     void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
+    const std::size_t nb_dims = inputDims.size();
+    const std::size_t totalElements = std::accumulate(inputDims.cbegin(), inputDims.cend(), 1, std::multiplies<std::size_t>());
+
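+    // DIM == 1: a single reduction axis is collapsed directly into the output.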
+    if (DIM == 1) {
+        const std::size_t stride_pre = std::accumulate(inputDims.cbegin(), inputDims.cbegin() + std::get<0>(attrs)[0], 1, std::multiplies<std::size_t>());
+        const std::size_t stride_post = std::accumulate(inputDims.crbegin(), inputDims.crbegin() + nb_dims -1 - std::get<0>(attrs)[0], 1, std::multiplies<std::size_t>());
+
+        const std::size_t dim_i = inputDims[std::get<0>(attrs)[0]];
+        for (std::size_t pre = 0; pre < stride_pre; ++pre) {
+            for (std::size_t post = 0; post < stride_post; ++post) {
+                const std::size_t idx_i = pre * dim_i * stride_post + post;
+                const std::size_t idx_o = pre * stride_post + post;
+                output[idx_o] = input[idx_i];
+                for (std::size_t i = 1; i < dim_i; ++i) {
+                    output[idx_o] += input[idx_i + i*stride_post];
+                }
+                output[idx_o] /= dim_i;
+            }
+        }
+    } else {
+        std::size_t outputElements = totalElements;
+
+        std::size_t *stride_post = new std::size_t[nb_dims];
+        stride_post[nb_dims - 1] = 1;
+        for (std::size_t i = nb_dims-2; i != static_cast<std::size_t>(-1); --i) {
+            stride_post[i] = stride_post[i+1]*inputDims[i+1];
+        }
+        std::size_t *stride_pre = new std::size_t[nb_dims];
+        stride_pre[0] = 1;
+        for (std::size_t i = 1; i < nb_dims; ++i) {
+            stride_pre[i] = stride_pre[i-1]*inputDims[i-1];
+        }
+
+        const I* inputAccumulation = input;
+        I* outputAccumulation = nullptr;
+
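+        // Reduce the requested axes one at a time: each pass sums one axis into a fresh buffer,
+        // then stride_pre is rescaled so indexing stays consistent on the shrunken data.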
+        for (const auto& axisInt : std::get<0>(attrs)) {
+            const std::size_t a = static_cast<std::size_t>(axisInt);
+            outputElements /= inputDims[a];
+            outputAccumulation = new I[outputElements];
+            const std::size_t dim_i = inputDims[a];
+            for (std::size_t pre = 0; pre < stride_pre[a]; ++pre) {
+                for (std::size_t post = 0; post < stride_post[a]; ++post) {
+                    const std::size_t idx_i = pre * dim_i * stride_post[a] + post;
+                    const std::size_t idx_o = pre * stride_post[a] + post;
+                    outputAccumulation[idx_o] = inputAccumulation[idx_i];
+                    for (std::size_t i = 1; i < dim_i; ++i) {
+                        outputAccumulation[idx_o] += inputAccumulation[idx_i + i*stride_post[a]];
+                    }
+                }
+            }
+            std::for_each(stride_pre+a+1, stride_pre+nb_dims, [dim_i] (std::size_t& val) { val /= dim_i; });
+            if (inputAccumulation != input) {
+                delete[] inputAccumulation;
+            }
+            inputAccumulation = outputAccumulation;
+        }
+
+        // Copy elements from inputAccumulation to output while dividing by divisor
+        I divisor = totalElements / outputElements;
+        std::transform(inputAccumulation, inputAccumulation + outputElements, output,
+                    [divisor](I element) { return element / divisor; });
+        if (outputAccumulation) {
+            delete[] outputAccumulation;
+        }
+        delete[] stride_post;
+        delete[] stride_pre;
+    }
+}
+
+namespace {
+// DIM = 1
+static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,1>);
+static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,1>);
+static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,1>);
+
+// DIM = 2
+static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,2>);
+static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,2>);
+static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,2>);
+
+// DIM = 3
+static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,3>);
+static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,3>);
+static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,3>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ReshapeImpl.hpp b/include/aidge/backend/cpu/operator/ReshapeImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..d5754b34e952d52b2071744e9f8e863074ef9fa3
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/ReshapeImpl.hpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_RESHAPEIMPL_H_
+#define AIDGE_CPU_OPERATOR_RESHAPEIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Reshape.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// class Reshape_Op;
+
+// compute kernel registry for forward and backward
+class ReshapeImplForward_cpu
+    : public Registrable<ReshapeImplForward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const void*, void*)> {
+};
+class ReshapeImplBackward_cpu
+    : public Registrable<ReshapeImplBackward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const void*, void*)> {
+};
+
+class ReshapeImpl_cpu : public OperatorImpl {
+public:
+    ReshapeImpl_cpu(const Reshape_Op& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<ReshapeImpl_cpu> create(const Reshape_Op& op) {
+        return std::make_unique<ReshapeImpl_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+namespace {
+static Registrar<Reshape_Op> registrarReshapeImpl_cpu("cpu", Aidge::ReshapeImpl_cpu::create);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_RESHAPEIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..cefdab57ee41ffab0b98a87698d95f5d89a0206d
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp
@@ -0,0 +1,45 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_RESHAPEIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_RESHAPEIMPL_FORWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+#include <algorithm>  // std::copy_n
+
+#include "aidge/backend/cpu/operator/ReshapeImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void ReshapeImpl_cpu_forward_kernel(std::size_t inputLength,
+                                    const void* input_,
+                                    void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
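+    // Reshape does not change the data: forwarding is a plain element-wise copy.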
+    std::copy_n(input, inputLength, output);
+}
+
+namespace {
+static Registrar<ReshapeImplForward_cpu> registrarReshapeImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32},
+        Aidge::ReshapeImpl_cpu_forward_kernel<float, float>);
+static Registrar<ReshapeImplForward_cpu> registrarReshapeImplForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32},
+        Aidge::ReshapeImpl_cpu_forward_kernel<int, int>);
+static Registrar<ReshapeImplForward_cpu> registrarReshapeImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64},
+        Aidge::ReshapeImpl_cpu_forward_kernel<double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_RESHAPEIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp
index 9f08fab758a1d8c717ccb5f0a0357f94fd86e5e4..d92e9008aff2a4e3c9e392fcc51871001020ce5a 100644
--- a/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp
@@ -35,7 +35,7 @@ void SliceImpl_cpu_forward_kernel(const typename Slice_Op::Attrs& attrs,
         const std::int64_t axis_ = std::get<2>(attrs)[i];
         const std::int64_t start_ = std::get<0>(attrs)[i];
         const std::int64_t end_ = std::get<1>(attrs)[i];
-        const std::size_t axis = axis_ >= 0 ? axis_ : static_cast<std::size_t>(axis_ + static_cast<std::int32_t>(inputDims.size()));
+        const std::size_t axis = axis_ >= 0 ? axis_ : static_cast<std::size_t>(axis_) + inputDims.size();
         const std::size_t start = start_ >= 0 ? start_ : start_ + inputDims[axis];
         const std::size_t end = end_ >= 0 ? end_ : end_ + inputDims[axis];
         std::size_t stride = 1;
diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
index 15fb2b5d30e32febca7c8028c8b5212e5b96775f..005b52f646f9e9ddf14af09cc22d9e2a44ba6dd4 100644
--- a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class SoftmaxImplForward_cpu
-    : public Registrable<SoftmaxImplForward_cpu, std::tuple<DataType, DataType>, void(const DimSize_t, const DimSize_t, const DimSize_t, const void*, void*)> {
+    : public Registrable<SoftmaxImplForward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const std::vector<DimSize_t>&, const void*, void*)> {
 };
 class SoftmaxImplBackward_cpu
-    : public Registrable<SoftmaxImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<SoftmaxImplBackward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const std::vector<DimSize_t>&, const void*, void*)> {
 };
 
 class SoftmaxImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp
index a5a168a08cf85e952cffd556e0cc34d29d35fffa..cc384c38e34d01887fc328d11de383aeef39fb8e 100644
--- a/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp
@@ -23,30 +23,33 @@
 
 namespace Aidge {
 template <class I, class O>
-void SoftmaxImpl_cpu_forward_kernel(const DimSize_t batchSize,
-                                        const DimSize_t channelSize,
-                                        const DimSize_t featureSize,
-                                        const void* input_,
-                                        void* output_) {
-
+void SoftmaxImpl_cpu_forward_kernel(std::size_t axisIdx, const std::vector<DimSize_t>& inputDims, const void* input_, void* output_)
+{
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
-    for (std::size_t batch = 0; batch < batchSize; ++batch) {
-        for (std::size_t feature = 0; feature < featureSize; ++feature) {
-            std::size_t ioIndex = batch*channelSize*featureSize + feature;
+    std::size_t postAxisElems = 1;
+    for (std::size_t i = axisIdx + 1; i < inputDims.size(); ++i) {
+        postAxisElems *= inputDims[i];
+    }
+    std::size_t preAxisElems = 1;
+    for (std::size_t i = 0; i < axisIdx; ++i) {
+        preAxisElems *= inputDims[i];
+    }
 
-            I sum(0.0);
-            for (std::size_t ch = 0; ch < channelSize; ++ch) {
-                output[ioIndex] = std::exp(input[ioIndex]);
-                sum += output[ioIndex];
-                ioIndex+=featureSize;
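+    // Softmax is computed independently over each (pre, post) slice along axisIdx.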
+    for (std::size_t i = 0; i < preAxisElems; ++i) {
+        for (std::size_t j = 0; j < postAxisElems; ++j) {
+            // Calculate sum of exponentials within the axis
+            I sumExp = 0;
+            for (std::size_t k = 0; k < inputDims[axisIdx]; ++k) {
+                std::size_t inIdx = i * inputDims[axisIdx] * postAxisElems + k * postAxisElems + j;
+                sumExp += std::exp(input[inIdx]);
             }
 
-            ioIndex = batch*channelSize*featureSize + feature;
-            for (std::size_t ch = 0; ch < channelSize; ++ch) {
-                output[ioIndex] /= sum;
-                ioIndex += featureSize;
+            // Calculate softmax for the current slice along the axis
+            for (std::size_t  k = 0; k < inputDims[axisIdx]; ++k) {
+                std::size_t inIdx = i * inputDims[axisIdx] * postAxisElems + k * postAxisElems + j;
+                output[inIdx] = std::exp(input[inIdx]) / sumExp;
             }
         }
     }
diff --git a/include/aidge/backend/cpu/operator/TransposeImpl.hpp b/include/aidge/backend/cpu/operator/TransposeImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..712e672752648f5ff8a3c073f6c81bbe7cc85d9d
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/TransposeImpl.hpp
@@ -0,0 +1,123 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_TransposeIMPL_H_
+#define AIDGE_CPU_OPERATOR_TransposeIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Transpose.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// class Transpose_Op;
+
+// compute kernel registry for forward and backward
+class TransposeImpl2DForward_cpu
+    : public Registrable<TransposeImpl2DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<2>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+class TransposeImpl3DForward_cpu
+    : public Registrable<TransposeImpl3DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<3>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+class TransposeImpl4DForward_cpu
+    : public Registrable<TransposeImpl4DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<4>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+class TransposeImpl5DForward_cpu
+    : public Registrable<TransposeImpl5DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<5>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+class TransposeImpl6DForward_cpu
+    : public Registrable<TransposeImpl6DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<6>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+class TransposeImpl2DBackward_cpu
+    : public Registrable<TransposeImpl2DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<2>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+class TransposeImpl3DBackward_cpu
+    : public Registrable<TransposeImpl3DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<3>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+class TransposeImpl4DBackward_cpu
+    : public Registrable<TransposeImpl4DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<4>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+class TransposeImpl5DBackward_cpu
+    : public Registrable<TransposeImpl5DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<5>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+class TransposeImpl6DBackward_cpu
+    : public Registrable<TransposeImpl6DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<6>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+
+
+class TransposeImpl2D_cpu : public OperatorImpl {
+public:
+    TransposeImpl2D_cpu(const Transpose_Op<2>& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<TransposeImpl2D_cpu> create(const Transpose_Op<2>& op) {
+        return std::make_unique<TransposeImpl2D_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+class TransposeImpl3D_cpu : public OperatorImpl {
+public:
+    TransposeImpl3D_cpu(const Transpose_Op<3>& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<TransposeImpl3D_cpu> create(const Transpose_Op<3>& op) {
+        return std::make_unique<TransposeImpl3D_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+class TransposeImpl4D_cpu : public OperatorImpl {
+public:
+    TransposeImpl4D_cpu(const Transpose_Op<4>& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<TransposeImpl4D_cpu> create(const Transpose_Op<4>& op) {
+        return std::make_unique<TransposeImpl4D_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+class TransposeImpl5D_cpu : public OperatorImpl {
+public:
+    TransposeImpl5D_cpu(const Transpose_Op<5>& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<TransposeImpl5D_cpu> create(const Transpose_Op<5>& op) {
+        return std::make_unique<TransposeImpl5D_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+class TransposeImpl6D_cpu : public OperatorImpl {
+public:
+    TransposeImpl6D_cpu(const Transpose_Op<6>& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<TransposeImpl6D_cpu> create(const Transpose_Op<6>& op) {
+        return std::make_unique<TransposeImpl6D_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+namespace {
+static Registrar<Transpose_Op<2>> registrarTransposeImpl2D_cpu("cpu", Aidge::TransposeImpl2D_cpu::create);
+static Registrar<Transpose_Op<3>> registrarTransposeImpl3D_cpu("cpu", Aidge::TransposeImpl3D_cpu::create);
+static Registrar<Transpose_Op<4>> registrarTransposeImpl4D_cpu("cpu", Aidge::TransposeImpl4D_cpu::create);
+static Registrar<Transpose_Op<5>> registrarTransposeImpl5D_cpu("cpu", Aidge::TransposeImpl5D_cpu::create);
+static Registrar<Transpose_Op<6>> registrarTransposeImpl6D_cpu("cpu", Aidge::TransposeImpl6D_cpu::create);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_TransposeIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9fd5e5b58ed8e850c0a902e2de93b65cc75d274a
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp
@@ -0,0 +1,110 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_TRANSPOSEIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_TRANSPOSEIMPL_FORWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+#include <cstddef>
+#include <cmath>
+#include "aidge/data/Data.hpp"
+#include "aidge/utils/Types.h"
+
+#include "aidge/backend/cpu/operator/TransposeImpl.hpp"
+
+namespace Aidge {
+template <class I, class O, DimSize_t DIM>
+void TransposeImpl_cpu_forward_kernel( const typename Transpose_Op<DIM>::Attrs& attrs, const std::vector<DimSize_t>& inputDims, const std::vector<DimSize_t>& outputDims, const void* input_, void* output_)
+{
+    O* output = static_cast<O*>(output_);
+    const I* input = static_cast<const I*>(input_);
+
+    // Compute total number of elements in the input array
+    size_t totalElements = 1;
+    for (size_t dimSize : inputDims) {
+        totalElements *= dimSize;
+    }
+
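+    // Row-major strides of the output tensor: outStrides[i] = product of outputDims[i+1 .. DIM-1]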
+    std::vector<std::size_t> outStrides(DIM, 1);
+    for (std::size_t i = 0; i < DIM; ++i) {
+        for (std::size_t j = i + 1; j < DIM; ++j) {
+            outStrides[i] *= outputDims[j];
+        }
+    }
+
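+    // Walk the input linearly (row-major) while maintaining its multi-dimensional index in 'indices';
+    // the index is permuted according to the OutputDimsOrder attribute to locate the destination element.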
+    std::vector<size_t> indices(inputDims.size(), 0);
+    for (size_t i = 0; i < totalElements; ++i) {
+        size_t idx = 0;
+        // Permute indices based on OutputDimsOrder attr
+        std::vector<size_t> permutedIndices(DIM);
+        for (size_t j = 0; j < DIM; ++j) {
+            permutedIndices[j] = indices[std::get<0>(attrs)[j]];
+        }
+
+        for (int j = DIM - 1; j >= 0; --j) {
+            idx += permutedIndices[j] * outStrides[j];
+        }
+        // Copy the value in output
+        output[idx] = input[i];
+
+        // Update indices for the next iteration
+        for (int j = DIM - 1; j >= 0; --j) {
+            if (indices[j] < inputDims[j] - 1) {
+                indices[j]++;
+                break;
+            } else {
+                indices[j] = 0;
+            }
+        }
+    }
+
+}
+namespace {
+// DIM = 2
+static Registrar<TransposeImpl2DForward_cpu> registrarTransposeImpl2DForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 2>);
+static Registrar<TransposeImpl2DForward_cpu> registrarTransposeImpl2DForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 2>);
+static Registrar<TransposeImpl2DForward_cpu> registrarTransposeImpl2DForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 2>);
+// DIM = 3
+static Registrar<TransposeImpl3DForward_cpu> registrarTransposeImpl3DForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 3>);
+static Registrar<TransposeImpl3DForward_cpu> registrarTransposeImpl3DForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 3>);
+static Registrar<TransposeImpl3DForward_cpu> registrarTransposeImpl3DForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 3>);
+// DIM = 4
+static Registrar<TransposeImpl4DForward_cpu> registrarTransposeImpl4DForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 4>);
+static Registrar<TransposeImpl4DForward_cpu> registrarTransposeImpl4DForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 4>);
+static Registrar<TransposeImpl4DForward_cpu> registrarTransposeImpl4DForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 4>);
+// DIM = 5
+static Registrar<TransposeImpl5DForward_cpu> registrarTransposeImpl5DForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 5>);
+static Registrar<TransposeImpl5DForward_cpu> registrarTransposeImpl5DForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 5>);
+static Registrar<TransposeImpl5DForward_cpu> registrarTransposeImpl5DForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 5>);
+// DIM = 6
+static Registrar<TransposeImpl6DForward_cpu> registrarTransposeImpl6DForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 6>);
+static Registrar<TransposeImpl6DForward_cpu> registrarTransposeImpl6DForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 6>);
+static Registrar<TransposeImpl6DForward_cpu> registrarTransposeImpl6DForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 6>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_TRANSPOSEIMPL_FORWARD_KERNEL_H_ */
diff --git a/src/operator/ErfImpl.cpp b/src/operator/ErfImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..06ec65008aee41215192cd05e126ac4f82388c1b
--- /dev/null
+++ b/src/operator/ErfImpl.cpp
@@ -0,0 +1,40 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <chrono>  // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread>  // std::this_thread::sleep_for
+#include <vector>
+
+#include "aidge/operator/Erf.hpp"
+#include "aidge/utils/Types.h"
+
+#include "aidge/backend/cpu/operator/ErfImpl.hpp"
+#include "aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp"
+
+Aidge::NbElts_t Aidge::ErfImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+
+void Aidge::ErfImpl_cpu::forward() {
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<ErfImplForward_cpu>::create({
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
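+    // The Erf kernel operates element-wise, so only the total element count and raw pointers are needed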
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
diff --git a/src/operator/GatherImpl.cpp b/src/operator/GatherImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ce98627d95e0d05541db1ccaf4896abe756431b0
--- /dev/null
+++ b/src/operator/GatherImpl.cpp
@@ -0,0 +1,40 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <chrono>  // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread>  // std::this_thread::sleep_for
+#include <vector>
+
+#include "aidge/operator/Gather.hpp"
+#include "aidge/utils/Types.h"
+
+#include "aidge/backend/cpu/operator/GatherImpl.hpp"
+#include "aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp"
+
+Aidge::NbElts_t Aidge::GatherImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+
+void Aidge::GatherImpl_cpu::forward() {
+
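+    // Find the correct kernel type; the gathered indices and axis are provided through the operator's static attributes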
+    auto kernelFunc = Registrar<GatherImplForward_cpu>::create({
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const Gather_Op&>(mOp).getStaticAttributes(),
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
diff --git a/src/operator/ReduceMeanImpl.cpp b/src/operator/ReduceMeanImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e31a53d84947e5b2ced14ee9ee6e2badaef07071
--- /dev/null
+++ b/src/operator/ReduceMeanImpl.cpp
@@ -0,0 +1,79 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <chrono>  // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread>  // std::this_thread::sleep_for
+#include <vector>
+
+#include "aidge/utils/Types.h"
+#include "aidge/operator/ReduceMean.hpp"
+
+#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
+#include "aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp"
+Aidge::NbElts_t Aidge::ReduceMeanImpl1D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+Aidge::NbElts_t Aidge::ReduceMeanImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+Aidge::NbElts_t Aidge::ReduceMeanImpl3D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+
+void Aidge::ReduceMeanImpl1D_cpu::forward() {
+
+    // Find the correct kernel type
+    auto kernelFunc =
+            Registrar<ReduceMeanImpl1DForward_cpu>::create({
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const ReduceMean_Op<1>&>(mOp).getStaticAttributes(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
+
+void Aidge::ReduceMeanImpl2D_cpu::forward() {
+
+    // Find the correct kernel type
+    auto kernelFunc =
+            Registrar<ReduceMeanImpl2DForward_cpu>::create({
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const ReduceMean_Op<2>&>(mOp).getStaticAttributes(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
+
+void Aidge::ReduceMeanImpl3D_cpu::forward() {
+
+    // Find the correct kernel type
+    auto kernelFunc =
+            Registrar<ReduceMeanImpl3DForward_cpu>::create({
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const ReduceMean_Op<3>&>(mOp).getStaticAttributes(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
\ No newline at end of file
diff --git a/src/operator/ReshapeImpl.cpp b/src/operator/ReshapeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..02dea1da3d4422abf37b62193bba83e83c87a83f
--- /dev/null
+++ b/src/operator/ReshapeImpl.cpp
@@ -0,0 +1,39 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+
+#include "aidge/operator/Reshape.hpp"
+#include "aidge/utils/Types.h"
+
+#include "aidge/backend/cpu/operator/ReshapeImpl.hpp"
+#include "aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp"
+
+Aidge::NbElts_t Aidge::ReshapeImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+
+void Aidge::ReshapeImpl_cpu::forward() {
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size() ==
+           std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size()
+           && "input and output must contain the same number of elements");
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<ReshapeImplForward_cpu>::create({
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
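+    // The kernel only needs the element count and raw pointers: reshape copies the data as-is, only the dims differ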
+    // Call kernel
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
diff --git a/src/operator/SliceImpl.cpp b/src/operator/SliceImpl.cpp
index b60bbe60188f416f28ff2562875dce6e5ee15bd5..32d31f046465425a269d6f8e3fc52eaad31c663a 100644
--- a/src/operator/SliceImpl.cpp
+++ b/src/operator/SliceImpl.cpp
@@ -79,4 +79,4 @@ void Aidge::SliceImpl_cpu::forward() {
     mNbProducedData[0] += getRequiredMemory(0, {});
 }
 
-void Aidge::SliceImpl_cpu::backward() { printf("Not implemented yet.\n"); }
\ No newline at end of file
+void Aidge::SliceImpl_cpu::backward() { printf("Not implemented yet.\n"); }
diff --git a/src/operator/SoftmaxImpl.cpp b/src/operator/SoftmaxImpl.cpp
index c3086d8f9067996b9b0a8546b6deb3e281c777b4..5f5d7411b7bb28ae28480b39c8bfdf5674f877ed 100644
--- a/src/operator/SoftmaxImpl.cpp
+++ b/src/operator/SoftmaxImpl.cpp
@@ -36,13 +36,12 @@ void Aidge::SoftmaxImpl_cpu::forward() {
         std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
-    DimSize_t batchSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0];
-    DimSize_t channelSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[1];
-    DimSize_t featureSize = (std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size()/batchSize)/channelSize;
+    Softmax_Op::Attrs attr = dynamic_cast<const Softmax_Op&>(mOp).getStaticAttributes();
+    const int& axisIdx = static_cast<const int&>(std::get<0>(attr));
+
     // Call kernel
-    kernelFunc(batchSize,
-               channelSize,
-               featureSize,
-               getCPUPtr(mOp.getRawInput(0)),
-               getCPUPtr(mOp.getRawOutput(0)));
+    kernelFunc(axisIdx,
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
 }
diff --git a/src/operator/TransposeImpl.cpp b/src/operator/TransposeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1fc4458ccb85e4776228a2bf9e1c73589c201a35
--- /dev/null
+++ b/src/operator/TransposeImpl.cpp
@@ -0,0 +1,123 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <chrono>  // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread>  // std::this_thread::sleep_for
+#include <vector>
+
+#include "aidge/utils/Types.h"
+#include "aidge/operator/Transpose.hpp"
+
+#include "aidge/backend/cpu/operator/TransposeImpl.hpp"
+#include "aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp"
+
+Aidge::NbElts_t Aidge::TransposeImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+Aidge::NbElts_t Aidge::TransposeImpl3D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+Aidge::NbElts_t Aidge::TransposeImpl4D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+Aidge::NbElts_t Aidge::TransposeImpl5D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+Aidge::NbElts_t Aidge::TransposeImpl6D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+
+void Aidge::TransposeImpl2D_cpu::forward() {
+    // Find the correct kernel type
+    auto kernelFunc =
+            Registrar<TransposeImpl2DForward_cpu>::create({
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const Transpose_Op<2>&>(mOp).getStaticAttributes(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
+
+void Aidge::TransposeImpl3D_cpu::forward() {
+    // Find the correct kernel type
+    auto kernelFunc =
+            Registrar<TransposeImpl3DForward_cpu>::create({
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const Transpose_Op<3>&>(mOp).getStaticAttributes(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
+
+void Aidge::TransposeImpl4D_cpu::forward() {
+    // Find the correct kernel type
+    auto kernelFunc =
+            Registrar<TransposeImpl4DForward_cpu>::create({
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const Transpose_Op<4>&>(mOp).getStaticAttributes(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
+void Aidge::TransposeImpl5D_cpu::forward() {
+    // Find the correct kernel type
+    auto kernelFunc =
+            Registrar<TransposeImpl5DForward_cpu>::create({
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const Transpose_Op<5>&>(mOp).getStaticAttributes(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
+void Aidge::TransposeImpl6D_cpu::forward() {
+    // Find the correct kernel type
+    auto kernelFunc =
+            Registrar<TransposeImpl6DForward_cpu>::create({
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const Transpose_Op<6>&>(mOp).getStaticAttributes(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
\ No newline at end of file
diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp
index b75c49077f190ed61486fea8eaa18152423a73ed..cfcfb45e3735538c1650cfd990ea85e2333916ad 100644
--- a/unit_tests/data/Test_TensorImpl.cpp
+++ b/unit_tests/data/Test_TensorImpl.cpp
@@ -14,6 +14,7 @@
 #include <catch2/catch_test_macros.hpp>
 
 #include "aidge/data/Tensor.hpp"
+#include "aidge/utils/TensorUtils.hpp"
 #include "aidge/backend/cpu/data/TensorImpl.hpp"
 
 using namespace Aidge;
@@ -57,3 +58,43 @@ TEST_CASE("Tensor creation") {
     }
   }
 }
+
+TEST_CASE("Tensor methods") {
+  Tensor x = Array3D<int, 2, 2, 2>{{
+    {{1, 2},
+     {3, 4}},
+    {{5, 6},
+     {7, 8}}
+  }};
+
+  Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+
+  Tensor xFloat =
+      Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
+
+  SECTION("Tensor sharing") {
+    Tensor xCopyCtor(x);
+    REQUIRE(xCopyCtor.getImpl() == x.getImpl());
+
+    Tensor xEqOp = x;
+    REQUIRE(xEqOp.getImpl() == x.getImpl());
+
+    Tensor xCloned = x.clone();
+    REQUIRE(xCloned.getImpl() != x.getImpl());
+    REQUIRE(xCloned == x);
+  }
+
+  SECTION("Tensor extract") {
+    Tensor y = x.extract({0, 1});
+    REQUIRE(y.getImpl() == x.getImpl());
+    REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
+    REQUIRE(y.isContiguous());
+    
+    Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1});
+    REQUIRE(y2.getImpl() == x.getImpl());
+    REQUIRE(!y2.isContiguous());
+    Tensor y3 = y2.clone();
+    REQUIRE(y3.isContiguous());
+    REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
+  }
+}
diff --git a/unit_tests/operator/Test_ErfImpl.cpp b/unit_tests/operator/Test_ErfImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..db2ae0437742d1cd1b298d62f5bdd7241b755ec4
--- /dev/null
+++ b/unit_tests/operator/Test_ErfImpl.cpp
@@ -0,0 +1,90 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Erf.hpp"
+
+#include "aidge/backend/cpu.hpp"
+
+#include <memory>
+
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Erf(forward)") {
+    SECTION("1D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<float,10> {
+            {0.41384590, 0.43120754, 0.93762982, 0.31049860, 0.77547199, 0.09514862,
+              0.16145366, 0.42776686, 0.43487436, 0.41170865}
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<float,10> {
+                {0.44163144, 0.45801866, 0.81516320, 0.33941913, 0.72722000, 0.10704061,
+              0.18061027, 0.45479023, 0.46144873, 0.43959764}
+        });
+
+        std::shared_ptr<Node> myErf = Erf();
+        auto op = std::static_pointer_cast<OperatorTensor>(myErf -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
+        myErf->forward();
+
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< expectedOutput->size(); ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+    }
+
+    SECTION("3D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array3D<float,2,2,3> {
+            {
+                {
+                    {0.97037154, 0.86208081, 0.77767169},
+                    {0.38160080, 0.11422747, 0.77284443},
+                },
+                {
+                    {0.51592529, 0.72543722, 0.54641193},
+                    {0.93866944, 0.97767913, 0.34172094}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> {
+            {
+                {
+                    {0.83003384, 0.77721894, 0.72857803},
+                    {0.41057193, 0.12833349, 0.72559172},
+                },
+                {
+                    {0.53438270, 0.69507217, 0.56032562},
+                    {0.81564975, 0.83322692, 0.37109339}
+                }
+            }
+        });
+
+        std::shared_ptr<Node> myErf = Erf();
+        auto op = std::static_pointer_cast<OperatorTensor>(myErf -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
+        myErf->forward();
+
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< expectedOutput->size(); ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_GatherImpl.cpp b/unit_tests/operator/Test_GatherImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a8345917ab0a141065e86638c09b2689902679ec
--- /dev/null
+++ b/unit_tests/operator/Test_GatherImpl.cpp
@@ -0,0 +1,100 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Gather.hpp"
+
+#include "aidge/backend/cpu.hpp"
+
+#include <memory>
+
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Gather(forward)") {
+    SECTION("2D Tensor axis 0") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,3,3> {
+            {
+                {1, 2, 3},
+                {4, 5, 6},
+                {7, 8, 9}
+            }
+        });
+        std::shared_ptr<Tensor> indexes = std::make_shared<Tensor>(Array2D<int,1,2> {
+            {
+                {1, 2}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,1,2,3> {
+            {
+                {
+                    {4, 5, 6},
+                    {7, 8, 9}
+                }
+            }
+        });
+
+        std::shared_ptr<Node> myGather = Gather({1, 2}, {1, 2}, 0);
+        auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator());
+        op->associateInput(0,input);
+        // op->associateInput(1,indexes);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
+        myGather->forward();
+        op->getOutput(0)->print();
+        expectedOutput->print();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+
+    }
+    SECTION("2D Tensor axis 1") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,3,3> {
+            {
+                {1, 2, 3},
+                {4, 5, 6},
+                {7, 8, 9}
+            }
+        });
+        std::shared_ptr<Tensor> indexes = std::make_shared<Tensor>(Array2D<int,1,2> {
+            {
+                {0, 2}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,3,1,2> {
+            {
+                {
+                    {1, 3}
+                },
+                {
+                    {4, 6}
+                },
+                {
+                    {7, 9}
+                }
+            }
+        });
+
+        std::shared_ptr<Node> myGather = Gather({0, 2}, {1, 2}, 1);
+        auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator());
+        op->associateInput(0,input);
+        // op->associateInput(1,indexes);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
+        myGather->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_ReduceMeanImpl.cpp b/unit_tests/operator/Test_ReduceMeanImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..494b7a6ace17173ef7b956bc9dabf4d27e665e5a
--- /dev/null
+++ b/unit_tests/operator/Test_ReduceMeanImpl.cpp
@@ -0,0 +1,172 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/ReduceMean.hpp"
+#include "aidge/operator/Conv.hpp"
+
+#include "aidge/backend/cpu.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] ReduceMean(forward)", "[ReduceMean][CPU]") {
+    SECTION("KeepDims") {
+        SECTION("test 1") {
+            std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {
+                {
+                    {
+                        { 5.0, 1.0 },
+                        { 20.0, 2.0 }
+                    },
+                    {
+                        { 30.0, 1.0 },
+                        { 40.0, 2.0 }
+                    },
+                    {
+                        { 55.0, 1.0 },
+                        { 60.0, 2.0 }
+                    }
+                }
+            });
+            Tensor myOutput = Tensor(Array3D<float,3,1,2> {
+                {
+
+                    {{ 12.5, 1.5 }},
+                    {{ 35.0, 1.5 }},
+                    {{ 57.5, 1.5 }}
+                }
+            });
+
+            std::shared_ptr<Node> myReduceMean = ReduceMean({1}, 1);
+            auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
+            op->associateInput(0,myInput);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            op->computeOutputDims();
+            myReduceMean->forward();
+            op->getOutput(0)->print();
+
+            REQUIRE(*(op->getOutput(0)) == myOutput);
+        }
+        SECTION("test 2") {
+            std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,3,2> {
+                {
+                    {
+                        { 0.0, 0.0 },
+                        { 1.0, 1.0 },
+                        { 2.0, 2.0 }
+                    },
+                    {
+                        { 3.0, 3.0 },
+                        { 4.0, 4.0 },
+                        { 5.0, 5.0 }
+                    },
+                    {
+                        { 6.0, 6.0 },
+                        { 7.0, 7.0 },
+                        { 8.0, 8.0 }
+                    }
+                }
+            });
+            Tensor myOutput = Tensor(Array3D<float,3,1,1> {
+                {
+
+                    {{ 1.0 }},
+                    {{ 4.0 }},
+                    {{ 7.0 }}
+                }
+            });
+
+            std::shared_ptr<Node> myReduceMean = ReduceMean({1, 2}, 1);
+            auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
+            op->associateInput(0,myInput);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            op->computeOutputDims();
+            myReduceMean->forward();
+            myOutput.print();
+            op->getOutput(0)->print();
+            REQUIRE(*(op->getOutput(0)) == myOutput);
+        }
+    }
+    SECTION("not_KeepDims") {
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {
+            {
+                {
+                    { 5.0, 1.0 },
+                    { 20.0, 2.0 }
+                },
+                {
+                    { 30.0, 1.0 },
+                    { 40.0, 2.0 }
+                },
+                {
+                    { 55.0, 1.0 },
+                    { 60.0, 2.0 }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<float,3,2> {
+            {
+                { 12.5, 1.5 },
+                { 35.0, 1.5 },
+                { 57.5, 1.5 }
+            }
+        });
+
+        std::shared_ptr<Node> myReduceMean = ReduceMean({1}, 0);
+        auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
+        op->associateInput(0,myInput);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
+        myReduceMean->forward();
+        op->getOutput(0)->print();
+
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
+
+    }
+    SECTION("all_axes") {
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {
+            {
+                {
+                    { 5.0, 1.0 },
+                    { 20.0, 2.0 }
+                },
+                {
+                    { 30.0, 1.0 },
+                    { 40.0, 2.0 }
+                },
+                {
+                    { 55.0, 1.0 },
+                    { 60.0, 2.0 }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> {
+            {18.25}
+        });
+
+        std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1, 2}, 0);
+        auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
+        op->associateInput(0,myInput);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
+        myReduceMean->forward();
+        op->getOutput(0)->print();
+
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_ReshapeImpl.cpp b/unit_tests/operator/Test_ReshapeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1fee1f4cd132acf9ee39a86759f2e628317fce19
--- /dev/null
+++ b/unit_tests/operator/Test_ReshapeImpl.cpp
@@ -0,0 +1,71 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Reshape.hpp"
+
+#include "aidge/backend/cpu.hpp"
+
+#include <memory>
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Reshape(forward)") {
+    SECTION("1D Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array1D<float,6> {
+            {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,3> {
+            {
+                {1.0, 2.0, 3.0},
+                {4.0, 5.0, 6.0}
+            }
+        });
+
+        std::shared_ptr<Node> myReshape = Reshape({2, 3});
+        auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
+        op->associateInput(0, input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
+        myReshape->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+    }
+    SECTION("2D Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<float,2,3> {
+            {
+                {1.0, 2.0, 3.0},
+                {4.0, 5.0, 6.0}
+            }
+
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,3,2> {
+            {
+                {1.0, 2.0},
+                {3.0, 4.0},
+                {5.0, 6.0}
+            }
+        });
+
+        std::shared_ptr<Node> myReshape = Reshape({3, 2});
+        auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
+        op->associateInput(0, input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
+        myReshape->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_SliceImpl.cpp b/unit_tests/operator/Test_SliceImpl.cpp
index 7a71f31e9850852cadd659c91683c30ddcbe9849..0b5ae682c659bf5a0f8d50448733b9ec18a4c36e 100644
--- a/unit_tests/operator/Test_SliceImpl.cpp
+++ b/unit_tests/operator/Test_SliceImpl.cpp
@@ -163,4 +163,4 @@ TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
         REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
         REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
     }
-}
\ No newline at end of file
+}
diff --git a/unit_tests/operator/Test_SoftmaxImpl.cpp b/unit_tests/operator/Test_SoftmaxImpl.cpp
index 360b7440599030dbd93954e345f0d5986eb83b15..7459a45e48cad74e722dc881e4653d34b7f549d0 100644
--- a/unit_tests/operator/Test_SoftmaxImpl.cpp
+++ b/unit_tests/operator/Test_SoftmaxImpl.cpp
@@ -41,15 +41,15 @@ TEST_CASE("[cpu/operator] Softmax(forward)", "[Softmax][CPU]") {
 
         std::shared_ptr<Node> mySoftmax = Softmax(1);
         auto op = std::static_pointer_cast<OperatorTensor>(mySoftmax -> getOperator());
-        mySoftmax->getOperator()->associateInput(0,input);
-        mySoftmax->getOperator()->setDataType(DataType::Float32);
-        mySoftmax->getOperator()->setBackend("cpu");
+        op->associateInput(0,input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
         op->computeOutputDims();
         mySoftmax->forward();
 
         float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 20; ++i) {
+        for (std::size_t i = 0; i< expectedOutput->size(); ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
         }
 
@@ -110,17 +110,16 @@ TEST_CASE("[cpu/operator] Softmax(forward)", "[Softmax][CPU]") {
 
         std::shared_ptr<Node> mySoftmax = Softmax(1);
         auto op = std::static_pointer_cast<OperatorTensor>(mySoftmax -> getOperator());
-        mySoftmax->getOperator()->associateInput(0,input);
-        mySoftmax->getOperator()->setDataType(DataType::Float32);
-        mySoftmax->getOperator()->setBackend("cpu");
+        op->associateInput(0,input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
         op->computeOutputDims();
         mySoftmax->forward();
 
         float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 54; ++i) {
+        for (std::size_t i = 0; i< expectedOutput->size(); ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
         }
-        // REQUIRE(*mySoftmax->getOperator()->getOutput(0) == *expectedOutput);
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_TransposeImpl.cpp b/unit_tests/operator/Test_TransposeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d381faadd7750f6a9a48fe9371f98e813b94a310
--- /dev/null
+++ b/unit_tests/operator/Test_TransposeImpl.cpp
@@ -0,0 +1,127 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Transpose.hpp"
+
+#include "aidge/backend/cpu.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Transpose(forward)") {
+    SECTION("3D Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array3D<float,2,3,4> {
+            {
+                {{0.42507452, 0.11244237, 0.43243718, 0.62354952},
+                {0.90250170, 0.48719984, 0.45781207, 0.92536664},
+                {0.06348717, 0.91678733, 0.64452291, 0.00484818}},
+
+                {{0.66873497, 0.99508536, 0.55714869, 0.84887981},
+                {0.41666120, 0.92365038, 0.80034822, 0.38721532},
+                {0.52037925, 0.53937608, 0.66380072, 0.36330253}}
+            }
+        });
+        std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array3D<float,2,4,3> { 
+            {
+                {{0.42507452, 0.90250170, 0.06348717},
+                {0.11244237, 0.48719984, 0.91678733},
+                {0.43243718, 0.45781207, 0.64452291},
+                {0.62354952, 0.92536664, 0.00484818}},
+
+                {{0.66873497, 0.41666120, 0.52037925},
+                {0.99508536, 0.92365038, 0.53937608},
+                {0.55714869, 0.80034822, 0.66380072},
+                {0.84887981, 0.38721532, 0.36330253}}
+            }
+        });
+        std::shared_ptr<Node> myTranspose = Transpose<3>(std::array<DimSize_t,3>{{0,2,1}});
+        auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator());
+        op->associateInput(0,input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
+        myTranspose->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *output);
+    }
+    SECTION("4D Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,2,3,1,4> {
+            {
+                {
+                    {
+                        {1, 2, 3, 4}
+                    },
+                    {
+                        {5, 6, 7, 8}
+                    },
+                    {
+                        {9, 10, 11, 12}
+                    }
+                },
+                {
+                    {
+                        {13, 14, 15, 16}
+                    },
+                    {
+                        {17, 18, 19, 20}
+                    },
+                    {
+                        {21, 22, 23, 24}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array4D<int,2,4,1,3> { 
+            {
+                {
+                    {
+                        {1, 5, 9}
+                    },
+                    {
+                        {2, 6, 10}
+                    },
+                    {
+                        {3, 7, 11}
+                    },
+                    {
+                        {4, 8, 12}
+                    }
+                },
+                {
+                    {
+                        {13, 17, 21}
+                    },
+                    {
+                        {14, 18, 22}
+                    },
+                    {
+                        {15, 19, 23}
+                    },
+                    {
+                        {16, 20, 24}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Node> myTranspose = Transpose<4>(std::array<DimSize_t,4>{{0,3,2,1}});
+        auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator());
+        op->associateInput(0,input);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
+        myTranspose->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *output);
+    }
+}
\ No newline at end of file