diff --git a/include/aidge/backend/cpu/data/GetCPUPtr.h b/include/aidge/backend/cpu/data/GetCPUPtr.h
new file mode 100644
index 0000000000000000000000000000000000000000..38ea848afc29fa4c23ff500f97e0c57954695021
--- /dev/null
+++ b/include/aidge/backend/cpu/data/GetCPUPtr.h
@@ -0,0 +1,25 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_DATA_GETCPUPTR_H_
+#define AIDGE_CPU_DATA_GETCPUPTR_H_
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+
+namespace Aidge {
+inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) {
+  return std::static_pointer_cast<Tensor>(data)->getImpl()->rawPtr();
+}
+} // namespace Aidge
+
+#endif // AIDGE_CPU_DATA_GETCPUPTR_H_
diff --git a/include/aidge/backend/cpu/operator/AddImpl.hpp b/include/aidge/backend/cpu/operator/AddImpl.hpp
index 806bbb02d760dbdec58df137641d4c211443039e..fa1b837902ee72f22c54afdec0ff897db3b39b76 100644
--- a/include/aidge/backend/cpu/operator/AddImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Add.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
index e3c3a6a28b08386a3b93702f8ce64df68f703119..bfb2b1947281fc30e38fd1fe1663bd5de415d3ee 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
@@ -21,6 +21,7 @@
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 // class AvgPooling_Op;
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
index ea46a540ad04b6227d6ec01c965e2eb99806d5e1..5598cc9cdfd463b6e40e6801b74203b911a318e6 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
@@ -16,6 +16,7 @@
 
 #include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/data/Data.hpp"
 #include <array>
 #include <tuple>
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
index 060e19b135c12832e8a7e8cc9c0db828d4a204d1..a599aeb7b427161eb7541829242820c0306d0d31 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
@@ -21,6 +21,7 @@
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 // class BatchNorm_Op;
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp
index 486829e782ae2173332a7efa6646bb7bba322252..cfde6ebe7cab8cfe2f793723983c8552bd9747b8 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp
@@ -16,6 +16,7 @@
 
 #include "aidge/backend/cpu/operator/BatchNormImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <array>
 #include <cmath>
 #include <algorithm>
diff --git a/include/aidge/backend/cpu/operator/ConcatImpl.hpp b/include/aidge/backend/cpu/operator/ConcatImpl.hpp
index a5e0c56e856217509445844be2ae5631bad05728..d0d3e06365c524da1af485583dda6d6208ef3fb9 100644
--- a/include/aidge/backend/cpu/operator/ConcatImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConcatImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Concat.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/ConcatImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConcatImpl_forward_kernels.hpp
index b76f384bd73e874a194cddd0fc2e146ba6ff872d..ed849b0e1cdb5089275784dea418c832a38dfe66 100644
--- a/include/aidge/backend/cpu/operator/ConcatImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConcatImpl_forward_kernels.hpp
@@ -22,6 +22,7 @@
 #include "aidge/operator/Concat.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
index 7b5dbfb0801fb314d91da15c8a9c4b80fe62eb35..f72890d8903ca4a9876809759587ed4b1ac22e67 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
@@ -21,6 +21,7 @@
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 // class ConvDepthWise_Op;
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
index f40dfee00ee940e6bc73a2fc337bbee61607d17f..95a1aaeccbe728eb2bb957913a5b79f4b8a9548b 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
@@ -16,6 +16,7 @@
 
 #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <cmath>
 #include <cstddef>
 #include <array>
diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp
index 3db91ab507456244676c990427287e5755ab019b..9bc2f27412f388a7fd03db06ac97c612044fab5f 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp
@@ -21,6 +21,7 @@
 #include "aidge/operator/Conv.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 // class Conv_Op;
diff --git a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
index 03e2c35170432181c7a9b3934d61f0bd18471876..cbd784698fcce5152c0bb42a192c327abb2b10dd 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
@@ -16,6 +16,7 @@
 
 #include "aidge/backend/cpu/operator/ConvImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <cmath>
 #include <array>
 #include <algorithm>
diff --git a/include/aidge/backend/cpu/operator/DivImpl.hpp b/include/aidge/backend/cpu/operator/DivImpl.hpp
index 655a9f6c8accb80fc85d8bc7bd9bf378d4f48a6b..73809ee81e26fff23e40763405857ddd2c95db0c 100644
--- a/include/aidge/backend/cpu/operator/DivImpl.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Div.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/FCImpl.hpp b/include/aidge/backend/cpu/operator/FCImpl.hpp
index 5d79369077d06288e218b9002274e7e3d1880b59..86bb7fd1271e5857b595dda8efc0354851c94b7e 100644
--- a/include/aidge/backend/cpu/operator/FCImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/FC.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 #include <array>
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
index 371e2905a81d1dc2e114f6044388d7e6686122f8..4a1da034935e6b1f6c2069b4f91153b77a9f0636 100644
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/LeakyReLU.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/MatMulImpl.hpp b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
index 2e4b3157360065b0fa857a8bcdd85f1b7442ee63..e8654c6e9cc8fab9080bbb5ed57ea78ee0b7978c 100644
--- a/include/aidge/backend/cpu/operator/MatMulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
@@ -20,6 +20,7 @@
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 // class MatMul_Op;
diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
index a96fcc226b927b135465ef9cf395d10f844a2646..6cde34d9b123b4f83cbfce412ffa62e0144af8d4 100644
--- a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
@@ -21,6 +21,7 @@
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 // class MaxPooling_Op;
diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp
index caa99e8678a72c7fd3c77fe8b7579ea739ac64c7..c4baccdee5def0be93be42b5657d77d21240328c 100644
--- a/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp
@@ -16,6 +16,7 @@
 
 #include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/data/Data.hpp"
 #include <array>
 #include <tuple>
diff --git a/include/aidge/backend/cpu/operator/MulImpl.hpp b/include/aidge/backend/cpu/operator/MulImpl.hpp
index 54361e4f5f7a361032c9f4928392f18f183724ac..f1b58e59b9ac1d3a1d34162a1054534830b8d508 100644
--- a/include/aidge/backend/cpu/operator/MulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MulImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Mul.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/PadImpl.hpp b/include/aidge/backend/cpu/operator/PadImpl.hpp
index 9d93828f5817043f4f5cb07166db213c02866ca1..2320662710f9802878811e51ec4439bd812aea67 100644
--- a/include/aidge/backend/cpu/operator/PadImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PadImpl.hpp
@@ -21,6 +21,7 @@
 #include "aidge/operator/Pad.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 // class Pad_Op;
diff --git a/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp
index 8b793257d2f3f126793316d463fe2542512da939..f6f00bc4df661921708e605f44056a77bb8125f4 100644
--- a/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp
@@ -16,6 +16,7 @@
 
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <cmath>
 #include <array>
 #include <algorithm>
diff --git a/include/aidge/backend/cpu/operator/PowImpl.hpp b/include/aidge/backend/cpu/operator/PowImpl.hpp
index c33fbf0ed4adf4a0206ce8ed32ffdce2cd9ad17c..d3cafa7e7380e31dd331950e381e08210c3f3a4c 100644
--- a/include/aidge/backend/cpu/operator/PowImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PowImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Pow.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/ProducerImpl.hpp b/include/aidge/backend/cpu/operator/ProducerImpl.hpp
index 19361f1903e8737562dba63b24f3410e6eba1e5b..c1d27f7efc4457fd3b02b6cde006401e2ca71661 100644
--- a/include/aidge/backend/cpu/operator/ProducerImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ProducerImpl.hpp
@@ -18,6 +18,7 @@
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 class ProducerImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl.hpp b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
index 6596c1c9052ca8f919c3cb2fa7ef5a2fa1f823d4..3338d0c40c057995fe37b1652966241bf4a96b59 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/ScalingImpl.hpp b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
index e336adb003769afd97770fd3dd65796b5bbf6a2d..bbcb4553d7aa4b17d733e0f455373bebb9c3581c 100644
--- a/include/aidge/backend/cpu/operator/ScalingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Scaling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 #include <array>
diff --git a/include/aidge/backend/cpu/operator/SliceImpl.hpp b/include/aidge/backend/cpu/operator/SliceImpl.hpp
index 06c7cf29638c2ef0881db111702eeba83863a9e2..56fdf8b60977527382e0ad916d0dfdabc5db2846 100644
--- a/include/aidge/backend/cpu/operator/SliceImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SliceImpl.hpp
@@ -26,95 +26,26 @@ namespace Aidge {
 // class Slice_Op;
 
 // compute kernel registry for forward and backward
-template <DimIdx_t DIM>
 class SliceImplForward_cpu
-    : public Registrable<SliceImplForward_cpu<DIM>, std::tuple<DataType>,
-                         void(const typename Slice_Op<DIM>::Attrs&,
-                              const std::array<std::size_t, DIM>,
+    : public Registrable<SliceImplForward_cpu, std::tuple<DataType>,
+                         void(const typename Slice_Op::Attrs&,
+                              const std::vector<std::size_t>,
                               const void*,
                               void*)> {};
-template <DimIdx_t DIM>
 class SliceImplBackward_cpu
-    : public Registrable<SliceImplBackward_cpu<DIM>, std::tuple<DataType>,
-                         void(const typename Slice_Op<DIM>::Attrs&,
-                              const std::array<std::size_t, DIM>,
+    : public Registrable<SliceImplBackward_cpu, std::tuple<DataType>,
+                         void(const typename Slice_Op::Attrs&,
+                              const std::vector<std::size_t>,
                               const void*,
                               void*)> {};
 
-template <DimIdx_t DIM>
-class SliceImpl_cpu : public OperatorImpl {
-   public:
-    SliceImpl_cpu(const Slice_Op<DIM>& op) : OperatorImpl(op) {}
-
-    static std::unique_ptr<SliceImpl_cpu<DIM>> create(const Slice_Op<DIM>& op) {
-        return std::make_unique<SliceImpl_cpu<DIM>>(op);
-    }
-
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t /*inputIdx*/) const override final {
-        assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "requires valid input");
-
-        // Requires the whole tensors
-        const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims();
-
-        return std::accumulate(inputDims.begin(), inputDims.end(), static_cast<NbElts_t>(1),
-                               std::multiplies<NbElts_t>());
-    }
-    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final { return 0; }
-    NbElts_t getRequiredMemory(const IOIndex_t outputIdx,
-                               const std::vector<DimSize_t>& inputsSize) const override final {
-        (void)outputIdx;
-        (void)inputsSize;
-        const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims();
-        return std::accumulate(outputDims.begin(), outputDims.end(), static_cast<NbElts_t>(1),
-                               std::multiplies<NbElts_t>());
-    }
-    NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override final {
-        return mNbConsumedData[0];
-    }
-    NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final {
-        return mNbProducedData[0];
-    }
-    void updateConsummerProducer() override final {
-        // each input is consumed by the minimum amount for a forward pass
-        mNbConsumedData[0] += getNbRequiredData(0);
-
-        mNbProducedData[0] += getRequiredMemory(0, {});
-    }
-
-    void forward() override {
-        // FIXME: uncomment the following code once memory handling will work
-        assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
-
-        // Find the correct kernel type
-        auto kernelFunc = Registrar<SliceImplForward_cpu<DIM>>::create(
-                {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType()});
-
-        // Call kernel
-        kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<DIM>(),
-                   std::get<1>(std::static_pointer_cast<const Slice_Op<DIM>&>(mOp).getStaticAttributes()),
-                   std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-                   std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()
-                );
-
-        // each input is consumed by the minimum amount for a forward pass
-        mNbConsumedData[0] += getNbRequiredData(0);
-
-        mNbProducedData[0] += getRequiredMemory(0, {});
-    }
-
-    void backward() override { printf("Not implemented yet.\n"); }
-};
 
-/******************************************************************************/
-
-template <>
-class SliceImpl_cpu<1> : public OperatorImpl {
+class SliceImpl_cpu : public OperatorImpl {
 public:
-    SliceImpl_cpu(const Slice_Op<1>& op) : OperatorImpl(op) {}
+    SliceImpl_cpu(const Slice_Op& op) : OperatorImpl(op) {}
 
-    static std::unique_ptr<SliceImpl_cpu<1>> create(const Slice_Op<1>& op) {
-        return std::make_unique<SliceImpl_cpu<1>>(op);
+    static std::unique_ptr<SliceImpl_cpu> create(const Slice_Op& op) {
+        return std::make_unique<SliceImpl_cpu>(op);
     }
 
 public:
@@ -127,88 +58,13 @@ public:
     void updateConsummerProducer() override final;
 
     void forward() override;
-    void backward() override;
-};
-
-/******************************************************************************/
-
-template <>
-class SliceImpl_cpu<2> : public OperatorImpl {
-   public:
-    SliceImpl_cpu(const Slice_Op<2>& op) : OperatorImpl(op) {}
 
-    static std::unique_ptr<SliceImpl_cpu<2>> create(const Slice_Op<2>& op) {
-        return std::make_unique<SliceImpl_cpu<2>>(op);
-    }
-
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t /*inputIdx*/) const override final;
-    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t outputIdx,
-                               const std::vector<DimSize_t>& inputsSize) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-    void updateConsummerProducer() override final;
-
-    void forward() override;
-    void backward() override;
-};
-
-/******************************************************************************/
-
-template <>
-class SliceImpl_cpu<3> : public OperatorImpl {
-   public:
-    SliceImpl_cpu(const Slice_Op<3>& op) : OperatorImpl(op) {}
-
-    static std::unique_ptr<SliceImpl_cpu<3>> create(const Slice_Op<3>& op) {
-        return std::make_unique<SliceImpl_cpu<3>>(op);
-    }
-
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t /*inputIdx*/) const override final;
-    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t outputIdx,
-                               const std::vector<DimSize_t>& inputsSize) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-    void updateConsummerProducer() override final;
-
-    void forward() override;
     void backward() override;
 };
 
-/******************************************************************************/
-
-template <>
-class SliceImpl_cpu<4> : public OperatorImpl {
-   public:
-    SliceImpl_cpu(const Slice_Op<4>& op) : OperatorImpl(op) {}
-
-    static std::unique_ptr<SliceImpl_cpu<4>> create(const Slice_Op<4>& op) {
-        return std::make_unique<SliceImpl_cpu<4>>(op);
-    }
-
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t /*inputIdx*/) const override final;
-    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t outputIdx,
-                               const std::vector<DimSize_t>& inputsSize) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-    void updateConsummerProducer() override final;
-
-    void forward() override;
-    void backward() override;
-};
-
-
 
 namespace {
-static Registrar<Slice_Op<1>> registrarSliceImpl_1D_cpu("cpu", Aidge::SliceImpl_cpu<1>::create);
-static Registrar<Slice_Op<2>> registrarSliceImpl_2D_cpu("cpu", Aidge::SliceImpl_cpu<2>::create);
-static Registrar<Slice_Op<3>> registrarSliceImpl_3D_cpu("cpu", Aidge::SliceImpl_cpu<3>::create);
-static Registrar<Slice_Op<4>> registrarSliceImpl_4D_cpu("cpu", Aidge::SliceImpl_cpu<4>::create);
+static Registrar<Slice_Op> registrarSliceImpl_cpu("cpu", Aidge::SliceImpl_cpu::create);
 }  // namespace
 }  // namespace Aidge
 
diff --git a/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp
index bbf4ccbae77089ca75dfee34f5bc5b0dd7d3697d..7eb4b9dc2cb8dddc8b7fdaf4d63b8f1d39d879b0 100644
--- a/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp
@@ -15,46 +15,47 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/Slice.hpp"
 #include "aidge/backend/cpu/operator/SliceImpl.hpp"
-#include <array>
+#include <vector>
 #include <cstddef>
 
 #include "aidge/data/Data.hpp"
 
 namespace Aidge {
-template <class I, std::size_t DIM>
-void SliceImpl_cpu_forward_kernel(const typename Slice_Op<DIM>::Attrs& attrs,
-                                     const std::array<std::size_t, DIM> inputDims,
+template <class I>
+void SliceImpl_cpu_forward_kernel(const typename Slice_Op::Attrs& attrs,
+                                     const std::vector<std::size_t> inputDims,
                                      const void* input_,
                                      void* output_) {
 
     const I* input = static_cast<const I*>(input_) + std::get<0>(attrs);
     I* output = static_cast<I*>(output_);
-	const std::array<std::size_t, DIM> slicedDims = std::get<1>(attrs);
+    const std::vector<std::size_t> slicedDims = std::get<1>(attrs);
+    const std::size_t nbDims = slicedDims.size();
 
 	// for inputDims = {4,5,5,3} & slicedDims = {3,2,2,1}, substractDims = {1,5,5,3}
-    std::array<std::size_t, DIM> substractedDims;
-    for (std::size_t i = 0; i < DIM; ++i) {
+    std::vector<std::size_t> substractedDims = std::vector<std::size_t>(nbDims);
+    for (std::size_t i = 0; i < nbDims; ++i) {
         substractedDims[i] = inputDims[i] - slicedDims[i];
     }
 
 	// for slicedDims = {3,2,2,1}, prodSlicedDims = {12,4,2,1}
-    std::array<std::size_t, DIM> prodSlicedDims;
-    std::array<std::size_t, DIM+1> prodInputDims;
-	prodSlicedDims[DIM - 1] = slicedDims[DIM - 1];
-	prodInputDims[DIM - 1] = inputDims[DIM - 1];
-	prodInputDims[DIM] = 1;
-	for (std::size_t i = 2; i <= DIM; ++i) {
-		prodSlicedDims[DIM - i] = prodSlicedDims[DIM - i + 1]*slicedDims[DIM - i];
-		prodInputDims[DIM - i] = prodInputDims[DIM - i + 1]*inputDims[DIM - i];
+    std::vector<std::size_t> prodSlicedDims = std::vector<std::size_t>(nbDims);
+    std::vector<std::size_t> prodInputDims = std::vector<std::size_t>(nbDims+1);
+	prodSlicedDims[nbDims - 1] = slicedDims[nbDims - 1];
+	prodInputDims[nbDims - 1] = inputDims[nbDims - 1];
+	prodInputDims[nbDims] = 1;
+	for (std::size_t i = 2; i <= nbDims; ++i) {
+		prodSlicedDims[nbDims - i] = prodSlicedDims[nbDims - i + 1]*slicedDims[nbDims - i];
+		prodInputDims[nbDims - i] = prodInputDims[nbDims - i + 1]*inputDims[nbDims - i];
 	}
 
 	std::size_t j = 0;
 	std::size_t i = 0;
 	for (; j < prodSlicedDims[0];) {
 		output[j] = input[i++];
-                ++j;
-		for (std::size_t idx = DIM - 1; idx > 0; --idx) {
-				i += j % prodSlicedDims[idx] == 0 ? substractedDims[idx]*prodInputDims[idx+1] : 0;
+        ++j;
+		for (std::size_t idx = nbDims - 1; idx > 0; --idx) {
+			i += j % prodSlicedDims[idx] == 0 ? substractedDims[idx]*prodInputDims[idx+1] : 0;
 		}
 	}
 }
@@ -62,37 +63,13 @@ void SliceImpl_cpu_forward_kernel(const typename Slice_Op<DIM>::Attrs& attrs,
 namespace {
 
 // DIM = 1
-static Registrar<SliceImplForward_cpu<1>> registrarSliceImplForward_1D_cpu_Float32(
-        {DataType::Float32}, Aidge::SliceImpl_cpu_forward_kernel<float, 1>);
-static Registrar<SliceImplForward_cpu<1>> registrarSliceImplForward_1D_cpu_Int32(
-        {DataType::Int32}, Aidge::SliceImpl_cpu_forward_kernel<int, 1>);
-static Registrar<SliceImplForward_cpu<1>> registrarSliceImplForward_1D_cpu_Float64(
-        {DataType::Float64}, Aidge::SliceImpl_cpu_forward_kernel<double, 1>);
-
-// DIM = 2
-static Registrar<SliceImplForward_cpu<2>> registrarSliceImplForward_2D_cpu_Float32(
-        {DataType::Float32}, Aidge::SliceImpl_cpu_forward_kernel<float, 2>);
-static Registrar<SliceImplForward_cpu<2>> registrarSliceImplForward_2D_cpu_Int32(
-        {DataType::Int32}, Aidge::SliceImpl_cpu_forward_kernel<int, 2>);
-static Registrar<SliceImplForward_cpu<2>> registrarSliceImplForward_2D_cpu_Float64(
-        {DataType::Float64}, Aidge::SliceImpl_cpu_forward_kernel<double, 2>);
-
-// DIM = 3
-static Registrar<SliceImplForward_cpu<3>> registrarSliceImplForward_3D_cpu_Float32(
-        {DataType::Float32}, Aidge::SliceImpl_cpu_forward_kernel<float, 3>);
-static Registrar<SliceImplForward_cpu<3>> registrarSliceImplForward_3D_cpu_Int32(
-        {DataType::Int32}, Aidge::SliceImpl_cpu_forward_kernel<int, 3>);
-static Registrar<SliceImplForward_cpu<3>> registrarSliceImplForward_3D_cpu_Float64(
-        {DataType::Float64}, Aidge::SliceImpl_cpu_forward_kernel<double, 3>);
-
-// DIM = 4
-static Registrar<SliceImplForward_cpu<4>> registrarSliceImplForward_4D_cpu_Float32(
-        {DataType::Float32}, Aidge::SliceImpl_cpu_forward_kernel<float, 4>);
-static Registrar<SliceImplForward_cpu<4>> registrarSliceImplForward_4D_cpu_Int32(
-        {DataType::Int32}, Aidge::SliceImpl_cpu_forward_kernel<int, 4>);
-static Registrar<SliceImplForward_cpu<4>> registrarSliceImplForward_4D_cpu_Float64(
-        {DataType::Float64}, Aidge::SliceImpl_cpu_forward_kernel<double, 4>);
+static Registrar<SliceImplForward_cpu> registrarSliceImplForward_cpu_Float32(
+        {DataType::Float32}, Aidge::SliceImpl_cpu_forward_kernel<float>);
+static Registrar<SliceImplForward_cpu> registrarSliceImplForward_cpu_Int32(
+        {DataType::Int32}, Aidge::SliceImpl_cpu_forward_kernel<int>);
+static Registrar<SliceImplForward_cpu> registrarSliceImplForward_cpu_Float64(
+        {DataType::Float64}, Aidge::SliceImpl_cpu_forward_kernel<double>);
 }  // namespace
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_SLICEIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
index 995f57f7c0168328e1982315358201c9f8940235..15fb2b5d30e32febca7c8028c8b5212e5b96775f 100644
--- a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Softmax.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp
index 297a3a321667dfc8c5a2bb0e3fc3bebce8825950..a5a168a08cf85e952cffd556e0cc34d29d35fffa 100644
--- a/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp
@@ -17,6 +17,7 @@
 #include <cmath>
 #include "aidge/data/Data.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 
diff --git a/include/aidge/backend/cpu/operator/SqrtImpl.hpp b/include/aidge/backend/cpu/operator/SqrtImpl.hpp
index 1880408cd52f537c6d4965438ece88151d4df6e3..b3723f27b077b9d5ea7e69fd33bd012d02654ffe 100644
--- a/include/aidge/backend/cpu/operator/SqrtImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SqrtImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/SubImpl.hpp b/include/aidge/backend/cpu/operator/SubImpl.hpp
index 08ec69e509b2b6c02e30f613abd83208de254f75..2d4c22f0d7f5e850ce805e0c78fb3e64bfa8f42b 100644
--- a/include/aidge/backend/cpu/operator/SubImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SubImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Sub.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index 851aaa5c6bcd1acc3e8bc17b11dd00143c543b5b..91d4533c4ee2754dce1b9b7ea9ca8c598f530a52 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -14,6 +14,7 @@
 #include <vector>
 
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 
@@ -74,10 +75,10 @@ void  Aidge::AddImpl_cpu::forward() {
 
     std::vector<const void*> opInputs;
     for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
-        opInputs.push_back(std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->getImpl()->rawPtr());
+        opInputs.push_back(getCPUPtr(mOp.getRawInput(i)));
     }
 
     kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
                opInputs,
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+               getCPUPtr(mOp.getRawOutput(0)));
 }
\ No newline at end of file
diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp
index ad236f004ec9d806d43b9549adcec1c094573a1d..9e0a77e3285c1e3701142828c74898cb9da5b405 100644
--- a/src/operator/AvgPoolingImpl.cpp
+++ b/src/operator/AvgPoolingImpl.cpp
@@ -15,6 +15,7 @@
 #include <vector>
 
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/operator/AvgPooling.hpp"
 
 #include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp"
@@ -35,6 +36,6 @@ void Aidge::AvgPoolingImpl2D_cpu::forward() {
     // Call kernel
     kernelFunc(dynamic_cast<const AvgPooling_Op<2>&>(mOp).getStaticAttributes(),
                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+               getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/BatchNormImpl.cpp b/src/operator/BatchNormImpl.cpp
index 4cfd4b1bef0a027dfd28c95235dd864101ced3c6..c84f2cb6b09c707f68ed83cc7554624fc6489b84 100644
--- a/src/operator/BatchNormImpl.cpp
+++ b/src/operator/BatchNormImpl.cpp
@@ -14,6 +14,7 @@
 #include <vector>
 
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/operator/BatchNorm.hpp"
 
 #include "aidge/backend/cpu/operator/BatchNormImpl.hpp"
@@ -41,11 +42,11 @@ void Aidge::BatchNormImpl2D_cpu::forward() {
     // Call kernel
     kernelFunc(dynamic_cast<const BatchNorm_Op<2>&>(mOp).getStaticAttributes(),
                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(3))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(4))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr(),
+               getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawInput(1)),
+               getCPUPtr(mOp.getRawInput(2)),
+               getCPUPtr(mOp.getRawInput(3)),
+               getCPUPtr(mOp.getRawInput(4)),
+               getCPUPtr(mOp.getRawOutput(0)),
                true);
 }
diff --git a/src/operator/ConcatImpl.cpp b/src/operator/ConcatImpl.cpp
index d46054480bab433f3493ffac7fbff48f27f2b570..ceefb9031f279be417a8ab0485567a56edea7824 100644
--- a/src/operator/ConcatImpl.cpp
+++ b/src/operator/ConcatImpl.cpp
@@ -14,6 +14,7 @@
 #include <vector>
 
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 
@@ -75,7 +76,7 @@ void  Aidge::ConcatImpl_cpu::forward() {
     std::vector<const void*> opInputs;
     std::vector<DimSize_t> opInputAxis;
     for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
-        opInputs.push_back(std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->getImpl()->rawPtr());
+        opInputs.push_back(getCPUPtr(mOp.getRawInput(i)));
         opInputAxis.push_back(std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->dims()[dynamic_cast<const Concat_Op&>(mOp).template getAttr<DimSize_t>("Axis")]);
     }
 
@@ -83,7 +84,7 @@ void  Aidge::ConcatImpl_cpu::forward() {
                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
                opInputAxis,
                opInputs,
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+               getCPUPtr(mOp.getRawOutput(0)));
 }
 
 void  Aidge::ConcatImpl_cpu::backward() { printf("Not implemented yet.\n"); }
\ No newline at end of file
diff --git a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp
index 4a722a5e4b00412617eb998f8cbfb36eb0c46035..1b4262e394f78ab0bda4a36440ac7b9cb15c164c 100644
--- a/src/operator/ConvDepthWiseImpl.cpp
+++ b/src/operator/ConvDepthWiseImpl.cpp
@@ -16,6 +16,7 @@
 #include <vector>
 
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/operator/ConvDepthWise.hpp"
 
 #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
@@ -42,8 +43,8 @@ void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
 
     // Call kernel
     kernelFunc(dynamic_cast<const ConvDepthWise_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+               getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawInput(1)),
+               getCPUPtr(mOp.getRawInput(2)),
+               getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index 87b54afbfd0b4c2d3bb57812d07575bc0e255626..d476f84717c0ed6f7bd45d68bd24b4d7ada6cbbd 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -16,6 +16,7 @@
 #include <vector>
 
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/operator/Conv.hpp"
 
 #include "aidge/backend/cpu/operator/ConvImpl.hpp"
@@ -41,6 +42,6 @@ void Aidge::ConvImpl2D_cpu::forward() {
 
     // Call kernel
     kernelFunc(dynamic_cast<const Conv_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->getImpl()->rawPtr(), std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+               getCPUPtr(mOp.getRawInput(0)), getCPUPtr(mOp.getRawInput(1)),
+               getCPUPtr(mOp.getRawInput(2)), getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/DivImpl.cpp b/src/operator/DivImpl.cpp
index 2e913df1a4c42a2c6132a4096a92c1ab0eeab0c0..f5cde077bd5a414d8b9add8b8b8715952a27ad01 100644
--- a/src/operator/DivImpl.cpp
+++ b/src/operator/DivImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Div.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/DivImpl.hpp"
 #include "aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp"
@@ -36,7 +37,7 @@ void Aidge::DivImpl_cpu::forward() {
     // Call kernel
     kernelFunc(std::static_pointer_cast<Tensor>(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)))->size(),
         std::static_pointer_cast<Tensor>(std::static_pointer_cast<Tensor>(mOp.getRawInput(1)))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawInput(1)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index 1e5450d330ee89bdceb30aca846800d7764ca911..14f59f6f7baff57602ad71c8c08023038963b5f0 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/FC.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/FCImpl.hpp"
 #include "aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp"
@@ -39,7 +40,7 @@ void Aidge::FCImpl_cpu::forward()
     //     kernelFunc(
     //         mOp.getStaticAttributes(),
     //         std::static_pointer_cast<Tensor>(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
-    //         std::static_pointer_cast<Tensor>(mOp.getRawInput(0)->getImpl()->rawPtr(),
+    //         getCPUPtr(mOp.getRawInput(0)),
     //         mOp.mInputs[1]->getImpl()->rawPtr(),
     //         mOp.mInputs[2]->getImpl()->rawPtr(),
     //         mOp.getOutput(0)->getImpl()->rawPtr());
@@ -49,8 +50,8 @@ void Aidge::FCImpl_cpu::forward()
         dynamic_cast<const FC_Op&>(mOp).getStaticAttributes(),
         std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0],
         std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->sizeM1(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawInput(1)),
+        getCPUPtr(mOp.getRawInput(2)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp
index 9ea0f2400d954bf5fd25428a7bf46fbd188b4994..17912eb1dc75930eaf7595eb189af39df4d4fa2e 100644
--- a/src/operator/LeakyReLUImpl.cpp
+++ b/src/operator/LeakyReLUImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/LeakyReLU.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp"
@@ -37,6 +38,6 @@ void Aidge::LeakyReLUImpl_cpu::forward() {
     // Call kernel
     kernelFunc(dynamic_cast<const LeakyReLU_Op&>(mOp).getStaticAttributes(),
         std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/MatMulImpl.cpp b/src/operator/MatMulImpl.cpp
index 0ad9bd6ceb4ac27c35e72e21477864980bcecfd9..1abd75db070bbd3b197519318f5bf23c7b46ee5a 100644
--- a/src/operator/MatMulImpl.cpp
+++ b/src/operator/MatMulImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
 #include "aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp"
@@ -40,16 +41,16 @@ void Aidge::MatMulImpl_cpu::forward()
     //         mOp.getInput(0))->getImpl()->rawPtr(),
     //         mOp.mInputs[1]->getImpl()->rawPtr(),
     //         mOp.mInputs[2]->getImpl()->rawPtr(),
-    //         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)->getImpl()->rawPtr());
+    //         getCPUPtr(mOp.getRawOutput(0));
     // }
     // else
     kernelFunc(
         dynamic_cast<const MatMul_Op&>(mOp).getStaticAttributes(),
         std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0],
         std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->sizeM1(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawInput(1)),
+        getCPUPtr(mOp.getRawOutput(0)));
 
 
 }
diff --git a/src/operator/MaxPoolingImpl.cpp b/src/operator/MaxPoolingImpl.cpp
index 00a279707424e0fce4eb0af07cdade80fe2dffd9..e21dab07df4c20eb7253e680146042f205bc210b 100644
--- a/src/operator/MaxPoolingImpl.cpp
+++ b/src/operator/MaxPoolingImpl.cpp
@@ -15,6 +15,7 @@
 #include <vector>
 
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/operator/MaxPooling.hpp"
 
 #include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp"
@@ -35,6 +36,6 @@ void Aidge::MaxPoolingImpl2D_cpu::forward() {
     // Call kernel
     kernelFunc(dynamic_cast<const MaxPooling_Op<2>&>(mOp).getStaticAttributes(),
                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+               getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/MulImpl.cpp b/src/operator/MulImpl.cpp
index dfd33445a527459245137f4ca6e6d5e8f416d82c..fda49c3f20ed5cbe519d729a0bf759f0964a99fd 100644
--- a/src/operator/MulImpl.cpp
+++ b/src/operator/MulImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Mul.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/MulImpl.hpp"
 #include "aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp"
@@ -36,7 +37,7 @@ void Aidge::MulImpl_cpu::forward() {
     // Call kernel
     kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
         std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawInput(1)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/PadImpl.cpp b/src/operator/PadImpl.cpp
index a5bf1c52ba39a2f805f2b28562cbab19191ac754..219bf425fa34cdaaa378c49dd7c9837f9d94d97e 100644
--- a/src/operator/PadImpl.cpp
+++ b/src/operator/PadImpl.cpp
@@ -16,6 +16,7 @@
 #include <vector>
 
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/operator/Conv.hpp"
 
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
@@ -42,6 +43,6 @@ void Aidge::PadImpl2D_cpu::forward() {
     // Call kernel
     kernelFunc(dynamic_cast<const Pad_Op<2>&>(mOp).getStaticAttributes(),
                         std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
-                        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-                        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+                        getCPUPtr(mOp.getRawInput(0)),
+                        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/PowImpl.cpp b/src/operator/PowImpl.cpp
index 30fafa9b3d060eada9468fc9a2bf94ec6aeab3c0..496646402e33869cfcbe7dae96e1fc81b875d0dd 100644
--- a/src/operator/PowImpl.cpp
+++ b/src/operator/PowImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Pow.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/PowImpl.hpp"
 #include "aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp"
@@ -36,7 +37,7 @@ void Aidge::PowImpl_cpu::forward() {
     // Call kernel
     kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
         std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawInput(1)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/ProducerImpl.cpp b/src/operator/ProducerImpl.cpp
index 827f969e4d636862919e07b2f9457bed3e19e0f3..4c5883a9b0155e7bb6e16cbac1b8de1a3a9e9e16 100644
--- a/src/operator/ProducerImpl.cpp
+++ b/src/operator/ProducerImpl.cpp
@@ -16,6 +16,7 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/ProducerImpl.hpp"
 
diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp
index 2819bb864cb552adfcd1c52e57b734fa0a9d0ce1..8863be282ce0c7b7bfbfb938372cf304bc4cc4bd 100644
--- a/src/operator/ReLUImpl.cpp
+++ b/src/operator/ReLUImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp"
@@ -36,6 +37,6 @@ void Aidge::ReLUImpl_cpu::forward() {
 
     // Call kernel
     kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/ScalingImpl.cpp b/src/operator/ScalingImpl.cpp
index c2d2b17245811ab20de310e4656bc67b1a28b257..6b9aab31a9d61d2d7a5ff89961de3fa6a2b5ebd2 100644
--- a/src/operator/ScalingImpl.cpp
+++ b/src/operator/ScalingImpl.cpp
@@ -18,6 +18,7 @@
 #include "aidge/backend/cpu/operator/ScalingImpl.hpp"
 #include "aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <vector>
 
 Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
@@ -36,6 +37,6 @@ void Aidge::ScalingImpl_cpu::forward() {
     // Call kernel
     kernelFunc(dynamic_cast<const Scaling_Op&>(mOp).getStaticAttributes(),
         std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/SliceImpl.cpp b/src/operator/SliceImpl.cpp
index 3ae56e1a4f613a4188dc51659853e07674e74768..b60bbe60188f416f28ff2562875dce6e5ee15bd5 100644
--- a/src/operator/SliceImpl.cpp
+++ b/src/operator/SliceImpl.cpp
@@ -22,231 +22,55 @@
 #include <cassert>
 #include <tuple>
 
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<1>::getNbRequiredData(const Aidge::IOIndex_t /*inputIdx*/) const {
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "requires valid input");
-
-    // Requires the whole tensors
-    return std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<1>()[0];
-}
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<1>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { return 0; }
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<1>::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                               const std::vector<Aidge::DimSize_t>& inputsSize) const {
-    (void)outputIdx;
-    (void)inputsSize;
-    return std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->template dims<1>()[0];
-}
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<1>::getNbConsumedData(const Aidge::IOIndex_t /*inputIdx*/) const {
-    return mNbConsumedData[0];
-}
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<1>::getNbProducedData(const Aidge::IOIndex_t /*outputIdx*/) const {
-    return mNbProducedData[0];
-}
-
-void Aidge::SliceImpl_cpu<1>::updateConsummerProducer() {
-    // each input is consumed by the minimum amount for a forward pass
-    mNbConsumedData[0] += getNbRequiredData(0);
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
-}
-
-void Aidge::SliceImpl_cpu<1>::forward() {
-    // FIXME: uncomment the following code once memory handling will work
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
-
-    // Find the correct kernel type
-    auto kernelFunc = Registrar<SliceImplForward_cpu<1>>::create(
-            {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType()});
-
-    // Call kernel
-    kernelFunc(dynamic_cast<const Slice_Op<1>&>(mOp).getStaticAttributes(),
-                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<1>(),
-                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-                std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()
-            );
-
-    // each input is consumed by the minimum amount for a forward pass
-    mNbConsumedData[0] += getNbRequiredData(0);
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
-}
-
-void Aidge::SliceImpl_cpu<1>::backward() { printf("Not implemented yet.\n"); }
-
-/////////////////////////////////////////////////////////////////////////
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<2>::getNbRequiredData(const Aidge::IOIndex_t /*inputIdx*/) const {
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "requires valid input");
-
-    // Requires the whole tensors
-    const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<2>();
-    return inputDims[0]*inputDims[1];
-}
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<2>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { return 0; }
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<2>::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                               const std::vector<Aidge::DimSize_t>& inputsSize) const {
-    (void)outputIdx;
-    (void)inputsSize;
-    const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->template dims<2>();
-    return outputDims[0]*outputDims[1];
-}
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<2>::getNbConsumedData(const Aidge::IOIndex_t /*inputIdx*/) const {
-    return mNbConsumedData[0];
-}
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<2>::getNbProducedData(const Aidge::IOIndex_t /*outputIdx*/) const {
-    return mNbProducedData[0];
-}
-
-void Aidge::SliceImpl_cpu<2>::updateConsummerProducer() {
-    // each input is consumed by the minimum amount for a forward pass
-    mNbConsumedData[0] += getNbRequiredData(0);
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
-}
-
-void Aidge::SliceImpl_cpu<2>::forward() {
-    // FIXME: uncomment the following code once memory handling will work
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
-
-    // Find the correct kernel type
-    auto kernelFunc = Registrar<SliceImplForward_cpu<2>>::create(
-            {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType()});
-
-    // Call kernel
-    kernelFunc(dynamic_cast<const Slice_Op<2>&>(mOp).getStaticAttributes(),
-                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<2>(),
-                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-                std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()
-            );
-
-    // each input is consumed by the minimum amount for a forward pass
-    mNbConsumedData[0] += getNbRequiredData(0);
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
-}
-
-void Aidge::SliceImpl_cpu<2>::backward() { printf("Not implemented yet.\n"); }
-
-////////////////////////////////////////////////////////////////////////////
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<3>::getNbRequiredData(const Aidge::IOIndex_t /*inputIdx*/) const {
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "requires valid input");
-
-    // Requires the whole tensors
-    const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<3>();
-
-    return std::accumulate(inputDims.begin(), inputDims.end(), static_cast<NbElts_t>(1),
-                            std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<3>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { return 0; }
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<3>::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                               const std::vector<Aidge::DimSize_t>& inputsSize) const {
-    (void)outputIdx;
-    (void)inputsSize;
-    const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->template dims<3>();
-    return std::accumulate(outputDims.begin(), outputDims.end(), static_cast<NbElts_t>(1),
-                            std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<3>::getNbConsumedData(const Aidge::IOIndex_t /*inputIdx*/) const {
-    return mNbConsumedData[0];
-}
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<3>::getNbProducedData(const Aidge::IOIndex_t /*outputIdx*/) const {
-    return mNbProducedData[0];
-}
-
-void Aidge::SliceImpl_cpu<3>::updateConsummerProducer() {
-    // each input is consumed by the minimum amount for a forward pass
-    mNbConsumedData[0] += getNbRequiredData(0);
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
-}
-
-void Aidge::SliceImpl_cpu<3>::forward() {
-    // FIXME: uncomment the following code once memory handling will work
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
-
-    // Find the correct kernel type
-    auto kernelFunc = Registrar<SliceImplForward_cpu<3>>::create(
-            {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType()});
-
-    // Call kernel
-    kernelFunc(dynamic_cast<const Slice_Op<3>&>(mOp).getStaticAttributes(),
-                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<3>(),
-                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-                std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()
-            );
-
-    // each input is consumed by the minimum amount for a forward pass
-    mNbConsumedData[0] += getNbRequiredData(0);
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
-}
-
-void Aidge::SliceImpl_cpu<3>::backward() { printf("Not implemented yet.\n"); }
-
-//////////////////////////////////////////////////////////////////////////////
-
-Aidge::NbElts_t Aidge::SliceImpl_cpu<4>::getNbRequiredData(const Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::NbElts_t Aidge::SliceImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t /*inputIdx*/) const {
     assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "requires valid input");
 
     // Requires the whole tensors
-    const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>();
+    const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims();
 
     return std::accumulate(inputDims.begin(), inputDims.end(), static_cast<NbElts_t>(1),
                             std::multiplies<NbElts_t>());
 }
 
-Aidge::NbElts_t Aidge::SliceImpl_cpu<4>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { return 0; }
+Aidge::NbElts_t Aidge::SliceImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { return 0; }
 
-Aidge::NbElts_t Aidge::SliceImpl_cpu<4>::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                               const std::vector<Aidge::DimSize_t>& inputsSize) const {
+Aidge::NbElts_t Aidge::SliceImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                            const std::vector<Aidge::DimSize_t>& inputsSize) const {
     (void)outputIdx;
     (void)inputsSize;
-    const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->template dims<4>();
+    const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims();
     return std::accumulate(outputDims.begin(), outputDims.end(), static_cast<NbElts_t>(1),
                             std::multiplies<NbElts_t>());
 }
 
-Aidge::NbElts_t Aidge::SliceImpl_cpu<4>::getNbConsumedData(const Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::NbElts_t Aidge::SliceImpl_cpu::getNbConsumedData(const Aidge::IOIndex_t /*inputIdx*/) const {
     return mNbConsumedData[0];
 }
 
-Aidge::NbElts_t Aidge::SliceImpl_cpu<4>::getNbProducedData(const Aidge::IOIndex_t /*outputIdx*/) const {
+Aidge::NbElts_t Aidge::SliceImpl_cpu::getNbProducedData(const Aidge::IOIndex_t /*outputIdx*/) const {
     return mNbProducedData[0];
 }
 
-void Aidge::SliceImpl_cpu<4>::updateConsummerProducer() {
+void Aidge::SliceImpl_cpu::updateConsummerProducer() {
     // each input is consumed by the minimum amount for a forward pass
     mNbConsumedData[0] += getNbRequiredData(0);
 
     mNbProducedData[0] += getRequiredMemory(0, {});
 }
 
-void Aidge::SliceImpl_cpu<4>::forward() {
+void Aidge::SliceImpl_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<SliceImplForward_cpu<4>>::create(
+    auto kernelFunc = Registrar<SliceImplForward_cpu>::create(
             {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType()});
 
     // Call kernel
-    kernelFunc(dynamic_cast<const Slice_Op<4>&>(mOp).getStaticAttributes(),
-                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
-                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-                std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()
+    kernelFunc(dynamic_cast<const Slice_Op&>(mOp).getStaticAttributes(),
+            std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+            std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+            std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()
             );
 
     // each input is consumed by the minimum amount for a forward pass
@@ -255,4 +79,4 @@ void Aidge::SliceImpl_cpu<4>::forward() {
     mNbProducedData[0] += getRequiredMemory(0, {});
 }
 
-void Aidge::SliceImpl_cpu<4>::backward() { printf("Not implemented yet.\n"); }
\ No newline at end of file
+void Aidge::SliceImpl_cpu::backward() { printf("Not implemented yet.\n"); }
\ No newline at end of file
diff --git a/src/operator/SoftmaxImpl.cpp b/src/operator/SoftmaxImpl.cpp
index 1ce9564a19e9dc4b992705d3604a820065b1f725..428d32fc7a4c1a2b639d4f78601c78ab41376b47 100644
--- a/src/operator/SoftmaxImpl.cpp
+++ b/src/operator/SoftmaxImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Softmax.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 #include "aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp"
@@ -42,6 +43,6 @@ void Aidge::SoftmaxImpl_cpu::forward() {
     kernelFunc(batchSize,
                channelSize,
                featureSize,
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+               getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/SqrtImpl.cpp b/src/operator/SqrtImpl.cpp
index b9a01ff9146921b55dede775e1e3f61deed21a8e..2766e8ae21738775aadad86629a99d0a180e537e 100644
--- a/src/operator/SqrtImpl.cpp
+++ b/src/operator/SqrtImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/SqrtImpl.hpp"
 #include "aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp"
@@ -36,6 +37,6 @@ void Aidge::SqrtImpl_cpu::forward() {
 
     // Call kernel
     kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
\ No newline at end of file
diff --git a/src/operator/SubImpl.cpp b/src/operator/SubImpl.cpp
index 7d33d935f8aed4eef741ef81e316f387ad676abe..038a1154182ea8f359cf1b485c3de251ffbbaed5 100644
--- a/src/operator/SubImpl.cpp
+++ b/src/operator/SubImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Sub.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
 #include "aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp"
@@ -37,7 +38,7 @@ void Aidge::SubImpl_cpu::forward() {
     // Call kernel
     kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
         std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawInput(1)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }