diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index b93e0c044c492ac3b48607c6976871293537a0f0..d03ff86ae0a927ec0bf7054143030c490c4b0a80 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -44,8 +44,8 @@ public:
         mvDimensions(i_Dimensions)
     {
         assert(
-            mvDimensions.size() == mvFirstDataCoordinates.size()
-            && "Tensors origin coordinates and dimensions must have the same size");
+            detail::IsAreaValid(i_FirstDataCoordinates, i_Dimensions)
+            && "Tensors requested area is invalid");
         mScalarSize = detail::sizeOf(mDataType);
         computeLayout();
     };
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 31786cfe7afdc9ab3b5a3d36f196c16327be13fc..0967863c86c2c86c2953a5f7a8dd07d4a95dbc77 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -52,6 +52,38 @@ struct SImplDeleter
 };
 using ImplPtr_t = std::unique_ptr<TensorImpl, SImplDeleter>;
 } // namespace pimpl
+
+/// @brief Check if a valid area is included in another one.
+/// @param i_SubFirstDataLogicalCoordinates Logical coordinates of the first data of the
+/// area tested for inclusion (the point whose all coordinates are minimal).
+/// @param i_SubDimensions Dimensions of the area tested for inclusion.
+/// @param i_FirstDataLogicalCoordinates Logical coordinates of the first data of the
+/// possibly including area (the point whose all coordinates are minimal).
+/// @param i_Dimensions Dimensions of the possibly including area.
+/// @details Both input vector must have the same sizes.<br>
+/// This size must be within Tensor limits.<br>
+/// All values must be within Tensors limits.
+/// @return true if sub-area is included inside the other one, false otherwise.
+/// @sa Coord_t
+bool IsSubTensorIncluded(
+    std::vector<Coord_t> const &i_SubFirstDataLogicalCoordinates,
+    std::vector<DimSize_t> const &i_SubDimensions,
+    std::vector<Coord_t> const &i_FirstDataLogicalCoordinates,
+    std::vector<DimSize_t> const &i_Dimensions);
+
+/// @brief Check if a given area is valid.
+/// @param i_FirstDataLogicalCoordinates Logical coordinates of the first data of the area
+/// (the point whose all coordinates are minimal).
+/// @param i_Dimensions Dimensions of the area.
+/// @details Both input vector must have the same sizes.<br>
+/// This size must be within Tensor limits.<br>
+/// All values must be within Tensors limits.
+/// @return true for valid area, false otherwise.
+/// @sa Coord_t
+bool IsAreaValid(
+    std::vector<Coord_t> const &i_FirstDataLogicalCoordinates,
+    std::vector<DimSize_t> const &i_Dimensions);
+
 } // namespace detail
 
 /**
@@ -121,6 +153,23 @@ public:
      */
     Tensor(const Tensor &otherTensor);
 
+    /**
+     * @brief Construct a new Tensor object whose active area is a sub-area of a
+     * larger one.
+     * @param otherTensor Tensor from which a sub-area is to be referenced or copied
+     * @param i_FirstDataLogicalCoordinates Logical coordinates of the first data of the
+     * area (the point whose all coordinates are minimal).
+     * @param i_Dimensions Dimensions of the area.
+     * @details So far, the new Tensor is sharing its data with the source one: modifying
+     * the new tensor will modify also data in the source tensor as they share the same
+     * storage.
+     * @warning The behaviour is undefined if source Tensor has no implementation
+     */
+    Tensor(
+        const Tensor &otherTensor,
+        std::vector<Coord_t> const &i_FirstDataLogicalCoordinates,
+        std::vector<DimSize_t> const &i_Dimensions);
+
     /**
      * @brief Construct a new Tensor object from the 1-dimension Array helper.
      * @tparam T datatype
@@ -396,6 +445,15 @@ public:
         return mDims;
     }
 
+    /// @brief Gets the logical coordinates of the first data stored of the Tensor active
+    /// area (the point whose all coordinates are minimal).
+    /// @sa Coord_t.
+    /// @returns Logical coordinates of first data in the active area (lexical order).
+    inline std::vector<Coord_t> const &getFirstDataCoordinates() const noexcept
+    {
+        return mvActiveAreaOrigin;
+    };
+
     /**
      * @brief Get the number of elements in the Tensor object.
      * @return constexpr NbElts_t
@@ -425,7 +483,7 @@ public:
     void resize(const std::array<DimSize_t, DIM> &dims)
     {
         static_assert(
-            DIM <= MaxDim,
+            DIM <= kMaxDim,
             "Too many tensor dimensions required by resize, not supported");
         mDims.assign(dims.begin(), dims.end());
         resize(mDims);
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 60247a1a301b2b15ac777325b752caa2f0c7fc5e..64c7f4faad8566b325663a3b602b6fc82417ebf6 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -221,7 +221,7 @@ inline std::shared_ptr<Node> AvgPooling(
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1))
 {
     static_assert(
-        DIM <= MaxDim,
+        DIM <= kMaxDim,
         "Too many kernel dimensions required by AvgPooling, not supported");
     return std::make_shared<Node>(
         std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(
@@ -238,7 +238,7 @@ inline std::shared_ptr<Node> AvgPooling(
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1))
 {
     static_assert(
-        DIM <= MaxDim,
+        DIM <= kMaxDim,
         "Too many kernel dimensions required by AvgPooling, not supported");
     return AvgPooling(to_array(kernel_dims), name, stride_dims);
 }
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index b0d200e482aeba246ec4072ebb2fb53d09446125..04fff71152f9f80d2a0ad96191bac28dcff350ee 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -220,7 +220,8 @@ inline std::shared_ptr<Node> BatchNorm(
     const std::string &name = "")
 {
     static_assert(
-        DIM <= MaxDim, "Too many kernel dimensions required by BatchNorm, not supported");
+        DIM <= kMaxDim,
+        "Too many kernel dimensions required by BatchNorm, not supported");
     auto batchNorm = std::make_shared<Node>(
         std::make_shared<BatchNorm_Op<static_cast<DimIdx_t>(DIM)>>(epsilon, momentum),
         name);
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 96f68f490c9738ee41a5fe0c6eeead855087e65b..8dc9268af0f230b3253291f435172917dc182ea7 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -260,7 +260,7 @@ inline std::shared_ptr<Node> Conv(
 {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(
-        DIM <= MaxDim, "Too many kernel dimensions required by Conv, not supported");
+        DIM <= kMaxDim, "Too many kernel dimensions required by Conv, not supported");
     auto conv = std::make_shared<Node>(
         std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(
             in_channels, out_channels, kernel_dims, stride_dims, dilation_dims),
@@ -283,7 +283,7 @@ inline std::shared_ptr<Node> Conv(
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t, DIM>(1))
 {
     static_assert(
-        DIM <= MaxDim, "Too many kernel dimensions required by Conv, not supported");
+        DIM <= kMaxDim, "Too many kernel dimensions required by Conv, not supported");
     return Conv(
         in_channels,
         out_channels,
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 265e36b6f0a72ed27decea30629f8ded0efc681d..e0a01b939427fee43e15389f4d961a0dfc4c1f0b 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -249,7 +249,7 @@ inline std::shared_ptr<Node> ConvDepthWise(
 {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(
-        DIM <= MaxDim,
+        DIM <= kMaxDim,
         "Too many kernel dimensions required by ConvDepthWise, not supported");
     auto convDW = std::make_shared<Node>(
         std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(
@@ -270,7 +270,7 @@ inline std::shared_ptr<Node> ConvDepthWise(
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t, DIM>(1))
 {
     static_assert(
-        DIM <= MaxDim,
+        DIM <= kMaxDim,
         "Too many kernel dimensions required by ConvDepthWise, not supported");
     return ConvDepthWise(to_array(kernel_dims), name, stride_dims, dilation_dims);
 }
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 7d2133f98c18a0af0b9948073195cde9287733f1..01df76eac059ee7d81d2f372f8391823c2795185 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -106,7 +106,7 @@ public:
         if (inputIdx == 0 && mInputs[0]->nbDims() == 1)
         {
             assert(
-                (mInputs[inputIdx]->size() <= MaxDimSize)
+                (mInputs[inputIdx]->size() <= kMaxDimSize)
                 && "Too many elements in the tensor to vectorize it");
             mInputs[inputIdx]->resize(std::array<DimSize_t, 2>(
                 {1, static_cast<DimSize_t>(mInputs[inputIdx]->size())}));
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 0678dddac7136e8792c365ee0e542c6d507b084b..b8708b03e419bf98e6e07b61e83d7734a0634332 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -222,7 +222,7 @@ inline std::shared_ptr<Node> MaxPooling(
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1))
 {
     static_assert(
-        DIM <= MaxDim,
+        DIM <= kMaxDim,
         "Too many kernel dimensions required by MaxPooling, not supported");
     return std::make_shared<Node>(
         std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(
@@ -239,7 +239,7 @@ inline std::shared_ptr<Node> MaxPooling(
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1))
 {
     static_assert(
-        DIM <= MaxDim,
+        DIM <= kMaxDim,
         "Too many kernel dimensions required by MaxPooling, not supported");
     return MaxPooling(to_array(kernel_dims), name, stride_dims);
 }
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index cbebb16e1e24501b0ea371fb45211047f6e2b5e7..73240b541504225282a36ea9e96b9594281bcfe5 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -13,29 +13,46 @@
 #define AIDGE_CORE_OPERATOR_PAD_H_
 
 #include <array>
+#include <cmath>
 #include <numeric>
 #include <vector>
-#include <cmath>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
-enum class PadBorderType { Constant, Edge, Reflect, Wrap };
+namespace Aidge
+{
+enum class PadAttr
+{
+    BeginEndBorders,
+    BorderType,
+    BorderValue
+};
+enum class PadBorderType
+{
+    Constant,
+    Edge,
+    Reflect,
+    Wrap
+};
 
-template <DimIdx_t DIM>
+template<DimIdx_t DIM>
 class Pad_Op : public Operator,
-                public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
-                public StaticAttributes<PadAttr,
-                                       std::array<DimSize_t, 2*DIM>,
-                                       PadBorderType,
-                                       double> {
+               public Registrable<
+                   Pad_Op<DIM>,
+                   std::string,
+                   std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
+               public StaticAttributes<
+                   PadAttr,
+                   std::array<DimSize_t, 2 * DIM>,
+                   PadBorderType,
+                   double>
+{
 private:
     // FIXME: change accessibility
     std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -46,31 +63,33 @@ public:
 
     Pad_Op() = delete;
 
-    using Attributes_ = StaticAttributes<PadAttr,
-                                             std::array<DimSize_t, 2*DIM>,
-                                             PadBorderType,
-                                             double>;
-    template <PadAttr e>
-    using attr = typename Attributes_::template attr<e>;
-
-    constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
-                     const PadBorderType &borderType = PadBorderType::Constant,
-                     double borderValue = 0.0)
-        : Operator(Type),
-          Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples),
-                           attr<PadAttr::BorderType>(borderType),
-                           attr<PadAttr::BorderValue>(borderValue)) {
+    using Attributes_ = StaticAttributes<
+        PadAttr,
+        std::array<DimSize_t, 2 * DIM>,
+        PadBorderType,
+        double>;
+    template<PadAttr e> using attr = typename Attributes_::template attr<e>;
+
+    constexpr Pad_Op(
+        const std::array<DimSize_t, 2 * DIM> &beginEndTuples,
+        const PadBorderType &borderType = PadBorderType::Constant,
+        double borderValue = 0.0) :
+        Operator(Type),
+        Attributes_(
+            attr<PadAttr::BeginEndBorders>(beginEndTuples),
+            attr<PadAttr::BorderType>(borderType),
+            attr<PadAttr::BorderValue>(borderValue))
+    {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but
+     * not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Pad_Op(const Pad_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    Pad_Op(const Pad_Op &op) :
+        Operator(Type), Attributes_(op), mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
@@ -80,26 +99,35 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Pad_Op
      */
-    std::shared_ptr<Operator> clone() const override {
+    std::shared_ptr<Operator> clone() const override
+    {
         return std::make_shared<Pad_Op<DIM>>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void
+    associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final
+    {
         assert(inputIdx < 1 && "operators supports only 3 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+        (void)inputIdx; // avoid unused warning
+        assert(
+            strcmp(data->type(), Tensor::Type) == 0
+            && "input data must be of Tensor type");
 
         mInput = std::dynamic_pointer_cast<Tensor>(data);
     }
 
-    void computeOutputDims() override final {
-        if (!mInput->empty()) {
+    void computeOutputDims() override final
+    {
+        if (!mInput->empty())
+        {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
-            for (std::size_t dim = 0; dim < DIM; ++dim) {
-                outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
-                                    + mInput->dims()[dim+2]
-                                    + this->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
+            for (std::size_t dim = 0; dim < DIM; ++dim)
+            {
+                outputDims[dim + 2]
+                    = this->template getAttr<PadAttr::BeginEndBorders>()[2 * dim]
+                      + mInput->dims()[dim + 2]
+                      + this->template getAttr<PadAttr::BeginEndBorders>()[2 * dim + 1];
             }
             outputDims[1] = mInput->dims()[1];
             outputDims[0] = mInput->dims()[0];
@@ -107,42 +135,51 @@ public:
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
+    bool outputDimsForwarded() const override final
+    {
+        return !(mOutput->empty());
+    }
 
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+    inline Tensor &input(const IOIndex_t inputIdx) const override final
+    {
         assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
+        (void)inputIdx; // avoid unused warning
         return *(mInput.get());
     }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
+    inline Tensor &output(const IOIndex_t /*outputIdx*/) const override final
+    {
+        return *(mOutput.get());
+    }
 
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final
+    {
         assert(inputIdx == 0 && "Pad Operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
+        (void)inputIdx; // avoid unused warning
         return mInput;
     }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor>
+    getOutput(const IOIndex_t outputIdx) const override final
+    {
         assert(outputIdx == 0 && "Pad Operators has only 1 outputs");
-        (void) outputIdx; // avoid unused warning
+        (void)outputIdx; // avoid unused warning
         return mOutput;
     }
 
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final
+    {
         assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
+        (void)inputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mInput);
     }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final
+    {
         assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
+        (void)outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
 
-
-    void setBackend(const std::string &name) override {
+    void setBackend(const std::string &name) override
+    {
         mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
         mOutput->setBackend(name);
 
@@ -150,52 +187,73 @@ public:
         mInput->setBackend(name);
     }
 
-    void setDatatype(const DataType &datatype) override {
+    void setDatatype(const DataType &datatype) override
+    {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
         mInput->setDatatype(datatype);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
-    static const std::vector<std::string> getInputsName(){
+    inline IOIndex_t nbInputs() const noexcept override final
+    {
+        return 1;
+    }
+    inline IOIndex_t nbDataInputs() const noexcept override final
+    {
+        return 1;
+    }
+    inline IOIndex_t nbOutputs() const noexcept override final
+    {
+        return 1;
+    }
+    static const std::vector<std::string> getInputsName()
+    {
         return {"data_input"};
     }
-    static const std::vector<std::string> getOutputsName(){
+    static const std::vector<std::string> getOutputsName()
+    {
         return {"data_output"};
     }
 };
 
-template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
-                                           const std::string& name = "",
-                                           const PadBorderType &borderType = PadBorderType::Constant,
-                                           double borderValue = 0.0)
+template<std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node>
+Pad(const std::array<DimSize_t, 2 * DIM> &beginEndTuples,
+    const std::string &name = "",
+    const PadBorderType &borderType = PadBorderType::Constant,
+    double borderValue = 0.0)
 {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
-    return std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
+    static_assert(
+        DIM <= kMaxDim, "Too many kernel dimensions required by Pad, not supported");
+    return std::make_shared<Node>(
+        std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(
+            beginEndTuples, borderType, borderValue),
+        name);
 }
 
-// helper with C-style array instead of std::array for beginEndTuples to allow automatic template DIM deduction
-template <DimSize_t DIM>
-inline std::shared_ptr<Node> Pad(
-    DimSize_t const (&beginEndTuples)[2*DIM],
-    const std::string& name = "",
+// helper with C-style array instead of std::array for beginEndTuples to allow automatic
+// template DIM deduction
+template<DimSize_t DIM>
+inline std::shared_ptr<Node>
+Pad(DimSize_t const (&beginEndTuples)[2 * DIM],
+    const std::string &name = "",
     const PadBorderType &borderType = PadBorderType::Constant,
     double borderValue = 0.0)
 {
     return Pad<DIM>(to_array(beginEndTuples), name, borderType, borderValue);
 }
-}  // namespace Aidge
-
-namespace {
-template <>
-const char *const EnumStrings<Aidge::PadAttr>::data[] = {"BeginEndBorders", "BorderType", "BorderValue"};
+} // namespace Aidge
 
-template <>
-const char *const EnumStrings<Aidge::PadBorderType>::data[] = {"Constant", "Edge", "Reflect", "Wrap"};
-}
+namespace
+{
+template<>
+const char *const EnumStrings<Aidge::PadAttr>::data[]
+    = {"BeginEndBorders", "BorderType", "BorderValue"};
+
+template<>
+const char *const EnumStrings<Aidge::PadBorderType>::data[]
+    = {"Constant", "Edge", "Reflect", "Wrap"};
+} // namespace
 
 #endif /* AIDGE_CORE_OPERATOR_PAD_H_ */
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 4c3bd9dd6f31c9b95f20662ae97227a25db14bd8..1b1ca824f32359e833da30cae7be9d93d9fc4829 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -189,7 +189,7 @@ inline std::shared_ptr<Node>
 Producer(const std::array<DimSize_t, DIM>& dims, const std::string& name = "")
 {
     static_assert(
-        DIM <= MaxDim, "Too many tensor dimensions required by Producer, not supported");
+        DIM <= kMaxDim, "Too many tensor dimensions required by Producer, not supported");
     return std::make_shared<Node>(std::make_shared<Producer_Op>(dims), name);
 }
 
@@ -217,7 +217,7 @@ void addProducer(
 {
     assert(inputIdx != gk_IODefaultIndex);
     static_assert(
-        DIM <= MaxDim,
+        DIM <= kMaxDim,
         "Too many tensor dimensions required by addProducer, not supported");
     const std::string prodName = (otherNode->name().empty()) ?
                                      "" :
diff --git a/include/aidge/utils/Types.h b/include/aidge/utils/Types.h
index 0be5b44a2cb44eb9f285bcc23092911ec29ebac8..20eccdda7c2fd66f496e4c9542fcb58a30f76045 100644
--- a/include/aidge/utils/Types.h
+++ b/include/aidge/utils/Types.h
@@ -62,17 +62,17 @@ using DimIdx_t = std::uint8_t;
 // Useful constants
 
 /// @brief Maximal number of elements used for scheduling
-constexpr NbElts_t MaxElts = std::numeric_limits<NbElts_t>::max();
+constexpr NbElts_t kMaxElts = std::numeric_limits<NbElts_t>::max();
 /// @brief Maximal image dimension (width and height).
-constexpr DimSize_t MaxDimSize = std::numeric_limits<DimSize_t>::max();
+constexpr DimSize_t kMaxDimSize = std::numeric_limits<DimSize_t>::max();
 /// @brief Maximal number of tensor dimensions.
-constexpr DimIdx_t MaxDim = std::numeric_limits<DimIdx_t>::max();
+constexpr DimIdx_t kMaxDim = std::numeric_limits<DimIdx_t>::max();
 /// @brief Maximal data coordinates (when handling edge effects).
 /// @note Necessarily positive.
-constexpr Coord_t MaxCoord = std::numeric_limits<Coord_t>::max();
+constexpr Coord_t kMaxCoord = std::numeric_limits<Coord_t>::max();
 /// @brief Maximal data offset.
 /// @note Necessarily positive.
-constexpr Coord_t kMaxOffset = MaxCoord - MaxDimSize + 1;
+constexpr Coord_t kMaxOffset = kMaxCoord - kMaxDimSize + 1;
 /// @brief Minimal data offset.
 /// @\note Necessarily negative.
 constexpr Coord_t kMinOffset = -kMaxOffset;
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 68c6d739a506eb39853734678643857dd24fb81c..729a237f86a11f4ed33043ce33c7052f072277df 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -20,6 +20,59 @@ void detail::pimpl::DeleteImpl(TensorImpl *pImpl) noexcept
     delete pImpl;
 }
 
+/// @brief Check if a valid area is included in another one.
+bool detail::IsSubTensorIncluded(
+    std::vector<Coord_t> const &i_SubFirstDataLogicalCoordinates,
+    std::vector<DimSize_t> const &i_SubDimensions,
+    std::vector<Coord_t> const &i_FirstDataLogicalCoordinates,
+    std::vector<DimSize_t> const &i_Dimensions)
+{
+    for (std::size_t i = 0; i < i_Dimensions.size(); ++i)
+    {
+        if ((i_SubFirstDataLogicalCoordinates[i] < i_FirstDataLogicalCoordinates[i])
+            || ((i_SubFirstDataLogicalCoordinates[i] + i_SubDimensions[i])
+                > (i_FirstDataLogicalCoordinates[i] + i_Dimensions[i])))
+        {
+            return false;
+        }
+    }
+    return true;
+}
+
+/// @brief Check if a given area is valid
+bool detail::IsAreaValid(
+    std::vector<Coord_t> const &i_FirstDataLogicalCoordinates,
+    std::vector<DimSize_t> const &i_Dimensions)
+{
+    if (i_FirstDataLogicalCoordinates.size() != i_Dimensions.size())
+    {
+        return false;
+    }
+    if (i_Dimensions.size() > kMaxDim)
+    {
+        return false;
+    }
+    NbElts_t N = 1;
+    for (std::size_t i = 0; i < i_Dimensions.size(); ++i)
+    {
+        if ((i_FirstDataLogicalCoordinates[i] < kMinCoord)
+            || (i_FirstDataLogicalCoordinates[i] > kMaxCoord))
+        {
+            return false;
+        }
+        if (i_Dimensions[i] > kMaxDimSize)
+        {
+            return false;
+        }
+        if (i_Dimensions[i] != 0 && N > kMaxElts / i_Dimensions[i])
+        {
+            return false;
+        }
+        N = N * i_Dimensions[i];
+    }
+    return true;
+}
+
 /// @brief Assess data type, dimensions, backend and data are the same.
 bool Tensor::operator==(const Tensor &otherTensor) const noexcept
 {
@@ -158,6 +211,7 @@ std::shared_ptr<Tensor> Tensor::grad()
 Tensor::Tensor(const Tensor &otherTensor) :
     Data(Type),
     mDataType(otherTensor.mDataType),
+    mvActiveAreaOrigin(otherTensor.mvActiveAreaOrigin),
     mDims(otherTensor.mDims),
     mContext({}),
     mSize(otherTensor.mSize),
@@ -168,6 +222,29 @@ Tensor::Tensor(const Tensor &otherTensor) :
         mImpl = otherTensor.getImpl().Clone();
     }
 }
+/// @brief Construct a new Tensor object whose active area is a sub-area of a larger
+Tensor::Tensor(
+    const Tensor &otherTensor,
+    std::vector<Coord_t> const &i_FirstDataLogicalCoordinates,
+    std::vector<DimSize_t> const &i_Dimensions) :
+    Data(Type),
+    mDataType(otherTensor.mDataType),
+    mvActiveAreaOrigin(i_FirstDataLogicalCoordinates),
+    mDims(i_Dimensions),
+    mContext({})
+{
+    assert(otherTensor.hasImpl() && "Trying to make a view inside an incomplete Tensor");
+    assert(
+        detail::IsAreaValid(mvActiveAreaOrigin, mDims)
+        && "Requested active area is invalid");
+    assert(
+        detail::IsSubTensorIncluded(
+            mvActiveAreaOrigin, mDims, otherTensor.mvActiveAreaOrigin, otherTensor.mDims)
+        && "Requested active area is not available inside source Tensor");
+    computeSize();
+    // TODO: actually share otherTensor's storage — mImpl is never assigned here,
+    // so the data-sharing view promised in the constructor docs is not yet wired.
+}
 
 void Tensor::resize(const std::vector<DimSize_t> &dims)
 {