diff --git a/CMakeLists.txt b/CMakeLists.txt
index e078a7d89f1be6a7875415083c3bc29f1e9e84d9..beec9fbb427afadccef156139bd277d743e6999f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -14,6 +14,9 @@ add_definitions(-DPROJECT_VERSION="${version}")
 message(STATUS "Project name: ${CMAKE_PROJECT_NAME}")
 message(STATUS "Project version: ${version}")
 
+# Helper for LSP users: export compile_commands.json
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
+
 # Note : project name is {project} and python module name is also {project}
 set(module_name _${CMAKE_PROJECT_NAME}) # target name
 set(pybind_module_name ${CMAKE_PROJECT_NAME}) # name of submodule for python bindings
@@ -26,6 +29,7 @@ option(TEST "Enable tests" ON)
 option(COVERAGE "Enable coverage" OFF)
 option(ENABLE_ASAN "Enable ASan (AddressSanitizer) for runtime analysis of memory use (over/underflow, memory leak, ...)" OFF)
 
+
 ##############################################
 # Import utils CMakeLists
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
diff --git a/include/aidge/data/Interpolation.hpp b/include/aidge/data/Interpolation.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..2d53ebdd0dd5141acc9a3bce8e906f42f7a557a2
--- /dev/null
+++ b/include/aidge/data/Interpolation.hpp
@@ -0,0 +1,151 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_INTERPOLATION_H_
+#define AIDGE_CORE_UTILS_INTERPOLATION_H_
+
+#include <cstdint>  // std::int64_t
+#include <set>      // std::set
+#include <utility>  // std::pair
+#include <vector>
+
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+/** @brief Generic class gathering interpolation utilities. */
+class Interpolation {
+  public:
+    /**
+     * @brief Simple type alias describing the coordinates of a point.
+     * @note The indices are deliberately signed, as some points retrieved
+     * during interpolation are out of bounds, hence their coordinates
+     * can be < 0.
+     */
+    using Coords = std::vector<std::int64_t>;
+    /**
+     * @brief Type alias designating a point of any type: its coordinates and
+     * the associated value.
+     */
+    template <class T> using Point = std::pair<Coords, T>;
+
+    /**
+     * @brief Describes how coordinates are transformed from the interpolated
+     * tensor to the original tensor.
+     */
+    enum CoordinateTransformation {
+        HalfPixel,
+        HalfPixelSymmetric,
+        PytorchHalfPixel,
+        AlignCorners,
+        Asymmetric,
+    };
+
+    /**
+     * @brief Apply the transformation to coordinates in the interpolated
+     * tensor to find the equivalent coordinates in the original tensor's
+     * reference frame.
+     * @warning It is assumed that all parameters have the same
+     * number of dimensions.
+     * @param[in] transformedCoords coordinates in the interpolated tensor
+     * @param[in] inputDims input dimensions of the tensor
+     * @param[in] outputDims output dimensions of the tensor
+     * @param[in] coordTransfoMode coordinate transformation mode to apply
+     * @return std::vector containing coordinates in the original tensor's reference frame
+     */
+    static std::vector<float> untransformCoordinates(
+        const std::vector<DimSize_t> &transformedCoords,
+        const std::vector<DimSize_t> &inputDims,
+        const std::vector<DimSize_t> &outputDims,
+        const Interpolation::CoordinateTransformation coordTransfoMode);
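+
+    /*
+     * Illustrative usage sketch (not part of the API contract). Values assume
+     * the HalfPixel convention implemented in Interpolation.cpp:
+     * x_original = (x_resized + 0.5) / scale - 0.5, with scale = outDim / inDim.
+     * @code
+     * // upscaling a 2x2 spatial input to 4x4
+     * std::vector<float> coords = Interpolation::untransformCoordinates(
+     *     {0, 0, 1, 1},   // coordinates in the interpolated (output) tensor
+     *     {1, 1, 2, 2},   // input dims
+     *     {1, 1, 4, 4},   // output dims
+     *     Interpolation::CoordinateTransformation::HalfPixel);
+     * // coords == {0.f, 0.f, 0.25f, 0.25f}
+     * @endcode
+     */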
+
+    /**
+     * @brief Retrieves the neighbouring values of a given coordinate.
+     * @param[in] tensorValues raw pointer to the tensor values,
+     * retrieved with
+     * @code
+     * tensor->getImpl()->rawPtr()
+     * @endcode
+     * @param[in] tensorDims dimensions of the given tensor,
+     * retrieved with
+     * @code
+     * tensor->dims()
+     * @endcode
+     * @param[in] coords coordinates in the tensor of the value whose
+     * neighbours we want to find.
+     * @param[in] paddingMode how out-of-bound neighbours are handled.
+     * @return std::set<Point<T>> containing both the coordinates of the
+     * neighbours and their values
+     */
+    template <typename T>
+    static std::set<Point<T>>
+    retrieveNeighbours(const T *tensorValues,
+                       const std::vector<DimSize_t> &tensorDims,
+                       const std::vector<float> &coords,
+                       const PadBorderType paddingMode = PadBorderType::Zero);
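+
+    /*
+     * Illustrative usage sketch (assumes a contiguous float tensor `tensor`):
+     * @code
+     * const float *raw =
+     *     static_cast<const float *>(tensor->getImpl()->rawPtr());
+     * auto neighbours = Interpolation::retrieveNeighbours<float>(
+     *     raw,
+     *     tensor->dims(),                 // e.g. {1, 1, 2, 2}
+     *     {0.0f, 0.0f, 0.25f, 0.25f});    // coords from untransformCoordinates()
+     * // neighbours holds the floor/ceil combinations of each coordinate,
+     * // here {0,0,0,0}, {0,0,0,1}, {0,0,1,0} and {0,0,1,1}, paired with the
+     * // corresponding tensor values (default paddingMode: PadBorderType::Zero).
+     * @endcode
+     */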
+
+    /** @brief Interpolation mode. */
+    enum Mode {
+        Cubic,
+        Linear,
+        RoundPreferFloor,
+        RoundPreferCeil,
+        Floor,
+        Ceil
+    };
+
+    /**
+     * @brief Interpolates values given via input in the given mode.
+     *
+     * @warning This function only throws in aidge_core; it is meant to be
+     * overridden in derived classes in backend libraries.
+     *
+     * Values are contiguously arranged in a "square" shape around the point
+     * to interpolate, depending on the interpolation mode.
+     * The point that will be interpolated is located right in the
+     * middle of all points.
+     * Immediate neighbours :
+     * 1D interp :     2D interp :
+     *                 . . . . . .
+     * . . 1 2 . .     . . . . . .
+     *                 . . 1 2 . .
+     *                 . . 3 4 . .
+     *                 . . . . . .
+     *                 . . . . . .
+     *
+     * 2 neighbours :
+     * 1D interp :         2D interp :
+     *                   .  .  .  .  .  .  . .
+     *                   .  .  .  .  .  .  . .
+     * . . 1 2 3 4 . .   .  .  1  2  3  4  . .
+     *                   .  .  5  6  7  8  . .
+     *                   .  .  9 10 11 12  . .
+     *                   .  . 13 14 15 16  . .
+     *                   .  .  .  .  .  .  . .
+     *                   .  .  .  .  .  .  . .
+     *
+     * @param[in] originalIndex index of the point to interpolate in the
+     * original picture. Since the coordinates are transformed from the
+     * interpolated tensor's frame to the original tensor's frame, they may
+     * be non-integer (hence float).
+     * @param[in] points points to interpolate, arranged in a vector of
+     * pairs ((point_coord), value):
+     * [[[X1, X2, ..., XN], Xval], ...., [[A1, A2, ..., AN], Aval]].
+     * With:
+     * - N: the number of dimensions.
+     * - A: the number of points of the grid to interpolate.
+     * - All coordinates expressed in the original tensor's frame.
+     * @param[in] interpMode interpolation mode
+     * @return interpolated value
+     */
+    template <typename T>
+    [[noreturn]] static T interpolate(const std::vector<float> &originalIndex,
+                                      const std::vector<Point<T>> &points,
+                                      const Mode interpMode);
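+
+    /*
+     * Illustrative sketch of the expected `points` layout for a 2D linear
+     * interpolation around originalIndex = {0.5f, 0.5f} (values are
+     * placeholders):
+     * @code
+     * std::vector<Interpolation::Point<float>> points = {
+     *     {{0, 0}, 10.f}, {{0, 1}, 20.f},
+     *     {{1, 0}, 30.f}, {{1, 1}, 40.f}};
+     * // A backend implementation (e.g. InterpolationCPU::interpolate())
+     * // would return the bilinear combination of these four values;
+     * // this base declaration only throws.
+     * @endcode
+     */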
+};
+} // namespace Aidge
+
+#endif
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 8d9f77bc41b0fa5225e7201d2e4d03eb2ff72502..5c84f52e052e67ca27bfc851f510e522d485e4b7 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -12,6 +12,7 @@
 #ifndef AIDGE_CORE_DATA_TENSOR_H_
 #define AIDGE_CORE_DATA_TENSOR_H_
 
+#include <algorithm>
 #include <cstddef>      // std::size_t
 #include <cstring>
 #include <functional>   // std::multiplies
@@ -24,10 +25,10 @@
 
 #include "aidge/backend/TensorImpl.hpp"
 #include "aidge/data/Data.hpp"
-
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/utils/ArrayHelpers.hpp"
 
 namespace Aidge {
 /**
@@ -562,9 +563,9 @@ public:
 
     template <typename expectedType>
     const expectedType& get(std::size_t idx) const {
-        AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
-        AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "get() can only be used for backends providing a valid host pointer");
-        AIDGE_ASSERT(idx < mSize, "idx out of range");
+        AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "Tensor::get<>({}): wrong data type, expected {}, got {}", idx, mDataType, NativeType<expectedType>::type);
+        AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "Tensor::get<>({}): can only be used for backends providing a valid host pointer.", idx);
+        AIDGE_ASSERT(idx < mSize, "Tensor::get<>({}): idx out of range, tensor size: {}", idx, mSize);
         return *reinterpret_cast<expectedType *>(mImpl->hostPtr(mImplOffset + idx));
     }
 
@@ -621,20 +622,41 @@ public:
      * @brief From the the 1D contiguous index, return the coordinate of an element in the tensor.
      * Beware: do not use this function with the storage index!
      *
-     * @param flatIdx 1D contiguous index of the value considering a flatten, contiguous, tensor.
+     * @param index 1D contiguous index of the value, considering a flattened, contiguous tensor.
      * @return std::vector<DimSize_t>
      */
-    std::vector<std::size_t> getCoord(std::size_t flatIdx) const {
-        std::vector<std::size_t> coordIdx(mDims.size());
-        std::size_t i = mDims.size();
+    static std::vector<std::size_t>
+    toCoord(const std::vector<Aidge::DimSize_t> &dimensions, std::size_t index);
+
 
-        while (i-- > 0) {
-            coordIdx[i] = (flatIdx % mDims[i]);
-            flatIdx/=mDims[i];
+    /**
+     * @brief From the 1D contiguous index, return the coordinate of an element in the tensor.
+     * Beware: do not use this function with the storage index!
+     *
+     * @param index 1D contiguous index of the value, considering a flattened, contiguous tensor.
+     * @return std::vector<DimSize_t>
+     */
+    std::vector<std::size_t> getCoord(std::size_t index) const {
+        if (isInBounds(mDims, index)) {
+            return toCoord(mDims, index);
+        } else {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Out of bound coordinates.");
         }
-        return coordIdx;
     }
 
+    /**
+     * @brief From the coordinates, returns the 1D contiguous index of an element in the tensor.
+     * The number of coordinates must match the number of dimensions.
+     * Beware: the contiguous index will only correspond to the storage index
+     * if the tensor is contiguous!
+     *
+     * @param dimensions Dimensions of the tensor
+     * @param coords Coordinates of an element in the tensor
+     * @return std::size_t Contiguous index
+     */
+    static std::size_t toIndex(const std::vector<DimSize_t>& dimensions, const std::vector<std::size_t>& coords);
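+
+    /*
+     * Illustrative sketch of the toCoord()/toIndex() round trip (row-major,
+     * contiguous layout assumed):
+     * @code
+     * const std::vector<DimSize_t> dims{2, 3, 4};
+     * Tensor::toIndex(dims, {1, 2, 3});  // 1*12 + 2*4 + 3 = 23
+     * Tensor::toCoord(dims, 23);         // {1, 2, 3}
+     * Tensor::isInBounds(dims, 24);      // false: 24 >= 2*3*4
+     * @endcode
+     */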
+
     /**
      * @brief From the coordinate returns the 1D contiguous index of an element in the tensor.
      * If the number of coordinates is inferior to the number of dimensions,
@@ -646,18 +668,27 @@ public:
      * @param coordIdx Coordinate to an element in the tensor
      * @return DimSize_t Contiguous index
      */
-    std::size_t getIdx(const std::vector<std::size_t>& coordIdx) const {
-        AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
-        std::size_t flatIdx = 0;
-        for(std::size_t i = 0; i < mDims.size(); ++i) {
-            auto coord = i < coordIdx.size() ? coordIdx[i]: 0;
-            AIDGE_ASSERT(coord < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
-            auto nextDimSize  = i + 1 < mDims.size() ? mDims[i + 1]: 1;
-            flatIdx = (flatIdx + coord) * nextDimSize;
+    std::size_t getIdx(const std::vector<std::size_t>& coords) const {
+        if (isInBounds<std::size_t>(mDims, coords)) {
+            return toIndex(mDims, coords);
+        } else {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Out of bound coordinates.");
         }
-        return flatIdx;
     }
 
+    /**
+     * @brief Check if given coordinates are within the bounds of the given tensor dimensions.
+     * @warning This function is templated to accommodate cases like interpolation where coordinates are not integers.
+     * However, the only accepted types are floating point, integer & std::size_t.
+     * @param dimensions tensor dimensions
+     * @param coords coordinates to check
+     * @return true if all coordinates are in bounds, false otherwise
+     */
+    template<typename T>
+    static bool isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<T>& coords);
+
+    static bool isInBounds(const std::vector<DimSize_t>& dimensions, const std::size_t index);
+
     /**
      * @brief From the coordinate returns the 1D storage index of an element in the tensor.
      * If the number of coordinates is inferior to the number of dimensions,
diff --git a/include/aidge/data/half_fmt.hpp b/include/aidge/data/half_fmt.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..5e2072038c10fffd8db5f7fe93381f002f2119b1
--- /dev/null
+++ b/include/aidge/data/half_fmt.hpp
@@ -0,0 +1,18 @@
+#include "aidge/data/half.hpp"
+#include <fmt/core.h>
+
+// Specialize fmt::formatter for half_float::half
+template <>
+struct fmt::formatter<half_float::half> : fmt::formatter<float> {
+    // Parses the format specifications and stores them in the base formatter
+    template <typename ParseContext>
+    constexpr auto parse(ParseContext& ctx) {
+        return fmt::formatter<float>::parse(ctx);
+    }
+
+    // Formats the half type by first converting it to float
+    template <typename FormatContext>
+    auto format(const half_float::half& value, FormatContext& ctx) const {
+        return fmt::formatter<float>::format(static_cast<float>(value), ctx);
+    }
+};
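+
+// Minimal usage sketch (illustrative): with this specialization visible,
+// half values accept the same format specs as float, e.g.
+//   half_float::half h(1.5f);
+//   fmt::format("{:.3f}", h);  // "1.500"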
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index bc1852ec0759ffaafa015143f22b0a1c8f6c893e..181a4e88a0fe30e2a86c44adda2195d7e6f5293d 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -26,7 +26,15 @@
 
 namespace Aidge {
 enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
-enum class PadBorderType { Constant, Edge, Reflect, Wrap };
+enum class PadBorderType {
+    /** @brief All out-of-bound values will be set to a given value. */
+    Constant,
+    Edge,
+    Reflect,
+    Wrap,
+    /** @brief All out-of-bound values will be set to 0. */
+    Zero,
+};
 
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index a48b95aff2a18750d83f12a62c408ad41b20afee..c3c7838efc16a0d091f5f0422442225cef8a0ab5 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -9,60 +9,226 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CORE_OPERATOR_Resize_H_
-#define AIDGE_CORE_OPERATOR_Resize_H_
+#ifndef AIDGE_CORE_OPERATOR_RESIZE_H_
+#define AIDGE_CORE_OPERATOR_RESIZE_H_
 
 #include <memory>
 #include <string>
 #include <vector>
 
 #include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Interpolation.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Pad.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 
-class Resize_Op : public OperatorTensor,
-                  public Registrable<Resize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Resize_Op&)>>{
+/** @brief Attributes of the Resize operator. */
+enum class ResizeAttr {
+    //   antialias,
+    // axes,
+    CoordinateTransformationMode,
+    CubicCoeffA,
+    // excludeOutside,
+    //   extrapolation_value,
+    //   keep_aspect_ratio_policy,
+    InterpolationMode,
+    PaddingMode
+};
 
-public:
+/**
+ * @brief Resize operator: up/downscales an input tensor.
+ * @verbatim
+ * Output size can be computed in 2 ways :
+ * 1. Image can be rescaled proportionally to the input size :
+ *    output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)
+ * 2. Output dimensions are directly given via the size input(#4)
+ *
+ * Hence, either the Scales or the Sizes input can be defined; if both are
+ * connected, the operator will throw an error.
+ *
+ * Resize takes (up to) 4 different inputs :
+ * #1 Input to resize :
+ *   N-D tensor.
+ *
+ * #2 ROI (optional) :
+ *   1-D tensor of coordinates given as [start1, …, startN, end1, …, endN]
+ *   where N is the rank of X or the length of axes, if provided. The RoIs’
+ *   coordinates are normalized in the coordinate system of the input image.
+ *   If not set, the default ROI is the entire image.
+ * #3 scales (optional) - tensor(float):
+ *   The scale array along each dimension.
+ *    The number of elements of ‘scales’ should be the same as the rank of
+ * input ‘X’ or the length of ‘axes’, if provided. Accepted values: (0,inf)
+ *    - (0,1)   : downsampling
+ *    - 1       : identity
+ *    - (1,inf) : upsampling
+ * #4. Sizes - tensor(int64):
+ *   Target size of the output tensor.
+ *   Its interpretation depends on the ‘keep_aspect_ratio_policy’ value.
+ *   The number of elements of ‘sizes’ should be the same as either :
+ *   - The rank of input ‘X’
+ *   - The length of ‘axes’ attribute, if provided.
+ * @endverbatim
+ * @warning : Only one of ‘scales’ and ‘sizes’ can be specified.
+ * @param coordinate_transformation_mode how coordinates are mapped from the
+ * interpolated tensor to the original tensor
+ * @param cubic_coeff_a the "a" coefficient of cubic interpolation. Most often
+ * it is set to -0.75
+ * @param InterpolationMode type of interpolation (currently only cubic
+ * interpolation is supported)
+ */
+class Resize_Op
+    : public OperatorTensor,
+      public Registrable<
+          Resize_Op,
+          std::string,
+          std::function<std::shared_ptr<OperatorImpl>(const Resize_Op &)>> {
+
+  private:
+    using Attributes_ =
+        StaticAttributes<ResizeAttr,
+                         Interpolation::CoordinateTransformation,
+                         float,
+                         Interpolation::Mode,
+                         PadBorderType>;
+    template <ResizeAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+  public:
     static const std::string Type;
-
-    Resize_Op();
+    /**
+     * @brief Creates a Resize operator.
+     * This node can take 4 different inputs; more details in the class
+     * documentation.
+     * 1. Input to resize
+     * 2. ROI NOT SUPPORTED (optional)
+     * 3. scales (optional) - tensor(float)
+     * 4. sizes - tensor(int64)
+     * @param[in] coordinate_transformation_mode how coordinates are mapped
+     * from the interpolated tensor to the original tensor
+     * @param[in] cubic_coeff_a the "a" coefficient of cubic interpolation. Only
+     * used if interpolation_mode = Interpolation::Mode::Cubic
+     * @param[in] interpolationMode Type of interpolation used for
+     * up/downsampling
+     * @param[in] paddingMode how out-of-bound values are handled
+     * @warning The Scales and Sizes inputs cannot be set simultaneously. If
+     * both are set, forward will fail.
+     */
+    Resize_Op(
+        Interpolation::CoordinateTransformation coordTransfoMode,
+        Interpolation::Mode interpol_mode = Interpolation::Mode::RoundPreferFloor,
+        float cubic_coef_a = -.75f,
+        PadBorderType paddingMode = PadBorderType::Edge)
+        : OperatorTensor(Type,
+                         {InputCategory::Data,
+                          InputCategory::OptionalData,
+                          InputCategory::OptionalData,
+                          InputCategory::OptionalData},
+                         1),
+          mAttributes(std::make_shared<Attributes_>(
+              attr<ResizeAttr::CubicCoeffA>(cubic_coef_a),
+              attr<ResizeAttr::CoordinateTransformationMode>(coordTransfoMode),
+              attr<ResizeAttr::InterpolationMode>(interpol_mode),
+              attr<ResizeAttr::PaddingMode>(paddingMode))) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
-     * but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output
+     * tensor(s), but not its input tensors: the new operator has no input
+     * associated.
      * @param op Operator to copy.
      */
-    Resize_Op(const Resize_Op& op);
+    Resize_Op(const Resize_Op &op)
+        : OperatorTensor(op), mAttributes(op.mAttributes) {
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Resize_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Resize_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<Operator> clone() const override final {
+        return std::make_shared<Resize_Op>(*this);
+    }
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
+    void setBackend(const std::string &name,
+                    DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override {
+        return Registrar<Resize_Op>::getKeys();
+    }
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    inline Interpolation::CoordinateTransformation
+    coordinateTransformationMode() const {
+        return mAttributes
+            ->template getAttr<ResizeAttr::CoordinateTransformationMode>();
+    }
+    inline float cubicCoefA() const {
+        return mAttributes->template getAttr<ResizeAttr::CubicCoeffA>();
+    }
+    inline Interpolation::Mode interpolationMode() const {
+        return mAttributes->template getAttr<ResizeAttr::InterpolationMode>();
+    }
+    inline PadBorderType paddingMode() const {
+        return mAttributes->template getAttr<ResizeAttr::PaddingMode>();
+    }
 
-    static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName() {
         //  roi, scales, sizes, even if considered as const parameters/input
         return {"data_input", "roi ", "scales", "sizes"};
     }
-    static const std::vector<std::string> getOutputsName(){
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
-std::shared_ptr<Node> Resize(const std::string &name = "");
-
-}  // namespace Aidge
-
-
-#endif /* AIDGE_CORE_OPERATOR_Resize_H_ */
\ No newline at end of file
+/**
+ * @brief Creates a node containing a Resize operator.
+ * This node can take 4 different inputs; more details in the class documentation.
+ * #0 Input to resize
+ * #1 ROI NOT SUPPORTED (optional) - Tensor(double|float|float16)
+ * #2 scales (optional) - tensor(float)
+ * #3 sizes - tensor(int64)
+ * @param[in] scale scale factor along each dimension (leave empty if sizes
+ * are given)
+ * @param[in] size target output size along each dimension (leave empty if
+ * scales are given)
+ * @param[in] coordinate_transformation_mode how coordinates are mapped from
+ * the interpolated tensor to the original tensor
+ * @param[in] interpolationMode type of interpolation used for
+ * up/downsampling
+ * @param[in] cubic_coeff_a the "a" coefficient of cubic interpolation. Only
+ * used if interpolation_mode = Interpolation::Mode::Cubic
+ * @warning Scales & ROI input cannot be set simultaneously. If bot are set,
+ * forward will fail.
+ * @warning Padding mode will tell how values out of bound are treated.
+ * @return NodePtr
+ */
+std::shared_ptr<Node>
+Resize(std::vector<float> scale = std::vector<float>(),
+        std::vector<std::size_t> size = std::vector<std::size_t>(),
+       Interpolation::CoordinateTransformation coordTransfoMode =
+           Interpolation::CoordinateTransformation::HalfPixel,
+       Interpolation::Mode interpolMode =
+           Interpolation::Mode::RoundPreferFloor,
+       float cubicCoefA = -.75f,
+       const std::string &name = "");
+
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ResizeAttr>::data[] = {
+    "coordinateTransformationMode",
+    "cubicCoeffA",
+    "InterpolationMode",
+    "PaddingMode"
+};
+}
+#endif /* AIDGE_CORE_OPERATOR_RESIZE_H_ */
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index cb9348dc24d1ac4c10b090e3676fabea2035a35b..794f14124436668cd9ab0895ff602b8d43ad5dcc 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -9,7 +9,6 @@
  *
  ********************************************************************************/
 
-
 #ifndef AIDGE_LOG_H_
 #define AIDGE_LOG_H_
 
@@ -19,44 +18,36 @@
 #include <fmt/format.h>
 #include <fmt/ranges.h>
 
+#include "aidge/data/half_fmt.hpp"
+
 #include "aidge/utils/Attributes.hpp"
 
 namespace Aidge {
 /**
  * Helper to define a context anywhere, hidding the scoped variable name
  * which has no relevance.
-*/
-#define AIDGE_LOG_CONTEXT(...) const Log::Context logContext_##__LINE__(__VA_ARGS__)
+ */
+#define AIDGE_LOG_CONTEXT(...)                                                \
+    const Log::Context logContext_##__LINE__(__VA_ARGS__)
 
-
-template<class U>
-static void discard_args(U parg) {
+template <class U> static void discard_args(U parg) {
     (void)parg;
 }
-template<class U, class... Us>
-static void discard_args(U parg, Us... pargs) {
+template <class U, class... Us> static void discard_args(U parg, Us... pargs) {
     (void)parg;
     discard_args(pargs...);
 }
 
 /**
  * Aidge logging class, for displaying and file logging of events.
-*/
+ */
 class Log {
-public:
-    enum Level {
-        Debug = 0,
-        Info,
-        Notice,
-        Warn,
-        Error,
-        Fatal
-    };
+  public:
+    enum Level { Debug = 0, Info, Notice, Warn, Error, Fatal };
 
     class Context {
-    public:
-        template <typename... Args>
-        Context(Args&&... args) {
+      public:
+        template <typename... Args> Context(Args &&...args) {
             Log::mContext.push_back(fmt::format(std::forward<Args>(args)...));
         }
 
@@ -68,13 +59,12 @@ public:
     /**
      * Detailed messages for debugging purposes, providing information helpful
      * for developers to trace and identify issues.
-     * Detailed insights of what is appening in an operation, not useful for the
-     * end-user. The operation is performed nominally.
+     * Detailed insights of what is happening in an operation, not useful for
+     * the end-user. The operation is performed nominally.
      * @note This level is disabled at compile time for Release, therefore
      * inducing no runtime overhead for Release.
-    */
-    template <typename... Args>
-    static void debug(Args&&... args) {
+     */
+    template <typename... Args> static void debug(Args &&...args) {
 #ifndef NDEBUG
         // only when compiled in Debug
         log(Debug, fmt::format(std::forward<Args>(args)...));
@@ -86,22 +76,19 @@ public:
     /**
      * Messages that provide a record of the normal operation, about
      * the application's state, progress, or important events.
-     * Reports normal start, end and key steps in an operation. The operation is
-     * performed nominally.
-    */
-    template <typename... Args>
-    static void info(Args&&... args) {
+     * Reports normal start, end and key steps in an operation. The operation
+     * is performed nominally.
+     */
+    template <typename... Args> static void info(Args &&...args) {
         log(Info, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
-     * Applies to normal but significant conditions that may require monitoring,
-     * like unusual or normal fallback events.
-     * Reports specific paths in an operation. The operation can still be
-     * performed normally.
-    */
-    template <typename... Args>
-    static void notice(Args&&... args) {
+     * Applies to normal but significant conditions that may require
+     * monitoring, like unusual or normal fallback events. Reports specific
+     * paths in an operation. The operation can still be performed normally.
+     */
+    template <typename... Args> static void notice(Args &&...args) {
         log(Notice, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -110,9 +97,8 @@ public:
      * not necessarily cause immediate problems.
      * Some specific steps of the operation could not be performed, but it can
      * still provide an exploitable result.
-    */
-    template <typename... Args>
-    static void warn(Args&&... args) {
+     */
+    template <typename... Args> static void warn(Args &&...args) {
         log(Warn, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -121,26 +107,24 @@ public:
      * recover from, but attention is needed to prevent further issues.
      * The operation could not be performed, but it does not prevent potential
      * further operations.
-    */
-    template <typename... Args>
-    static void error(Args&&... args) {
+     */
+    template <typename... Args> static void error(Args &&...args) {
         log(Error, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
-     * Represents a critical error or condition that leads to the termination of
-     * the application, indicating a severe and unrecoverable problem.
-     * The operation could not be performed and any further operation is
+     * Represents a critical error or condition that leads to the termination
+     * of the application, indicating a severe and unrecoverable problem. The
+     * operation could not be performed and any further operation is
      * impossible.
-    */
-    template <typename... Args>
-    static void fatal(Args&&... args) {
+     */
+    template <typename... Args> static void fatal(Args &&...args) {
         log(Fatal, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
      * Set the minimum log level displayed in the console.
-    */
+     */
     static void setConsoleLevel(Level level) {
         mConsoleLevel = level;
     }
@@ -148,14 +132,14 @@ public:
     /**
      * Set or disable colors on console.
      * Initial value should be assumed true.
-    */
+     */
     static void setConsoleColor(bool enabled) {
         mConsoleColor = enabled;
     }
 
     /**
      * Set the minimum log level saved in the log file.
-    */
+     */
     constexpr static void setFileLevel(Level level) {
         mFileLevel = level;
     }
@@ -164,8 +148,8 @@ public:
      * Set the log file name.
      * Close the current log file and open the one with the new file name.
      * If empty, stop logging into a file.
-    */
-    static void setFileName(const std::string& fileName) {
+     */
+    static void setFileName(const std::string &fileName) {
         if (fileName != mFileName) {
             mFileName = fileName;
             mFile.release();
@@ -187,8 +171,8 @@ public:
      * warnings.
      */
     struct fcloseDeleter {
-        void operator()(FILE *f) const noexcept { 
-            std::fclose(f); 
+        void operator()(FILE *f) const noexcept {
+            std::fclose(f);
         }
     };
 
@@ -203,11 +187,12 @@ private:
     static std::unique_ptr<FILE, fcloseDeleter> mFile;
     static std::vector<std::string> mContext;
 };
-}
+} // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::Log::Level>::data[] = {"Debug", "Info", "Notice", "Warn", "Error", "Fatal"};
+const char *const EnumStrings<Aidge::Log::Level>::data[] =
+    {"Debug", "Info", "Notice", "Warn", "Error", "Fatal"};
 }
 
-#endif //AIDGE_LOG_H_
+#endif // AIDGE_LOG_H_
diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp
index 1bfe0929bf67bb0c6d3b893f3dbaf6993dcfd6ff..88312280d572302ecce4157c34db0ba1efd52da9 100644
--- a/include/aidge/utils/TensorUtils.hpp
+++ b/include/aidge/utils/TensorUtils.hpp
@@ -49,6 +49,7 @@ bool approxEq(const Tensor& t1, const Tensor& t2, float relative = 1e-5f, float
     }
     return true;
 }
-}
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_TENSOR_UTILS_H_s */
diff --git a/python_binding/data/pybind_Interpolation.cpp b/python_binding/data/pybind_Interpolation.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0839d1c04925f46595630191da9291217d40f10f
--- /dev/null
+++ b/python_binding/data/pybind_Interpolation.cpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/data/Interpolation.hpp"
+#include "aidge/utils/Registrar.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Interpolation(py::module &m) {
+    auto pyInterpolation = py::class_<Aidge::Interpolation>(m, "Interpolation");
+
+    py::enum_<Interpolation::Mode>(pyInterpolation, "Mode")
+    .value("CUBIC", Interpolation::Mode::Cubic)
+    .value("LINEAR", Interpolation::Mode::Linear)
+    .value("ROUND_PREFER_FLOOR", Interpolation::Mode::RoundPreferFloor)
+    .value("ROUND_PREFER_CEIL", Interpolation::Mode::RoundPreferCeil)
+    .value("FLOOR", Interpolation::Mode::Floor)
+    .value("CEIL", Interpolation::Mode::Ceil)
+    .export_values();
+
+    py::enum_<Interpolation::CoordinateTransformation>(pyInterpolation, "CoordinateTransformation")
+    .value("HALF_PIXEL", Interpolation::CoordinateTransformation::HalfPixel)
+    .value("HALF_PIXEL_SYMETRIC", Interpolation::CoordinateTransformation::HalfPixelSymmetric)
+    .value("PYTORCH_HALF_PIXEL", Interpolation::CoordinateTransformation::PytorchHalfPixel)
+    .value("ALIGN_CORNERS", Interpolation::CoordinateTransformation::AlignCorners)
+    .value("ASYMMETRIC", Interpolation::CoordinateTransformation::Asymmetric)
+    .export_values();
+}
+
+} // namespace Aidge
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index b972c87dcda8f912ff40feef0001b95d5feac71e..0ac42f507b722d5006a36ea59816766d54164c8d 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -329,8 +329,8 @@ void init_Tensor(py::module& m){
     .def("capacity", &Tensor::capacity)
     .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize, py::arg("dims"), py::arg("strides") = std::vector<DimSize_t>())
     .def("has_impl", &Tensor::hasImpl)
-    .def("get_coord", &Tensor::getCoord)
-    .def("get_idx", &Tensor::getIdx)
+    .def("get_coord", (std::vector<std::size_t> (Tensor::*)(const std::size_t)) &Tensor::getCoord, py::arg("flatIdx"))
+    .def("get_idx",(std::size_t (Tensor::*)(const std::vector<std::size_t> &)) &Tensor::getIdx, py::arg("coords"))
     .def_static("get_available_backends", &Tensor::getAvailableBackends)
     .def("undefined", &Tensor::undefined)
     .def("cpy_transpose", (void (Tensor::*)(const Tensor& src, const std::vector<DimSize_t>& transpose)) &Tensor::copyTranspose, py::arg("src"), py::arg("transpose"))
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
index 35321f525e486107af3715ce1c09f48b7c5cd60f..2aa62609835a7042dd0df54f28b453b7e33a3b5b 100644
--- a/python_binding/operator/pybind_Resize.cpp
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -9,22 +9,51 @@
  *
  ********************************************************************************/
 
+#include <cstddef>  // std::size_t
+
 #include <pybind11/pybind11.h>
 
-#include "aidge/operator/Resize.hpp"
+#include "aidge/data/Interpolation.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Resize.hpp"
+#include "aidge/utils/Registrar.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
-void init_Resize(py::module& m) {
-    py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(m, "ResizeOp", py::multiple_inheritance())
-        .def_static("get_inputs_name", &Resize_Op::getInputsName)
-        .def_static("get_outputs_name", &Resize_Op::getOutputsName)
-        .def_readonly_static("Type", &Resize_Op::Type);
+void init_Resize(py::module &m) {
+  py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(
+          m, "ResizeOp", py::multiple_inheritance())
+          .def(py::init<Interpolation::CoordinateTransformation, Interpolation::Mode, float, PadBorderType>(), py::arg("coordinate_transformation_mode"), py::arg("interpolation_mode"), py::arg("cubic_coeff_a") = -0.75f, py::arg("padding_mode") = PadBorderType::Edge)
+          .def_static("get_inputs_name", &Resize_Op::getInputsName)
+          .def_static("get_outputs_name", &Resize_Op::getOutputsName)
+          .def_readonly_static("Type", &Resize_Op::Type);
 
-    declare_registrable<Resize_Op>(m, "ResizeOp");
+  declare_registrable<Resize_Op>(m, "ResizeOp");
 
-    m.def("Resize", &Resize, py::arg("name") = "");
+  m.def("Resize", &Resize,
+        py::arg("scale") = std::vector<float>({}),
+        py::arg("size") = std::vector<std::size_t>({}),
+        py::arg("coord_transfo_mode") =
+            Interpolation::CoordinateTransformation::HalfPixel,
+        py::arg("interpolation_mode") =
+            Interpolation::Mode::RoundPreferFloor,
+        py::arg("cubic_interpolation_coefficient_a") = -.75f,
+        py::arg("name") = "", R"mydelimiter(
+    Initialize a node containing a Resize operator.
+    This node can take 4 different inputs.
+    #0 Input to resize
+    #1 ROI NOT SUPPORTED (optional) - Tensor(double|float|float16)
+    #2 scales (optional) - tensor(float)
+    #3 sizes - tensor(int64)
+    :param scale : scale factor along each dimension (leave empty if sizes are given).
+    :type scale : List[float]
+    :param size : target output size along each dimension (leave empty if scales are given).
+    :type size : List[int]
+    :param coord_transfo_mode : Coordinate transformation mode, i.e. how coordinates are mapped from the interpolated tensor to the original tensor.
+    :type coord_transfo_mode : Interpolation.CoordinateTransformation
+    :param interpolation_mode : Type of interpolation used for up/downsampling.
+    :type interpolation_mode : Interpolation.Mode
+    :param cubic_interpolation_coefficient_a : "a" coefficient of cubic interpolation. Only used if interpolation_mode is Interpolation.Mode.CUBIC.
+    :type cubic_interpolation_coefficient_a : float
+    :param name : name of the node.
+    :type name : str
+    )mydelimiter");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 4f7ffea5fefe299a2670fd7bcb816c86070bf315..006eeb289f25570ddf337f048b05816102624028 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -20,6 +20,7 @@ void init_Random(py::module&);
 void init_Data(py::module&);
 void init_Database(py::module&);
 void init_DataProvider(py::module&);
+void init_Interpolation(py::module&);
 void init_Tensor(py::module&);
 void init_TensorImpl(py::module&);
 void init_Attributes(py::module&);
@@ -107,6 +108,7 @@ void init_Aidge(py::module& m) {
     init_Data(m);
     init_Database(m);
     init_DataProvider(m);
+    init_Interpolation(m);
     init_Tensor(m);
     init_TensorImpl(m);
     init_Attributes(m);
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index dd5c5c110154427a8af7afbf70b2c76b61e507a8..1708d9e36c174527c648e37b63b080211aa6df05 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -95,6 +95,10 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs)
     Log::debug("getBestMatch() for requirements: {}", requiredSpecs);
 
     const auto availableSpecsSet = getAvailableImplSpecs();
+    AIDGE_ASSERT(availableSpecsSet.size() > 0,
+                 "OperatorImpl::getBestMatch(): No available specs found by "
+                 "getAvailableImplSpecs(). "
+                 "Cannot find best implementation for required specs, aborting.");
     const std::vector<ImplSpec> availableSpecs(availableSpecsSet.begin(), availableSpecsSet.end());
     std::vector<int> matchingSpecs(availableSpecs.size(), -1);
 
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 3dcdcc65d0ef40b0443eb5b9662111420ce4fb86..e6f6cd799b48991556b8c99006ab94583459117c 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -14,6 +14,7 @@
 #include <cstddef>
 #include <vector>
+#include <functional>  // std::multiplies
+#include <numeric>     // std::accumulate
 
+#include "aidge/data/half.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/Abs.hpp"
@@ -23,14 +24,14 @@
 #include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/Sub.hpp"
 #include "aidge/operator/Sqrt.hpp"
-#include "aidge/operator/Transpose.hpp"
 #include "aidge/utils/Types.h"
 
+namespace Aidge {
 
-Aidge::Tensor::~Tensor() noexcept = default;
+Tensor::~Tensor() noexcept = default;
 
 
-Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
+Tensor Tensor::operator+(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
@@ -47,7 +48,7 @@ Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
 }
 
 
-Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
+Tensor Tensor::operator-(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
@@ -64,7 +65,7 @@ Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
 }
 
 
-Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
+Tensor Tensor::operator*(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
@@ -81,7 +82,7 @@ Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
 }
 
 
-Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
+Tensor Tensor::operator/(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
@@ -97,7 +98,7 @@ Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
     return div_.getOutput(0)->clone();
 }
 
-Aidge::Tensor Aidge::Tensor::sqrt() const {
+Tensor Tensor::sqrt() const {
     AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
     auto sqrt_ = Sqrt_Op();
     sqrt_.associateInput(0, std::make_shared<Tensor>(*this));
@@ -108,7 +109,7 @@ Aidge::Tensor Aidge::Tensor::sqrt() const {
     return sqrt_.getOutput(0)->clone();
 }
 
-Aidge::Tensor Aidge::Tensor::abs() const {
+Tensor Tensor::abs() const {
     AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
     auto abs_ = Abs_Op();
     abs_.associateInput(0, std::make_shared<Tensor>(*this));
@@ -119,7 +120,7 @@ Aidge::Tensor Aidge::Tensor::abs() const {
     return abs_.getOutput(0)->clone();
 }
 
-Aidge::Tensor Aidge::Tensor::mean() const {
+Tensor Tensor::mean() const {
     AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
     // TODO: should be the default behavior of ReduceMean_Op
     // No need to specify the list of all axes!
@@ -134,7 +135,7 @@ Aidge::Tensor Aidge::Tensor::mean() const {
     return mean_.getOutput(0)->clone();
 }
 
-Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
+Tensor& Tensor::operator=(const Tensor& other) {
     if (this == &other) {
         return *this;
     }
@@ -154,7 +155,7 @@ Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
 }
 
 
-void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t device, bool copyFrom) {
+void Tensor::setBackend(const std::string &name, DeviceIdx_t device, bool copyFrom) {
     if (mImpl) {
         if (mImpl->device() != std::make_pair(name, device)) {
             // Backend change: create new impl, copy from old to new and replace
@@ -171,8 +172,8 @@ void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t devic
     }
     }
 
-void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
-                           std::vector<Aidge::DimSize_t> strides) {
+void Tensor::resize(const std::vector<DimSize_t>& dims,
+                           std::vector<DimSize_t> strides) {
     if (dims.empty()) {  // scalar
         mDims = std::vector<DimSize_t>(0);
         mStrides = std::vector<DimSize_t>({1});
@@ -234,7 +235,7 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
     }
 }
 
-std::string Aidge::Tensor::toString() const {
+std::string Tensor::toString() const {
 
     if (!hasImpl() || undefined()) {
         // Return no value on no implementation or undefined size
@@ -343,7 +344,7 @@ std::string Aidge::Tensor::toString() const {
     return res;
 }
 
-Aidge::Tensor Aidge::Tensor::extract(
+Tensor Tensor::extract(
     const std::vector<std::size_t>& fixedCoord) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
     AIDGE_ASSERT(fixedCoord.size() <= mDims.size(),
@@ -359,7 +360,7 @@ Aidge::Tensor Aidge::Tensor::extract(
     return subTensor;
 }
 
-Aidge::Tensor Aidge::Tensor::extract(
+Tensor Tensor::extract(
     const std::vector<std::size_t>& startCoord,
     const std::vector<std::size_t>& dims) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
@@ -373,7 +374,7 @@ Aidge::Tensor Aidge::Tensor::extract(
     return subTensor;
 }
 
-void Aidge::Tensor::makeContiguous() {
+void Tensor::makeContiguous() {
     if (!mImpl || isContiguous()) {
         return;
     }
@@ -411,7 +412,7 @@ void Aidge::Tensor::makeContiguous() {
     resize(mDims);
 }
 
-void Aidge::Tensor::copyCast(const Tensor& src) {
+void Tensor::copyCast(const Tensor& src) {
     if (&src == this) {
         return;
     }
@@ -432,7 +433,7 @@ void Aidge::Tensor::copyCast(const Tensor& src) {
                         src.size(), mImplOffset);
 }
 
-void Aidge::Tensor::copyFrom(const Tensor& src) {
+void Tensor::copyFrom(const Tensor& src) {
     if (&src == this) {
         return;
     }
@@ -453,7 +454,7 @@ void Aidge::Tensor::copyFrom(const Tensor& src) {
                         mImplOffset);
 }
 
-void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose) {
+void Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose) {
     std::vector<DimSize_t> newDims;
     for (std::size_t i = 0; i < src.dims().size(); ++i) {
         newDims.push_back(src.dims()[transpose[i]]);
@@ -495,11 +496,11 @@ void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t
     setImpl(newImpl);
 }
 
-void Aidge::Tensor::copyTranspose(const Tensor& src, const DataFormatTranspose& transpose) {
+void Tensor::copyTranspose(const Tensor& src, const DataFormatTranspose& transpose) {
     copyTranspose(src, std::vector<DimSize_t>(transpose.begin(), transpose.end()));
 }
 
-void Aidge::Tensor::copyCastFrom(const Tensor& src,
+void Tensor::copyCastFrom(const Tensor& src,
                                  std::shared_ptr<Tensor>& movedSrcPtr) {
     if (&src == this) {
         return;
@@ -532,13 +533,13 @@ void Aidge::Tensor::copyCastFrom(const Tensor& src,
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) {
+Tensor& Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) {
     // Scott Meyers' solution to avoid code duplication
     return const_cast<Tensor&>(
         static_cast<const Tensor&>(*this).refContiguous(fallback));
 }
 
-const Aidge::Tensor& Aidge::Tensor::refContiguous(
+const Tensor& Tensor::refContiguous(
     std::shared_ptr<Tensor>& fallback) const {
     AIDGE_ASSERT(getImpl(),
                  "no backend was set for tensor, cannot refCast() it");
@@ -557,15 +558,15 @@ const Aidge::Tensor& Aidge::Tensor::refContiguous(
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
-                                      const Aidge::DataType& dt) {
+Tensor& Tensor::refCast(std::shared_ptr<Tensor>& fallback,
+                                      const DataType& dt) {
     // Scott Meyers' solution to avoid code duplication
     return const_cast<Tensor&>(
         static_cast<const Tensor&>(*this).refCast(fallback, dt));
 }
 
-const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
-                                            const Aidge::DataType& dt) const {
+const Tensor& Tensor::refCast(std::shared_ptr<Tensor>& fallback,
+                                            const DataType& dt) const {
     AIDGE_ASSERT(getImpl(),
                  "no backend was set for tensor, cannot refCast() it");
 
@@ -598,7 +599,7 @@ const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
+Tensor& Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
                                       const std::string& backend,
                                       DeviceIdx_t device) {
     // Scott Meyers' solution to avoid code duplication
@@ -606,7 +607,7 @@ Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
         static_cast<const Tensor&>(*this).refFrom(fallback, backend, device));
 }
 
-const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
+const Tensor& Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
                                             const std::string& backend,
                                             DeviceIdx_t device) const {
     AIDGE_ASSERT(getImpl(),
@@ -639,8 +640,8 @@ const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
-                                  const Aidge::DataType& dt,
+Tensor& Tensor::ref(std::shared_ptr<Tensor>& fallback,
+                                  const DataType& dt,
                                   const std::string& backend,
                                   DeviceIdx_t device) {
     // Scott Meyers' solution to avoid code duplication
@@ -648,8 +649,8 @@ Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
         static_cast<const Tensor&>(*this).ref(fallback, dt, backend, device));
 }
 
-const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
-                                        const Aidge::DataType& dt,
+const Tensor& Tensor::ref(std::shared_ptr<Tensor>& fallback,
+                                        const DataType& dt,
                                         const std::string& backend,
                                         DeviceIdx_t device) const {
     AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot ref() it");
@@ -673,9 +674,64 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
     }
 }
 
-std::set<std::string> Aidge::Tensor::getAvailableBackends() {
+
+std::vector<std::size_t>
+Tensor::toCoord(const std::vector<DimSize_t>& dimensions, std::size_t index) {
+    std::vector<std::size_t> coord(dimensions.size());
+    std::size_t i = dimensions.size();
+
+    while (i-- > 0) {
+        coord[i] = (index % dimensions[i]);
+        index /= dimensions[i];
+    }
+    return coord;
+}
+
+
+std::size_t Tensor::toIndex(const std::vector<DimSize_t> &dimensions, const std::vector<std::size_t>& coords) {
+    AIDGE_ASSERT(coords.size() == dimensions.size(), "Tensor::toIndex(): Coordinates do not match number of dimensions.\n\tCoords: {}\n\tDimensions: {}", coords, dimensions);
+    std::size_t index = 0;
+    std::size_t dimensions_s = 1; // stride
+    std::size_t i = dimensions.size();
+    while (i-- > 0) {
+        index += coords[i] * dimensions_s;
+        dimensions_s *= dimensions[i];
+    }
+    return index;
+}
+
+template<typename T>
+bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<T>& coords){
+    AIDGE_ASSERT(coords.size() == dimensions.size(),
+                 "Coordinates({}) to compare have not "
+                 "the same number of dimension as tensor dimensions({}), aborting.",
+                 coords,
+                 dimensions);
+    bool isInBound {true};
+    for(std::size_t i = 0 ; i < coords.size() && isInBound; ++i ){
+        isInBound = coords[i] >= 0 && coords[i] < static_cast<T>(dimensions[i]) ;
+    }
+    return isInBound;
+}
+
+
+bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::size_t index){
+    return index < std::accumulate(dimensions.cbegin(), dimensions.cend(), std::size_t(1), std::multiplies<std::size_t>());
+}
+
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::int16_t>& coords);
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::int32_t>& coords);
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::int64_t>& coords);
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::size_t>& coords);
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<float>& coords);
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<double>& coords);
+
+
+std::set<std::string> Tensor::getAvailableBackends() {
     std::set<std::string> backendsList;
-    for (const auto& tupleKey : Registrar<Tensor>::getKeys())
+    for (const auto& tupleKey : Registrar<Tensor>::getKeys()) {
         backendsList.insert(std::get<0>(tupleKey));
+    }
     return backendsList;
 }
+}  // namespace Aidge
diff --git a/src/data/interpolation.cpp b/src/data/interpolation.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ce5431a2b3bd742f98a169666811bd5d373a5c24
--- /dev/null
+++ b/src/data/interpolation.cpp
@@ -0,0 +1,237 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/data/Interpolation.hpp"
+
+#include <algorithm>  // std::clamp
+#include <bitset>
+#include <cmath>      // std::ceil, std::floor
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int64_t
+#include <stdexcept>  // std::runtime_error
+#include <utility>    // std::make_pair, std::set
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/half.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Log.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+template <typename T>
+[[noreturn]] T
+Interpolation::interpolate(const std::vector<float> & /*originalIndex*/,
+                           const std::vector<Point<T>> & /*points*/,
+                           const Mode /*interpMode*/) {
+    AIDGE_THROW_OR_ABORT(
+        std::runtime_error,
+        "interpolate() is backend dependendant and should be"
+        "called from derived classes: Interpolation<Backend>::interpolate(...)"
+        "Meaning that for CPU backend, InterpolationCPU::interpolate() should "
+        "be called.");
+}
+
+std::vector<float> Interpolation::untransformCoordinates(
+    const std::vector<DimSize_t> &transformedCoords,
+    const std::vector<DimSize_t> &inputDims,
+    const std::vector<DimSize_t> &outputDims,
+    const Interpolation::CoordinateTransformation coordTransfoMode) {
+    AIDGE_ASSERT(
+        inputDims.size() == outputDims.size(),
+        "Interpolate::untransformCoordinates: input and output coordinates "
+        "dimension number mismatch, they should be equal."
+        "Got inputDims({}) and outputDims ({}).",
+        inputDims,
+        outputDims);
+    AIDGE_ASSERT(
+        transformedCoords.size() == outputDims.size(),
+        "Interpolate::untransformCoordinates: coordinates dimension mismatch, "
+        "transformed coords number should be equal to output dimension number."
+        "Got coords to transform ({}) and outputDims ({})",
+        transformedCoords,
+        outputDims);
+    std::vector<float> originalCoords(transformedCoords.size());
+
+    for (DimIdx_t i = 0; i < transformedCoords.size(); ++i) {
+        float scale = static_cast<float>(outputDims[i]) /
+                      static_cast<float>(inputDims[i]);
+
+        switch (coordTransfoMode) {
+        case CoordinateTransformation::AlignCorners:
+            AIDGE_THROW_OR_ABORT(
+                std::runtime_error,
+                "Interpolation::untransformCoords: Unsupported Coordinate "
+                "transform : AlignCorners");
+            break;
+        case CoordinateTransformation::Asymmetric:
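+            // plain rescaling of the index, with no half-pixel centre offset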
+            originalCoords[i] = transformedCoords[i] / scale;
+            break;
+        case CoordinateTransformation::HalfPixel:
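+            // map the output pixel centre (x + 0.5) onto the input grid, then shift back to index space (- 0.5)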
+            originalCoords[i] = (transformedCoords[i] + 0.5) / scale - 0.5;
+            break;
+        case CoordinateTransformation::HalfPixelSymmetric:
+            AIDGE_THROW_OR_ABORT(
+                std::runtime_error,
+                "Interpolation::untransformCoords: Unsupported Coordinate "
+                "transform : HalfPixelSymmetric");
+            break;
+        case Interpolation::CoordinateTransformation::PytorchHalfPixel:
+            AIDGE_THROW_OR_ABORT(
+                std::runtime_error,
+                "Interpolation::untransformCoordinates: unsupported coordinate "
+                "transformation: PytorchHalfPixel");
+            break;
+        }
+    }
+    return originalCoords;
+}
+
+/**
+ * @details Generates the list of all neighbours of a given coordinate.
+ * Since the coordinates are floating-point values (they are the result of
+ * Interpolation::untransformCoordinates), they are approximations of
+ * coordinates in the originalTensor frame computed from coordinates in the
+ * interpolatedTensor frame.
+ *
+ * To retrieve the neighbouring values, we must apply either floor() or
+ * ceil() to each coordinate.
+ *
+ * To generate all combinations, we iterate from 0 to 2^tensorDims.size() - 1
+ * and read bit j of the counter to choose ceil (bit clear) or floor (bit set)
+ * for dimension j.
+ * @example in 2 dimensions, for the point (1.3, 3.4) we iterate up to
+ * 2^2 - 1 = 3:
+ * 0 = 0b00 -> (ceil(x),  ceil(y))  = (2, 4)
+ * 1 = 0b01 -> (floor(x), ceil(y))  = (1, 4)
+ * 2 = 0b10 -> (ceil(x),  floor(y)) = (2, 3)
+ * 3 = 0b11 -> (floor(x), floor(y)) = (1, 3)
+ */
+template <typename T>
+std::set<Interpolation::Point<T>>
+Interpolation::retrieveNeighbours(const T *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode) {
+
+    Log::debug("retrieveNeighbours: TensorDims : {}", tensorDims);
+    Log::debug("retrieveNeighbours: coords to interpolate : {}", coords);
+
+    std::set<Point<T>> neighbours;
+    const std::size_t nbNeighbours = std::size_t(1) << tensorDims.size();
+    Coords neighbourCoords(tensorDims.size());
+
+    for (std::size_t i = 0; i < nbNeighbours; ++i) {
+        const std::bitset<MaxDim> bits = std::bitset<MaxDim>{i};
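+        // bit j of i selects floor (bit set) or ceil (bit clear) for dimension j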
+        for (size_t j = 0; j < tensorDims.size(); ++j) {
+            neighbourCoords[j] =
+                bits[j] == 0 ? std::ceil(coords[j]) : std::floor(coords[j]);
+        }
+
+        T value;
+        if (Tensor::isInBounds(tensorDims, neighbourCoords)) {
+            // casting from signed to unsigned is safe here: isInBounds
+            // guarantees that all neighbourCoords values are >= 0
+            value = tensorValues[Tensor::toIndex(
+                tensorDims,
+                std::vector<DimSize_t>(neighbourCoords.begin(),
+                                       neighbourCoords.end()))];
+        } else {
+            switch (paddingMode) {
+                case PadBorderType::Edge:
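+                    // clamp each coordinate into [0, dim - 1] (edge replication)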
+                    for (DimSize_t j = 0; j < tensorDims.size(); ++j) {
+                        neighbourCoords[j] = std::clamp<std::int64_t>(
+                            neighbourCoords[j],
+                            0,
+                            static_cast<std::int64_t>(tensorDims[j]) - 1);
+                    }
+                    value = tensorValues[Tensor::toIndex(
+                                tensorDims,
+                                std::vector<DimSize_t>(neighbourCoords.begin(),
+                                                    neighbourCoords.end()))];
+                    break;
+                case PadBorderType::Zero:
+                    value = static_cast<T>(0);
+                    break;
+                default:
+                    AIDGE_THROW_OR_ABORT(
+                        std::runtime_error,
+                        "Unsupported padding mode as of now for interpolation.");
+            }
+        }
+        neighbours.insert(std::make_pair(neighbourCoords, value));
+    }
+    Log::debug("Interpolation::retrieveNeighbours(): neighbourCoords: {}",
+               neighbours);
+    return neighbours;
+}
+
+template std::set<Interpolation::Point<int16_t>>
+Interpolation::retrieveNeighbours(const int16_t *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+template std::set<Interpolation::Point<int32_t>>
+Interpolation::retrieveNeighbours(const int32_t *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+template std::set<Interpolation::Point<int64_t>>
+Interpolation::retrieveNeighbours(const int64_t *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+template std::set<Interpolation::Point<half_float::half>>
+Interpolation::retrieveNeighbours(const half_float::half *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+template std::set<Interpolation::Point<float>>
+Interpolation::retrieveNeighbours(const float *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+template std::set<Interpolation::Point<double>>
+Interpolation::retrieveNeighbours(const double *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+} // namespace Aidge
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index ff6fb9ce4b6b8596477dfdd1f43f8927e534459b..586dbc2037d36d26f39dd06404b3b70b99270c1e 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -45,7 +45,8 @@ Aidge::OperatorTensor::OperatorTensor(const OperatorTensor& other)
 
 void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs());
-    AIDGE_ASSERT(data->type() == Tensor::Type, "Input data must be of Tensor type");
+    AIDGE_ASSERT(data != nullptr, "Undefined data argument: make sure the associated tensor holds data before associating the input.");
+    AIDGE_ASSERT(data->type() == Tensor::Type, "OperatorTensor::associateInput(): Input data must be of Tensor type, got {}", data->type());
     mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
 }
 
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index 9e5762452e382a31c1e5da25708507653da2e474..252f55a6abdea13cc43cf21d3e8c7ab33ddbb86e 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -11,56 +11,31 @@
 
 #include "aidge/operator/Resize.hpp"
 
-#include <cstddef>    // std::size_t
-#include <cstdint>    // std::int64_t
-#include <stdexcept>  // std::runtime_error
+#include <algorithm>
+#include <cstddef>   // std::size_t
+#include <cstdint>   // std::int64_t
+#include <fmt/core.h>
+#include <stdexcept> // std::runtime_error
 #include <string>
 #include <vector>
-#include <fmt/core.h>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Interpolation.hpp"
 #include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Resize_Op::Type = "Resize";
-
-Aidge::Resize_Op::Resize_Op()
-    : OperatorTensor(Type,
-        {InputCategory::Data,
-            InputCategory::OptionalData,
-            InputCategory::OptionalData,
-            InputCategory::OptionalData},
-        1) {}
-
-/**
- * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
- * but not its input tensors (the new operator has no input associated).
- * @param op Operator to copy.
- */
-
-Aidge::Resize_Op::Resize_Op(const Aidge::Resize_Op& op)
-    : OperatorTensor(op)
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Resize_Op, *this, op.backend());
-    }
-    else {
-        mImpl = nullptr;
-    }
-}
+namespace Aidge {
 
-std::shared_ptr<Aidge::Operator> Aidge::Resize_Op::clone() const {
-    return std::make_shared<Resize_Op>(*this);
-}
+const std::string Resize_Op::Type = "Resize";
 
-bool Aidge::Resize_Op::dimsForwarded() const {
+bool Resize_Op::dimsForwarded() const {
     // in case of ROI add getInput(1) condition
-    if ((getInput(1) && !getInput(1)->undefined())
-        || (getInput(2) && !getInput(2)->undefined())
-        || (getInput(3) && !getInput(3)->undefined())
-        )
-    {
+    if ((getInput(1) && !getInput(1)->undefined()) ||
+        (getInput(2) && !getInput(2)->undefined()) ||
+        (getInput(3) && !getInput(3)->undefined())) {
         // output dims are data dependent
         return false;
     }
@@ -68,93 +43,137 @@ bool Aidge::Resize_Op::dimsForwarded() const {
     return OperatorTensor::dimsForwarded();
 }
 
-bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
-    if (inputsAssociated()) {
-        AIDGE_ASSERT(getInput(0)->nbDims() == 4,
-            "input tensor must have dimensions = 4 (batch, channel, height, width).");
-
-        const bool input1ROIPresent           = getInput(1) && !getInput(1)->undefined();
-        const bool input2ScalesPresent        = getInput(2) && !getInput(2)->undefined();
-        const bool input3SizesPresent         = getInput(3) && !getInput(3)->undefined();
+bool Resize_Op::forwardDims(bool allowDataDependency) {
+    if (!allowDataDependency) {
+        Log::warn("{}: cannot execute forwardDims() as the output "
+                    "dimensions are computed from some input data.",
+                    type());
+        return false;
+    }
 
-        AIDGE_ASSERT(input2ScalesPresent != input3SizesPresent, "Only one of scales and  sizes can be specified.")
+    // Some optional input may be linked but undefined because of ONNX import
+    if (!inputsAssociated(false)) {
+        return false;
+    }
 
-        if (input1ROIPresent) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Input #1 (ROI) is given and it is not supported.");
+    /** @brief input #0 */
+    constexpr IOIndex_t inDataIdx = 0;
+    /** @brief input #1 */
+    constexpr IOIndex_t inROIIdx = 1;
+    /** @brief input #2 */
+    constexpr IOIndex_t inScalesIdx = 2;
+    /** @brief input #3 */
+    constexpr IOIndex_t inSizesIdx = 3;
+
+    std::vector<DimSize_t> outDims = getInput(inDataIdx)->dims();
+    /////////////////////////////////////////////////////
+    // Ensuring operator is connected properly
+    const bool inputROIPresent =
+        getInput(inROIIdx) && !getInput(inROIIdx)->undefined();
+    if (inputROIPresent) {
+            AIDGE_THROW_OR_ABORT(
+                std::runtime_error,
+                "{}: input ROI(#{}) is present but it is not supported.",
+                type(),
+                inROIIdx);
         }
-        else if (input2ScalesPresent)  {
-            if (!allowDataDependency) {
-                Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #2");
-                return false;
-            }
-
-            AIDGE_ASSERT(getInput(0)->nbDims() ==  getInput(2)->size(),
-                "input #0 and input #2 (Scales) must have the same dimensions.");
-
-            std::vector<DimSize_t>      outDims = getInput(0)->dims();
-            const std::vector<DimSize_t> inDims = getInput(0)->dims();
 
-            std::shared_ptr<Tensor> fallback;
-            const auto& scales = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-
-            for (std::size_t dim=0; dim < getInput(2)->size(); ++dim) {
-                outDims[dim] = inDims[dim]*static_cast<int64_t*>(scales.getImpl()->hostPtr())[dim];
-            }
-
-            mOutputs[0]->resize(outDims);
-            return true;
+    const bool inputScalesPresent =
+        getInput(inScalesIdx) && !getInput(inScalesIdx)->undefined();
+    const bool inputSizesPresent =
+        getInput(inSizesIdx) && !getInput(inSizesIdx)->undefined();
+
+    AIDGE_ASSERT(inputScalesPresent ^ inputSizesPresent,
+                 "{}: exactly one of the inputs Scales(#2) and Sizes(#3) must "
+                 "be defined; they cannot be specified at the same time.",
+                 type());
+
+    std::shared_ptr<Tensor> resizeParam =
+        inputScalesPresent ? getInput(inScalesIdx) : getInput(inSizesIdx);
+    AIDGE_ASSERT(getInput(inDataIdx)->nbDims() == resizeParam->size(),
+        "{}: data input #0 and resizing parameter input #{} must have the "
+        "same dimensions.",
+        type(), inputScalesPresent ? inScalesIdx : inSizesIdx);
+
+
+    ////////////////////////////////////////////
+    // Case resize is done using Scales formula
+    if (inputScalesPresent) {
+
+        std::shared_ptr<Tensor> fallback;
+        const auto &scales = resizeParam->refCastFrom(fallback,
+                                                      DataType::Float32,
+                                                      resizeParam->backend());
+
+        const std::vector<DimSize_t> inDims = getInput(inDataIdx)->dims();
+        for (std::size_t dim = 0; dim < getInput(inScalesIdx)->size(); ++dim) {
+            const auto scaleAlongDim = scales.get<cpptype_t<DataType::Float32>>(dim);
+            AIDGE_ASSERT(scaleAlongDim > 0,
+                         "{}: all scales values must be sctricly positive, "
+                         "got {}.",
+                         type(),
+                         scaleAlongDim);
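+            // the cast truncates towards zero: outDim = floor(inDim * scale) for positive scales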
+            outDims[dim] =
+                static_cast<DimSize_t>(inDims[dim] * scaleAlongDim);
         }
-        else if (input3SizesPresent) {
-            if (!allowDataDependency) {
-                Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #3");
-                return false;
-            }
-
-            AIDGE_ASSERT(getInput(0)->nbDims() ==  getInput(3)->size(),
-                "input #0 and input #3 (Sizes) must have the same dimensions.");
-
-            std::vector<DimSize_t> outDims = getInput(0)->dims();
 
-            std::shared_ptr<Tensor> fallback;
-            const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-
-            for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {
-                outDims[dim] = static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim];
-            }
-
-            mOutputs[0]->resize(outDims);
-            return true;
-        }
-        else {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Error: Either Input #2 or Input #3 must be present.");
+        ///////////////////////////////////////////////////////////////
+        // case where resize output dims are given via the Size input
+    } else {
+        std::shared_ptr<Tensor> fallback;
+        const auto &sizes = resizeParam->refCastFrom(fallback,
+                                                     NativeType<DimSize_t>::type,
+                                                     resizeParam->backend());
+
+        for (std::size_t dim = 0; dim < getInput(inSizesIdx)->size(); ++dim) {
+            outDims[dim] = sizes.get<DimSize_t>(dim);
         }
     }
-
-    return false;
+    mOutputs[0]->resize(outDims);
+    return true;
 }
 
-void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+void Resize_Op::setBackend(const std::string &name, DeviceIdx_t device) {
     SET_IMPL_MACRO(Resize_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 
-    // By default, automatically set backend for all inputs: roi, scales and sizes
-    if(getInput(1)) {
+    // By default, automatically set backend for all optional inputs: roi, scales and
+    // sizes
+    if (getInput(1)) {
         getInput(1)->setBackend(name, device);
     }
-    if(getInput(2)) {
+    if (getInput(2)) {
         getInput(2)->setBackend(name, device);
     }
-    if(getInput(3)) {
+    if (getInput(3)) {
         getInput(3)->setBackend(name, device);
     }
 }
 
-std::set<std::string> Aidge::Resize_Op::getAvailableBackends() const {
-    return Registrar<Resize_Op>::getKeys();
-}
-
-/////////////////////////////////////////////
+std::shared_ptr<Node>
+Resize(std::vector<float> scale,
+        std::vector<std::size_t> size,
+       Interpolation::CoordinateTransformation coordTransfoMode,
+       Interpolation::Mode interpolMode,
+       float cubicCoefA,
+       const std::string &name) {
+    std::shared_ptr<Node> node_resize = std::make_shared<Node>(
+        std::make_shared<Resize_Op>(coordTransfoMode, interpolMode, cubicCoefA),
+        name);
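+    // attach constant Producers to feed the optional Scales (#2) and Sizes (#3) inputs when provided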
+    if (scale.size()) {
+        std::shared_ptr<Node> prod_scale = Producer(std::make_shared<Tensor>(Vector<float>(scale)));
+        prod_scale->addChild(node_resize, 0, 2);
+    }
+    if (size.size()) {
+        std::shared_ptr<Node> prod_size = Producer(std::make_shared<Tensor>(Vector<std::size_t>(size)));
+        prod_size->addChild(node_resize, 0, 3);
+    }
+    return node_resize;
 
-std::shared_ptr<Aidge::Node> Aidge::Resize(const std::string &name) {
-    return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
-}
\ No newline at end of file
+}
+} // namespace Aidge
diff --git a/unit_tests/data/Test_Interpolation.cpp b/unit_tests/data/Test_Interpolation.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..73a48125a7f6e300cebe483dfc1cf025fdfc7707
--- /dev/null
+++ b/unit_tests/data/Test_Interpolation.cpp
@@ -0,0 +1,76 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstdlib>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Interpolation.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+TEST_CASE("[core/data] Interpolation", "[Interpolation][Data]") {
+    Log::setConsoleLevel(Log::Debug);
+
+    auto tensor = std::make_shared<Tensor>(std::vector<DimSize_t>({10, 10}));
+    tensor->setDataType(DataType::Float32);
+    tensor->setBackend("cpu");
+    Aidge::constantFiller(tensor, 1337.F);
+
+    SECTION("retrieveNeighbours") {
+        std::set<Interpolation::Point<float>> neighbours;
+        std::set<Interpolation::Point<float>> expectedResult;
+
+        std::vector<float> coords;
+        SECTION("Out of bounds") {
+            coords = {-0.5, -0.5};
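+            // floor/ceil of -0.5 give -1 and 0 along each axis; out-of-bound neighbours read 0 with Zero padding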
+            expectedResult = {{{-1, -1}, 0.F},
+                              {{0, -1}, 0.F},
+                              {{-1, 0}, 0.F},
+                              {{0, 0}, 1337.F}};
+            neighbours = Interpolation::retrieveNeighbours<float>(
+                reinterpret_cast<float *>(tensor->getImpl()->rawPtr()),
+                tensor->dims(),
+                coords,
+                PadBorderType::Zero);
+
+            CHECK(neighbours == expectedResult);
+        }
+        SECTION("Some coords are rounds hence duplicates are filtered out") {
+            tensor = std::make_shared<Tensor>(
+                std::vector<DimSize_t>({5, 10, 10, 10}));
+            tensor->setDataType(DataType::Float32);
+            tensor->setBackend("cpu");
+            Aidge::constantFiller(tensor, 1337.F);
+
+            expectedResult = {{{0, 0, -1, -1}, 0.F},
+                              {{0, 0, 0, -1}, 0.F},
+                              {{0, 0, -1, 0}, 0.F},
+                              {{0, 0, 0, 0}, 1337.F}};
+
+            neighbours = Interpolation::retrieveNeighbours(
+                reinterpret_cast<float *>(tensor->getImpl()->rawPtr()),
+                tensor->dims(),
+                std::vector<float>({0, 0, -0.25, -0.25}),
+                PadBorderType::Zero);
+            CHECK(expectedResult == neighbours);
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp
index 4462eb91ed6c6cdfce77b47b6a1a8808eec88423..58003bb4009a484ca63acffdb50fbda156a48787 100644
--- a/unit_tests/data/Test_Tensor.cpp
+++ b/unit_tests/data/Test_Tensor.cpp
@@ -9,9 +9,9 @@
  *
  ********************************************************************************/
 
-#include <array>
 #include <cstddef>     // std::size_t
 #include <cstdint>     // std::uint8_t, std::uint16_t, std::int32_t
+#include <cstdlib>
 #include <numeric>     // std::accumulate, std::inner_product
 #include <functional>  // std::multiplies
 #include <random>      // std::mt19937,
@@ -340,32 +340,63 @@ TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") {
                 }
 
                 // Test get() and set() by coords
-                // We create coords of rank 0 to the number of dimensions
-                for (std::size_t coord_size = 0; coord_size < dims.size(); ++coord_size) {
-                    std::vector<std::size_t> coords(coord_size);
-                    for (std::size_t coord_idx = 0; coord_idx < coord_size; ++coord_idx) {
-                        std::size_t dim_idx = (dimsDist(gen)-1) % dims[coord_idx];
-                        coords[coord_idx] = dim_idx;
-                    }
-                    std::size_t flat_idx, flat_storage_idx;
-                    // As it is continuous we have getIdx() == getStorageIdx()
-                    REQUIRE_NOTHROW(flat_idx = x.getIdx(coords));
-                    REQUIRE_NOTHROW(flat_storage_idx = x.getStorageIdx(coords));
-                    REQUIRE(flat_storage_idx == flat_idx);
-                    float val, val_flat;
-                    // Test get() by index and by coords
-                    REQUIRE_NOTHROW(val_flat = x.get<float>(flat_idx));
-                    REQUIRE_NOTHROW(val = x.get<float>(coords));
-                    REQUIRE(val == val_flat);
-                    REQUIRE(val == values[flat_idx]);
-                    // Test set() by coords, also update the reference array
-                    REQUIRE_NOTHROW(x.set(coords, val + 1));
-                    values[flat_idx] += 1;
+                // We create coords of the number of dimensions
+                std::vector<std::size_t> coords(nb_dims);
+                for (std::size_t coord_idx = 0; coord_idx < nb_dims; ++coord_idx) {
+                    std::size_t dim_idx = (dimsDist(gen)-1) % dims[coord_idx];
+                    coords[coord_idx] = dim_idx;
                 }
+                std::size_t flat_idx, flat_storage_idx;
+                // As it is continuous we have getIdx() == getStorageIdx()
+                REQUIRE_NOTHROW(flat_idx = x.getIdx(coords));
+                REQUIRE_NOTHROW(flat_storage_idx = x.getStorageIdx(coords));
+                REQUIRE(flat_storage_idx == flat_idx);
+                float val, val_flat;
+                // Test get() by index and by coords
+                REQUIRE_NOTHROW(val_flat = x.get<float>(flat_idx));
+                REQUIRE_NOTHROW(val = x.get<float>(coords));
+                REQUIRE(val == val_flat);
+                REQUIRE(val == values[flat_idx]);
+                // Test set() by coords, also update the reference array
+                REQUIRE_NOTHROW(x.set(coords, val + 1));
+                values[flat_idx] += 1;
             }
         }
     }
-
+    SECTION("Index & coord manipulation"){
+            Tensor tensor;
+            std::vector<DimSize_t> dims {2,2};
+            int nbVal = std::accumulate(dims.begin(),
+                                        dims.end(),
+                                        1,
+                                        std::multiplies<DimSize_t>());
+            float* values = static_cast<float*>(malloc(nbVal * sizeof(float)));
+            values[0] = 0;
+            values[1] = 1;
+            values[2] = 2;
+            values[3] = 3;
+            tensor.setDataType(DataType::Float32);
+            tensor.setBackend("cpu");
+            tensor.resize(dims);
+            tensor.getImpl()->setRawPtr(values, 4);
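+            // row-major (C-order) flattening of a 2x2 tensor: index = coords[0] * 2 + coords[1]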
+            std::vector<std::size_t> coords;
+        SECTION("getIdx"){
+            CHECK(Tensor::toIndex(tensor.dims(), std::vector<std::size_t>({1,1})) == 3);
+            CHECK(Tensor::toIndex(tensor.dims(), std::vector<std::size_t>({1,0})) == 2);
+            // toIndex() performs no bounds check; getIdx() throws on out-of-bound coords
+            CHECK_THROWS(tensor.getIdx(std::vector<std::size_t>({0,2})));
+        }
+        SECTION("getCoord"){
+            CHECK(Tensor::toCoord(tensor.dims(), 3) == std::vector<std::size_t>({1,1}));
+            CHECK(Tensor::toCoord(tensor.dims(), 2) == std::vector<std::size_t>({1,0}));
+        }
+        SECTION("isInBound"){
+            CHECK_THROWS(Tensor::isInBounds(dims, std::vector<DimSize_t>({1,2,4,5})));
+            CHECK(Tensor::isInBounds(dims, std::vector<DimSize_t>({1,2})) == false);
+            CHECK(Tensor::isInBounds(dims, std::vector<int>({-1,1})) == false);
+            CHECK(Tensor::isInBounds(dims, std::vector<DimSize_t>({1,1})) == true);
+        }
+    }
     SECTION("Tensor extract") {
         bool equal;
 
diff --git a/unit_tests/operator/Test_Resize_Op.cpp b/unit_tests/operator/Test_Resize_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..111e8fb4f62040127f8b5da8125ba9d91c546f23
--- /dev/null
+++ b/unit_tests/operator/Test_Resize_Op.cpp
@@ -0,0 +1,180 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Resize.hpp"
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef> // std::size_t
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Log.hpp"
+
+namespace Aidge {
+
+
+TEST_CASE("[core/operator] Resize_Op(forwardDims)",
+          "[Resize][forwardDimsScales]") {
+  std::vector<Aidge::DimSize_t> input_dims;
+  std::vector<float> scales;
+  std::vector<std::size_t> sizes;
+  std::vector<Aidge::DimSize_t> expected_dims;
+
+  SECTION("Un-connected input leads to failure.") {
+    input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2});
+    std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+    auto resize_node = Resize();
+    auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+    op->associateInput(0, input_data);
+
+    REQUIRE_THROWS(op->forwardDims(true));
+  }
+
+  SECTION("Connecting both Scales & Sizes leads to failure") {
+    input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2});
+    std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+    scales = std::vector<float>({0.5f, 3.0f, 2.0f, 2.0f});
+    sizes = std::vector<std::size_t>({1, 3, 4, 4});
+    expected_dims = std::vector<Aidge::DimSize_t>({2, 3, 4, 4});
+
+    auto resize_node = Resize(scales, sizes);
+    auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+    op->associateInput(0, input_data);
+
+    REQUIRE_THROWS(op->forwardDims(true));
+  }
+
+  SECTION("Input Scales") {
+    SECTION("TEST 1") {
+      input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({1, 1, 2, 2});
+      sizes = std::vector<std::size_t>({});
+      expected_dims = std::vector<Aidge::DimSize_t>({1, 1, 4, 4});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+
+    SECTION("TEST 2") {
+      input_dims = std::vector<Aidge::DimSize_t>({4, 4, 10, 10});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({1, 1, 2, 3});
+      sizes = std::vector<std::size_t>({});
+      expected_dims = std::vector<Aidge::DimSize_t>({4, 4, 20, 30});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+    SECTION("TEST 3") {
+      input_dims = std::vector<Aidge::DimSize_t>({4, 2, 10, 10});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({1, 1, 0.5, 0.5});
+      sizes = std::vector<std::size_t>({});
+      expected_dims = std::vector<Aidge::DimSize_t>({4, 2, 5, 5});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+    SECTION("TEST 4") {
+      input_dims = std::vector<Aidge::DimSize_t>({11, 11, 4, 4});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+
+      scales = std::vector<float>({1, 1, 0.3, 0.3});
+      sizes = std::vector<std::size_t>({});
+      expected_dims = std::vector<Aidge::DimSize_t>({11, 11, 1, 1});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+  }
+
+  SECTION("Input Sizes") {
+    SECTION("TEST 1") {
+      input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({});
+      sizes = std::vector<std::size_t>({4, 5, 8, 8});
+      expected_dims = std::vector<Aidge::DimSize_t>({4, 5, 8, 8});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+    SECTION("TEST 2") {
+      input_dims = std::vector<Aidge::DimSize_t>({60, 60, 30, 30});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({});
+      sizes = std::vector<std::size_t>({1, 1, 75, 75});
+      expected_dims = std::vector<Aidge::DimSize_t>({1, 1, 75, 75});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+    SECTION("TEST 3") {
+      input_dims = std::vector<Aidge::DimSize_t>({11, 11, 20, 20});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({});
+      sizes = std::vector<std::size_t>({19, 6, 8, 8});
+      expected_dims = std::vector<Aidge::DimSize_t>({19, 6, 8, 8});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+    SECTION("TEST 4") {
+      input_dims = std::vector<Aidge::DimSize_t>({43, 211, 22, 22});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({});
+      sizes = std::vector<std::size_t>({1, 1, 10, 10});
+      expected_dims = std::vector<Aidge::DimSize_t>({1, 1, 10, 10});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+  }
+}
+
+} // namespace Aidge