diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index af1c4e05f8d28f4ca3ef341aaa6359c4552dcbfd..89bf9965d6fbdb697a9933d5f81f4794ca1ff57c 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -37,19 +37,19 @@ bool Aidge::Resize_Op::forwardDims(bool /*allowDataDependency*/) {
 
     // if ((!getInput(0)->empty()) && !getInput(2)->empty() && this->template getAttr<ResizeAttr::NoROI>() && this->template getAttr<ResizeAttr::NoSizes>())  {
     if (this->template getAttr<ResizeAttr::NoROI>() && this->template getAttr<ResizeAttr::NoSizes>())  {
+
+        AIDGE_ASSERT(getInput(0)->nbDims() == getInput(2)->size(),
+            "input tensor and Scales must have the same number of dimensions.");
     
         std::vector<DimSize_t> outDims = getInput(0)->dims();
         const std::vector<DimSize_t> inDims = getInput(0)->dims();
 
-        // TODO: cast according to tensor type
-        float* scales = static_cast<float*>(getInput(2)->getImpl()->rawPtr()); 
-        // get values of scales
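+        // Scales holds one floating-point resize factor per axis; refCastFrom() gives a
+        // float, CPU-backed view of it (`fallback` owns the converted copy when a cast is needed).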
+        std::shared_ptr<Tensor> fallback;
+        const auto& scales = getInput(2)->refCastFrom(fallback, NativeType<float>::type, "cpu");
 
-        // TODO: check if enusure different dims of sizes/scales
+        // TODO: check/ensure handling when the dims of Sizes and Scales differ
         for (std::size_t dim=0; dim < getInput(2)->size(); ++dim) {
-            // TODO: verify if batch and depth is not 1 !/ check if onnx operator impacts scakes 
-            // logic if 1, 2, 2 scales and input tensor 3, 4, 4, !!!
-            outDims[dim] = inDims[dim]*scales[dim];
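+            // output dim = floor(input dim * scale); the float product is truncated by the cast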
+            outDims[dim] = static_cast<DimSize_t>(inDims[dim] * static_cast<float*>(scales.getImpl()->hostPtr())[dim]);
         }
         mOutputs[0]->resize(outDims);
         return true;
@@ -68,10 +68,14 @@ bool Aidge::Resize_Op::forwardDims(bool /*allowDataDependency*/) {
         
         std::vector<DimSize_t> outDims = getInput(0)->dims();
 
-        // tmp
-        const std::vector<DimSize_t> inDims = getInput(0)->dims();        
-        //TODO cast according to tensor type
-        float* sizes = static_cast<float*>(getInput(3)->getImpl()->rawPtr()); 
+        AIDGE_ASSERT(getInput(0)->nbDims() == getInput(3)->size(),
+            "input tensor and Sizes must have the same number of dimensions.");
+
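+        // Sizes holds one int64 target dimension per axis; refCastFrom() gives an int64,
+        // CPU-backed view of it (`fallback` owns the converted copy when a cast is needed).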
+        std::shared_ptr<Tensor> fallback;
+        const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
         
         /*
         std::vector<DimSize_t> outDims[  width_out = sizes[0], \ 
@@ -81,9 +85,9 @@ bool Aidge::Resize_Op::forwardDims(bool /*allowDataDependency*/) {
         */
 
         for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {
-            
             // TODO: verify if batch and depth is not 1 !!!!
-            outDims[dim] = sizes[dim];
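+            // Sizes directly specifies the target output dimension of each axis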
+            outDims[dim] = static_cast<DimSize_t>(static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim]);
         }
         mOutputs[0]->resize(outDims);
         // fmt::print("Resize forward Dims for sizes. DONE.\n");