From 6978ee80903afff6f495e102061253f8ace55095 Mon Sep 17 00:00:00 2001
From: ms245755 <michal.szczepanski@cea.fr>
Date: Thu, 6 Jun 2024 13:38:54 +0000
Subject: [PATCH] Add ASSERT that checks the input tensor dimensions

---
 src/operator/Resize.cpp | 28 ++++++++++++++++------------
 1 file changed, 16 insertions(+), 12 deletions(-)

diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index af1c4e05f..89bf9965d 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -37,19 +37,19 @@ bool Aidge::Resize_Op::forwardDims(bool /*allowDataDependency*/) {
 
     // if ((!getInput(0)->empty()) && !getInput(2)->empty() && this->template getAttr<ResizeAttr::NoROI>() && this->template getAttr<ResizeAttr::NoSizes>())  {
     if (this->template getAttr<ResizeAttr::NoROI>() && this->template getAttr<ResizeAttr::NoSizes>())  {
+
+        AIDGE_ASSERT(getInput(0)->nbDims() ==  getInput(2)->size(),\
+             "input tensor and Scales must have the same dimensions.");
     
         std::vector<DimSize_t> outDims = getInput(0)->dims();
         const std::vector<DimSize_t> inDims = getInput(0)->dims();
 
-        // TODO: cast according to tensor type
-        float* scales = static_cast<float*>(getInput(2)->getImpl()->rawPtr()); 
-        // get values of scales
+        std::shared_ptr<Tensor> fallback;
+        const auto& scales = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
 
         // TODO: check if enusure different dims of sizes/scales
         for (std::size_t dim=0; dim < getInput(2)->size(); ++dim) {
-            // TODO: verify if batch and depth is not 1 !/ check if onnx operator impacts scakes 
-            // logic if 1, 2, 2 scales and input tensor 3, 4, 4, !!!
-            outDims[dim] = inDims[dim]*scales[dim];
+            outDims[dim] = inDims[dim]*static_cast<int64_t*>(scales.getImpl()->hostPtr())[dim];
         }
         mOutputs[0]->resize(outDims);
         return true;
@@ -68,10 +68,14 @@ bool Aidge::Resize_Op::forwardDims(bool /*allowDataDependency*/) {
         
         std::vector<DimSize_t> outDims = getInput(0)->dims();
 
-        // tmp
-        const std::vector<DimSize_t> inDims = getInput(0)->dims();        
-        //TODO cast according to tensor type
-        float* sizes = static_cast<float*>(getInput(3)->getImpl()->rawPtr()); 
+        // // tmp
+        // const std::vector<DimSize_t> inDims = getInput(0)->dims();  
+
+        AIDGE_ASSERT(getInput(0)->nbDims() ==  getInput(3)->size(),\
+             "input tensor and Sizes must have the same dimensions.");
+
+        std::shared_ptr<Tensor> fallback;
+        const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
         
         /*
         std::vector<DimSize_t> outDims[  width_out = sizes[0], \ 
@@ -81,9 +85,9 @@ bool Aidge::Resize_Op::forwardDims(bool /*allowDataDependency*/) {
         */
 
         for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {
-            
             // TODO: verify if batch and depth is not 1 !!!!
-            outDims[dim] = sizes[dim];
+            
+            outDims[dim] = static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim];
         }
         mOutputs[0]->resize(outDims);
         // fmt::print("Resize forward Dims for sizes. DONE.\n");
-- 
GitLab