Commit 6978ee80 authored by Michal Szczepanski

ASSERT that checks the dims of the input

parent f5e48c55
2 merge requests: !152 Update Aidge export to take a graph view has an argument instead of a..., !125 Operator resize
Pipeline #47567 passed
@@ -37,19 +37,19 @@ bool Aidge::Resize_Op::forwardDims(bool /*allowDataDependency*/) {
     // if ((!getInput(0)->empty()) && !getInput(2)->empty() && this->template getAttr<ResizeAttr::NoROI>() && this->template getAttr<ResizeAttr::NoSizes>()) {
     if (this->template getAttr<ResizeAttr::NoROI>() && this->template getAttr<ResizeAttr::NoSizes>()) {
+        AIDGE_ASSERT(getInput(0)->nbDims() == getInput(2)->size(),\
+            "input tensor and Scales must have the same dimentions.");
         std::vector<DimSize_t> outDims = getInput(0)->dims();
         const std::vector<DimSize_t> inDims = getInput(0)->dims();
-        // TODO: cast according to tensor type
-        float* scales = static_cast<float*>(getInput(2)->getImpl()->rawPtr());
-        // get values of scales
+        std::shared_ptr<Tensor> fallback;
+        const auto& scales = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
         // TODO: check if enusure different dims of sizes/scales
         for (std::size_t dim=0; dim < getInput(2)->size(); ++dim) {
-            // TODO: verify if batch and depth is not 1 !/ check if onnx operator impacts scakes
-            // logic if 1, 2, 2 scales and input tensor 3, 4, 4, !!!
-            outDims[dim] = inDims[dim]*scales[dim];
+            outDims[dim] = inDims[dim]*static_cast<int64_t*>(scales.getImpl()->hostPtr())[dim];
         }
         mOutputs[0]->resize(outDims);
         return true;
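Note: the Scales branch above now asserts that the Scales input has one entry per input dimension and multiplies each input dimension by the corresponding factor. A minimal standalone sketch of that arithmetic (not Aidge code; the function name is hypothetical, and the int64 scale type mirrors the cast used in this commit):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Sketch only: output dims = input dims scaled per axis, after checking that
    // both vectors have the same rank (same invariant as the AIDGE_ASSERT above).
    std::vector<std::size_t> outDimsFromScales(const std::vector<std::size_t>& inDims,
                                               const std::vector<std::int64_t>& scales) {
        assert(inDims.size() == scales.size());
        std::vector<std::size_t> outDims(inDims.size());
        for (std::size_t dim = 0; dim < inDims.size(); ++dim) {
            outDims[dim] = inDims[dim] * static_cast<std::size_t>(scales[dim]);
        }
        return outDims;
    }

    // Example: a 3x4x4 input with scales {1, 2, 2} gives a 3x8x8 output.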
@@ -68,10 +68,14 @@ bool Aidge::Resize_Op::forwardDims(bool /*allowDataDependency*/) {
         std::vector<DimSize_t> outDims = getInput(0)->dims();
-        // tmp
-        const std::vector<DimSize_t> inDims = getInput(0)->dims();
-        //TODO cast according to tensor type
-        float* sizes = static_cast<float*>(getInput(3)->getImpl()->rawPtr());
+        // // tmp
+        // const std::vector<DimSize_t> inDims = getInput(0)->dims();
+        AIDGE_ASSERT(getInput(0)->nbDims() == getInput(3)->size(),\
+            "input tensor and Sizes must have the same dimentions.");
+        std::shared_ptr<Tensor> fallback;
+        const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
         /*
         std::vector<DimSize_t> outDims[ width_out = sizes[0], \
@@ -81,9 +85,9 @@ bool Aidge::Resize_Op::forwardDims(bool /*allowDataDependency*/) {
         */
         for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {
             // TODO: verify if batch and depth is not 1 !!!!
-            outDims[dim] = sizes[dim];
+            outDims[dim] = static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim];
         }
         mOutputs[0]->resize(outDims);
         // fmt::print("Resize forward Dims for sizes. DONE.\n");
......
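In the Sizes branch, the output dimensions are taken directly from the Sizes input after the same rank check. A minimal standalone sketch of that logic (not Aidge code; the function name is hypothetical):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Sketch only: output dims are the requested sizes, one entry per input dim.
    std::vector<std::size_t> outDimsFromSizes(const std::vector<std::size_t>& inDims,
                                              const std::vector<std::int64_t>& sizes) {
        assert(inDims.size() == sizes.size());
        std::vector<std::size_t> outDims(sizes.size());
        for (std::size_t dim = 0; dim < sizes.size(); ++dim) {
            outDims[dim] = static_cast<std::size_t>(sizes[dim]);
        }
        return outDims;
    }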