Commit 42445cbd authored by Houssem ROUIS

switch shape input to attr for Reshape

parent 5b1a492f
2 merge requests: !50 version 0.2.0, !20 Vit operators
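In short, the Reshape operator no longer takes its target shape as a second input tensor; the shape is now passed as an attribute when the node is built. A minimal usage sketch of the difference, mirroring the unit-test changes in the diff below (the include paths, the exact Reshape factory signature, and the final forward() call are assumptions, not taken verbatim from this commit):

#include <memory>

#include "aidge/data/Tensor.hpp"        // assumed header location
#include "aidge/operator/Reshape.hpp"   // shown in the diff below
#include "aidge/backend/cpu.hpp"        // assumed: registers the CPU implementations

using namespace Aidge;

int main() {
    auto input = std::make_shared<Tensor>(Array1D<float, 6>{{1.0, 2.0, 3.0, 4.0, 5.0, 6.0}});

    // Before this commit: the shape was data input #1.
    //   auto myReshape = Reshape();
    //   auto shape = std::make_shared<Tensor>(Array1D<int, 2>{{2, 3}});
    //   op->associateInput(1, shape);

    // After this commit: the shape is an attribute of the operator.
    std::shared_ptr<Node> myReshape = Reshape({2, 3});
    auto op = std::static_pointer_cast<OperatorTensor>(myReshape->getOperator());
    op->associateInput(0, input);
    op->setDataType(DataType::Float32);
    op->setBackend("cpu");
    op->computeOutputDims();
    myReshape->forward();   // assumed to follow, as in the unit test
    return 0;
}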
@@ -20,13 +20,13 @@
 namespace Aidge {
 template <class I, class O>
 void ReshapeImpl_cpu_forward_kernel(std::size_t inputLength,
-                                    const void* input1_,
+                                    const void* input_,
                                     void* output_) {
-    const I* input_1 = static_cast<const I*>(input1_);
+    const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
-    std::copy_n(input_1, inputLength, output);
+    std::copy_n(input, inputLength, output);
 }
 namespace {
...
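The only change in the forward kernel above is the renaming of input1_/input_1 to input_/input: with the shape no longer arriving as a second data input, there is no reason to number the remaining one. The kernel itself is a flat element copy, since reshaping only rewrites the tensor's dimensions, not its data. A self-contained sketch reproducing the kernel shown above with a tiny driver (standalone, outside the Aidge namespace and registrar machinery):

#include <algorithm>   // std::copy_n
#include <cstddef>     // std::size_t
#include <iostream>

// Reproduction of the kernel from the hunk above.
template <class I, class O>
void ReshapeImpl_cpu_forward_kernel(std::size_t inputLength,
                                    const void* input_,
                                    void* output_) {
    const I* input = static_cast<const I*>(input_);
    O* output = static_cast<O*>(output_);
    std::copy_n(input, inputLength, output);
}

int main() {
    const float in[6] = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f};   // logically a 1x6 tensor
    float out[6] = {};                                     // logically a 2x3 tensor
    ReshapeImpl_cpu_forward_kernel<float, float>(6, in, out);
    for (float v : out) { std::cout << v << ' '; }         // prints: 1 2 3 4 5 6
    std::cout << '\n';
}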
@@ -10,10 +10,6 @@
 ********************************************************************************/
 #include <cassert>
-#include <chrono>   // std::chrono::milliseconds
-#include <numeric>  // std::accumulate
-#include <thread>   // std::this_thread::sleep_for
-#include <vector>
 #include "aidge/operator/Reshape.hpp"
 #include "aidge/utils/Types.h"
@@ -27,17 +23,9 @@ Aidge::NbElts_t Aidge::ReshapeImpl_cpu::getNbRequiredProtected(const Aidge::IOIn
 }
 void Aidge::ReshapeImpl_cpu::forward() {
-    std::size_t outputSize = 1;
-    int* shape = static_cast<int*>(std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->getImpl()->rawPtr());
-    for(std::size_t i= 0; i<std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(); ++i)
-    {
-        assert(shape[i]>0 && "all input #1 elements must be >0");
-        outputSize *= shape[i];
-    }
-    assert((std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size() == outputSize) &&
-            "the shape given in input #1 must give the same size of input #0");
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size() ==
+           std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size()
+            && "input must have the same overall size as shape");
     // Find the correct kernel type
     auto kernelFunc = Registrar<ReshapeImplForward_cpu>::create({
...
@@ -25,7 +25,6 @@ TEST_CASE("[cpu/operator] Reshape(forward)") {
     std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array1D<float,6> {
         {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}
     });
-    std::shared_ptr<Tensor> shape = std::make_shared<Tensor>(Array1D<int,2>{{2, 3}});
     std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,3> {
         {
             {1.0, 2.0, 3.0},
@@ -33,10 +32,9 @@ TEST_CASE("[cpu/operator] Reshape(forward)") {
         }
     });
-    std::shared_ptr<Node> myReshape = Reshape();
+    std::shared_ptr<Node> myReshape = Reshape({2, 3});
     auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
     op->associateInput(0, input);
-    op->associateInput(1, shape);
     op->setDataType(DataType::Float32);
     op->setBackend("cpu");
     op->computeOutputDims();
@@ -52,7 +50,6 @@ TEST_CASE("[cpu/operator] Reshape(forward)") {
         }
     });
-    std::shared_ptr<Tensor> shape = std::make_shared<Tensor>(Array1D<int,2>{{3, 2}});
     std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,3,2> {
         {
             {1.0, 2.0},
@@ -61,10 +58,9 @@ TEST_CASE("[cpu/operator] Reshape(forward)") {
         }
     });
-    std::shared_ptr<Node> myReshape = Reshape();
+    std::shared_ptr<Node> myReshape = Reshape({3, 2});
     auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
     op->associateInput(0, input);
-    op->associateInput(1, shape);
     op->setDataType(DataType::Float32);
     op->setBackend("cpu");
     op->computeOutputDims();
...