Commit ce266959 authored by Houssem ROUIS

add Reshape operator

parent 0a694579
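
The operator is exercised end-to-end in the unit tests added at the bottom of this commit; below is a minimal usage sketch built from the same API calls (the tensor values are illustrative and `reshapeExample` is a hypothetical wrapper, not part of the commit):

// Minimal usage sketch, mirroring the unit tests added below.
#include <memory>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Reshape.hpp"
#include "aidge/backend/cpu.hpp"

void reshapeExample() {
    // Input #0: data to reshape; input #1: target shape as a 1D Int32 tensor.
    auto data  = std::make_shared<Aidge::Tensor>(Aidge::Array1D<float,6>{{1.0, 2.0, 3.0, 4.0, 5.0, 6.0}});
    auto shape = std::make_shared<Aidge::Tensor>(Aidge::Array1D<int,2>{{2, 3}});

    std::shared_ptr<Aidge::Node> myReshape = Aidge::Reshape();
    myReshape->getOperator()->setDatatype(Aidge::DataType::Float32);
    myReshape->getOperator()->setBackend("cpu");
    myReshape->getOperator()->associateInput(0, data);
    myReshape->getOperator()->associateInput(1, shape);
    myReshape->getOperator()->computeOutputDims();
    myReshape->forward();  // output #0 now holds the same data viewed as a 2x3 tensor
}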
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_RESHAPEIMPL_H_
#define AIDGE_CPU_OPERATOR_RESHAPEIMPL_H_
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Reshape.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include <memory>
#include <vector>
namespace Aidge {
// class Reshape_Op;
// compute kernel registry for forward and backward
class ReshapeImplForward_cpu
    : public Registrable<ReshapeImplForward_cpu, std::tuple<DataType, DataType>,
                         void(const std::size_t, const void*, void*)> {
};
class ReshapeImplBackward_cpu
    : public Registrable<ReshapeImplBackward_cpu, std::tuple<DataType, DataType>,
                         void(const std::size_t, const void*, void*)> {
};
class ReshapeImpl_cpu : public OperatorImpl {
public:
    ReshapeImpl_cpu(const Reshape_Op& op) : OperatorImpl(op) {}

    static std::unique_ptr<ReshapeImpl_cpu> create(const Reshape_Op& op) {
        return std::make_unique<ReshapeImpl_cpu>(op);
    }

    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;

    void forward() override;
};
namespace {
static Registrar<Reshape_Op> registrarReshapeImpl_cpu("cpu", Aidge::ReshapeImpl_cpu::create);
}
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_RESHAPEIMPL_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_RESHAPEIMPL_FORWARD_KERNEL_H_
#define AIDGE_CPU_OPERATOR_RESHAPEIMPL_FORWARD_KERNEL_H_
#include "aidge/utils/Registrar.hpp"
#include <cmath>
#include "aidge/backend/cpu/operator/ReshapeImpl.hpp"
namespace Aidge {
template <class I, class O>
void ReshapeImpl_cpu_forward_kernel(std::size_t inputLength,
                                    const void* input1_,
                                    void* output_) {
    const I* input_1 = static_cast<const I*>(input1_);
    O* output = static_cast<O*>(output_);

    // Reshape does not change the memory layout: the forward pass is a flat copy.
    std::copy_n(input_1, inputLength, output);
}
namespace {
static Registrar<ReshapeImplForward_cpu> registrarReshapeImplForward_cpu_Float32(
        {DataType::Float32, DataType::Float32},
        Aidge::ReshapeImpl_cpu_forward_kernel<float, float>);
static Registrar<ReshapeImplForward_cpu> registrarReshapeImplForward_cpu_Int32(
        {DataType::Int32, DataType::Int32},
        Aidge::ReshapeImpl_cpu_forward_kernel<int, int>);
static Registrar<ReshapeImplForward_cpu> registrarReshapeImplForward_cpu_Float64(
        {DataType::Float64, DataType::Float64},
        Aidge::ReshapeImpl_cpu_forward_kernel<double, double>);
}  // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_RESHAPEIMPL_FORWARD_KERNEL_H_ */
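
The registered kernels are resolved at run time from the (input, output) data-type pair; this is exactly what ReshapeImpl_cpu::forward() does below with the operator's tensors. A minimal standalone sketch of that lookup (the wrapper name, buffers, and size parameter are illustrative, not part of the commit):

// Hypothetical standalone lookup of the Float32 kernel registered above.
#include <cstddef>
#include "aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp"

void runReshapeKernelExample(const float* in, float* out, std::size_t nbElements) {
    auto kernelFunc = Aidge::Registrar<Aidge::ReshapeImplForward_cpu>::create(
            {Aidge::DataType::Float32, Aidge::DataType::Float32});
    kernelFunc(nbElements, in, out);  // flat copy of nbElements values
}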
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <cstddef>   // std::size_t

#include "aidge/operator/Reshape.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/operator/ReshapeImpl.hpp"
#include "aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp"
Aidge::NbElts_t Aidge::ReshapeImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
    // this implementation can be in-place
    return 0;
}
void Aidge::ReshapeImpl_cpu::forward() {
    assert(mOp.getInput(0) && "missing input #0");
    assert(mOp.getInput(1) && "missing input #1");
    assert(mOp.getInput(1)->nbDims() == 1 && "input #1 must be a 1D array");

    // Input #1 holds the target shape; check that every dimension is strictly
    // positive and that the total number of elements of input #0 is preserved.
    std::size_t outputSize = 1;
    const int* shape = static_cast<const int*>(mOp.getInput(1)->getImpl()->rawPtr());
    for (std::size_t i = 0; i < mOp.getInput(1)->size(); ++i) {
        assert(shape[i] > 0 && "all input #1 elements must be > 0");
        outputSize *= static_cast<std::size_t>(shape[i]);
    }
    assert((mOp.getInput(0)->size() == outputSize) &&
           "the shape given in input #1 must yield the same number of elements as input #0");

    // Find the correct kernel type
    auto kernelFunc = Registrar<ReshapeImplForward_cpu>::create({
        mOp.getInput(0)->dataType(),
        mOp.getOutput(0)->dataType()});

    // Call kernel
    kernelFunc(mOp.getInput(0)->size(),
               mOp.getInput(0)->getImpl()->rawPtr(),
               mOp.getOutput(0)->getImpl()->rawPtr());
}
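
The checks above require every target dimension to be strictly positive; ONNX-style Reshape additionally allows a single -1 entry whose value is inferred from the remaining dimensions. A sketch of how that inference could look, not part of this commit (the helper name and its standalone signature are illustrative):

#include <cassert>
#include <cstddef>
#include <vector>

// Resolve an ONNX-style target shape that may contain one -1 wildcard.
// `shape` is the raw content of input #1, `inputSize` the element count of input #0.
static std::vector<std::size_t> resolveShape(const int* shape, std::size_t nbDims, std::size_t inputSize) {
    std::vector<std::size_t> out(nbDims, 1);
    std::size_t known = 1;          // product of the explicitly given dimensions
    std::ptrdiff_t wildcard = -1;   // index of the -1 entry, if any
    for (std::size_t i = 0; i < nbDims; ++i) {
        if (shape[i] == -1) {
            assert(wildcard == -1 && "at most one -1 entry is allowed");
            wildcard = static_cast<std::ptrdiff_t>(i);
        } else {
            assert(shape[i] > 0 && "dimensions must be > 0 or -1");
            out[i] = static_cast<std::size_t>(shape[i]);
            known *= out[i];
        }
    }
    if (wildcard >= 0) {
        assert(inputSize % known == 0 && "input size must be divisible by the known dimensions");
        out[static_cast<std::size_t>(wildcard)] = inputSize / known;
    } else {
        assert(known == inputSize && "target shape must preserve the element count");
    }
    return out;
}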
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>

#include <cmath>    // std::abs
#include <cstdio>   // printf
#include <memory>

#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Reshape.hpp"
#include "aidge/backend/cpu.hpp"
using namespace Aidge;
TEST_CASE("[cpu/operator] Reshape(forward)") {
SECTION("1D Tensor") {
std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array1D<float,6> {
{1.0, 2.0, 3.0, 4.0, 5.0, 6.0}
});
std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<int,2>{{2, 3}});
std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,3> {
{
{1.0, 2.0, 3.0},
{4.0, 5.0, 6.0}
}
});
std::shared_ptr<Node> myReshape = Reshape();
myReshape->getOperator()->setDatatype(DataType::Float32);
myReshape->getOperator()->setBackend("cpu");
myReshape->getOperator()->associateInput(0, input_1);
myReshape->getOperator()->associateInput(1, input_2);
myReshape->getOperator()->computeOutputDims();
myReshape->forward();
float* resPtr = static_cast<float*>(myReshape->getOperator()->getOutput(0)->getImpl()->rawPtr());
float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
for (std::size_t i = 0; i< 6; ++i) {
printf("res %f, expected %f", resPtr[i], expectedPtr[i]);
REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
}
}
SECTION("2D Tensor") {
std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,3> {
{
{1.0, 2.0, 3.0},
{4.0, 5.0, 6.0}
}
});
std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<int,2>{{3, 2}});
std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,3,2> {
{
{1.0, 2.0},
{3.0, 4.0},
{5.0, 6.0}
}
});
std::shared_ptr<Node> myReshape = Reshape();
myReshape->getOperator()->setDatatype(DataType::Float32);
myReshape->getOperator()->setBackend("cpu");
myReshape->getOperator()->associateInput(0, input_1);
myReshape->getOperator()->associateInput(1, input_2);
myReshape->getOperator()->computeOutputDims();
myReshape->forward();
float* resPtr = static_cast<float*>(myReshape->getOperator()->getOutput(0)->getImpl()->rawPtr());
float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
for (std::size_t i = 0; i< 6; ++i) {
printf("res %f, expected %f", resPtr[i], expectedPtr[i]);
REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
}
}
}