Skip to content
Snippets Groups Projects
Commit 1f508b50 authored by Maxence Naud's avatar Maxence Naud
Browse files

[Test] Add Concat test and fix its forward function

parent 7dcb5bec
No related branches found
No related tags found
2 merge requests!22Update operators implementation,!16Draft: Tiling
...@@ -26,12 +26,14 @@ namespace Aidge { ...@@ -26,12 +26,14 @@ namespace Aidge {
// Registrable type for the CPU forward kernel of the Concat operator.
// Kernel signature:
//   (operator attributes,
//    dims of the first input tensor,
//    size of every input along the concatenation axis,
//    raw pointers to each input buffer,
//    raw pointer to the output buffer)
// NOTE(review): every std::vector parameter is passed by const reference so
// the declared function type matches the kernel definition exactly
// (a by-value vector and a const-reference vector are different function
// types, which would make the registration signatures disagree).
class ConcatImplForward_cpu
    : public Registrable<ConcatImplForward_cpu, std::tuple<DataType, DataType>,
                         void(const Concat_Op::Attrs&,
                              const std::vector<DimSize_t>&,
                              const std::vector<DimSize_t>&,
                              const std::vector<const void*>&,
                              void*)> {};

// Registrable type for the CPU backward kernel of the Concat operator
// (same signature as the forward kernel).
class ConcatImplBackward_cpu
    : public Registrable<ConcatImplBackward_cpu, std::tuple<DataType, DataType>,
                         void(const Concat_Op::Attrs&,
                              const std::vector<DimSize_t>&,
                              const std::vector<DimSize_t>&,
                              const std::vector<const void*>&,
                              void*)> {};
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#define AIDGE_CPU_OPERATOR_CONCATIMPL_FORWARD_KERNEL_H_ #define AIDGE_CPU_OPERATOR_CONCATIMPL_FORWARD_KERNEL_H_
#include <algorithm> #include <algorithm>
#include <numeric>
#include <cstddef> #include <cstddef>
#include <vector> #include <vector>
...@@ -26,8 +27,9 @@ namespace Aidge { ...@@ -26,8 +27,9 @@ namespace Aidge {
template <class I, class O> template <class I, class O>
void ConcatImpl_cpu_forward_kernel(const Concat_Op::Attrs& attrs, void ConcatImpl_cpu_forward_kernel(const Concat_Op::Attrs& attrs,
const std::vector<DimSize_t> dimsFirstInput, const std::vector<DimSize_t>& dimsFirstInput,
const std::vector<const void*> inputs_, const std::vector<DimSize_t>& concatAxisValues,
const std::vector<const void*>& inputs_,
void* output_) void* output_)
{ {
// FIXME: missing Concat attributes as arguments // FIXME: missing Concat attributes as arguments
...@@ -37,6 +39,8 @@ void ConcatImpl_cpu_forward_kernel(const Concat_Op::Attrs& attrs, ...@@ -37,6 +39,8 @@ void ConcatImpl_cpu_forward_kernel(const Concat_Op::Attrs& attrs,
} }
O* output = static_cast<O*>(output_); O* output = static_cast<O*>(output_);
DimSize_t outputAxisValue = std::accumulate(concatAxisValues.begin(), concatAxisValues.end(), 0);
DimSize_t prodDimLower = 1; DimSize_t prodDimLower = 1;
for (DimIdx_t i = 0; i < std::get<1>(attrs); ++i) { for (DimIdx_t i = 0; i < std::get<1>(attrs); ++i) {
prodDimLower *= dimsFirstInput[i]; prodDimLower *= dimsFirstInput[i];
...@@ -47,13 +51,16 @@ void ConcatImpl_cpu_forward_kernel(const Concat_Op::Attrs& attrs, ...@@ -47,13 +51,16 @@ void ConcatImpl_cpu_forward_kernel(const Concat_Op::Attrs& attrs,
prodDimHigher *= dimsFirstInput[i]; prodDimHigher *= dimsFirstInput[i];
} }
std::size_t oIndexStart = 0;
std::size_t oIndex = 0; std::size_t oIndex = 0;
for (std::size_t inputId = 0; inputId < inputs.size(); ++inputId) { for (std::size_t inputId = 0; inputId < inputs.size(); ++inputId) {
oIndex = oIndexStart;
const DimSize_t iOffset = prodDimHigher*concatAxisValues[inputId];
for (std::size_t iIndex = 0; iIndex < prodDimLower; ++iIndex) { for (std::size_t iIndex = 0; iIndex < prodDimLower; ++iIndex) {
std::copy(inputs[inputId] + iIndex, inputs[inputId] + iIndex + prodDimHigher, std::copy(inputs[inputId] + iIndex*iOffset, inputs[inputId] + (iIndex+1)*iOffset, output + oIndex);
output + oIndex); oIndex += prodDimHigher*outputAxisValue;
oIndex += prodDimHigher;
} }
oIndexStart += concatAxisValues[inputId]*prodDimHigher;
} }
} }
......
...@@ -73,12 +73,15 @@ void Aidge::ConcatImpl_cpu::forward() { ...@@ -73,12 +73,15 @@ void Aidge::ConcatImpl_cpu::forward() {
mOp.getOutput(0)->dataType()}); mOp.getOutput(0)->dataType()});
std::vector<const void*> opInputs; std::vector<const void*> opInputs;
std::vector<DimSize_t> opInputAxis;
for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) { for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
opInputs.push_back(mOp.getInput(i)->getImpl()->rawPtr()); opInputs.push_back(mOp.getInput(i)->getImpl()->rawPtr());
opInputAxis.push_back(mOp.getInput(i)->dims()[mOp.template getAttr<DimSize_t>("Axis")]);
} }
kernelFunc(mOp.getStaticAttributes(), kernelFunc(mOp.getStaticAttributes(),
mOp.getInput(0)->dims(), mOp.getInput(0)->dims(),
opInputAxis,
opInputs, opInputs,
mOp.getOutput(0)->getImpl()->rawPtr()); mOp.getOutput(0)->getImpl()->rawPtr());
} }
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>

#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/operator/Concat.hpp"
#include "aidge/backend/cpu.hpp"
using namespace Aidge;
TEST_CASE("[cpu/operator] Concat(forward)", "[Concat]") {
    SECTION("Concat 4D inputs on 1st axis") {
        // One 1x3x3x2 block to be stacked in front of a 2x3x3x2 block
        // along the batch (first) axis.
        std::shared_ptr<Tensor> inputA = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
            {
                {
                    {{20, 47},{21, 48},{22, 49}},
                    {{23, 50},{24, 51},{25, 52}},
                    {{26, 53},{27, 54},{28, 55}}
                },
            }
        });
        std::shared_ptr<Tensor> inputB = std::make_shared<Tensor>(Array4D<int,2,3,3,2> {
            {
                {
                    {{29, 56},{30, 57},{31, 58}},
                    {{32, 59},{33, 60},{34, 61}},
                    {{35, 62},{36, 63},{37, 64}}
                },
                {
                    {{38, 65},{39, 66},{40, 67}},
                    {{41, 68},{42, 69},{43, 70}},
                    {{44, 71},{45, 72},{46, 73}}
                }
            }
        });
        // Expected result: the two inputs laid out back-to-back -> 3x3x3x2.
        std::shared_ptr<Tensor> expected = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
            {
                {
                    {{20, 47},{21, 48},{22, 49}},
                    {{23, 50},{24, 51},{25, 52}},
                    {{26, 53},{27, 54},{28, 55}}
                },
                {
                    {{29, 56},{30, 57},{31, 58}},
                    {{32, 59},{33, 60},{34, 61}},
                    {{35, 62},{36, 63},{37, 64}}
                },
                {
                    {{38, 65},{39, 66},{40, 67}},
                    {{41, 68},{42, 69},{43, 70}},
                    {{44, 71},{45, 72},{46, 73}}
                }
            }
        });

        // Two inputs, concatenation axis 0.
        auto concatNode = Concat(2, 0);
        concatNode->getOperator()->setBackend("cpu");
        concatNode->getOperator()->setDatatype(DataType::Int32);
        concatNode->getOperator()->associateInput(0, inputA);
        concatNode->getOperator()->associateInput(1, inputB);
        concatNode->getOperator()->computeOutputDims();
        concatNode->forward();
        concatNode->getOperator()->getOutput(0)->print();

        REQUIRE(concatNode->getOperator()->output(0) == *expected);
    }
    SECTION("Concat 4D inputs on 3rd axis") {
        // A 1x3x3x2 block joined with a 1x3x6x2 block along axis 2.
        std::shared_ptr<Tensor> inputA = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
            {
                {
                    {{20, 47},{21, 48},{22, 49}},
                    {{23, 50},{24, 51},{25, 52}},
                    {{26, 53},{27, 54},{28, 55}}
                },
            }
        });
        std::shared_ptr<Tensor> inputB = std::make_shared<Tensor>(Array4D<int,1,3,6,2> {
            {
                {
                    {{29, 56},{30, 57},{31, 58},{38, 65},{39, 66},{40, 67}},
                    {{32, 59},{33, 60},{34, 61},{41, 68},{42, 69},{43, 70}},
                    {{35, 62},{36, 63},{37, 64},{44, 71},{45, 72},{46, 73}}
                },
            }
        });
        // Expected result: rows widened from 3+6 to 9 entries -> 1x3x9x2.
        std::shared_ptr<Tensor> expected = std::make_shared<Tensor>(Array4D<int,1,3,9,2> {
            {
                {
                    {{20, 47},{21, 48},{22, 49},{29, 56},{30, 57},{31, 58},{38, 65},{39, 66},{40, 67}},
                    {{23, 50},{24, 51},{25, 52},{32, 59},{33, 60},{34, 61},{41, 68},{42, 69},{43, 70}},
                    {{26, 53},{27, 54},{28, 55},{35, 62},{36, 63},{37, 64},{44, 71},{45, 72},{46, 73}}
                },
            }
        });

        // Two inputs, concatenation axis 2.
        auto concatNode = Concat(2, 2);
        concatNode->getOperator()->setBackend("cpu");
        concatNode->getOperator()->setDatatype(DataType::Int32);
        concatNode->getOperator()->associateInput(0, inputA);
        concatNode->getOperator()->associateInput(1, inputB);
        concatNode->getOperator()->computeOutputDims();
        concatNode->forward();
        concatNode->getOperator()->getOutput(0)->print();

        REQUIRE(concatNode->getOperator()->output(0) == *expected);
    }
}
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment