Skip to content
Snippets Groups Projects
Commit cac4b1f0 authored by Maxence Naud's avatar Maxence Naud
Browse files

[Add] Concat Operator implementation

parent c295836d
No related branches found
No related tags found
2 merge requests!22Update operators implementation,!16Draft: Tiling
......@@ -17,7 +17,7 @@
#include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp"
#include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp"
#include "aidge/backend/cpu/operator/BatchNormImpl.hpp"
// #include "aidge/backend/cpu/operator/ConcatImpl.hpp.hpp"
#include "aidge/backend/cpu/operator/ConcatImpl.hpp.hpp"
#include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
#include "aidge/backend/cpu/operator/ConvImpl.hpp"
#include "aidge/backend/cpu/operator/FCImpl.hpp"
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_CONCATIMPL_H_
#define AIDGE_CPU_OPERATOR_CONCATIMPL_H_
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Concat.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include <memory>
#include <vector>
namespace Aidge {
// class Concat_Op<2>;
// compute kernel registry for forward and backward
// Registry of CPU forward kernels for Concat, keyed by {input DataType, output DataType}.
// A registered kernel receives the operator's static attributes, the dims of the
// first input, the raw (type-erased) input pointers and the raw output pointer.
class ConcatImplForward_cpu
    : public Registrable<ConcatImplForward_cpu, std::tuple<DataType, DataType>, void(const Concat_Op::Attrs&,
                                                                                     const std::vector<DimSize_t>,
                                                                                     const std::vector<const void*>,
                                                                                     void*)> {};
// Registry of CPU backward kernels for Concat (same signature as the forward registry).
// No kernels are registered yet: ConcatImpl_cpu::backward() is not implemented.
class ConcatImplBackward_cpu
    : public Registrable<ConcatImplBackward_cpu, std::tuple<DataType, DataType>, void(const Concat_Op::Attrs&,
                                                                                      const std::vector<DimSize_t>,
                                                                                      const std::vector<const void*>,
                                                                                      void*)> {};
/// CPU implementation of the Concat operator: tracks consumed/produced element
/// counts for the scheduler and dispatches forward() to a registered kernel.
class ConcatImpl_cpu : public OperatorImpl {
private:
    /// Operator whose computation this implementation carries out.
    const Concat_Op& mOp;
    /// Elements consumed so far, one counter per input.
    std::vector<NbElts_t> mNbConsumedData;
    /// Elements produced so far on the single output.
    std::array<NbElts_t, 1> mNbProducedData = {};

public:
    ConcatImpl_cpu(const Concat_Op& op)
        : mOp(op),
          mNbConsumedData(op.nbInputs())  // value-initialized: one zeroed counter per input
    {}

    /// Factory registered against the "cpu" backend key (see registrar below).
    static std::unique_ptr<ConcatImpl_cpu> create(const Concat_Op& op) {
        return std::make_unique<ConcatImpl_cpu>(op);
    }

public:
    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
    NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
    void updateConsummerProducer() override final;
    void forward() override;
    void backward() override;
};
namespace {
// Self-registration of this implementation under the "cpu" backend key.
// NOTE(review): an anonymous namespace in a header gives every translation unit
// its own copy of this registrar — confirm this is the project-wide convention.
static Registrar<Concat_Op> registrarConcatImpl_cpu("cpu", Aidge::ConcatImpl_cpu::create);
}  // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_CONCATIMPL_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_CONCATIMPL_FORWARD_KERNEL_H_
#define AIDGE_CPU_OPERATOR_CONCATIMPL_FORWARD_KERNEL_H_
#include <algorithm>
#include <cstddef>
#include <vector>
#include "aidge/backend/cpu/operator/ConcatImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/operator/Concat.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
/// Concatenates the input tensors along the axis stored in the operator's
/// attributes (std::get<1>(attrs)), writing the result contiguously to output_.
/// @tparam I input element type; @tparam O output element type.
/// NOTE(review): only the FIRST input's dims are provided, so every input is
/// assumed to share that exact shape (including the concatenation axis) —
/// confirm with the caller, or extend the kernel signature with per-input dims.
template <class I, class O>
void ConcatImpl_cpu_forward_kernel(const Concat_Op::Attrs& attrs,
                                   const std::vector<DimSize_t> dimsFirstInput,
                                   const std::vector<const void*> inputs_,
                                   void* output_)
{
    std::vector<const I*> inputs;
    inputs.reserve(inputs_.size());
    for (const auto& input_ : inputs_) {
        inputs.push_back(static_cast<const I*>(input_));
    }
    O* output = static_cast<O*>(output_);

    const std::size_t axisIdx = static_cast<std::size_t>(std::get<1>(attrs));

    // Number of independent slices before the concatenation axis...
    DimSize_t prodDimLower = 1;
    for (std::size_t i = 0; i < axisIdx; ++i) {
        prodDimLower *= dimsFirstInput[i];
    }
    // ...and number of contiguous elements after it.
    DimSize_t prodDimHigher = 1;
    for (std::size_t i = axisIdx + 1; i < dimsFirstInput.size(); ++i) {
        prodDimHigher *= dimsFirstInput[i];
    }

    // Contiguous run contributed by ONE input for ONE lower-dim slice:
    // the axis extent times everything after the axis.
    const DimSize_t chunkSize = dimsFirstInput[axisIdx] * prodDimHigher;

    // Fixes w.r.t. the previous version:
    //  * the lower-dim loop must be outermost so the inputs' chunks are
    //    interleaved per slice (otherwise inputs end up back-to-back, which is
    //    only correct when prodDimLower == 1);
    //  * each copy must read a full chunk (axis extent * prodDimHigher) at
    //    stride chunkSize, not prodDimHigher elements at stride 1.
    std::size_t oIndex = 0;
    for (std::size_t lower = 0; lower < prodDimLower; ++lower) {
        for (std::size_t inputId = 0; inputId < inputs.size(); ++inputId) {
            std::copy(inputs[inputId] + lower * chunkSize,
                      inputs[inputId] + (lower + 1) * chunkSize,
                      output + oIndex);
            oIndex += chunkSize;
        }
    }
}
namespace {
// Register the forward kernel for each supported {input, output} data-type pair.
// Only same-type pairs are provided (no implicit type conversion in the kernel).
static Registrar<ConcatImplForward_cpu> registrarConcatImplForward_cpu_Float32(
        {DataType::Float32, DataType::Float32}, Aidge::ConcatImpl_cpu_forward_kernel<float, float>);
static Registrar<ConcatImplForward_cpu> registrarConcatImplForward_cpu_Int32(
        {DataType::Int32, DataType::Int32}, Aidge::ConcatImpl_cpu_forward_kernel<int, int>);
static Registrar<ConcatImplForward_cpu> registrarConcatImplForward_cpu_Float64(
        {DataType::Float64, DataType::Float64},
        Aidge::ConcatImpl_cpu_forward_kernel<double, double>);
}  // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_CONCATIMPL_FORWARD_KERNEL_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <numeric> // std::accumulate
#include <vector>
#include "aidge/utils/Types.h"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/backend/cpu/operator/ConcatImpl.hpp"
#include "aidge/backend/cpu/operator/ConcatImpl_forward_kernels.hpp"
// The kernel needs the full tensor on every input before it can run, so the
// requirement is simply the total element count of that input.
Aidge::NbElts_t Aidge::ConcatImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
    assert(mOp.getInput(inputIdx) && "requires valid input");
    NbElts_t nbElts = 1;
    for (const DimSize_t dim : std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims()) {
        nbElts *= dim;
    }
    return nbElts;
}
Aidge::NbElts_t Aidge::ConcatImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
    // No input data needs protection from being overwritten by the output.
    // NOTE(review): the previous comment here mentioned convolution padding and
    // appears copy-pasted from ConvImpl — confirm in-place Concat is intended.
    return 0;
}
// Memory needed for the output: the whole output tensor, regardless of how much
// input data is currently available (inputsSize is therefore ignored).
Aidge::NbElts_t Aidge::ConcatImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
    assert(outputIdx == 0 && "operator has only one output");
    (void) outputIdx;
    NbElts_t nbElts = 1;
    for (const DimSize_t dim : std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims()) {
        nbElts *= dim;
    }
    return nbElts;
}
// Returns how many elements have been consumed so far on input `inputIdx`
// (counters are advanced by updateConsummerProducer()).
Aidge::NbElts_t Aidge::ConcatImpl_cpu::getNbConsumedData(const Aidge::IOIndex_t inputIdx) const {
    assert(inputIdx < mNbConsumedData.size());
    return mNbConsumedData[inputIdx];
}
// Returns how many elements have been produced so far on output `outputIdx`
// (only index 0 is valid: Concat has a single output).
Aidge::NbElts_t Aidge::ConcatImpl_cpu::getNbProducedData(const Aidge::IOIndex_t outputIdx) const {
    assert(outputIdx < mNbProducedData.size());
    return mNbProducedData[outputIdx];
}
void Aidge::ConcatImpl_cpu::updateConsummerProducer() {
for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::ConcatImpl_cpu::forward() {
assert(mOp.getInput(0) && "missing input in Concat operator");
DataType datatypeFirstInput = mOp.getInput(0)->dataType();
for (IOIndex_t i = 1; i < mOp.nbInputs(); ++i) {
assert(mOp.getInput(i) && "missing input in Concat operator");
assert(mOp.getInput(i)->dataType() == datatypeFirstInput);
}
auto kernelFunc = Registrar<ConcatImplForward_cpu>::create({
datatypeFirstInput,
mOp.getOutput(0)->dataType()});
std::vector<const void*> opInputs;
for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
opInputs.push_back(mOp.getInput(i)->getImpl()->rawPtr());
}
kernelFunc(mOp.getStaticAttributes(),
mOp.getInput(0)->dims(),
opInputs,
mOp.getOutput(0)->getImpl()->rawPtr());
}
// TODO: backward pass not implemented; currently only reports that at runtime.
// NOTE(review): relies on printf without a visible <cstdio> include in this
// file — presumably pulled in transitively; confirm.
void Aidge::ConcatImpl_cpu::backward() { printf("Not implemented yet.\n"); }
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment