Skip to content
Snippets Groups Projects
Commit a4c970ff authored by Inna Kucher's avatar Inna Kucher
Browse files

adding Scaling operator implementation

parent ba9ba4b7
No related branches found
No related tags found
No related merge requests found
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_CPU_OPERATOR_ScalingIMPL_H__
#define __AIDGE_CPU_OPERATOR_ScalingIMPL_H__
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Scaling.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"

#include <array>
#include <memory>
#include <vector>
namespace Aidge {
// class Scaling_Op;
// compute kernel registry for forward and backward
// Registry of forward kernels for the Scaling operator.
// Keyed by the (input DataType, output DataType) pair; a registered kernel
// receives the operator's parameter tuple, the flat element count, and
// type-erased raw pointers to the input and output buffers.
class ScalingImplForward_cpu
: public Registrable<ScalingImplForward_cpu, std::tuple<DataType, DataType>, void(const Scaling_Op::Parameters&, std::size_t, const void*, void*)> {
};
// Registry of backward kernels for the Scaling operator (same key and
// signature as the forward registry). No kernel is registered yet in this
// file; backward() below is a stub.
class ScalingImplBackward_cpu
: public Registrable<ScalingImplBackward_cpu, std::tuple<DataType, DataType>, void(const Scaling_Op::Parameters&, std::size_t, const void*, void*)> {
};
// CPU backend implementation of the Scaling operator.
// Tracks the number of elements consumed/produced on the single input/output
// (for scheduler bookkeeping) and dispatches forward() to the kernel
// registered for the input/output data-type pair.
class ScalingImpl_cpu : public OperatorImpl {
private:
    const Scaling_Op& mOp;                    // operator being implemented (non-owning reference)
    std::array<NbElts_t, 1> mNbConsumedData;  // elements consumed so far on input #0
    std::array<NbElts_t, 1> mNbProducedData;  // elements produced so far on output #0

public:
    ScalingImpl_cpu(const Scaling_Op& op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}

    // Factory registered against Scaling_Op for the "cpu" backend (see the
    // Registrar below). Removed a leftover debug print to std::cout, which
    // also relied on <iostream> being included only transitively.
    static std::unique_ptr<ScalingImpl_cpu> create(const Scaling_Op& op) {
        return std::make_unique<ScalingImpl_cpu>(op);
    }

public:
    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, __attribute__((unused)) const std::vector<DimSize_t>& inputsSize) const override final;
    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
    void updateConsummerProducer() override final;

    void forward();
    void backward();
};
namespace {
// Registers the CPU implementation factory for Scaling_Op under the "cpu" key.
// NOTE(review): an anonymous namespace in a header gives every including TU
// its own copy of this registrar (multiple registrations) — confirm the
// Registrar class tolerates/expects this pattern.
static Registrar<Scaling_Op> registrarScalingImpl_cpu("cpu", Aidge::ScalingImpl_cpu::create);
}
} // namespace Aidge
#endif /* __AIDGE_CPU_OPERATOR_ScalingIMPL_H__ */
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_CPU_OPERATOR_ScalingIMPL_FORWARD_KERNEL_H__
#define __AIDGE_CPU_OPERATOR_ScalingIMPL_FORWARD_KERNEL_H__
#include "aidge/utils/Registrar.hpp"
#include "aidge/backend/cpu/operator/ScalingImpl.hpp"
namespace Aidge {
// Element-wise forward kernel: output[i] = input[i] * scalingFactor.
//
// @tparam I input scalar type
// @tparam O output scalar type
// @param params      operator parameter tuple; element 0 is the scaling factor
// @param inputLength flat number of elements to process (typo "inputLenght" fixed;
//                    positional call sites are unaffected)
// @param input_      type-erased pointer to the input buffer
// @param output_     type-erased pointer to the output buffer
template <class I, class O>
void ScalingImpl_cpu_forward_kernel(const Scaling_Op::Parameters& params,
                                    std::size_t inputLength,
                                    const void* input_,
                                    void* output_) {
    const I* input = static_cast<const I*>(input_);
    O* output = static_cast<O*>(output_);
    // NOTE(review): the factor is converted to the *input* type before the
    // multiply, so for integer I a fractional factor is truncated — confirm
    // this matches the intended quantization semantics of Scaling.
    const I scalingFactor = static_cast<I>(std::get<0>(params));

    for (std::size_t i = 0; i < inputLength; ++i) {
        output[i] = input[i] * scalingFactor;
    }
}
namespace {
// Register the forward kernel for each supported (input, output) data-type
// pair. Only same-type combinations are provided: float32, int32, float64.
static Registrar<ScalingImplForward_cpu> registrarScalingImplForward_cpu_Float32(
{DataType::Float32, DataType::Float32}, Aidge::ScalingImpl_cpu_forward_kernel<float, float>);
static Registrar<ScalingImplForward_cpu> registrarScalingImplForward_cpu_Int32(
{DataType::Int32, DataType::Int32}, Aidge::ScalingImpl_cpu_forward_kernel<int, int>);
static Registrar<ScalingImplForward_cpu> registrarScalingImplForward_cpu_Float64(
{DataType::Float64, DataType::Float64}, Aidge::ScalingImpl_cpu_forward_kernel<double, double>);
} // namespace
} // namespace Aidge
#endif /* __AIDGE_CPU_OPERATOR_ScalingIMPL_FORWARD_KERNEL_H__ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <chrono> // std::chrono::milliseconds
#include <numeric> // std::accumulate
#include <thread> // std::this_thread::sleep_for
#include "aidge/operator/Scaling.hpp"
#include "aidge/backend/cpu/operator/ScalingImpl.hpp"
#include "aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp"
#include "aidge/utils/Types.h"
#include <numeric>
#include <vector>
// FIXME: replace whole Tensor with minimum needed data quantity
// The Scaling kernel operates on the whole tensor at once, so the data
// required on the (single) input is its total element count.
// FIXME: replace whole Tensor with minimum needed data quantity
Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*inputIdx*/) const {
    assert(mOp.getInput(0) && "requires valid input");

    // Flatten the shape: product of all dimensions.
    NbElts_t elementCount = 1;
    for (const auto dimension : mOp.getInput(0)->dims()) {
        elementCount *= static_cast<NbElts_t>(dimension);
    }
    return elementCount;
}
// Amount of input data that must be preserved during computation: none,
// meaning the operation can run in-place on its input buffer.
// NOTE(review): the original comment referenced convolution/padding and looks
// copy-pasted from another operator implementation.
Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
// for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
return 0;
}
// Memory required for the output: one element per entry of the output tensor
// (both parameters are unused — there is a single output and the input sizes
// do not influence the result).
Aidge::NbElts_t Aidge::ScalingImpl_cpu::getRequiredMemory(__attribute__((unused)) const Aidge::IOIndex_t outputIdx, __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
    // Flatten the output shape: product of all dimensions.
    NbElts_t totalElements = 1;
    for (const auto dimension : mOp.getOutput(0)->dims()) {
        totalElements *= static_cast<NbElts_t>(dimension);
    }
    return totalElements;
}
// Total elements consumed so far on input #0 (the index argument is ignored:
// Scaling has a single input).
Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*inputIdx*/) const {
return mNbConsumedData[0];
}
// Total elements produced so far on output #0 (the index argument is ignored:
// Scaling has a single output).
Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
return mNbProducedData[0];
}
// Advances the scheduler bookkeeping by one full forward pass worth of data.
// NOTE(review): forward() performs these same increments; if the scheduler
// calls both, the counters are double-counted — confirm against the
// OperatorImpl contract. (The "Consummer" spelling comes from the base-class
// virtual and cannot be fixed here.)
void Aidge::ScalingImpl_cpu::updateConsummerProducer(){
mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
mNbProducedData[0]+= getRequiredMemory(0, {});
}
// Runs the Scaling forward pass: looks up the kernel registered for the
// (input, output) data-type pair and invokes it on the raw tensor buffers,
// then updates the consumed/produced counters.
void Aidge::ScalingImpl_cpu::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
// Find the correct kernel type
auto kernelFunc = Registrar<ScalingImplForward_cpu>::create({
mOp.getInput(0)->dataType(),
mOp.getOutput(0)->dataType()});
// Call kernel with (params, flat element count, raw input ptr, raw output ptr).
// NOTE(review): only this call site casts getInput(0) to Tensor before
// calling size(), while dims()/getImpl() are called on it directly elsewhere
// — confirm whether the static_pointer_cast is actually needed.
kernelFunc(mOp.getParams(),
std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
mOp.getInput(0)->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
// NOTE(review): updateConsummerProducer() performs these same increments;
// verify the scheduler does not invoke both per step (double counting).
mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
mNbProducedData[0]+= getRequiredMemory(0, {});
}
// Backward pass stub: no backward kernel is registered for Scaling yet, so
// this only prints a notice. // TODO: implement and register a backward kernel.
void Aidge::ScalingImpl_cpu::backward() {
printf("Not implemented yet.\n");
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment