Commit 1680bc4b authored by Olivier BICHLER

Merge branch 'master' into unified_params

parents d26753df fb5598f8
README.md
@@ -14,9 +14,11 @@ So far be sure to have the correct requirements to use this library
## Pip installation
-You will need to install first the aidge_core library before installing aidge_cpu.
-Also, make sure that the install path was set before installing aidge_core library.
-Then run in your python environnement :
+You will need to install the ``aidge_core`` library first, before installing ``aidge_backend_cpu``.
+If you set a custom install path for the ``aidge_core`` library, make sure to use the same one here.
+Then run in your Python environment:
``` bash
pip install . -v
```
@@ -46,4 +48,4 @@ Important: this command can also be run with `make`.
To compile the CPU library with the Python binding and the associated unit tests, run
```
make cpu_with_pybind_tests
-```
\ No newline at end of file
+```
aidge/backend/cpu.hpp
@@ -23,5 +23,6 @@
#include "aidge/backend/cpu/operator/ProducerImpl.hpp"
#include "aidge/backend/cpu/operator/ReLUImpl.hpp"
#include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
#include "aidge/backend/cpu/operator/ScalingImpl.hpp"
#endif /* AIDGE_CPU_IMPORTS_H_ */
\ No newline at end of file
New file: aidge/backend/cpu/operator/ScalingImpl.hpp

/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_SCALINGIMPL_H_
#define AIDGE_CPU_OPERATOR_SCALINGIMPL_H_

#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Scaling.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"

#include <array>    // std::array (data counters below)
#include <cstddef>  // std::size_t
#include <memory>
#include <vector>

namespace Aidge {
// Compute kernel registries for the forward and backward passes,
// keyed on the (input, output) data-type pair
class ScalingImplForward_cpu
    : public Registrable<ScalingImplForward_cpu, std::tuple<DataType, DataType>,
                         void(const Scaling_Op::Parameters&, std::size_t, const void*, void*)> {
};

class ScalingImplBackward_cpu
    : public Registrable<ScalingImplBackward_cpu, std::tuple<DataType, DataType>,
                         void(const Scaling_Op::Parameters&, std::size_t, const void*, void*)> {
};
class ScalingImpl_cpu : public OperatorImpl {
private:
    const Scaling_Op& mOp;
    std::array<NbElts_t, 1> mNbConsumedData;
    std::array<NbElts_t, 1> mNbProducedData;

public:
    ScalingImpl_cpu(const Scaling_Op& op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}

    static std::unique_ptr<ScalingImpl_cpu> create(const Scaling_Op& op) {
        return std::make_unique<ScalingImpl_cpu>(op);
    }

public:
    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
    NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
    void updateConsummerProducer() override final;

    void forward();
    void backward();
};
namespace {
static Registrar<Scaling_Op> registrarScalingImpl_cpu("cpu", Aidge::ScalingImpl_cpu::create);
}
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_SCALINGIMPL_H_ */
\ No newline at end of file
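Taken together, the two registration layers in this header give a two-level dispatch: the `"cpu"` key on `Registrar<Scaling_Op>` resolves the implementation factory, and the (input, output) `DataType` pair on `Registrar<ScalingImplForward_cpu>` resolves the compute kernel, which is the same lookup `ScalingImpl_cpu::forward()` performs further down in this diff. A minimal sketch of both lookups, assuming only the headers added here:

```cpp
#include "aidge/backend/cpu/operator/ScalingImpl.hpp"
#include "aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp"

// Sketch of the two-level dispatch enabled by the registrars above.
void exampleDispatch(const Aidge::Scaling_Op& scalingOp) {
    // 1. Backend lookup: "cpu" -> ScalingImpl_cpu::create
    //    (registered by registrarScalingImpl_cpu).
    auto impl = Aidge::Registrar<Aidge::Scaling_Op>::create("cpu")(scalingOp);

    // 2. Kernel lookup: (Float32, Float32) -> ScalingImpl_cpu_forward_kernel<float, float>
    //    (registered in ScalingImpl_forward_kernels.hpp below).
    auto kernelFunc = Aidge::Registrar<Aidge::ScalingImplForward_cpu>::create(
        {Aidge::DataType::Float32, Aidge::DataType::Float32});

    // kernelFunc(params, inputLength, rawInput, rawOutput) would then apply the
    // scaling; the actual call with real tensor data is omitted in this sketch.
    (void)impl;
    (void)kernelFunc;
}
```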
New file: aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp

/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_SCALINGIMPL_FORWARD_KERNEL_H_
#define AIDGE_CPU_OPERATOR_SCALINGIMPL_FORWARD_KERNEL_H_

#include <cstddef>  // std::size_t
#include <tuple>    // std::get

#include "aidge/utils/Registrar.hpp"
#include "aidge/backend/cpu/operator/ScalingImpl.hpp"

namespace Aidge {
template <class I, class O>
void ScalingImpl_cpu_forward_kernel(const Scaling_Op::Parameters& params,
                                    std::size_t inputLength,
                                    const void* input_,
                                    void* output_) {
    const I* input = static_cast<const I*>(input_);
    O* output = static_cast<O*>(output_);
    const I scalingFactor = static_cast<I>(std::get<0>(params));

    // Elementwise multiply by the scaling factor
    for (std::size_t i = 0; i < inputLength; ++i) {
        output[i] = input[i] * scalingFactor;
    }
}
namespace {
static Registrar<ScalingImplForward_cpu> registrarScalingImplForward_cpu_Float32(
        {DataType::Float32, DataType::Float32}, Aidge::ScalingImpl_cpu_forward_kernel<float, float>);
static Registrar<ScalingImplForward_cpu> registrarScalingImplForward_cpu_Int32(
        {DataType::Int32, DataType::Int32}, Aidge::ScalingImpl_cpu_forward_kernel<int, int>);
static Registrar<ScalingImplForward_cpu> registrarScalingImplForward_cpu_Float64(
        {DataType::Float64, DataType::Float64}, Aidge::ScalingImpl_cpu_forward_kernel<double, double>);
}  // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_SCALINGIMPL_FORWARD_KERNEL_H_ */
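At its core the forward kernel is just an elementwise multiply by the scaling factor. Here is a self-contained mock of that logic in plain C++, with a `std::tuple` standing in for `Scaling_Op::Parameters` (an assumption made only because the kernel reads the factor with `std::get<0>`):

```cpp
#include <cstddef>
#include <cstdio>
#include <tuple>

// Mock of ScalingImpl_cpu_forward_kernel: multiply every element by the
// factor stored in the first slot of the (assumed tuple-like) parameters.
template <class I, class O>
void scaling_forward_mock(const std::tuple<float>& params, std::size_t inputLength,
                          const I* input, O* output) {
    const I scalingFactor = static_cast<I>(std::get<0>(params));
    for (std::size_t i = 0; i < inputLength; ++i) {
        output[i] = input[i] * scalingFactor;
    }
}

int main() {
    const float input[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    float output[4];
    scaling_forward_mock<float, float>(std::make_tuple(0.5f), 4, input, output);
    for (float v : output) {
        std::printf("%g ", v);  // prints: 0.5 1 1.5 2
    }
    std::printf("\n");
    return 0;
}
```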
setup.py
@@ -62,11 +62,11 @@ class CMakeBuild(build_ext):
        os.chdir(str(build_temp))

        # Force CMake to use the Python executable that launched setup.py
        # when it sets up PythonInterp
        param_py = "-DPYTHON_EXECUTABLE=" + sys.executable

-        install_path = f"{build_temp}/install" if "AIDGE_INSTALL" not in os.environ else os.environ["AIDGE_INSTALL"]
+        install_path = os.path.join(sys.prefix, "lib", "libAidge") if "AIDGE_INSTALL" not in os.environ else os.environ["AIDGE_INSTALL"]

        self.spawn(['cmake', str(cwd), param_py, '-DTEST=OFF', f'-DCMAKE_INSTALL_PREFIX:PATH={install_path}'])
        if not self.dry_run:
@@ -83,11 +83,11 @@ class CMakeBuild(build_ext):
        for file in files:
            if file.endswith('.so') and (root != str(aidge_package.absolute())):
                currentFile = os.path.join(root, file)
                shutil.copy(currentFile, str(aidge_package.absolute()))

        # Copy version.txt in aidge_package
        os.chdir(os.path.dirname(__file__))
        shutil.copy("version.txt", str(aidge_package.absolute()))
if __name__ == '__main__':
New file: ScalingImpl.cpp

/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <cstdio>      // printf (backward() placeholder below)
#include <functional>  // std::multiplies
#include <numeric>     // std::accumulate
#include <vector>

#include "aidge/operator/Scaling.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/operator/ScalingImpl.hpp"
#include "aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp"
// FIXME: replace whole Tensor with minimum needed data quantity
Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*inputIdx*/) const {
    assert(mOp.getInput(0) && "requires valid input");

    // Requires the whole input tensor
    const auto& inputDims = mOp.getInput(0)->dims();
    return std::accumulate(inputDims.begin(), inputDims.end(),
                           static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
    // Scaling is elementwise, so the computation can be done in-place:
    // no input data needs to be protected
    return 0;
}
Aidge::NbElts_t Aidge::ScalingImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& inputsSize) const {
    (void) outputIdx;
    (void) inputsSize;
    const auto& outputDims = mOp.getOutput(0)->dims();
    return std::accumulate(outputDims.begin(), outputDims.end(),
                           static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
}

Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*inputIdx*/) const {
    return mNbConsumedData[0];
}

Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
    return mNbProducedData[0];
}

void Aidge::ScalingImpl_cpu::updateConsummerProducer() {
    // Each input is consumed by the minimum amount for a forward pass
    mNbConsumedData[0] += getNbRequiredData(0);
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::ScalingImpl_cpu::forward() {
    // FIXME: uncomment the following code once memory handling works
    assert(mOp.getInput(0) && "missing input #0");

    // Find the correct kernel type
    auto kernelFunc = Registrar<ScalingImplForward_cpu>::create({
        mOp.getInput(0)->dataType(),
        mOp.getOutput(0)->dataType()});

    // Call kernel
    kernelFunc(mOp.getParams(),
               std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
               mOp.getInput(0)->getImpl()->rawPtr(),
               mOp.getOutput(0)->getImpl()->rawPtr());

    // Each input is consumed by the minimum amount for a forward pass
    mNbConsumedData[0] += getNbRequiredData(0);
    mNbProducedData[0] += getRequiredMemory(0, {});
}

void Aidge::ScalingImpl_cpu::backward() {
    printf("Not implemented yet.\n");
}
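For reference, a backward pass wired the same way as `forward()` could look like the sketch below. This is hypothetical: no backward kernel is registered in this diff and `backward()` is left unimplemented above, so the buffers passed here only mirror the forward plumbing rather than real gradient handling:

```cpp
// Hypothetical sketch only: dispatch through the (currently empty)
// ScalingImplBackward_cpu registry declared in ScalingImpl.hpp,
// mirroring the structure of forward().
void Aidge::ScalingImpl_cpu::backward() {
    assert(mOp.getInput(0) && "missing input #0");

    // Would require a backward kernel registered for this type pair.
    auto kernelFunc = Registrar<ScalingImplBackward_cpu>::create({
        mOp.getInput(0)->dataType(),
        mOp.getOutput(0)->dataType()});

    kernelFunc(mOp.getParams(),
               std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
               mOp.getInput(0)->getImpl()->rawPtr(),
               mOp.getOutput(0)->getImpl()->rawPtr());
}
```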