Commit 8391626e authored by Cyril Moineau

Merge branch 'fuseBN' into 'main'

- [x] Fix Add + Mul recipes
- [x] Add fuseBN recipe
- [x] Fix parameters issue #8
- [x] Update tensor to get/set values
- [x] Add approxEq Tensor utils method to check whether two tensors are approximately equal
- [x] Update the way the scheduler is updated
- [x] Add unit tests for:
  - [x] fuse Add + Mul
  - [x] remove padding
  - [x] the fuseBN recipe (TODO in aidge_backend_cpu because the test depends on the backend)
parents 8228ba63 88323891
Showing 393 additions and 84 deletions
@@ -27,6 +27,8 @@ build:ubuntu_python:
     - python3 -m pip install virtualenv
     - virtualenv venv
     - source venv/bin/activate
+    # Numpy dependency for unit tests
+    - python3 -m pip install numpy
     - export AIDGE_INSTALL=`pwd`/install
     - python3 -m pip install .
   artifacts:
...
@@ -40,7 +40,7 @@ class test_parameters(unittest.TestCase):
     def test_matmul(self):
         out_channels = 8
-        matmul_op = aidge_core.Matmul(out_channels).get_operator()
+        matmul_op = aidge_core.MatMul(out_channels).get_operator()
         self.assertEqual(matmul_op.get("OutChannels"), out_channels)

     def test_producer_1D(self):
...
"""
Copyright (c) 2023 CEA-List
This program and the accompanying materials are made available under the
terms of the Eclipse Public License 2.0 which is available at
http://www.eclipse.org/legal/epl-2.0.
SPDX-License-Identifier: EPL-2.0
"""
import unittest
import aidge_core
class test_recipies(unittest.TestCase):
"""
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_remove_flatten(self):
graph_view = aidge_core.sequential([
aidge_core.GenericOperator("Flatten", 1, 1, 1, name="Flatten0"),
aidge_core.FC(50, name='0')
])
old_nodes = graph_view.get_nodes()
aidge_core.remove_flatten(graph_view)
self.assertTrue(len(graph_view.get_nodes()) == len(old_nodes) - 1)
self.assertTrue("Flatten0" not in [i.name for i in graph_view.get_nodes()])
self.assertTrue(all([i in old_nodes for i in graph_view.get_nodes()]))
def test_fuse_matmul_add(self):
matmul0 = aidge_core.GenericOperator("MatMul", 1, 2, 1, name="MatMul0")
add0 = aidge_core.Add(name="Add0")
matmul1 = aidge_core.GenericOperator("MatMul", 1, 2, 1, name="MatMul1")
add1 = aidge_core.Add(name="Add1")
graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
w0 = aidge_core.Producer([1, 1], name="W0")
w0.add_child(matmul0, 0, 1)
graph_view.add(w0)
b0 = aidge_core.Producer([1], name="B0")
b0.add_child(add0, 0, 1)
graph_view.add(b0)
w1 = aidge_core.Producer([1, 1], name="W1")
w1.add_child(matmul1, 0, 1)
graph_view.add(w1)
b1 = aidge_core.Producer([1], name="B1")
b1.add_child(add1, 0, 1)
graph_view.add(b1)
old_nodes = graph_view.get_nodes()
aidge_core.fuse_mul_add(graph_view)
self.assertTrue(len(graph_view.get_nodes()) == len(old_nodes) - 2)
self.assertTrue("MatMul0" not in [i.name() for i in graph_view.get_nodes()])
self.assertTrue("Add0" not in [i.name() for i in graph_view.get_nodes()])
self.assertTrue("MatMul1" not in [i.name() for i in graph_view.get_nodes()])
self.assertTrue("Add1" not in [i.name() for i in graph_view.get_nodes()])
self.assertTrue("W0" in [i.name() for i in graph_view.get_nodes()])
self.assertTrue("B0" in [i.name() for i in graph_view.get_nodes()])
self.assertTrue("W1" in [i.name() for i in graph_view.get_nodes()])
self.assertTrue("B1" in [i.name() for i in graph_view.get_nodes()])
# TODO : Vérifier que FC bien crée
if __name__ == '__main__':
unittest.main()
"""
Copyright (c) 2023 CEA-List
This program and the accompanying materials are made available under the
terms of the Eclipse Public License 2.0 which is available at
http://www.eclipse.org/legal/epl-2.0.
SPDX-License-Identifier: EPL-2.0
"""
import unittest
import aidge_core
from functools import reduce
import numpy as np
class test_tensor(unittest.TestCase):
"""
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_getcoord_getidx(self):
dims = [2,2,2]
size = reduce((lambda x, y: x*y), dims)
np_array = np.arange(size).reshape(dims)
t = aidge_core.Tensor(np_array)
for i in range(size):
coord = t.get_coord(i)
idx = t.get_idx(coord)
self.assertEqual(idx, i)
if __name__ == '__main__':
unittest.main()
@@ -33,7 +33,7 @@
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/GenericOperator.hpp"
-#include "aidge/operator/Matmul.hpp"
+#include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 //#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/Operator.hpp"
...
@@ -27,6 +27,9 @@ public:
     {
         printf("Cannot set raw pointer for backend %s\n", mBackend);
     };
+
+    virtual void* getRaw(std::size_t /*idx*/)=0;
+
     virtual std::size_t scalarSize() const = 0; // Size of one scalar (in bytes)
     constexpr const char *backend() const { return mBackend; }
     virtual ~TensorImpl() = default;
...
@@ -446,18 +446,33 @@ class Tensor : public Data,
      */
     bool empty() const { return mDims.empty(); }

-    template <typename expectedType, std::array<std::size_t, 1>::size_type DIM>
-    constexpr expectedType &get(std::array<std::size_t, DIM> idx) {
-        assert(DIM == mDims.size());
-        assert(mImpl);
-        std::size_t unfoldedIdx = 0;
-        for (std::size_t i = 0; i < DIM - std::size_t(1); ++i) {
-            unfoldedIdx = (unfoldedIdx + idx[i]) * mDims[i + 1];
-        }
-        unfoldedIdx += idx[DIM - 1];
-        return static_cast<expectedType *>(mImpl->rawPtr())[unfoldedIdx];
+    template <typename expectedType>
+    expectedType& get(std::size_t idx){
+        // TODO : add assert expected Type compatible with datatype
+        // TODO : add assert idx < Size
+        return *reinterpret_cast<expectedType *>(mImpl->getRaw(idx));
+    }
+
+    template <typename expectedType>
+    expectedType& get(std::vector<std::size_t> coordIdx){
+        return get<expectedType>(getIdx(coordIdx));
+    }
+
+    template <typename expectedType>
+    void set(std::size_t idx, expectedType value){
+        // TODO : add assert expected Type compatible with datatype
+        // TODO : add assert idx < Size
+        void* dataPtr = mImpl->getRaw(idx);
+        std::memcpy(dataPtr, &value, sizeof(expectedType));
     }
+
+    template <typename expectedType>
+    void set(std::vector<std::size_t> coordIdx, expectedType value){
+        set<expectedType>(getIdx(coordIdx), value);
+    }

     std::string toString() {
         if (dims().empty()) { return "{}"; }
         std::string res;
@@ -559,6 +574,42 @@ class Tensor : public Data,
         return mGrad;
     }

+    /**
+     * @brief From the 1D index, return the coordinates of an element in the tensor.
+     *
+     * @param flatIdx 1D index of the value considering a flattened tensor.
+     * @return std::vector<DimSize_t>
+     */
+    std::vector<std::size_t> getCoord(std::size_t flatIdx) const {
+        std::vector<std::size_t> coordIdx = std::vector<std::size_t>(mDims.size());
+        std::size_t idx = flatIdx;
+        for (std::size_t i = mDims.size() - 1; i > 0; --i){
+            coordIdx[i] = (idx % mDims[i]);
+            idx /= mDims[i];
+        }
+        coordIdx[0] = idx % mDims[0];
+        return coordIdx;
+    }
+
+    /**
+     * @brief From the coordinates, return the 1D index of an element in the tensor.
+     *
+     * @param coordIdx Coordinates of an element in the tensor
+     * @return DimSize_t
+     */
+    std::size_t getIdx(std::vector<std::size_t> coordIdx) const {
+        std::size_t flatIdx = 0;
+        assert(coordIdx.size() == mDims.size() && "Coordinates do not match the number of dimensions");
+        std::size_t i = 0;
+        for (; i < mDims.size() - 1; ++i){
+            assert(coordIdx[i] < mDims[i] && "Coordinate does not fit within the tensor dimensions");
+            flatIdx = (flatIdx + coordIdx[i]) * mDims[i + 1];
+        }
+        return flatIdx + coordIdx[i];
+    }
+
 private:
     ///\bug not protected against overflow
     std::size_t computeSize() {
...
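To make the new accessors concrete, here is a small sketch of how they compose (illustrative only, not part of this diff; it assumes a 2x3 Float32 tensor whose backend implements `getRaw`, e.g. the CPU backend):

```cpp
#include "aidge/data/Tensor.hpp"

// Sketch: round-trip between flat indices and coordinates, then read/write a value.
void accessorSketch(Aidge::Tensor& t) {              // assumed dims {2, 3}, Float32
    std::vector<std::size_t> coord = t.getCoord(4);  // -> {1, 1}
    std::size_t flat = t.getIdx(coord);              // -> 4

    float v = t.get<float>(flat);                    // read by flat index
    t.set<float>(coord, v + 1.0f);                   // write by coordinates
}
```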
@@ -97,7 +97,6 @@ public:
         if (!mInputs[0]->empty()) {
             for (std::size_t i = nbDataInputs(); i < nbInputs(); ++i) {
                 if(mInputs[i]->size() != mInputs[0]->dims()[1]) {
-                    assert(!mInputs[0]->hasImpl() && "Incompatible size with already implemented learnable parameter");
                     mInputs[i]->resize(std::array<DimSize_t, 1>({mInputs[0]->dims()[1]}));
                 }
             }
@@ -181,4 +180,4 @@
 const char *const EnumStrings<Aidge::BatchNormParam>::data[] = { "Epsilon", "Momentum" };
 }

-#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
+#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
@@ -216,8 +216,14 @@ inline std::shared_ptr<Node> Conv(
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvParam>::data[] = {"StrideDims", "DilationDims", "InChannels", "OutChannels",
-                                                           "KernelDims", "PaddingDims"};
+const char *const EnumStrings<Aidge::ConvParam>::data[] = {
+    "StrideDims",
+    "DilationDims",
+    "InChannels",
+    "OutChannels",
+    "KernelDims",
+    "PaddingDims"
+};
 }

 #endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
@@ -27,29 +27,29 @@
 #include "aidge/utils/Registrar.hpp"

 namespace Aidge {

-enum class MatmulParam { OutChannels };
+enum class MatMulParam { OutChannels };

-class Matmul_Op : public Operator,
-              public Registrable<Matmul_Op,
+class MatMul_Op : public Operator,
+              public Registrable<MatMul_Op,
                                  std::string,
-                                 std::unique_ptr<OperatorImpl>(const Matmul_Op &)>,
-              public Parameterizable<MatmulParam, DimSize_t> {
+                                 std::unique_ptr<OperatorImpl>(const MatMul_Op &)>,
+              public Parameterizable<MatMulParam, DimSize_t> {
 public:
     std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
     const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();

 public:
-    static constexpr const char* Type = "Matmul";
+    static constexpr const char* Type = "MatMul";

-    Matmul_Op() = delete;
+    MatMul_Op() = delete;

-    using Parameterizable_ = Parameterizable<MatmulParam, DimSize_t>;
-    template <MatmulParam e> using param = typename Parameterizable_::template param<e>;
+    using Parameterizable_ = Parameterizable<MatMulParam, DimSize_t>;
+    template <MatMulParam e> using param = typename Parameterizable_::template param<e>;

-    Matmul_Op(DimSize_t out_channels)
+    MatMul_Op(DimSize_t out_channels)
             : Operator(Type),
             Parameterizable_(
-                param<MatmulParam::OutChannels>(out_channels))
+                param<MatMulParam::OutChannels>(out_channels))
     {
         setDatatype(DataType::Float32);
     }
@@ -58,22 +58,22 @@ public:
      * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Matmul_Op(const Matmul_Op& op)
+    MatMul_Op(const MatMul_Op& op)
         : Operator(Type),
           Parameterizable_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Matmul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
     }

     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Matmul_Op
+     * @see Operator::MatMul_Op
      */
     std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Matmul_Op>(*this);
+        return std::make_shared<MatMul_Op>(*this);
     }

     void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
@@ -85,9 +85,9 @@ public:
     void computeOutputDims() override final {
         if (!mInputs[0]->empty()) {
             // <in_features**, out_channels>
-            std::array<DimSize_t, 2> weightDims = {static_cast<DimSize_t>(mInputs[0]->size()), this->template get<MatmulParam::OutChannels>()};
+            std::array<DimSize_t, 2> weightDims = {this->template get<MatMulParam::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
             // <out_channels, batch>
-            std::array<DimSize_t, 1> outputDims = {this->template get<MatmulParam::OutChannels>()};
+            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template get<MatMulParam::OutChannels>()};

             mInputs[1]->resize(weightDims);
             mOutput->resize(outputDims);
@@ -128,7 +128,7 @@ public:
     void setBackend(const std::string& name) {
-        mImpl = Registrar<Matmul_Op>::create(name)(*this);
+        mImpl = Registrar<MatMul_Op>::create(name)(*this);
         mOutput->setBackend(name);

         // FIXME: temporary workaround
@@ -150,17 +150,17 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };

-inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const std::string& name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    auto matmul = std::make_shared<Node>(std::make_shared<Matmul_Op>(out_channels), name);
-    addProducer(matmul, 1, {1, out_channels}, "w");
+inline std::shared_ptr<Node> MatMul(DimSize_t out_channels, const std::string& name = "") {
+    // FIXME: properly handle default w initialization in every cases
+    auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(out_channels), name);
+    addProducer(matmul, 1, {out_channels, 1}, "w");
     return matmul;
 }
 } // namespace Aidge

 namespace {
 template <>
-const char *const EnumStrings<Aidge::MatmulParam>::data[] = {"OutChannels"};
+const char *const EnumStrings<Aidge::MatMulParam>::data[] = {"OutChannels"};
 }

 #endif /* AIDGE_CORE_OPERATOR__MATMUL_H_ */
@@ -34,7 +34,7 @@ public:
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Matmul_Op
+     * @see Operator::MatMul_Op
      */
     std::shared_ptr<Operator> clone() const override {
         return std::make_shared<MetaOperator>(*this);
...
@@ -75,6 +75,16 @@ public:
         assert(false && "Producer operator takes no input");
     }

+    /**
+     * @brief Set the Output Tensor of the Producer operator.
+     * This method will create a copy of the Tensor.
+     *
+     * @param newOutput Tensor containing the values to copy
+     */
+    void setOutputTensor(const Tensor& newOutput) {
+        *mOutput = newOutput;
+    }
+
     void computeOutputDims() override final {}

     bool outputDimsForwarded() const override final {return true;}
@@ -163,4 +173,4 @@ void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, Dim
 }
 } // namespace Aidge

-#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
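A hedged sketch of how the new setter could be used from C++ (the helper name and the cast through Node::getOperator() are illustrative assumptions, not part of this diff):

```cpp
#include <memory>
#include "aidge/operator/Producer.hpp"

// Sketch: overwrite the values held by an existing weight Producer node.
void refreshWeights(std::shared_ptr<Aidge::Node> weightNode,
                    const Aidge::Tensor& newValues) {
    auto producerOp =
        std::static_pointer_cast<Aidge::Producer_Op>(weightNode->getOperator());
    producerOp->setOutputTensor(newValues);  // copies newValues into the producer's output
}
```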
@@ -145,11 +145,11 @@ public:
         assert(false && "parameter not found");
     }

-    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
     constexpr typename std::enable_if<(SIZE > 0), R&>::type get(std::size_t i) {
-        if (i == SIZE) {
-            if (std::is_same<R, typename std::tuple_element<SIZE,std::tuple<T...>>::type>::value) {
-                return reinterpret_cast<R&>(std::get<SIZE>(mParams));
+        if (i == SIZE-1) {
+            if (std::is_same<R, typename std::tuple_element<SIZE-1,std::tuple<T...>>::type>::value) {
+                return reinterpret_cast<R&>(std::get<SIZE-1>(mParams));
             }
             else {
                 assert(false && "wrong parameter type");
@@ -160,9 +160,10 @@ public:
         }
     }

-    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
-    constexpr typename std::enable_if<(SIZE <= 0), R&>::type get(std::size_t i) {
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    [[noreturn]] constexpr typename std::enable_if<(SIZE == 0), R&>::type get(std::size_t /*i*/) {
         assert(false && "parameter not found");
+        exit(-1);
     }

     constexpr const std::tuple<T...>& getParams() const {
...
@@ -17,11 +17,54 @@
 namespace Aidge{

+// FUSE MATMUL + ADD -> FC
+
+/**
+ * @brief Merge ``MatMul`` and :cpp:function:`Aidge::Add` Node into a :cpp:function:`Aidge::FC` Node.
+ *
+ * @param nodes Strict set of Node to merge.
+ */
 void fuseMulAdd(std::set<std::shared_ptr<Node>> nodes);
+
+/**
+ * @brief Merge ``MatMul`` and :cpp:function:`Aidge::Add` Node into a :cpp:function:`Aidge::FC` Node.
+ *
+ * @param graphView Graph view to use graph matching on, in order to apply transformations.
+ */
+void fuseMulAdd(std::shared_ptr<GraphView> graphView);
+
+// REMOVE FLATTEN + FC -> FC
+
+/**
+ * @brief Remove ``Flatten`` before :cpp:function:`Aidge::FC` Node.
+ *
+ * @param nodes Strict set of Node to merge.
+ */
 void removeFlatten(std::set<std::shared_ptr<Node>> nodes);
+
+/**
+ * @brief Remove ``Flatten`` before :cpp:function:`Aidge::FC` Node.
+ *
+ * @param graphView Graph view to use graph matching on, in order to apply transformations.
+ */
+void removeFlatten(std::shared_ptr<GraphView> graphView);
+
+// FUSE BN + FC || CONV -> FC || CONV
+
+/**
+ * @brief Fuse :cpp:function:`Aidge::BatchNorm` with :cpp:function:`Aidge::Conv` or :cpp:function:`Aidge::FC` Nodes.
+ * Ref: https://nenadmarkus.com/p/fusing-batchnorm-and-conv/
+ *
+ * @param nodes Strict set of Node to merge.
+ */
+void fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes);
+
+/**
+ * @brief Fuse :cpp:function:`Aidge::BatchNorm` with :cpp:function:`Aidge::Conv` or :cpp:function:`Aidge::FC` Nodes.
+ * Ref: https://nenadmarkus.com/p/fusing-batchnorm-and-conv/
+ *
+ * @param graphView Graph view to use graph matching on, in order to apply transformations.
+ */
+void fuseBatchNorm(std::shared_ptr<GraphView> graphView);
+
 }

-#endif /* AIDGE_CORE_UTILS_RECIPIES_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_UTILS_RECIPIES_H_ */
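For context, the folding performed by a fuseBatchNorm-style recipe (following the reference cited above) can be summarized as follows, where W and b are the weights and bias of the preceding Conv/FC node and γ, β, μ, σ², ε are the BatchNorm scale, shift, running mean, running variance and epsilon; this is the standard derivation, not code extracted from this merge request:

```latex
W' = \frac{\gamma}{\sqrt{\sigma^2 + \epsilon}}\, W
\qquad
b' = \frac{\gamma}{\sqrt{\sigma^2 + \epsilon}}\,(b - \mu) + \beta
```

The fused Conv/FC node with parameters (W', b') then produces the same output as the original Conv/FC followed by the BatchNorm, which is presumably what the unit test deferred to aidge_backend_cpu would verify with approxEq.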
@@ -34,6 +34,7 @@ public:
     static std::map<Key, std::function<Func>>& registry()
     {
         #ifdef PYBIND
+        #define _CRT_SECURE_NO_WARNINGS
         if (std::getenv("AIDGE_CORE_WITH_PYBIND")){
             std::string name = std::string("registrar_")+typeid(Registrable<DerivedClass, Key, Func>).name();
             static auto shared_data = reinterpret_cast<std::map<Key, std::function<Func>> *>(py::get_shared_data(name));
...
/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#ifndef AIDGE_CORE_UTILS_TENSOR_UTILS_H_
#define AIDGE_CORE_UTILS_TENSOR_UTILS_H_

#include <cassert> // assert
#include <cmath>   // std::abs
#include "aidge/data/Tensor.hpp"

/**
 * @brief Compare two :cpp:class:`Aidge::Tensor` value-wise. The comparison function is:
 *
 * |t1 - t2| <= absolute + relative * |t2|
 *
 * If any pair of values differs by more than this bound, return false.
 * If the tensors do not have the same size, return false.
 * If the datatypes of the two tensors differ, return false.
 * If the templated type does not correspond to the datatype of each tensor, raise an assertion error.
 *
 * @tparam T should correspond to the type of the tensors; it defines the type of the absolute and relative error
 * @param t1 first :cpp:class:`Aidge::Tensor` to test
 * @param t2 second :cpp:class:`Aidge::Tensor` to test
 * @param relative relative difference allowed (should be between 0 and 1)
 * @param absolute absolute error allowed (should be positive)
 * @return true if both tensors are approximately equal and have the same datatype and shape, false otherwise
 */
template <typename T>
bool approxEq(Aidge::Tensor t1, Aidge::Tensor t2, float relative, float absolute){
    assert(t1.dataType() == t2.dataType());
    assert(t1.dataType() == Aidge::NativeType<T>::type);
    assert(relative >= 0);
    assert(absolute >= 0 && absolute <= 1);

    if (t1.size() != t2.size()){
        return false;
    }
    for (std::size_t i = 0; i < t1.size(); ++i){
        if (static_cast<float>(std::abs(t1.get<T>(i) - t2.get<T>(i))) > (absolute + (relative * static_cast<float>(std::abs(t2.get<T>(i)))))){
            return false;
        }
    }
    return true;
}

#endif /* AIDGE_CORE_UTILS_TENSOR_UTILS_H_ */
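A minimal usage sketch (not part of this diff): it assumes the header path `aidge/utils/TensorUtils.hpp` inferred from the include guard, and two Float32 tensors created and filled through a backend implementing `getRaw` (e.g. aidge_backend_cpu, which is why the fuseBN unit test is deferred to that repository).

```cpp
#include "aidge/utils/TensorUtils.hpp"  // path assumed from the include guard above

// Sketch: compare a computed output against a reference tensor with a
// 0.1 % relative tolerance and a small absolute tolerance.
bool outputMatchesReference(const Aidge::Tensor& output, const Aidge::Tensor& expected) {
    return approxEq<float>(output, expected, 0.001f, 1e-6f);
}
```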
@@ -26,10 +26,10 @@ namespace Aidge {
 template<typename T>
 void addCtor(py::class_<Tensor,
              std::shared_ptr<Tensor>,
              Data,
              Registrable<Tensor,
                          std::tuple<std::string, DataType>,
                          std::unique_ptr<TensorImpl>(const Tensor&)>>& mTensor){
     mTensor.def(py::init([]( py::array_t<T, py::array::c_style | py::array::forcecast> b) {
         /* Request a buffer descriptor from Python */
@@ -46,24 +46,27 @@ void addCtor(py::class_<Tensor,
         }else{
             printf("Warning : Could not use aidge_cpu backend, verify you have `import aidge_cpu`\n");
         }

         return newTensor;
-    }));
+    }))
+    .def("__setitem__", (void (Tensor::*)(std::size_t, T)) &Tensor::set)
+    .def("__setitem__", (void (Tensor::*)(std::vector<std::size_t>, T)) &Tensor::set)
+    ;
 }

 void init_Tensor(py::module& m){
     py::class_<Registrable<Tensor,
                            std::tuple<std::string, DataType>,
                            std::unique_ptr<TensorImpl>(const Tensor&)>,
                std::shared_ptr<Registrable<Tensor,
                                            std::tuple<std::string, DataType>,
                                            std::unique_ptr<TensorImpl>(const Tensor&)>>>(m,"TensorRegistrable");

     py::class_<Tensor, std::shared_ptr<Tensor>,
                Data,
                Registrable<Tensor,
                            std::tuple<std::string, DataType>,
                            std::unique_ptr<TensorImpl>(const Tensor&)>> pyClassTensor
         (m,"Tensor", py::multiple_inheritance(), py::buffer_protocol());
@@ -74,6 +77,8 @@ void init_Tensor(py::module& m){
     .def("size", &Tensor::size)
     .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&)) &Tensor::resize)
     .def("has_impl", &Tensor::hasImpl)
+    .def("get_coord", &Tensor::getCoord)
+    .def("get_idx", &Tensor::getIdx)
     .def_static("get_available_backends", &Tensor::getAvailableBackends)
     .def("__str__", [](Tensor& b) {
         return b.toString();
@@ -82,15 +87,27 @@ void init_Tensor(py::module& m){
         return b.size();
     })
     .def("__getitem__", [](Tensor& b, size_t idx)-> py::object {
+        // TODO : Should return error if backend not compatible with get
         if (idx >= b.size()) throw py::index_error();
         switch(b.dataType()){
             case DataType::Float64:
-                return py::cast(static_cast<double*>(b.getImpl()->rawPtr())[idx]);
+                return py::cast(b.get<double>(idx));
             case DataType::Float32:
-                return py::cast(static_cast<float*>(b.getImpl()->rawPtr())[idx]);
+                return py::cast(b.get<float>(idx));
             case DataType::Int32:
-                return py::cast(static_cast<int*>(b.getImpl()->rawPtr())[idx]);
+                return py::cast(b.get<int>(idx));
+            default:
+                return py::none();
+        }
+    })
+    .def("__getitem__", [](Tensor& b, std::vector<size_t> coordIdx)-> py::object {
+        if (b.getIdx(coordIdx) >= b.size()) throw py::index_error();
+        switch(b.dataType()){
+            case DataType::Float64:
+                return py::cast(b.get<double>(coordIdx));
+            case DataType::Float32:
+                return py::cast(b.get<float>(coordIdx));
+            case DataType::Int32:
+                return py::cast(b.get<int>(coordIdx));
             default:
                 return py::none();
         }
     })
@@ -126,12 +143,12 @@ void init_Tensor(py::module& m){
         }

         return py::buffer_info(
             tensorImpl->rawPtr(),     /* Pointer to buffer */
             tensorImpl->scalarSize(), /* Size of one scalar */
             dataFormatDescriptor,     /* Python struct-style format descriptor */
             b.nbDims(),               /* Number of dimensions */
             dims,                     /* Buffer dimensions */
             strides                   /* Strides (in bytes) for each index */
         );
     });
@@ -142,6 +159,6 @@ void init_Tensor(py::module& m){
     // #if SIZE_MAX != 0xFFFFFFFF
     addCtor<double>(pyClassTensor);
     // #endif
 }
 }
@@ -11,7 +11,7 @@
 #include <pybind11/pybind11.h>

-#include "aidge/operator/Matmul.hpp"
+#include "aidge/operator/MatMul.hpp"
 #include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
@@ -20,13 +20,13 @@
 namespace py = pybind11;
 namespace Aidge {

-void declare_Matmul(py::module &m) {
-  py::class_<Matmul_Op, std::shared_ptr<Matmul_Op>, Operator, PyAbstractParametrizable>(m, "Matmul_Op", py::multiple_inheritance());
+void declare_MatMul(py::module &m) {
+  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Operator, PyAbstractParametrizable>(m, "MatMul_Op", py::multiple_inheritance());

-  m.def("Matmul", &Matmul, py::arg("out_channels"), py::arg("name") = "");
+  m.def("MatMul", &MatMul, py::arg("out_channels"), py::arg("name") = "");
 }

-void init_Matmul(py::module &m) {
-  declare_Matmul(m);
+void init_MatMul(py::module &m) {
+  declare_MatMul(m);
 }
 } // namespace Aidge
@@ -26,18 +26,19 @@ template <DimIdx_t DIM>
 void declare_Producer(py::module &m) {
     // m.def(("Producer_" + std::to_string(DIM)+"D").c_str(), py::overload_cast<shared_ptr<Node>&>(&Producer<DIM>), py::arg("dims"), py::arg("name"));
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const std::string&)>(&Producer), py::arg("dims"), py::arg("name") = "");
 }

 void init_Producer(py::module &m) {
     py::class_<Producer_Op, std::shared_ptr<Producer_Op>, Operator>(
         m,
         "ProducerOp",
         py::multiple_inheritance())
-    .def("dims", &Producer_Op::dims);
+    .def("dims", &Producer_Op::dims)
+    .def("set_output_tensor", &Producer_Op::setOutputTensor);

     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&)>(&Producer), py::arg("tensor"), py::arg("name") = "");

     declare_Producer<1>(m);
     declare_Producer<2>(m);
     declare_Producer<3>(m);
...
@@ -28,7 +28,7 @@ void init_ConvDepthWise(py::module&);
 void init_FC(py::module&);
 void init_GenericOperator(py::module&);
 void init_LeakyReLU(py::module&);
-void init_Matmul(py::module&);
+void init_MatMul(py::module&);
 void init_MaxPooling(py::module&);
 void init_Producer(py::module&);
 void init_ReLU(py::module&);
@@ -46,7 +46,7 @@ void init_GRegex(py::module&);
 void init_Recipies(py::module&);
 void init_Scheduler(py::module&);
+void init_TensorUtils(py::module&);

 void set_python_flag(){
     // Set an env variable to know if we run with ypthon or cpp
@@ -75,7 +75,7 @@ void init_Aidge(py::module& m){
     init_FC(m);
     init_GenericOperator(m);
     init_LeakyReLU(m);
-    init_Matmul(m);
+    init_MatMul(m);
     init_MaxPooling(m);
     init_ReLU(m);
     init_Softmax(m);
@@ -86,6 +86,7 @@ void init_Aidge(py::module& m){
     init_GRegex(m);
     init_Recipies(m);
     init_Scheduler(m);
+    init_TensorUtils(m);
 }

 PYBIND11_MODULE(aidge_core, m) {
...