Commit 6878f23d authored by laurent soulier

[MERGE][main]

parents 4aed093f 4b783082
2 merge requests: !41 Support for any backend storage, !13 Refactoring Tensor
@@ -107,8 +107,8 @@ class test_operator_binding(unittest.TestCase):
class PythonCustomImpl(aidge_core.OperatorImpl):
"""Dummy implementation to test that C++ call python code
"""
def __init__(self):
aidge_core.OperatorImpl.__init__(self) # Required to avoid type error!
def __init__(self, op: aidge_core.Operator):
aidge_core.OperatorImpl.__init__(self, op) # Required to avoid type error!
self.idx = 0
def forward(self):
@@ -117,8 +117,8 @@ class test_operator_binding(unittest.TestCase):
self.idx += 1
generic_node = aidge_core.GenericOperator("Relu", 1, 1, 1, name="myReLu")
customImpl = PythonCustomImpl()
generic_op = generic_node.get_operator()
customImpl = PythonCustomImpl(generic_op)
generic_op.forward() # Do nothing, no implementation set
generic_op.set_impl(customImpl)
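Because OperatorImpl now wraps the Operator it implements, Python implementations written against the old binding must pass the operator through to the base class. A minimal sketch of the difference, reusing the names from the test above:
# old call, now fails with a TypeError because the Operator argument is missing:
#   impl = PythonCustomImpl()
# new call: the implemented Operator is required and forwarded to aidge_core.OperatorImpl
impl = PythonCustomImpl(generic_node.get_operator())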
@@ -18,11 +18,13 @@
#include "aidge/utils/Types.h"
namespace Aidge {
class Operator;
class OperatorImpl {
public:
virtual void forward(){};
virtual void backward(){};
OperatorImpl(const Operator& op);
virtual void forward();
virtual void backward();
/**
* @brief Minimum amount of data from a specific input required by the
@@ -31,13 +33,13 @@ public:
* @param inputIdx Index of the input analysed.
* @return std::size_t
*/
virtual NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const = 0;
virtual NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const;
// Amount of input data that cannot be overwritten during the execution.
virtual NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const = 0;
virtual NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
// Memory required at an output for a given input size.
virtual NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const = 0;
virtual NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
/**
* @brief Total amount of consumed data from a specific input.
@@ -45,7 +47,7 @@ public:
* @param inputIdx Index of the input analysed.
* @return DimSize_t
*/
virtual NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const = 0;
virtual NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const;
/**
* @brief Total amount of produced data ready to be used on a specific output.
@@ -53,15 +55,20 @@ public:
* @param outputIdx Index of the output analysed.
* @return DimSize_t
*/
virtual NbElts_t getNbProducedData(const IOIndex_t outputIdx) const = 0;
virtual NbElts_t getNbProducedData(const IOIndex_t outputIdx) const;
/**
* @brief Update the Consumer-Producer system by simulating the consumption and production of I/O
*
*/
virtual void updateConsummerProducer() = 0;
virtual void updateConsummerProducer();
virtual ~OperatorImpl() = default;
protected:
const Operator &mOp;
std::vector<NbElts_t> mNbConsumedData;
std::vector<NbElts_t> mNbProducedData;
};
} // namespace Aidge
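With the pure virtual hooks replaced by overridable defaults (implemented in the new OperatorImpl source file at the end of this diff), an implementation only has to override what it actually customises. A minimal Python sketch under that assumption, mirroring the test above where only forward() is defined:
import aidge_core

class ForwardOnlyImpl(aidge_core.OperatorImpl):
    def __init__(self, op):
        aidge_core.OperatorImpl.__init__(self, op)
    def forward(self):
        pass  # every other hook (required data, memory, ...) falls back to the C++ defaults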
@@ -30,7 +30,8 @@ namespace Aidge
enum class MaxPoolingAttr
{
StrideDims,
KernelDims
KernelDims,
CeilMode
};
template<DimIdx_t DIM>
@@ -42,7 +43,8 @@ class MaxPooling_Op : public Operator,
public StaticAttributes<
MaxPoolingAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>>
std::array<DimSize_t, DIM>,
bool>
{
private:
// FIXME: change accessibility
@@ -57,16 +59,19 @@ public:
using Attributes_ = StaticAttributes<
MaxPoolingAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>>;
std::array<DimSize_t, DIM>,
bool>;
template<MaxPoolingAttr e> using attr = typename Attributes_::template attr<e>;
constexpr MaxPooling_Op(
const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1)) :
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
bool ceil_mode = false) :
Operator(Type),
Attributes_(
attr<MaxPoolingAttr::StrideDims>(stride_dims),
attr<MaxPoolingAttr::KernelDims>(kernel_dims)),
attr<MaxPoolingAttr::KernelDims>(kernel_dims),
attr<MaxPoolingAttr::CeilMode>(ceil_mode)),
mOutput(std::make_shared<Tensor>())
{
setDatatype(DataType::Float32);
@@ -114,13 +119,23 @@ public:
{
std::array<DimSize_t, DIM + 2> outputDims = {};
std::function<float(float)> roundingFunction;
if (this->template getAttr<MaxPoolingAttr::CeilMode>())
{
roundingFunction = [](float x) { return std::ceil(x); };
}
else
{
roundingFunction = [](float x) { return std::floor(x); };
}
for (std::size_t dim = 0;
dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size();
++dim)
{
outputDims[dim + 2]
= 1
+ static_cast<DimSize_t>(std::floor(
+ static_cast<DimSize_t>(roundingFunction(
static_cast<float>(
mInput->dims()[dim + 2]
- this->template getAttr<MaxPoolingAttr::KernelDims>()[dim])
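A worked example of the new rounding choice (illustrative sizes, assuming the usual (input - kernel) / stride term that the truncated hunk divides by):
import math
in_size, kernel, stride = 5, 2, 2
floor_out = 1 + math.floor((in_size - kernel) / stride)  # 2: previous behaviour, CeilMode false
ceil_out = 1 + math.ceil((in_size - kernel) / stride)    # 3: CeilMode true keeps the partial trailing window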
@@ -219,14 +234,15 @@ template<std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> MaxPooling(
const std::array<DimSize_t, DIM> &kernel_dims,
const std::string &name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1))
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
bool ceil_mode = false)
{
static_assert(
DIM <= kMaxDim,
DIM <= MaxDim,
"Too many kernel dimensions required by MaxPooling, not supported");
return std::make_shared<Node>(
std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(
kernel_dims, stride_dims),
kernel_dims, stride_dims, ceil_mode),
name);
}
@@ -236,12 +252,13 @@ template<DimSize_t DIM>
inline std::shared_ptr<Node> MaxPooling(
DimSize_t const (&kernel_dims)[DIM],
const std::string &name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1))
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
bool ceil_mode = false)
{
static_assert(
DIM <= kMaxDim,
DIM <= MaxDim,
"Too many kernel dimensions required by MaxPooling, not supported");
return MaxPooling(to_array(kernel_dims), name, stride_dims);
return MaxPooling(to_array(kernel_dims), name, stride_dims, ceil_mode);
}
} // namespace Aidge
@@ -249,7 +266,7 @@ namespace
{
template<>
const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[]
= {"StrideDims", "KernelDims"};
= {"StrideDims", "KernelDims", "CeilMode"};
}
#endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
@@ -115,11 +115,12 @@ template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
bool ceil_mode = false)
{
auto graph = Sequential({
Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims)
MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims, ceil_mode)
});
return MetaOperator("PaddedMaxPooling", graph, name);
@@ -131,9 +132,10 @@ inline std::shared_ptr<Node> PaddedMaxPooling(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
bool ceil_mode = false)
{
return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims, ceil_mode);
}
} // namespace Aidge
@@ -12,6 +12,7 @@
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "aidge/operator/Operator.hpp"
#include "aidge/backend/OperatorImpl.hpp"
namespace py = pybind11;
@@ -22,8 +23,8 @@ namespace Aidge {
*
*/
class pyOperatorImpl: public OperatorImpl {
public:
pyOperatorImpl(){}
public:
using OperatorImpl::OperatorImpl; // Inherit constructors
void forward() override {
PYBIND11_OVERRIDE(
@@ -42,7 +43,7 @@ class pyOperatorImpl: public OperatorImpl {
);
}
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override {
PYBIND11_OVERRIDE_PURE_NAME(
PYBIND11_OVERRIDE_NAME(
NbElts_t,
OperatorImpl,
"get_nb_required_data",
@@ -51,7 +52,7 @@ class pyOperatorImpl: public OperatorImpl {
);
}
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override {
PYBIND11_OVERRIDE_PURE_NAME(
PYBIND11_OVERRIDE_NAME(
NbElts_t,
OperatorImpl,
"get_nb_required_protected",
@@ -62,7 +63,7 @@ class pyOperatorImpl: public OperatorImpl {
}
NbElts_t getRequiredMemory(const IOIndex_t outputIdx,
const std::vector<DimSize_t> &inputsSize) const override {
PYBIND11_OVERRIDE_PURE_NAME(
PYBIND11_OVERRIDE_NAME(
NbElts_t,
OperatorImpl,
"get_required_memory",
@@ -73,7 +74,7 @@ class pyOperatorImpl: public OperatorImpl {
);
}
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override {
PYBIND11_OVERRIDE_PURE_NAME(
PYBIND11_OVERRIDE_NAME(
NbElts_t,
OperatorImpl,
"get_nb_consumed_data",
@@ -83,7 +84,7 @@ class pyOperatorImpl: public OperatorImpl {
);
}
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override {
PYBIND11_OVERRIDE_PURE_NAME(
PYBIND11_OVERRIDE_NAME(
NbElts_t,
OperatorImpl,
"get_nb_produced_data",
@@ -93,7 +94,7 @@ class pyOperatorImpl: public OperatorImpl {
);
}
void updateConsummerProducer() override {
PYBIND11_OVERRIDE_PURE_NAME(
PYBIND11_OVERRIDE_NAME(
void,
OperatorImpl,
"update_consummer_producer",
@@ -106,7 +107,7 @@ class pyOperatorImpl: public OperatorImpl {
void init_OperatorImpl(py::module& m){
py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr())
.def(py::init<>())
.def(py::init<const Operator&>())
.def("forward", &OperatorImpl::forward)
.def("backward", &OperatorImpl::backward)
.def("get_nb_required_data", &OperatorImpl::getNbRequiredData)
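Switching from PYBIND11_OVERRIDE_PURE_NAME to PYBIND11_OVERRIDE_NAME means a Python subclass that leaves one of these hooks undefined no longer raises; the call simply falls through to the new C++ defaults. A hedged sketch of such a partial override (the class name and chunk size are illustrative):
import aidge_core

class ChunkedImpl(aidge_core.OperatorImpl):
    def __init__(self, op):
        aidge_core.OperatorImpl.__init__(self, op)
    def get_nb_required_data(self, input_idx):
        return 16  # custom chunking; hooks not overridden here use the C++ defaults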
@@ -30,22 +30,26 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
py::multiple_inheritance())
.def(py::init<const std::array<DimSize_t, DIM> &,
const std::array<DimSize_t, DIM> &>(),
const std::array<DimSize_t, DIM> &,
bool>(),
py::arg("kernel_dims"),
py::arg("stride_dims"))
py::arg("stride_dims"),
py::arg("ceil_mode"))
.def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
.def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName);
m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
const std::vector<DimSize_t> &stride_dims) {
const std::vector<DimSize_t> &stride_dims,
bool ceil_mode) {
AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
return MaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()));
return MaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), ceil_mode);
}, py::arg("kernel_dims"),
py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1));
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("ceil_mode") = false);
}
@@ -55,8 +59,5 @@ void init_MaxPooling(py::module &m) {
declare_MaxPoolingOp<2>(m);
declare_MaxPoolingOp<3>(m);
// FIXME:
// m.def("MaxPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
// (&)[1])>(&MaxPooling));
}
} // namespace Aidge
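A hedged usage sketch of the extended binding (keyword names as declared above, values illustrative):
import aidge_core
pool = aidge_core.MaxPooling2D([2, 2], name="pool1", stride_dims=[2, 2], ceil_mode=True)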
@@ -28,7 +28,7 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
m.def(("PaddedConv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
DimSize_t out_channels,
const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
const std::string& name,
const std::vector<DimSize_t> &stride_dims,
const std::vector<DimSize_t> &padding_dims,
const std::vector<DimSize_t> &dilation_dims)
@@ -50,7 +50,7 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
m.def(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
const std::string& name,
const std::vector<DimSize_t> &stride_dims,
const std::vector<DimSize_t> &padding_dims,
const std::vector<DimSize_t> &dilation_dims)
@@ -66,12 +66,12 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
}
template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
m.def(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
const std::string& name,
const std::vector<DimSize_t> &stride_dims,
const std::vector<DimSize_t> &padding_dims)
{
@@ -84,25 +84,27 @@ template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0));
}
template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
m.def(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
const std::string& name,
const std::vector<DimSize_t> &stride_dims,
const std::vector<DimSize_t> &padding_dims)
const std::vector<DimSize_t> &padding_dims,
bool ceil_mode)
{
AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [%ld] does not match DIM [%d]", padding_dims.size(), 2*DIM);
return PaddedMaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()));
return PaddedMaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), ceil_mode);
}, py::arg("kernel_dims"),
py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0));
py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
py::arg("ceil_mode") = false);
}
void init_MetaOperatorDefs(py::module &m) {
@@ -118,9 +120,7 @@ void init_MetaOperatorDefs(py::module &m) {
declare_PaddedMaxPoolingOp<1>(m);
declare_PaddedMaxPoolingOp<2>(m);
declare_PaddedMaxPoolingOp<3>(m);
// FIXME:
// m.def("Conv1D", static_cast<NodeAPI(*)(const char*, int, int, int const
// (&)[1])>(&Conv));
}
} // namespace Aidge
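The flag is forwarded through the padded meta-operator as well; a hedged usage sketch (values illustrative, padding_dims holds 2*DIM entries):
import aidge_core
pool = aidge_core.PaddedMaxPooling2D([3, 3], name="pool2", stride_dims=[2, 2],
                                     padding_dims=[1, 1, 1, 1], ceil_mode=True)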
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
Aidge::OperatorImpl::OperatorImpl(const Operator& op):
mOp(op),
mNbConsumedData(mOp.nbInputs(), 0),
mNbProducedData(mOp.nbOutputs(), 0)
{
//ctor
}
Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Requires the whole tensor by default
return std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->size();
}
Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Protect the whole tensor by default
return std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->size();
}
Aidge::NbElts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
assert(mOp.getOutput(outputIdx) && "requires valid output");
// Requires the whole tensor by default, regardless of available data on inputs
return std::static_pointer_cast<Tensor>(mOp.getOutput(outputIdx))->size();
}
Aidge::NbElts_t Aidge::OperatorImpl::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
}
Aidge::NbElts_t Aidge::OperatorImpl::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
assert(static_cast<std::size_t>(outputIdx) < mNbProducedData.size());
return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::OperatorImpl::updateConsummerProducer(){
// Update producer-consumer data
for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx) {
// each input is consumed by the minimum amount for a forward pass
mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));
}
for (std::size_t outputIdx = 0; outputIdx < mNbProducedData.size(); ++outputIdx) {
mNbProducedData[outputIdx] += getRequiredMemory(outputIdx, {});
}
}
void Aidge::OperatorImpl::forward() {
AIDGE_THROW_OR_ABORT(std::runtime_error, "forward() not implemented");
}
void Aidge::OperatorImpl::backward() {
AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented");
}
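A hedged sketch of what these defaults imply for the consumer-producer book-keeping (sizes illustrative): each simulated scheduling step consumes the whole input and produces the whole output.
input_size = output_size = 24           # illustrative tensor sizes
consumed = produced = 0
for _ in range(3):                      # three calls to updateConsummerProducer()
    consumed += input_size              # default getNbRequiredData(): the full input tensor
    produced += output_size             # default getRequiredMemory(): the full output tensor
assert (consumed, produced) == (72, 72)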