Commit 55825145 authored by Olivier BICHLER

Merge branch 'main' into clone

parents 53bdb571 46767f41
1 merge request: !8 GraphView cloning proposal + labelGraph proof of concept
Pipeline #31890 failed
@@ -34,6 +34,7 @@
#include "aidge/operator/FC.hpp"
#include "aidge/operator/GenericOperator.hpp"
#include "aidge/operator/Matmul.hpp"
#include "aidge/operator/MaxPooling.hpp"
//#include "aidge/operator/MetaOperator.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/Producer.hpp"
@@ -303,7 +303,7 @@ public:
* @param inId Input index.
* @return std::shared_ptr<Node>&
*/
-inline NodePtr &getParents(const IOIndex_t inId) {
+inline NodePtr &getParent(const IOIndex_t inId) {
assert(inId != gk_IODefaultIndex);
return mParents.at(inId);
}
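The rename is behavior-preserving: the accessor returns the single parent wired to one input index, hence the singular getParent. A minimal sketch of the renamed API, not part of this commit (printParents is illustrative):

#include <cstdio>
#include "aidge/graph/Node.hpp"

// List the parent attached to each input; getParent(i) returns the lone
// parent on input i, or nullptr when that input is not connected.
void printParents(const Aidge::NodePtr& node) {
    for (Aidge::IOIndex_t i = 0; i < node->nbInputs(); ++i) {
        const Aidge::NodePtr& parent = node->getParent(i);
        std::printf("input %u: %s\n", static_cast<unsigned>(i),
                    parent ? parent->name().c_str() : "(none)");
    }
}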
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_MAXPOOLING_H_
#define AIDGE_CORE_OPERATOR_MAXPOOLING_H_
#include <array>
#include <cmath>
#include <cstring>  // strcmp(), used in associateInput()
#include <numeric>
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/Parameter.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
enum class MaxPoolingParam { StrideDims, KernelDims, PaddingDims };
template <DimIdx_t DIM>
class MaxPooling_Op : public Operator,
public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
public Parameterizable<MaxPoolingParam,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, (DIM<<1) >> {
private:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char *Type = "MaxPooling";
MaxPooling_Op() = delete;
using Parameterizable_ = Parameterizable<MaxPoolingParam,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, (DIM<<1)> >;
template <MaxPoolingParam e>
using param = typename Parameterizable_::template param<e>;
constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
: Operator(Type),
Parameterizable_(param<MaxPoolingParam::StrideDims>(stride_dims),
param<MaxPoolingParam::KernelDims>(kernel_dims),
param<MaxPoolingParam::PaddingDims>(padding_dims)),
mOutput(std::make_shared<Tensor>()) {
setDatatype(DataType::Float32);
}
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 1 && "MaxPooling operator supports only 1 input");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
constexpr void computeOutputDims() override final {
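// For each spatial dimension d:
//   out[d+2] = 1 + floor((in[d+2] - kernel[d] + pad[d] + pad[d+DIM]) / stride[d])
// where pad[0..DIM) holds the leading and pad[DIM..2*DIM) the trailing
// padding; batch (dim 0) and channel (dim 1) sizes pass through unchanged.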
if (!mInput->empty()) {
std::array<DimSize_t, DIM + 2> outputDims = {};
for (std::size_t dim = 0; dim < this->template get<MaxPoolingParam::KernelDims>().size() ; ++dim) {
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
std::floor(static_cast<float>(mInput->dims()[dim+2] -
this->template get<MaxPoolingParam::KernelDims>()[dim] +
this->template get<MaxPoolingParam::PaddingDims>()[dim] +
this->template get<MaxPoolingParam::PaddingDims>()[dim+DIM]) /
static_cast<float>(this->template get<MaxPoolingParam::StrideDims>()[dim])));
}
outputDims[1] = mInput->dims()[1];
outputDims[0] = mInput->dims()[0];
mOutput->resize(outputDims);
}
}
bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operators supports only 1 inputs");
(void) inputIdx; // avoid unused warning
return *(mInput.get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "MaxPooling Operators supports only 1 inputs");
(void) inputIdx; // avoid unused warning
return mInput;
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "MaxPooling Operators has only 1 outputs");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operators supports only 1 inputs");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInput);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string &name) {
mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType &datatype) {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
mInput->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
// FIXME: properly handle default w&b initialization in every case
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
auto maxPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
return maxPool;
}
template <DimSize_t DIM>
inline std::shared_ptr<Node> MaxPooling(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
return MaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
}
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::MaxPoolingParam>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
}
#endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
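A hedged usage sketch of the new factory, not part of this commit (the node name and shapes are illustrative): with the formula noted in computeOutputDims(), a 2x2 kernel and stride 2 over a (1, 3, 32, 32) input give 1 + floor((32 - 2) / 2) = 16 per spatial dimension, i.e. a (1, 3, 16, 16) output.

#include "aidge/operator/MaxPooling.hpp"

// Build a 2D max-pooling node: 2x2 kernel, stride 2, default zero padding.
// The braced kernel picks the C-array overload, which deduces DIM = 2.
std::shared_ptr<Aidge::Node> makePool() {
    return Aidge::MaxPooling({2, 2}, "pool1", {2, 2});
}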
@@ -136,6 +136,16 @@ void init_Node(py::module& m) {
:rtype: int
)mydelimiter")
.def("get_parents", &Node::getParents,
R"mydelimiter(
Get parents.
)mydelimiter")
.def("get_children", (std::set<std::shared_ptr<Node>> (Node::*)() const) &Node::getChildren,
R"mydelimiter(
Get children.
)mydelimiter")
.def("__call__", &Node::operator(), py::arg("connectors"));
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifdef PYBIND
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <string>
#include <vector>
#include <array>
#include "aidge/utils/Parameter.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/MaxPooling.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp"
namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, PyAbstractParametrizable>(
m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
py::multiple_inheritance())
.def(py::init<const std::array<DimSize_t, DIM> &,
const std::array<DimSize_t, DIM> &,
const std::array<DimSize_t, (DIM<<1)> &>(),
py::arg("kernel_dims"),
py::arg("stride_dims"),
py::arg("padding_dims"));
m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
const std::vector<DimSize_t> &stride_dims,
const std::vector<DimSize_t> &padding_dims) {
// Lambda function wrapper because pybind11 fails to convert const array arguments.
// So we use a vector that we convert in this function to a const DimSize_t[DIM] array.
if (kernel_dims.size() != DIM) {
throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
if (stride_dims.size() != DIM) {
throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
if (padding_dims.size() != (DIM<<1)) {
throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) +"]");
}
DimSize_t tmp_kernel_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_kernel_dims_array[i] = kernel_dims[i];
}
DimSize_t tmp_stride_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_stride_dims_array[i] = stride_dims[i];
}
DimSize_t tmp_padding_dims_array[DIM<<1];
for (size_t i = 0; i < (DIM<<1); ++i) {
tmp_padding_dims_array[i] = padding_dims[i];
}
const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
return MaxPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
}, py::arg("kernel_dims"),
py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
}
void init_MaxPooling(py::module &m) {
declare_MaxPoolingOp<1>(m);
declare_MaxPoolingOp<2>(m);
declare_MaxPoolingOp<3>(m);
// FIXME:
// m.def("MaxPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
// (&)[1])>(&MaxPooling));
}
} // namespace Aidge
#endif
\ No newline at end of file
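The lambda wrapper above exists because pybind11 cannot deduce fixed-size array parameters from Python lists, so sizes are checked at runtime and copied into stack arrays before calling the C++ factory. A standalone sketch of that pattern, assuming only the standard library (toFixedArray is illustrative, not part of this commit):

#include <array>
#include <cstddef>
#include <stdexcept>
#include <string>
#include <vector>

// Validate a runtime-sized vector, then copy it into the compile-time-sized
// array a templated factory expects; a mismatch surfaces as a runtime_error
// (a Python exception once bound) rather than a C++ template failure.
template <std::size_t DIM>
std::array<std::size_t, DIM> toFixedArray(const std::vector<std::size_t>& v,
                                          const std::string& what) {
    if (v.size() != DIM) {
        throw std::runtime_error(what + " size [" + std::to_string(v.size()) +
                                 "] does not match DIM [" + std::to_string(DIM) + "]");
    }
    std::array<std::size_t, DIM> out{};
    for (std::size_t i = 0; i < DIM; ++i) {
        out[i] = v[i];
    }
    return out;
}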
@@ -29,6 +29,7 @@ void init_FC(py::module&);
void init_GenericOperator(py::module&);
void init_LeakyReLU(py::module&);
void init_Matmul(py::module&);
+void init_MaxPooling(py::module&);
void init_Producer(py::module&);
void init_ReLU(py::module&);
void init_Softmax(py::module&);
@@ -75,6 +76,7 @@ void init_Aidge(py::module& m){
init_GenericOperator(m);
init_LeakyReLU(m);
init_Matmul(m);
+init_MaxPooling(m);
init_ReLU(m);
init_Softmax(m);
@@ -326,7 +326,7 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
// add learnable parameters to the graph
if (includeLearnableParam) {
for (IOIndex_t i = node->nbDataInputs(); i < node->nbInputs(); ++i) {
-std::shared_ptr<Node> parentNode = node->getParents(static_cast<IOIndex_t>(i));
+std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
if (parentNode) {
parentNode->addView(shared_from_this());
mNodes.insert(parentNode);
@@ -226,7 +226,7 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t
}
void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOIndex_t inId) {
-if (getParents(inId) != nullptr) {
+if (getParent(inId) != nullptr) {
printf("Warning, you're replacing a Parent.\n");
}
assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Input index out of bound.");
@@ -59,12 +59,12 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
// Step 2 : Branch existing producers & create the others
// link weights & bias
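// FC input convention assumed by this fusion: 0 = data, 1 = weights, 2 = bias.
// If the MatMul has no parent on input 1, its lone parent is taken as the
// weight producer and wired to FC input 1; otherwise input 0 carries the data
// and input 1 the weights. The Add's producer (below) supplies the bias.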
-if (matmul->getParents(1)==nullptr) {
-    matmul->getParents(0)->addChild(fc, 0, 1);
+if (matmul->getParent(1)==nullptr) {
+    matmul->getParent(0)->addChild(fc, 0, 1);
} else {
-    if (matmul->getParents(0)!=nullptr)
-        matmul->getParents(0)->addChild(fc, 0, 0);
-    matmul->getParents(1)->addChild(fc, 0, 1);
+    if (matmul->getParent(0)!=nullptr)
+        matmul->getParent(0)->addChild(fc, 0, 0);
+    matmul->getParent(1)->addChild(fc, 0, 1);
}
(producer_add_bias.first)->addChild(fc,0,2);