
Compare revisions

Changes are shown as if the source revision were being merged into the target revision.

Commits on Source (27)
Showing 644 additions and 43 deletions
@@ -12,6 +12,7 @@ build:ubuntu_cpp:
     - make -j4 all install
   artifacts:
+    expire_in: 1 week
     paths:
       - build_cpp/
       - install_cpp/
@@ -29,6 +30,7 @@ build:ubuntu_python:
     - export AIDGE_INSTALL=`pwd`/install
     - python3 -m pip install .
   artifacts:
+    expire_in: 1 week
     paths:
       - venv/
@@ -57,6 +59,7 @@ build:windows_cpp:
     - cmake --install . --config Debug
   artifacts:
+    expire_in: 1 week
     paths:
       - build_cpp/
       - install_cpp/
@@ -52,9 +52,9 @@ target_include_directories(${module_name}
 )
 
 # PYTHON BINDING
-generate_python_binding(${project} ${module_name})
+if (PYBIND)
+    generate_python_binding(${project} ${module_name})
 
     # Handles Python + pybind11 headers dependencies
     target_link_libraries(${module_name}
         PUBLIC
@@ -66,22 +66,12 @@ endif()
 target_compile_features(${module_name} PRIVATE cxx_std_14)
 
-if(WERROR)
-    target_compile_options(${module_name} PRIVATE
+target_compile_options(${module_name} PRIVATE
     $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-    -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Werror>)
-    target_compile_options(${module_name} PRIVATE
+    -Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror>>)
+target_compile_options(${module_name} PRIVATE
     $<$<CXX_COMPILER_ID:MSVC>:
     /W4>)
-else()
-    target_compile_options(${module_name} PRIVATE
-    $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-    -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Wpedantic>)
-    target_compile_options(${module_name} PRIVATE
-    $<$<CXX_COMPILER_ID:MSVC>:
-    /W4>)
-endif()
 
 if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
     append_coverage_compiler_flags()
-function(generate_python_binding name target_to_bind)
-if (PYBIND)
-add_definitions(-DPYBIND)
-Include(FetchContent)
+function(generate_python_binding name target_to_bind)
+    add_definitions(-DPYBIND)
+    Include(FetchContent)
 
-FetchContent_Declare(
-PyBind11
-GIT_REPOSITORY https://github.com/pybind/pybind11.git
-GIT_TAG        v2.10.4 # or a later release
-)
+    FetchContent_Declare(
+    PyBind11
+    GIT_REPOSITORY https://github.com/pybind/pybind11.git
+    GIT_TAG        v2.10.4 # or a later release
+    )
 
-# Use the new FindPython mode, recommended. Requires CMake 3.15+
-find_package(Python COMPONENTS Interpreter Development)
-FetchContent_MakeAvailable(PyBind11)
+    # Use the new FindPython mode, recommended. Requires CMake 3.15+
+    find_package(Python COMPONENTS Interpreter Development)
+    FetchContent_MakeAvailable(PyBind11)
 
-message(STATUS "Creating binding for module ${name}")
-file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
+    message(STATUS "Creating binding for module ${name}")
+    file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
 
-pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO_EXTRAS required for pip install
-target_include_directories(${name} PUBLIC "python_binding")
-target_link_libraries(${name} PUBLIC ${target_to_bind})
-endif()
+    pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO_EXTRAS required for pip install
+    target_include_directories(${name} PUBLIC "python_binding")
+    target_link_libraries(${name} PUBLIC ${target_to_bind})
 endfunction()
@@ -34,11 +34,13 @@
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/Matmul.hpp"
-#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+//#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Softmax.hpp"
+#include "aidge/operator/Scaling.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/utils/CParameter.hpp"
 #include "aidge/utils/Parameter.hpp"
@@ -303,7 +303,7 @@ public:
    * @param inId Input index.
    * @return std::shared_ptr<Node>&
    */
-  inline NodePtr &getParents(const IOIndex_t inId) {
+  inline NodePtr &getParent(const IOIndex_t inId) {
       assert(inId != gk_IODefaultIndex);
       return mParents.at(inId);
   }
/**
 * \file execTime.hpp
 * \brief ExecTime hook structure: records a timestamp each time the operator runs.
 * \version 1.0.0
 * \date 27 June 2023 (creation)
 * \par ChangeLog
 * v1.0.0, 27 June 2023<br>
 * - Initial version.
 * \author mn271187, ik243221
 * \copyright
 * Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
 * rights reserved.
 */

#ifndef execTime_H_
#define execTime_H_

#include "aidge/operator/Operator.hpp"
#include "aidge/hook/hook.hpp"
#include <memory>
#include <chrono>
#include <vector>

namespace Aidge {

class ExecTime : public Hook {
private:
    std::vector<std::chrono::high_resolution_clock::time_point> registeredTimes = std::vector<std::chrono::high_resolution_clock::time_point>();

public:
    ExecTime(const std::shared_ptr<Operator> op) : Hook(op) {}
    ~ExecTime() = default;

    // Invoked by Operator::runHooks() at the end of each forward().
    void call() override final {
        registeredTimes.push_back(std::chrono::high_resolution_clock::now());
    }

    static std::shared_ptr<ExecTime> create(const std::shared_ptr<Operator> op)
    {
        return std::make_shared<ExecTime>(op);
    }

    std::vector<std::chrono::high_resolution_clock::time_point> getTimes() {
        return registeredTimes;
    }

    std::chrono::high_resolution_clock::time_point getTime(size_t idx) {
        return registeredTimes[idx];
    }
};

namespace {
// Registers the hook under the name used by Operator::addHook("execution_time").
static Registrar<Hook> registrarHook_ExecTime({"execution_time"}, Aidge::ExecTime::create);
}
}

#endif /* execTime_H_ */
\ No newline at end of file
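For reference, a minimal sketch of using this hook end to end. It assumes Operator::addHook() from later in this change set, the existing ReLU factory, and an input tensor/backend already set up; all wiring here is illustrative, not part of the diff:

#include <iostream>
#include <memory>
#include "aidge/operator/ReLU.hpp"
#include "aidge/hook/execTime.hpp"

int main() {
    auto relu = Aidge::ReLU("relu0");           // any operator node would do
    auto op = relu->getOperator();
    op->addHook("execution_time");              // built by name through Registrar<Hook>
    // ... associate an input tensor and set a backend here (assumed available) ...
    op->forward();                              // forward() now ends with runHooks()
    auto timer = std::static_pointer_cast<Aidge::ExecTime>(op->getHook("execution_time"));
    std::cout << timer->getTimes().size() << " timestamp(s) recorded\n";
    return 0;
}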
/**
 * \file Hook.hpp
 * \brief Hook structure
 * \version 1.0.0
 * \date 27 June 2023 (creation)
 * \par ChangeLog
 * v1.0.0, 27 June 2023<br>
 * - Initial version.
 * \author mn271187, ik243221
 * \copyright
 * Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
 * rights reserved.
 */

#ifndef Hook_H_
#define Hook_H_

#include "aidge/utils/Parameter.hpp"
#include "aidge/utils/Registrar.hpp"
#include <memory>

namespace Aidge {

class Operator;

// Base class for observers attached to an Operator; concrete hooks register
// themselves by name so that Operator::addHook(name) can instantiate them.
class Hook : public Registrable<Hook, std::tuple<std::string>, std::shared_ptr<Hook>(const std::shared_ptr<Operator>)> {
protected:
    const std::shared_ptr<Operator> mOperator;

public:
    Hook(std::shared_ptr<Operator> op) : mOperator(op) {}
    virtual ~Hook() = default;

    virtual void call() = 0;
};
}

#endif /* Hook_H_ */
\ No newline at end of file
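To show how the interface is meant to be extended, a sketch of a hypothetical hook following the same registration pattern as ExecTime above; the CallCount class and the "call_count" name are invented for illustration:

#include <cstddef>
#include <memory>
#include "aidge/hook/hook.hpp"
#include "aidge/utils/Registrar.hpp"

namespace Aidge {
// Hypothetical hook: counts how many times its operator has executed.
class CallCount : public Hook {
private:
    std::size_t mCount = 0;

public:
    CallCount(const std::shared_ptr<Operator> op) : Hook(op) {}
    void call() override final { ++mCount; }   // invoked by Operator::runHooks()
    std::size_t count() const { return mCount; }

    static std::shared_ptr<CallCount> create(const std::shared_ptr<Operator> op) {
        return std::make_shared<CallCount>(op);
    }
};

namespace {
// Same registration idiom as the concrete hooks in this change set.
static Registrar<Hook> registrarHook_CallCount({"call_count"}, Aidge::CallCount::create);
}
}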
/**
 * \file outputRange.hpp
 * \brief OutputRange hook structure: records the absolute max of an operator's output.
 * \version 1.0.0
 * \date 27 June 2023 (creation)
 * \par ChangeLog
 * v1.0.0, 27 June 2023<br>
 * - Initial version.
 * \author ik243221
 * \copyright
 * Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
 * rights reserved.
 */

#ifndef AIDGE_CORE_HOOK_OUTPUTRANGE_H_
#define AIDGE_CORE_HOOK_OUTPUTRANGE_H_

#include "aidge/operator/Operator.hpp"
#include "aidge/hook/hook.hpp"
#include <memory>
#include <cstddef>
#include <vector>
#include <cmath>

namespace Aidge {

class OutputRange : public Hook {
private:
    std::vector<float> registeredOutputs = std::vector<float>();

public:
    OutputRange(const std::shared_ptr<Operator> op) : Hook(op) {}
    ~OutputRange() = default;

    void call() override final {
        // This assumes the operator has a single output.
        std::shared_ptr<Tensor> tensor = mOperator->getOutput(0);
        float max_value = 0.;
        float* casted_tensor = static_cast<float*>(tensor->getImpl()->rawPtr());
        // Find the absolute max value in the tensor and record it.
        for (std::size_t i = 0; i < tensor->size(); ++i) {
            if (std::abs(casted_tensor[i]) > max_value) {
                max_value = std::abs(casted_tensor[i]);
            }
        }
        registeredOutputs.push_back(max_value);
    }

    static std::shared_ptr<OutputRange> create(const std::shared_ptr<Operator> op)
    {
        return std::make_shared<OutputRange>(op);
    }

    std::vector<float> getOutputs() {
        return registeredOutputs;
    }

    float getOutput(size_t idx) {
        return registeredOutputs[idx];
    }
};

namespace {
// Registers the hook under the name used by Operator::addHook("output_range").
static Registrar<Hook> registrarHook_OutputRange({"output_range"}, Aidge::OutputRange::create);
}
}

#endif /* AIDGE_CORE_HOOK_OUTPUTRANGE_H_ */
\ No newline at end of file
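A sketch of reading the recorded ranges back, e.g. to derive per-layer quantization scaling factors; the header path and the surrounding setup are assumptions:

#include <iostream>
#include <memory>
#include "aidge/hook/outputRange.hpp"   // assumed location, alongside hook.hpp

// Assumes op->addHook("output_range") was called and the network has run at
// least once, so call() has recorded one abs-max value per forward().
void printRanges(const std::shared_ptr<Aidge::Operator>& op) {
    auto ranges = std::static_pointer_cast<Aidge::OutputRange>(op->getHook("output_range"));
    for (float r : ranges->getOutputs()) {
        std::cout << "abs-max of output: " << r << '\n';
    }
}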
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_MAXPOOLING_H_
#define AIDGE_CORE_OPERATOR_MAXPOOLING_H_
#include <array>
#include <numeric>
#include <vector>
#include <cmath>
#include <cstring>  // strcmp(), used in associateInput()
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/Parameter.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
enum class MaxPoolingParam { StrideDims, KernelDims, PaddingDims };
template <DimIdx_t DIM>
class MaxPooling_Op : public Operator,
public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
public Parameterizable<MaxPoolingParam,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, (DIM<<1) >> {
private:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char *Type = "MaxPooling";
MaxPooling_Op() = delete;
using Parameterizable_ = Parameterizable<MaxPoolingParam,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, (DIM<<1)> >;
template <MaxPoolingParam e>
using param = typename Parameterizable_::template param<e>;
constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
: Operator(Type),
Parameterizable_(param<MaxPoolingParam::StrideDims>(stride_dims),
param<MaxPoolingParam::KernelDims>(kernel_dims),
param<MaxPoolingParam::PaddingDims>(padding_dims)),
mOutput(std::make_shared<Tensor>()) {
setDatatype(DataType::Float32);
}
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 1 && "MaxPooling operator supports only 1 input");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
constexpr void computeOutputDims() override final {
if (!mInput->empty()) {
std::array<DimSize_t, DIM + 2> outputDims = {};
for (std::size_t dim = 0; dim < this->template get<MaxPoolingParam::KernelDims>().size() ; ++dim) {
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
std::floor(static_cast<float>(mInput->dims()[dim+2] -
this->template get<MaxPoolingParam::KernelDims>()[dim] +
this->template get<MaxPoolingParam::PaddingDims>()[dim] +
this->template get<MaxPoolingParam::PaddingDims>()[dim+DIM]) /
static_cast<float>(this->template get<MaxPoolingParam::StrideDims>()[dim])));
}
outputDims[1] = mInput->dims()[1];
outputDims[0] = mInput->dims()[0];
mOutput->resize(outputDims);
}
}
bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "MaxPooling operator supports only 1 input");
(void) inputIdx; // avoid unused warning
return *(mInput.get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "MaxPooling operator supports only 1 input");
(void) inputIdx; // avoid unused warning
return mInput;
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "MaxPooling operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "MaxPooling operator supports only 1 input");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInput);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string &name) {
mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType &datatype) {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
mInput->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
// FIXME: properly handle default weight & bias initialization in every case
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
auto maxPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
return maxPool;
}
template <DimSize_t DIM>
inline std::shared_ptr<Node> MaxPooling(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
return MaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
}
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::MaxPoolingParam>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
}
#endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
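A sketch of what the factory accepts; kernel/stride values and the node name are illustrative:

#include "aidge/operator/MaxPooling.hpp"

// 2-D max pooling: 2x2 kernel, stride 2, default zero padding
// (create_array<DimSize_t, 4>(0)). DIM is deduced from the braced array.
auto pool = Aidge::MaxPooling({2, 2}, "pool1", {2, 2});
// For an input of spatial size HxW, computeOutputDims() yields
// floor((H - 2)/2) + 1 by floor((W - 2)/2) + 1, per the formula above.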
@@ -20,12 +20,14 @@
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/hook/hook.hpp"
 
 namespace Aidge {
 
 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
   std::unique_ptr<OperatorImpl> mImpl; // implementation of the operator
+  std::map<std::string, std::shared_ptr<Hook>> mHooks;
 
 private:
   std::string mType;
@@ -48,6 +50,15 @@ public:
   virtual std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const = 0;
   virtual Tensor& output(const IOIndex_t /*outputIdx*/) const = 0;
 
+  std::shared_ptr<Hook> getHook(std::string hookName) {
+      return mHooks[hookName];
+  }
+  void addHook(std::string hookName) {
+      mHooks.insert(std::pair<std::string, std::shared_ptr<Hook>>(hookName,
+          Registrar<Hook>::create({hookName})(shared_from_this())));
+  }
+  void runHooks() const;
+
   ///////////////////////////////////////////////////////
   // IMPLEMENTATION
   ///////////////////////////////////////////////////////
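For context, a sketch of how these new members combine; op may be any operator, and both hook names resolve through the Registrar<Hook> entries added elsewhere in this change set:

#include <memory>
#include "aidge/operator/Operator.hpp"

// addHook(name) looks the name up in Registrar<Hook> and stores the resulting
// hook in mHooks; runHooks() (called at the end of forward(), see the
// Operator.cpp hunk below) then invokes call() on every registered hook.
void attachProfilingHooks(const std::shared_ptr<Aidge::Operator>& op) {
    op->addHook("execution_time");
    op->addHook("output_range");
}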
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_CORE_OPERATOR_Scaling_H__
#define __AIDGE_CORE_OPERATOR_Scaling_H__
#include <vector>
#include <memory>
#include <cstring>  // strcmp(), used in associateInput()
#include "aidge/utils/Parameter.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
enum class ScalingParam {
scalingFactor
};
class Scaling_Op : public Operator,
public Registrable<Scaling_Op, std::string, std::unique_ptr<OperatorImpl>(const Scaling_Op&)>,
public Parameterizable<ScalingParam, float> {
public:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char* Type = "Scaling";
Scaling_Op() = delete;
using Parameterizable_ = Parameterizable<ScalingParam, float>;
template <ScalingParam e> using param = typename Parameterizable_::template param<e>;
Scaling_Op(float scalingFactor)
: Operator(Type),
Parameterizable_(
param<ScalingParam::scalingFactor>(scalingFactor))
{
setDatatype(DataType::Float32);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx == 0 && "operator supports only 1 input");
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
(void) inputIdx; //avoid unused warning
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInput->empty())
mOutput->resize(mInput->dims());
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert((inputIdx == 0) && "Scaling Operator has only 1 input");
(void) inputIdx; // avoid unused warning
return *(mInput.get());
}
inline Tensor& output(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Scaling Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return *(mOutput.get());
}
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx == 0) && "Scaling Operator has only 1 input");
(void) inputIdx; // avoid unused warning
return mInput;
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Scaling Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operator supports only 1 input");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInput);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
    assert(outputIdx == 0 && "operator supports only 1 output");
    (void) outputIdx; // avoid unused warning
    return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string& name) {
mImpl = Registrar<Scaling_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType& datatype) {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
mInput->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};
inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
}
}
namespace {
template <>
const char* const EnumStrings<Aidge::ScalingParam>::data[]
= {"scalingFactor"};
}
#endif /* __AIDGE_CORE_OPERATOR_Scaling_H__ */
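A sketch of instantiating the operator; the factor, the node name, and prev (an assumed upstream node) are illustrative:

#include <memory>
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Scaling.hpp"

// Scaling keeps the input dimensions (computeOutputDims() copies them); the
// multiplication by scalingFactor itself lives in the backend implementation.
std::shared_ptr<Aidge::Node> appendRescale(std::shared_ptr<Aidge::Node> prev) {
    auto scale = Aidge::Scaling(0.5f, "rescale0");   // halve every output value
    prev->addChild(scale, 0, 0);                     // prev output 0 -> scale input 0
    return scale;
}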
@@ -136,6 +136,16 @@ void init_Node(py::module& m) {
             :rtype: int
             )mydelimiter")
 
+    .def("get_parents", &Node::getParents,
+    R"mydelimiter(
+    Get parents.
+    )mydelimiter")
+
+    .def("get_children", (std::set<std::shared_ptr<Node>> (Node::*)() const) &Node::getChildren,
+    R"mydelimiter(
+    Get children.
+    )mydelimiter")
+
     .def("__call__", &Node::operator(), py::arg("connectors"));
 }
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifdef PYBIND
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <string>
#include <vector>
#include <array>
#include "aidge/utils/Parameter.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/MaxPooling.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp"
namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, PyAbstractParametrizable>(
m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
py::multiple_inheritance())
.def(py::init<const std::array<DimSize_t, DIM> &,
const std::array<DimSize_t, DIM> &,
const std::array<DimSize_t, (DIM<<1)> &>(),
py::arg("kernel_dims"),
py::arg("stride_dims"),
py::arg("padding_dims"));
m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
const std::vector<DimSize_t> &stride_dims,
const std::vector<DimSize_t> &padding_dims) {
// Lambda wrapper needed because pybind11 fails to convert a const array argument,
// so we accept a vector and convert it here to a const DimSize_t [DIM] array.
if (kernel_dims.size() != DIM) {
throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
if (stride_dims.size() != DIM) {
throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
if (padding_dims.size() != (DIM<<1)) {
    throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match 2*DIM [" + std::to_string(DIM<<1) +"]");
}
DimSize_t tmp_kernel_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_kernel_dims_array[i] = kernel_dims[i];
}
DimSize_t tmp_stride_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_stride_dims_array[i] = stride_dims[i];
}
DimSize_t tmp_padding_dims_array[DIM<<1];
for (size_t i = 0; i < (DIM<<1); ++i) {
tmp_padding_dims_array[i] = padding_dims[i];
}
const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
return MaxPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
}, py::arg("kernel_dims"),
py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
}
void init_MaxPooling(py::module &m) {
declare_MaxPoolingOp<1>(m);
declare_MaxPoolingOp<2>(m);
declare_MaxPoolingOp<3>(m);
// FIXME:
// m.def("MaxPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
// (&)[1])>(&MaxPooling));
}
} // namespace Aidge
#endif
\ No newline at end of file
@@ -29,6 +29,7 @@ void init_FC(py::module&);
 void init_GenericOperator(py::module&);
 void init_LeakyReLU(py::module&);
 void init_Matmul(py::module&);
+void init_MaxPooling(py::module&);
 void init_Producer(py::module&);
 void init_ReLU(py::module&);
 void init_Softmax(py::module&);
@@ -75,6 +76,7 @@ void init_Aidge(py::module& m){
     init_GenericOperator(m);
     init_LeakyReLU(m);
     init_Matmul(m);
+    init_MaxPooling(m);
     init_ReLU(m);
     init_Softmax(m);
@@ -326,7 +326,7 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnableParam
   // add learnable parameters to the graph
   if (includeLearnableParam) {
     for (IOIndex_t i = node->nbDataInputs(); i < node->nbInputs(); ++i) {
-      std::shared_ptr<Node> parentNode = node->getParents(static_cast<IOIndex_t>(i));
+      std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
       if (parentNode) {
         parentNode->addView(shared_from_this());
         mNodes.insert(parentNode);
@@ -226,7 +226,7 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t
 }
 
 void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOIndex_t inId) {
-  if (getParents(inId) != nullptr) {
+  if (getParent(inId) != nullptr) {
     printf("Warning, you're replacing a Parent.\n");
   }
   assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Input index out of bound.");
@@ -42,6 +42,14 @@ void Aidge::Operator::updateConsummerProducer(){
     mImpl->updateConsummerProducer();
 }
 
-void Aidge::Operator::forward() { mImpl->forward(); }
+void Aidge::Operator::runHooks() const {
+    for (auto& hook : mHooks) {
+        hook.second->call();
+    }
+}
+void Aidge::Operator::forward() {
+    mImpl->forward();
+    runHooks();
+}
 
 void Aidge::Operator::backward() { mImpl->backward(); }
@@ -59,12 +59,12 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
 
     // Step 2 : Branch existing producers & create the others
     // link weights & bias
-    if (matmul->getParents(1)==nullptr) {
-        matmul->getParents(0)->addChild(fc, 0, 1);
+    if (matmul->getParent(1)==nullptr) {
+        matmul->getParent(0)->addChild(fc, 0, 1);
     } else {
-        if (matmul->getParents(0)!=nullptr)
-            matmul->getParents(0)->addChild(fc, 0, 0);
-        matmul->getParents(1)->addChild(fc, 0, 1);
+        if (matmul->getParent(0)!=nullptr)
+            matmul->getParent(0)->addChild(fc, 0, 0);
+        matmul->getParent(1)->addChild(fc, 0, 1);
     }
     (producer_add_bias.first)->addChild(fc,0,2);