Commit 86d866d4 authored by Vincent Lorrain

Merge remote-tracking branch 'origin/main' into graphRegex

Parents: c2f55dc6 51af88a0
Merge request: !14 Graph regex
Pipeline: #32148 (failed)

Showing 683 additions and 49 deletions
@@ -12,6 +12,7 @@ build:ubuntu_cpp:
     - make -j4 all install
   artifacts:
+    expire_in: 1 week
     paths:
       - build_cpp/
       - install_cpp/
@@ -29,6 +30,7 @@ build:ubuntu_python:
     - export AIDGE_INSTALL=`pwd`/install
     - python3 -m pip install .
   artifacts:
+    expire_in: 1 week
     paths:
       - venv/
@@ -57,6 +59,7 @@ build:windows_cpp:
     - cmake --install . --config Debug
   artifacts:
+    expire_in: 1 week
     paths:
       - build_cpp/
       - install_cpp/
@@ -52,9 +52,9 @@ target_include_directories(${module_name}
 )

 # PYTHON BINDING
-generate_python_binding(${project} ${module_name})
 if (PYBIND)
+    generate_python_binding(${project} ${module_name})
     # Handles Python + pybind11 headers dependencies
     target_link_libraries(${module_name}
         PUBLIC
@@ -66,22 +66,12 @@ endif()

 target_compile_features(${module_name} PRIVATE cxx_std_14)

-if(WERROR)
-    target_compile_options(${module_name} PRIVATE
-        $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-        -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Werror>)
-    target_compile_options(${module_name} PRIVATE
-        $<$<CXX_COMPILER_ID:MSVC>:
-        /W4>)
-else()
-    target_compile_options(${module_name} PRIVATE
-        $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-        -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Wpedantic>)
-    target_compile_options(${module_name} PRIVATE
-        $<$<CXX_COMPILER_ID:MSVC>:
-        /W4>)
-endif()
+target_compile_options(${module_name} PRIVATE
+    $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
+    -Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror>>)
+target_compile_options(${module_name} PRIVATE
+    $<$<CXX_COMPILER_ID:MSVC>:
+    /W4>)

 if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
     append_coverage_compiler_flags()
......
 function(generate_python_binding name target_to_bind)
-    if (PYBIND)
     add_definitions(-DPYBIND)
     Include(FetchContent)

     FetchContent_Declare(
         PyBind11
         GIT_REPOSITORY https://github.com/pybind/pybind11.git
         GIT_TAG        v2.10.4 # or a later release
     )

     # Use the new FindPython mode, recommended. Requires CMake 3.15+.
     find_package(Python COMPONENTS Interpreter Development)
     FetchContent_MakeAvailable(PyBind11)

     message(STATUS "Creating binding for module ${name}")
     file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")

     pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO_EXTRAS required for pip install
     target_include_directories(${name} PUBLIC "python_binding")
     target_link_libraries(${name} PUBLIC ${target_to_bind})
-    endif()
 endfunction()
@@ -34,11 +34,13 @@
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/Matmul.hpp"
-#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+//#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Softmax.hpp"
+#include "aidge/operator/Scaling.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/utils/CParameter.hpp"
 #include "aidge/utils/Parameter.hpp"
......
@@ -20,7 +20,7 @@ namespace Aidge {
 class OperatorImpl {
 public:
     virtual void forward(){};
-    virtual void backward() {}
+    virtual void backward(){};

     /**
      * @brief Minimum amount of data from a specific input required by the
@@ -46,13 +46,19 @@ public:
     virtual NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const = 0;

     /**
-     * @brief TOtal amount of produced data ready to be used on a specific output.
+     * @brief Total amount of produced data ready to be used on a specific output.
      *
      * @param outputIdx Index of the output analysed.
      * @return DimSize_t
      */
     virtual NbElts_t getNbProducedData(const IOIndex_t outputIdx) const = 0;

+    /**
+     * @brief Update the consumer-producer system by simulating the consumption and production of i/o.
+     */
+    virtual void updateConsummerProducer() = 0;
+
     virtual ~OperatorImpl() = default;
 };
 } // namespace Aidge
......
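The new updateConsummerProducer() entry point lets a scheduler advance an implementation's consumption/production counters without running the kernel itself. A toy, standalone illustration of that bookkeeping idea (this is not Aidge's implementation; all names below are invented for the sketch):

#include <cstddef>

// Toy single-input/single-output implementation: the scheduler can "dry-run"
// the operator by moving the counters forward, mirroring the spirit of
// OperatorImpl::updateConsummerProducer().
struct ToyImpl {
    std::size_t consumed = 0;  // elements read from the input so far
    std::size_t produced = 0;  // elements written to the output so far
    std::size_t chunk    = 16; // elements handled per simulated execution

    void updateConsummerProducer() {
        consumed += chunk;     // simulate consuming one chunk of input
        produced += chunk;     // and producing the matching output
    }
    std::size_t getNbConsumedData() const { return consumed; }
    std::size_t getNbProducedData() const { return produced; }
};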
@@ -303,7 +303,7 @@ public:
      * @param inId Input index.
      * @return std::shared_ptr<Node>&
      */
-    inline NodePtr &getParents(const IOIndex_t inId) {
+    inline NodePtr &getParent(const IOIndex_t inId) {
         assert(inId != gk_IODefaultIndex);
         return mParents.at(inId);
     }
......
@@ -55,7 +55,7 @@ public:
  * @param inputs List of Node and GraphView to link sequentially.
  * @return std::shared_ptr<GraphView> Pointer to the generated view.
  */
-std::shared_ptr<GraphView> Sequential(std::initializer_list<OpArgs> inputs);
+std::shared_ptr<GraphView> Sequential(std::vector<OpArgs> inputs);

 /////////////////////////////
 // Parallel

@@ -65,7 +65,7 @@ std::shared_ptr<GraphView> Sequential(std::initializer_list<OpArgs> inputs);
  * @param inputs List of Node and GraphView to link in parallel.
  * @return std::shared_ptr<GraphView> pointer to the generated view.
  */
-std::shared_ptr<GraphView> Parallel(std::initializer_list<OpArgs> inputs);
+std::shared_ptr<GraphView> Parallel(std::vector<OpArgs> inputs);

 /////////////////////////////
 // Residual

@@ -79,8 +79,8 @@ std::shared_ptr<GraphView> Parallel(std::initializer_list<OpArgs> inputs);
  * @param inputs List of Node and GraphView to link sequentially.
  * @return std::shared_ptr<GraphView> pointer to the generated view.
  */
-std::shared_ptr<GraphView> Residual(std::initializer_list<OpArgs> inputs);
+std::shared_ptr<GraphView> Residual(std::vector<OpArgs> inputs);
 }

 #endif /* AIDGE_CORE_GRAPH_OPARGS_H_ */
\ No newline at end of file
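Switching Sequential/Parallel/Residual from std::initializer_list to std::vector keeps braced call sites working (the braced list converts to a vector) while also allowing the argument list to be built at runtime. A hedged sketch, assuming the ReLU factory takes an optional name like the other operator factories in this commit:

#include <memory>
#include <vector>
#include "aidge/graph/OpArgs.hpp"
#include "aidge/operator/ReLU.hpp"
#include "aidge/operator/Scaling.hpp"

using namespace Aidge;

int main() {
    // Braced lists still compile: the initializer list builds the std::vector<OpArgs>.
    std::shared_ptr<GraphView> g1 = Sequential({ReLU("relu1"), Scaling(0.5f, "scale1")});

    // The vector overload additionally allows assembling the stages programmatically.
    std::vector<OpArgs> stages;
    stages.push_back(ReLU("relu2"));
    stages.push_back(Scaling(0.25f, "scale2"));
    std::shared_ptr<GraphView> g2 = Sequential(stages);
    (void)g1; (void)g2;
    return 0;
}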
/**
 * \file execTime.hpp
 * \brief execTime structure
 * \version file 1.0.0
 * \date Creation 27 June 2023
 * \date 27 June 2023
 * \par ChangeLog
 * \par
 *  v1.0.0, 27 June 2023<br>
 *  - Initial version.
 * \author mn271187, ik243221
 * \copyright
 *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
 *  rights reserved.
 */

#ifndef execTime_H_
#define execTime_H_

#include "aidge/operator/Operator.hpp"
#include "aidge/hook/hook.hpp"
#include <memory>
#include <chrono>
#include <vector>

namespace Aidge {

class ExecTime : public Hook {
private:
    std::vector<std::chrono::high_resolution_clock::time_point> registeredTimes = std::vector<std::chrono::high_resolution_clock::time_point>();

public:
    ExecTime(const std::shared_ptr<Operator> op) : Hook(op) {}
    ~ExecTime() = default;

    void call() override final {
        registeredTimes.push_back(std::chrono::high_resolution_clock::now());
    }

    static std::shared_ptr<ExecTime> create(const std::shared_ptr<Operator> op)
    {
        return std::make_shared<ExecTime>(op);
    }

    std::vector<std::chrono::high_resolution_clock::time_point> getTimes() {
        return registeredTimes;
    }

    std::chrono::high_resolution_clock::time_point getTime(size_t idx) {
        return registeredTimes[idx];
    }
};

namespace {
static Registrar<Hook> registrarHook_ExecTime({"execution_time"}, Aidge::ExecTime::create);
}
}

#endif /* execTime_H_ */
\ No newline at end of file
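A usage sketch for the hook above, assuming an operator that has already run with the "execution_time" hook attached (via Operator::addHook, added later in this commit) and that the header lives at aidge/hook/execTime.hpp:

#include <chrono>
#include <iostream>
#include <memory>
#include "aidge/hook/execTime.hpp"

// Print the time elapsed between the first and last recorded call() of 'op'.
void printElapsed(const std::shared_ptr<Aidge::Operator>& op) {
    auto hook = std::static_pointer_cast<Aidge::ExecTime>(op->getHook("execution_time"));
    const auto times = hook->getTimes();  // one time point per hook invocation
    if (times.size() >= 2) {
        const auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                            times.back() - times.front()).count();
        std::cout << "elapsed between first and last call: " << us << " us\n";
    }
}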
/**
 * \file Hook.hpp
 * \brief Hook structure
 * \version file 1.0.0
 * \date Creation 27 June 2023
 * \date 27 June 2023
 * \par ChangeLog
 * \par
 *  v1.0.0, 27 June 2023<br>
 *  - Initial version.
 * \author mn271187, ik243221
 * \copyright
 *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
 *  rights reserved.
 */

#ifndef Hook_H_
#define Hook_H_

#include "aidge/utils/Parameter.hpp"
#include "aidge/utils/Registrar.hpp"
#include <memory>

namespace Aidge {

class Operator;

class Hook : public Registrable<Hook, std::tuple<std::string>, std::shared_ptr<Hook>(const std::shared_ptr<Operator>)> {
protected:
    const std::shared_ptr<Operator> mOperator;

public:
    Hook(std::shared_ptr<Operator> op) : mOperator(op) {}
    virtual ~Hook();

    virtual void call() = 0;
};
}

#endif /* Hook_H_ */
\ No newline at end of file
/**
 * \file outputRange.hpp
 * \brief outputRange structure
 * \version file 1.0.0
 * \date Creation 27 June 2023
 * \date 27 June 2023
 * \par ChangeLog
 * \par
 *  v1.0.0, 27 June 2023<br>
 *  - Initial version.
 * \author ik243221
 * \copyright
 *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
 *  rights reserved.
 */

#ifndef AIDGE_CORE_HOOK_OUTPUTRANGE_H_
#define AIDGE_CORE_HOOK_OUTPUTRANGE_H_

#include "aidge/operator/Operator.hpp"
#include "aidge/hook/hook.hpp"
#include <memory>
#include <chrono>
#include <vector>
#include <cmath>

namespace Aidge {

class OutputRange : public Hook {
private:
    std::vector<float> registeredOutputs = std::vector<float>();

public:
    OutputRange(const std::shared_ptr<Operator> op) : Hook(op) {}
    ~OutputRange() = default;

    void call() override final {
        // This assumes there is only 1 output possible.
        std::shared_ptr<Tensor> tensor = mOperator->getOutput(0);
        float max_value = 0.;
        float* casted_tensor = static_cast<float*>(tensor->getImpl()->rawPtr());
        // Find the absolute max value in the tensor and save it to the registered outputs.
        for (std::size_t i = 0; i < tensor->size(); ++i) {
            if (std::abs(casted_tensor[i]) > max_value) {
                max_value = std::abs(casted_tensor[i]);
            }
        }
        registeredOutputs.push_back(max_value);
    }

    static std::shared_ptr<OutputRange> create(const std::shared_ptr<Operator> op)
    {
        return std::make_shared<OutputRange>(op);
    }

    std::vector<float> getOutputs() {
        return registeredOutputs;
    }

    float getOutput(size_t idx) {
        return registeredOutputs[idx];
    }
};

namespace {
static Registrar<Hook> registrarHook_OutputRange({"output_range"}, Aidge::OutputRange::create);
}
}

#endif /* AIDGE_CORE_HOOK_OUTPUTRANGE_H_ */
\ No newline at end of file
/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#ifndef AIDGE_CORE_OPERATOR_MAXPOOLING_H_
#define AIDGE_CORE_OPERATOR_MAXPOOLING_H_

#include <array>
#include <cmath>
#include <cstring>  // std::strcmp, used in associateInput()
#include <numeric>
#include <vector>

#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/Parameter.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"

namespace Aidge {

enum class MaxPoolingParam { StrideDims, KernelDims, PaddingDims };

template <DimIdx_t DIM>
class MaxPooling_Op : public Operator,
    public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
    public Parameterizable<MaxPoolingParam,
                           std::array<DimSize_t, DIM>,
                           std::array<DimSize_t, DIM>,
                           std::array<DimSize_t, (DIM<<1)>> {
private:
    // FIXME: change accessibility
    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();

public:
    static constexpr const char *Type = "MaxPooling";

    MaxPooling_Op() = delete;

    using Parameterizable_ = Parameterizable<MaxPoolingParam,
                                             std::array<DimSize_t, DIM>,
                                             std::array<DimSize_t, DIM>,
                                             std::array<DimSize_t, (DIM<<1)>>;
    template <MaxPoolingParam e>
    using param = typename Parameterizable_::template param<e>;

    constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
        : Operator(Type),
          Parameterizable_(param<MaxPoolingParam::StrideDims>(stride_dims),
                           param<MaxPoolingParam::KernelDims>(kernel_dims),
                           param<MaxPoolingParam::PaddingDims>(padding_dims)),
          mOutput(std::make_shared<Tensor>()) {
        setDatatype(DataType::Float32);
    }

    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
        assert(inputIdx < 1 && "MaxPooling operator supports only 1 input");
        (void) inputIdx; // avoid unused warning
        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
        mInput = std::dynamic_pointer_cast<Tensor>(data);
    }

    constexpr void computeOutputDims() override final {
        if (!mInput->empty()) {
            std::array<DimSize_t, DIM + 2> outputDims = {};
            // Per spatial dim: out = 1 + floor((in - kernel + padding_begin + padding_end) / stride)
            for (std::size_t dim = 0; dim < this->template get<MaxPoolingParam::KernelDims>().size(); ++dim) {
                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                    std::floor(static_cast<float>(mInput->dims()[dim+2] -
                                                  this->template get<MaxPoolingParam::KernelDims>()[dim] +
                                                  this->template get<MaxPoolingParam::PaddingDims>()[dim] +
                                                  this->template get<MaxPoolingParam::PaddingDims>()[dim+DIM]) /
                               static_cast<float>(this->template get<MaxPoolingParam::StrideDims>()[dim])));
            }
            outputDims[1] = mInput->dims()[1]; // channels
            outputDims[0] = mInput->dims()[0]; // batch
            mOutput->resize(outputDims);
        }
    }

    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }

    inline Tensor& input(const IOIndex_t inputIdx) const override final {
        assert(inputIdx == 0 && "MaxPooling operator supports only 1 input");
        (void) inputIdx; // avoid unused warning
        return *(mInput.get());
    }
    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }

    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
        assert(inputIdx == 0 && "MaxPooling operator supports only 1 input");
        (void) inputIdx; // avoid unused warning
        return mInput;
    }
    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "MaxPooling operator has only 1 output");
        (void) outputIdx; // avoid unused warning
        return mOutput;
    }

    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
        assert(inputIdx == 0 && "MaxPooling operator supports only 1 input");
        (void) inputIdx; // avoid unused warning
        return std::static_pointer_cast<Data>(mInput);
    }
    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "MaxPooling operator supports only 1 output");
        (void) outputIdx; // avoid unused warning
        return std::static_pointer_cast<Data>(mOutput);
    }

    void setBackend(const std::string &name) {
        mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
        mOutput->setBackend(name);
        // FIXME: temporary workaround
        mInput->setBackend(name);
    }

    void setDatatype(const DataType &datatype) {
        mOutput->setDatatype(datatype);
        // FIXME: temporary workaround
        mInput->setDatatype(datatype);
    }

    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};

template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                        const std::string& name = "",
                                        const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                        const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
    // FIXME: properly handle default w&b initialization in every case
    static_assert(DIM <= MaxDim, "Too many kernel dimensions required by MaxPooling, not supported");
    auto maxPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
    return maxPool;
}

template <DimSize_t DIM>
inline std::shared_ptr<Node> MaxPooling(
    DimSize_t const (&kernel_dims)[DIM],
    const std::string& name = "",
    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
    const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
    static_assert(DIM <= MaxDim, "Too many kernel dimensions required by MaxPooling, not supported");
    return MaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
}
} // namespace Aidge

namespace {
template <>
const char *const EnumStrings<Aidge::MaxPoolingParam>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
}

#endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
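A short usage sketch of the factory above: a 2-D max pooling node with a 3x3 kernel and stride 2, using the zero-padding default. The output-dimension comment applies the computeOutputDims() formula from the class.

#include <array>
#include <memory>
#include "aidge/operator/MaxPooling.hpp"

using namespace Aidge;

int main() {
    std::array<DimSize_t, 2> kernel{3, 3};
    std::array<DimSize_t, 2> stride{2, 2};

    // Padding defaults to zero on all four sides.
    std::shared_ptr<Node> pool = MaxPooling<2>(kernel, "pool1", stride);

    // For an input of dims {N, C, 13, 13}, computeOutputDims() yields
    // 1 + floor((13 - 3 + 0 + 0) / 2) = 6 per spatial dim, i.e. {N, C, 6, 6}.
    (void)pool;
    return 0;
}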
@@ -20,12 +20,14 @@
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/hook/hook.hpp"

 namespace Aidge {

 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
     std::unique_ptr<OperatorImpl> mImpl; // implementation of the operator
+    std::map<std::string, std::shared_ptr<Hook>> mHooks;

 private:
     std::string mType;
@@ -48,6 +50,15 @@ public:
     virtual std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const = 0;
     virtual Tensor& output(const IOIndex_t /*outputIdx*/) const = 0;

+    std::shared_ptr<Hook> getHook(std::string hookName) {
+        return mHooks[hookName];
+    }
+    void addHook(std::string hookName) {
+        mHooks.insert(std::pair<std::string, std::shared_ptr<Hook>>(hookName,
+            Registrar<Hook>::create({hookName})(shared_from_this())));
+    }
+    void runHooks() const;
+
     ///////////////////////////////////////////////////////
     // IMPLEMENTATION
     ///////////////////////////////////////////////////////
@@ -78,6 +89,8 @@ public:
      */
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const;

+    void updateConsummerProducer();
+
     virtual void forward();
     virtual void backward();
......
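runHooks() is only declared in this hunk; its definition is not part of the diff. A plausible minimal body, consistent with the mHooks map above (hypothetical; the actual .cpp is not shown):

// Hypothetical definition; the real one would live in Operator.cpp.
void Aidge::Operator::runHooks() const {
    for (const auto& hook : mHooks) {
        hook.second->call();  // invoke every registered hook, e.g. after forward()
    }
}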
/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#ifndef __AIDGE_CORE_OPERATOR_Scaling_H__
#define __AIDGE_CORE_OPERATOR_Scaling_H__

#include <cstring>  // std::strcmp, used in associateInput()
#include <memory>
#include <vector>

#include "aidge/utils/Parameter.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"

namespace Aidge {

enum class ScalingParam {
    scalingFactor
};

class Scaling_Op : public Operator,
    public Registrable<Scaling_Op, std::string, std::unique_ptr<OperatorImpl>(const Scaling_Op&)>,
    public Parameterizable<ScalingParam, float> {
public:
    // FIXME: change accessibility
    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();

public:
    static constexpr const char* Type = "Scaling";

    Scaling_Op() = delete;

    using Parameterizable_ = Parameterizable<ScalingParam, float>;
    template <ScalingParam e> using param = typename Parameterizable_::template param<e>;

    Scaling_Op(float scalingFactor)
        : Operator(Type),
          Parameterizable_(param<ScalingParam::scalingFactor>(scalingFactor))
    {
        setDatatype(DataType::Float32);
    }

    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
        assert(inputIdx == 0 && "operator supports only 1 input");
        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
        (void) inputIdx; // avoid unused warning
        mInput = std::dynamic_pointer_cast<Tensor>(data);
    }

    void computeOutputDims() override final {
        // Scaling is element-wise: the output has the same dims as the input.
        if (!mInput->empty())
            mOutput->resize(mInput->dims());
    }

    bool outputDimsForwarded() const override final {
        return !(mOutput->empty());
    }

    inline Tensor& input(const IOIndex_t inputIdx) const override final {
        assert((inputIdx == 0) && "Scaling Operator has only 1 input");
        (void) inputIdx; // avoid unused warning
        return *(mInput.get());
    }
    inline Tensor& output(const IOIndex_t outputIdx) const override final {
        assert((outputIdx == 0) && "Scaling Operator has only 1 output");
        (void) outputIdx; // avoid unused warning
        return *(mOutput.get());
    }

    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
        assert((inputIdx == 0) && "Scaling Operator has only 1 input");
        (void) inputIdx; // avoid unused warning
        return mInput;
    }
    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
        assert((outputIdx == 0) && "Scaling Operator has only 1 output");
        (void) outputIdx; // avoid unused warning
        return mOutput;
    }

    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
        assert(inputIdx == 0 && "operator supports only 1 input");
        (void) inputIdx; // avoid unused warning
        return std::static_pointer_cast<Data>(mInput);
    }
    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "operator supports only 1 output");
        (void) outputIdx; // avoid unused warning
        return mOutput;
    }

    void setBackend(const std::string& name) {
        mImpl = Registrar<Scaling_Op>::create(name)(*this);
        mOutput->setBackend(name);
        // FIXME: temporary workaround
        mInput->setBackend(name);
    }

    void setDatatype(const DataType& datatype) {
        mOutput->setDatatype(datatype);
        // FIXME: temporary workaround
        mInput->setDatatype(datatype);
    }

    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};

inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::string& name = "") {
    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
}
}

namespace {
template <>
const char* const EnumStrings<Aidge::ScalingParam>::data[]
    = {"scalingFactor"};
}

#endif /* __AIDGE_CORE_OPERATOR_Scaling_H__ */
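A brief usage sketch of the factory above, halving the activations of a preceding node (the ReLU factory and Node::addChild are assumed from the existing graph API, not from this diff):

#include <memory>
#include "aidge/operator/ReLU.hpp"
#include "aidge/operator/Scaling.hpp"

using namespace Aidge;

int main() {
    std::shared_ptr<Node> relu  = ReLU("relu1");
    std::shared_ptr<Node> scale = Scaling(0.5f, "scale1"); // multiply outputs by 0.5
    relu->addChild(scale);  // assumed Node connection API
    return 0;
}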
@@ -43,6 +43,8 @@ public:
     };
     ~SequentialScheduler() = default;

+    void generateScheduling(bool verbose = false);
+
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
@@ -54,6 +56,15 @@ public:
      */
     void saveSchedulingDiagram(const std::string& fileName) const;

+    /**
+     * @brief Return a vector of Nodes in the order they are called by the scheduler.
+     * @return std::vector<std::shared_ptr<Node>>
+     */
+    std::vector<std::shared_ptr<Node>> getStaticScheduling() {
+        return mStaticSchedule;
+    }
+
 private:
     /**
      * @brief Set of layers receiving an input from currently processing layers
@@ -63,9 +74,27 @@ private:
      */
     std::set<std::shared_ptr<Node>> getConsumers(const std::set<std::shared_ptr<Node>>& producers) const;

+    /** @brief Shared ptr to the scheduled graph view. */
     std::shared_ptr<GraphView> mGraphView;
+    /** @brief List of SchedulingElement (i.e. nodes with their computation time). */
     std::vector<SchedulingElement> mScheduling;
+    /** @brief List of nodes ordered by their execution order. */
+    std::vector<std::shared_ptr<Node>> mStaticSchedule;
+    /** @brief Number of computation nodes (i.e. nodes that are not Producers). */
+    std::size_t mComputationNumber = 0; // TODO: check if not inferable from mStaticSchedule
 };
 } // namespace Aidge

 #endif /* AIDGE_SCHEDULER_H_ */
\ No newline at end of file
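A usage sketch for the two additions: generate the static schedule once, then walk it in execution order (Node::name() is assumed from the existing Node API):

#include <iostream>
#include <memory>
#include "aidge/scheduler/Scheduler.hpp"

using namespace Aidge;

// 'graph' is assumed to be an already-built GraphView with forwarded dims.
void inspectSchedule(std::shared_ptr<GraphView> graph) {
    SequentialScheduler scheduler(graph);
    scheduler.generateScheduling(/*verbose=*/false);

    for (const std::shared_ptr<Node>& node : scheduler.getStaticScheduling()) {
        std::cout << node->name() << '\n';  // nodes in the order the scheduler runs them
    }
}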
@@ -136,6 +136,16 @@ void init_Node(py::module& m) {
             :rtype: int
             )mydelimiter")

+    .def("get_parents", &Node::getParents,
+    R"mydelimiter(
+    Get parents.
+    )mydelimiter")
+
+    .def("get_children", (std::set<std::shared_ptr<Node>> (Node::*)() const) &Node::getChildren,
+    R"mydelimiter(
+    Get children.
+    )mydelimiter")
+
     .def("__call__", &Node::operator(), py::arg("connectors"));
 }
 } // namespace Aidge
@@ -10,19 +10,20 @@
 ********************************************************************************/

 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>

 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
-#include <pybind11/stl.h>
-#include <pybind11/complex.h>
-#include <pybind11/functional.h>
-#include <pybind11/chrono.h>

 namespace py = pybind11;
 namespace Aidge {

 void init_OpArgs(py::module& m){
     py::class_<OpArgs, std::shared_ptr<OpArgs>>(m, "OpArgs")
+    .def(py::init<const std::shared_ptr<GraphView>&>(), py::arg("view_"))
+    .def(py::init<const std::shared_ptr<Node>&>(), py::arg("node_"))
     .def("node", &OpArgs::node)
     .def("view", &OpArgs::view)
     ;
......
@@ -22,7 +22,7 @@ namespace Aidge {
 void init_GenericOperator(py::module& m) {
     py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, Operator>(m, "GenericOperatorOp",
         py::multiple_inheritance())
     .def("get_parameter_type", &GenericOperator_Op::getParameterType)
     .def("get_parameters_name", &GenericOperator_Op::getParametersName)
     .def("add_parameter", &GenericOperator_Op::addParameter<bool>)
     .def("add_parameter", &GenericOperator_Op::addParameter<int>)
@@ -34,10 +34,10 @@ void init_GenericOperator(py::module& m) {
     .def("add_parameter", &GenericOperator_Op::addParameter<std::vector<std::string>>)
     .def("get_parameter", [](GenericOperator_Op& self, std::string key) -> py::object {
         /*
         This getParameter method returns the correct Python type without requiring
         prior knowledge of the parameter type.
         */
         py::object res = py::none();
         std::string paramType = self.getParameterType(key);
         if (paramType == typeid(int).name())
             res = py::cast(self.getParameter<int>(key));
......
/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#ifdef PYBIND
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>

#include <array>
#include <stdexcept>  // std::runtime_error
#include <string>
#include <vector>

#include "aidge/utils/Parameter.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/MaxPooling.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp"

namespace py = pybind11;
namespace Aidge {

template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
    py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, PyAbstractParametrizable>(
        m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
        py::multiple_inheritance())
    .def(py::init<const std::array<DimSize_t, DIM> &,
                  const std::array<DimSize_t, DIM> &,
                  const std::array<DimSize_t, (DIM<<1)> &>(),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
         py::arg("padding_dims"));

    m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(),
          [](const std::vector<DimSize_t>& kernel_dims,
             const std::string& name,
             const std::vector<DimSize_t> &stride_dims,
             const std::vector<DimSize_t> &padding_dims) {
        // Lambda function wrapper because PyBind fails to convert const arrays.
        // So we use a vector that we convert in this function to a const DimSize_t[DIM] array.
        if (kernel_dims.size() != DIM) {
            throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) + "]");
        }
        if (stride_dims.size() != DIM) {
            throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) + "]");
        }
        if (padding_dims.size() != (DIM<<1)) {
            throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) + "]");
        }
        DimSize_t tmp_kernel_dims_array[DIM];
        for (size_t i = 0; i < DIM; ++i) {
            tmp_kernel_dims_array[i] = kernel_dims[i];
        }
        DimSize_t tmp_stride_dims_array[DIM];
        for (size_t i = 0; i < DIM; ++i) {
            tmp_stride_dims_array[i] = stride_dims[i];
        }
        DimSize_t tmp_padding_dims_array[DIM<<1];
        for (size_t i = 0; i < (DIM<<1); ++i) {
            tmp_padding_dims_array[i] = padding_dims[i];
        }
        const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
        const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
        const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
        return MaxPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
    }, py::arg("kernel_dims"),
       py::arg("name") = "",
       py::arg("stride_dims") = std::vector<DimSize_t>(DIM, 1),
       py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1, 0));
}

void init_MaxPooling(py::module &m) {
    declare_MaxPoolingOp<1>(m);
    declare_MaxPoolingOp<2>(m);
    declare_MaxPoolingOp<3>(m);

    // FIXME:
    // m.def("MaxPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
    // (&)[1])>(&MaxPooling));
}
} // namespace Aidge
#endif
\ No newline at end of file
@@ -29,6 +29,7 @@
 void init_FC(py::module&);
 void init_GenericOperator(py::module&);
 void init_LeakyReLU(py::module&);
 void init_Matmul(py::module&);
+void init_MaxPooling(py::module&);
 void init_Producer(py::module&);
 void init_ReLU(py::module&);
 void init_Softmax(py::module&);
@@ -75,6 +76,7 @@ void init_Aidge(py::module& m){
     init_GenericOperator(m);
     init_LeakyReLU(m);
     init_Matmul(m);
+    init_MaxPooling(m);
     init_ReLU(m);
     init_Softmax(m);
......
@@ -10,6 +10,7 @@
 ********************************************************************************/

 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>

 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/graph/GraphView.hpp"
@@ -20,6 +21,8 @@ void init_Scheduler(py::module& m){
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
     .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false)
     .def("save_scheduling_diagram", &SequentialScheduler::saveSchedulingDiagram, py::arg("file_name"))
+    .def("generate_scheduling", &SequentialScheduler::generateScheduling, py::arg("verbose")=false)
+    .def("get_static_scheduling", &SequentialScheduler::getStaticScheduling)
     ;
 }
 }
......