Compare revisions: eclipse/aidge/aidge_core

Changes are shown as if the source revision was being merged into the target revision.
Showing 47 commits on Source, with 858 additions and 94 deletions
......@@ -12,6 +12,7 @@ build:ubuntu_cpp:
     - make -j4 all install
   artifacts:
+    expire_in: 1 week
     paths:
       - build_cpp/
       - install_cpp/
......@@ -29,6 +30,7 @@ build:ubuntu_python:
     - export AIDGE_INSTALL=`pwd`/install
     - python3 -m pip install .
   artifacts:
+    expire_in: 1 week
     paths:
       - venv/
......@@ -57,6 +59,7 @@ build:windows_cpp:
     - cmake --install . --config Debug
   artifacts:
+    expire_in: 1 week
     paths:
       - build_cpp/
       - install_cpp/
......@@ -52,9 +52,9 @@ target_include_directories(${module_name}
)
# PYTHON BINDING
-generate_python_binding(${project} ${module_name})
+if (PYBIND)
+    generate_python_binding(${project} ${module_name})
# Handles Python + pybind11 headers dependencies
target_link_libraries(${module_name}
PUBLIC
......@@ -66,22 +66,12 @@ endif()
target_compile_features(${module_name} PRIVATE cxx_std_14)
-if(WERROR)
-    target_compile_options(${module_name} PRIVATE
+target_compile_options(${module_name} PRIVATE
     $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-    -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Werror>)
-    target_compile_options(${module_name} PRIVATE
+    -Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror>>)
+target_compile_options(${module_name} PRIVATE
     $<$<CXX_COMPILER_ID:MSVC>:
     /W4>)
-else()
-    target_compile_options(${module_name} PRIVATE
-    $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-    -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Wpedantic>)
-    target_compile_options(${module_name} PRIVATE
-    $<$<CXX_COMPILER_ID:MSVC>:
-    /W4>)
-endif()
if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
append_coverage_compiler_flags()
......
-function(generate_python_binding name target_to_bind)
-    if (PYBIND)
-        add_definitions(-DPYBIND)
-        Include(FetchContent)
-        FetchContent_Declare(
-        PyBind11
-        GIT_REPOSITORY https://github.com/pybind/pybind11.git
-        GIT_TAG        v2.10.4 # or a later release
-        )
-        # Use the new FindPython mode (recommended). Requires CMake 3.15+
-        find_package(Python COMPONENTS Interpreter Development)
-        FetchContent_MakeAvailable(PyBind11)
-        message(STATUS "Creating binding for module ${name}")
-        file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
-        pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO_EXTRAS required for pip install
-        target_include_directories(${name} PUBLIC "python_binding")
-        target_link_libraries(${name} PUBLIC ${target_to_bind})
-    endif()
+function(generate_python_binding name target_to_bind)
+    add_definitions(-DPYBIND)
+    Include(FetchContent)
+    FetchContent_Declare(
+    PyBind11
+    GIT_REPOSITORY https://github.com/pybind/pybind11.git
+    GIT_TAG        v2.10.4 # or a later release
+    )
+    # Use the new FindPython mode (recommended). Requires CMake 3.15+
+    find_package(Python COMPONENTS Interpreter Development)
+    FetchContent_MakeAvailable(PyBind11)
+    message(STATUS "Creating binding for module ${name}")
+    file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
+    pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO_EXTRAS required for pip install
+    target_include_directories(${name} PUBLIC "python_binding")
+    target_link_libraries(${name} PUBLIC ${target_to_bind})
 endfunction()
......@@ -34,11 +34,13 @@
#include "aidge/operator/FC.hpp"
#include "aidge/operator/GenericOperator.hpp"
#include "aidge/operator/Matmul.hpp"
#include "aidge/operator/MetaOperator.hpp"
#include "aidge/operator/MaxPooling.hpp"
//#include "aidge/operator/MetaOperator.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/operator/ReLU.hpp"
#include "aidge/operator/Softmax.hpp"
#include "aidge/operator/Scaling.hpp"
#include "aidge/scheduler/Scheduler.hpp"
#include "aidge/utils/CParameter.hpp"
#include "aidge/utils/Parameter.hpp"
......
......@@ -320,8 +320,20 @@ public:
void link(std::string name1_inID, std::string name2_outID);
void insert(Node &newNode, Node &inNode, std::initializer_list<Node> outNodes,
IOIndex_t tensorIdx);
/**
* @brief Insert a node (newParentNode) as a parent of the passed node (childNode).
*
* @param childNode Node that gets a new parent.
* @param newParentNode Inserted Node.
* @param childInputTensorIdx Index of the input Tensor for the childNode linked to the inserted Node output.
* @param newParentInputTensorIdx Index of the input Tensor for the newParentNode linked to the former parent of childNode.
* @param newParentOutputTensorIdx Index of the output Tensor for the newParentNode linked to the childNode's input Tensor.
*/
void insertParent(NodePtr childNode,
NodePtr newParentNode,
IOIndex_t childInputTensorIdx,
IOIndex_t newParentInputTensorIdx,
IOIndex_t newParentOutputTensorIdx);
/**
* @brief Replace the current GraphView with the set of given Nodes if possible
......
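The new insertParent() API is easiest to see in a short usage sketch. Everything below (the ReLU/Scaling factories with these arguments, the node names, and the pre-existing relu1 -> relu2 link) is an illustrative assumption, not part of this diff:

#include "aidge/graph/GraphView.hpp"
#include "aidge/operator/ReLU.hpp"
#include "aidge/operator/Scaling.hpp"

// Sketch: build relu1 -> relu2, then splice a Scaling node in between.
void insertParentExample() {
    auto g = std::make_shared<Aidge::GraphView>();
    auto relu1 = Aidge::ReLU("relu1");
    auto relu2 = Aidge::ReLU("relu2");
    relu1->addChild(relu2, 0, 0); // relu1 output 0 feeds relu2 input 0
    g->add(relu1);
    g->add(relu2);

    auto scale = Aidge::Scaling(0.5f, "scale1");
    // After this call the graph reads relu1 -> scale -> relu2.
    g->insertParent(relu2, scale,
                    0,  // childInputTensorIdx: relu2 input being rewired
                    0,  // newParentInputTensorIdx: scale input fed by relu1
                    0); // newParentOutputTensorIdx: scale output feeding relu2
}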
......@@ -303,7 +303,7 @@ public:
* @param inId Input index.
* @return std::shared_ptr<Node>&
*/
inline NodePtr &getParents(const IOIndex_t inId) {
inline NodePtr &getParent(const IOIndex_t inId) {
assert(inId != gk_IODefaultIndex);
return mParents.at(inId);
}
......
/**
* \file execTime.hpp
* \brief execTime structure
* \version file 1.0.0
 * \date 27 June 2023
* \par ChangeLog
* \par
* v1.0.0, 27 June 2023<br>
* - Initial version.
* \author mn271187, ik243221
* \copyright
* Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
* rights reserved.
*/
#ifndef execTime_H_
#define execTime_H_
#include "aidge/operator/Operator.hpp"
#include "aidge/hook/hook.hpp"
#include <memory>
#include <chrono>
#include <vector>
namespace Aidge {

class ExecTime : public Hook {
private:
    std::vector<std::chrono::high_resolution_clock::time_point> registeredTimes = std::vector<std::chrono::high_resolution_clock::time_point>();

public:
    ExecTime(const std::shared_ptr<Operator> op) : Hook(op) {}
    ~ExecTime() = default;

    void call() override final {
        registeredTimes.push_back(std::chrono::high_resolution_clock::now());
    }

    static std::shared_ptr<ExecTime> create(const std::shared_ptr<Operator> op)
    {
        return std::make_shared<ExecTime>(op);
    }

    std::vector<std::chrono::high_resolution_clock::time_point> getTimes() {
        return registeredTimes;
    }

    std::chrono::high_resolution_clock::time_point getTime(size_t idx) {
        return registeredTimes[idx];
    }
};

namespace {
static Registrar<Hook> registrarHook_ExecTime({"execution_time"}, Aidge::ExecTime::create);
}
}

#endif /* execTime_H_ */
\ No newline at end of file
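A hedged usage sketch for the hook above; in practice hooks are created through Operator::addHook() (added later in this diff), and the operator-execution step in the middle is assumed:

#include <chrono>
#include <memory>

// Sketch: record timestamps around one execution and read back the delta.
void timeOnce(const std::shared_ptr<Aidge::Operator>& op) {
    auto timer = Aidge::ExecTime::create(op);
    timer->call();  // timestamp before running the operator
    // ... run the operator ...
    timer->call();  // timestamp after running the operator
    const auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(
        timer->getTime(1) - timer->getTime(0));
    (void)elapsed; // e.g. log elapsed.count() microseconds
}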
/**
* \file Hook.hpp
* \brief Hook structure
* \version file 1.0.0
 * \date 27 June 2023
* \par ChangeLog
* \par
* v1.0.0, 27 June 2023<br>
* - Initial version.
* \author mn271187, ik243221
* \copyright
* Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
* rights reserved.
*/
#ifndef Hook_H_
#define Hook_H_
#include "aidge/utils/Parameter.hpp"
#include "aidge/utils/Registrar.hpp"
#include <memory>
namespace Aidge {

class Operator;

class Hook : public Registrable<Hook, std::tuple<std::string>, std::shared_ptr<Hook>(const std::shared_ptr<Operator>)> {
protected:
    const std::shared_ptr<Operator> mOperator;

public:
    Hook(std::shared_ptr<Operator> op) : mOperator(op) {}
    virtual ~Hook() = default; // defaulted: a declaration without a definition would fail at link time for derived hooks
    virtual void call() = 0;
};
}
#endif /* Hook_H_ */
\ No newline at end of file
/**
 * \file outputRange.hpp
 * \brief OutputRange structure
 * \version file 1.0.0
 * \date 27 June 2023
* \par ChangeLog
* \par
* v1.0.0, 27 June 2023<br>
* - Initial version.
* \author ik243221
* \copyright
* Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
* rights reserved.
*/
#ifndef AIDGE_CORE_HOOK_OUTPUTRANGE_H_
#define AIDGE_CORE_HOOK_OUTPUTRANGE_H_
#include "aidge/operator/Operator.hpp"
#include "aidge/hook/hook.hpp"
#include <memory>
#include <chrono>
#include <vector>
#include <cmath>
namespace Aidge {
class OutputRange : public Hook {
private:
std::vector<float> registeredOutputs = std::vector<float>();
public:
OutputRange(const std::shared_ptr<Operator> op) : Hook(op) {}
~OutputRange() = default;
    void call() override final {
        // assumes the operator has a single output
        std::shared_ptr<Tensor> tensor = mOperator->getOutput(0);
        float max_value = 0.0f;
        float* casted_tensor = static_cast<float*>(tensor->getImpl()->rawPtr());
        // find the absolute max value in the tensor and record it
        for (std::size_t i = 0; i < tensor->size(); ++i) {
            if (std::abs(casted_tensor[i]) > max_value) {
                max_value = std::abs(casted_tensor[i]);
            }
        }
        registeredOutputs.push_back(max_value);
    }
static std::shared_ptr<OutputRange> create(const std::shared_ptr<Operator> op)
{
return std::make_shared<OutputRange>(op);
}
std::vector<float> getOutputs() {
return registeredOutputs;
}
float getOutput(size_t idx) {
return registeredOutputs[idx];
}
};
namespace {
static Registrar<Hook> registrarHook_OutputRange({"output_range"}, Aidge::OutputRange::create);
}
}
#endif /* AIDGE_CORE_HOOK_OUTPUTRANGE_H_ */
\ No newline at end of file
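Likewise, a small sketch of reading calibration data back from this hook; the inference step in the middle is assumed:

// Sketch: query the max |output| recorded by an "output_range" hook.
void readRange(const std::shared_ptr<Aidge::Operator>& op) {
    op->addHook("output_range");
    // ... run inference so that call() fires at least once ...
    auto hook = std::static_pointer_cast<Aidge::OutputRange>(op->getHook("output_range"));
    const float maxAbs = hook->getOutputs().back(); // range seen on the last call
    (void)maxAbs;
}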
......@@ -62,7 +62,12 @@ class GenericOperator_Op
* @return template<class T> The parameter.
*/
template <class T>
T getParameter(std::string const &key) const {
const T& getParameter(std::string const &key) const {
return mParams.Get<const T>(key);
}
+    template <class T>
+    T& getParameter(std::string const &key) {
+        return mParams.Get<T>(key);
+    }
......@@ -75,8 +80,8 @@ class GenericOperator_Op
/// internal buffer in a new location (previous value is still in memory at
/// its previous location)
template <class T>
void addParameter(std::string const &key, T const &value) {
mParams.Add<T>(key, value);
void addParameter(std::string const &key, T&& value) {
mParams.Add<T>(key, std::forward<T>(value));
}
......
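With the accessors now returning references, generic parameters can be updated in place. A sketch (the GenericOperator factory arguments follow its usual (type, nbDataIn, nbIn, nbOut, name) signature, which is an assumption here):

// Sketch: add a parameter to a GenericOperator, then mutate it in place.
void genericParamExample() {
    auto node = Aidge::GenericOperator("MyCustomOp", 1, 1, 1, "myop");
    auto op = std::static_pointer_cast<Aidge::GenericOperator_Op>(node->getOperator());
    op->addParameter<int>("Axis", 1);
    op->getParameter<int>("Axis") = 2;              // in-place update through T&
    const int axis = op->getParameter<int>("Axis"); // reads back 2
    (void)axis;
}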
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_MAXPOOLING_H_
#define AIDGE_CORE_OPERATOR_MAXPOOLING_H_
#include <array>
#include <numeric>
#include <vector>
#include <cmath>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/Parameter.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
enum class MaxPoolingParam { StrideDims, KernelDims, PaddingDims };
template <DimIdx_t DIM>
class MaxPooling_Op : public Operator,
public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
public Parameterizable<MaxPoolingParam,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, (DIM<<1) >> {
private:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char *Type = "MaxPooling";
MaxPooling_Op() = delete;
using Parameterizable_ = Parameterizable<MaxPoolingParam,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, (DIM<<1)> >;
template <MaxPoolingParam e>
using param = typename Parameterizable_::template param<e>;
constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
: Operator(Type),
Parameterizable_(param<MaxPoolingParam::StrideDims>(stride_dims),
param<MaxPoolingParam::KernelDims>(kernel_dims),
param<MaxPoolingParam::PaddingDims>(padding_dims)),
mOutput(std::make_shared<Tensor>()) {
setDatatype(DataType::Float32);
}
    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
        assert(inputIdx < 1 && "MaxPooling operator supports only 1 input");
        (void) inputIdx; // avoid unused warning
        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
        mInput = std::dynamic_pointer_cast<Tensor>(data);
    }
constexpr void computeOutputDims() override final {
if (!mInput->empty()) {
std::array<DimSize_t, DIM + 2> outputDims = {};
for (std::size_t dim = 0; dim < this->template get<MaxPoolingParam::KernelDims>().size() ; ++dim) {
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
std::floor(static_cast<float>(mInput->dims()[dim+2] -
this->template get<MaxPoolingParam::KernelDims>()[dim] +
this->template get<MaxPoolingParam::PaddingDims>()[dim] +
this->template get<MaxPoolingParam::PaddingDims>()[dim+DIM]) /
static_cast<float>(this->template get<MaxPoolingParam::StrideDims>()[dim])));
}
outputDims[1] = mInput->dims()[1];
outputDims[0] = mInput->dims()[0];
mOutput->resize(outputDims);
}
}
bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operators supports only 1 inputs");
(void) inputIdx; // avoid unused warning
return *(mInput.get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
        assert(inputIdx == 0 && "MaxPooling operator supports only 1 input");
        (void) inputIdx; // avoid unused warning
        return mInput;
    }
    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "MaxPooling operator has only 1 output");
        (void) outputIdx; // avoid unused warning
        return mOutput;
    }
    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
        assert(inputIdx == 0 && "MaxPooling operator supports only 1 input");
        (void) inputIdx; // avoid unused warning
        return std::static_pointer_cast<Data>(mInput);
    }
    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "MaxPooling operator supports only 1 output");
        (void) outputIdx; // avoid unused warning
        return std::static_pointer_cast<Data>(mOutput);
    }
void setBackend(const std::string &name) {
mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType &datatype) {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
mInput->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
    // FIXME: properly handle default w&b initialization in every case
    static_assert(DIM <= MaxDim, "Too many kernel dimensions required by MaxPooling, not supported");
    auto maxPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
    return maxPool;
}
template <DimSize_t DIM>
inline std::shared_ptr<Node> MaxPooling(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
return MaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
}
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::MaxPoolingParam>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
}
#endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
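The computeOutputDims() arithmetic deserves a worked example: for each spatial dimension, out = 1 + floor((in - kernel + pad_begin + pad_end) / stride). A sketch using the factory above:

#include "aidge/operator/MaxPooling.hpp"

// Sketch: 2-D max pooling, kernel 2x2, stride 2, no padding.
// Per spatial dim: out = 1 + floor((32 - 2 + 0 + 0) / 2) = 16,
// so a {N, C, 32, 32} input gives a {N, C, 16, 16} output.
void maxPoolExample() {
    const std::array<Aidge::DimSize_t, 2> kernel{{2, 2}};
    const std::array<Aidge::DimSize_t, 2> stride{{2, 2}};
    auto pool = Aidge::MaxPooling<2>(kernel, "pool1", stride);
    (void)pool;
}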
......@@ -20,12 +20,14 @@
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/Types.h"
#include "aidge/hook/hook.hpp"
namespace Aidge {
class Operator : public std::enable_shared_from_this<Operator> {
protected:
std::unique_ptr<OperatorImpl> mImpl; // implementation of the operator
std::map<std::string, std::shared_ptr<Hook>> mHooks;
private:
std::string mType;
......@@ -48,6 +50,15 @@ public:
virtual std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const = 0;
virtual Tensor& output(const IOIndex_t /*outputIdx*/) const = 0;
std::shared_ptr<Hook> getHook(std::string hookName) {
return mHooks[hookName];
}
void addHook(std::string hookName) {
mHooks.insert(std::pair<std::string, std::shared_ptr<Hook>>(hookName,Registrar<Hook>::create({hookName})(shared_from_this())));
}
void runHooks() const;
///////////////////////////////////////////////////////
// IMPLEMENTATION
///////////////////////////////////////////////////////
......
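A sketch of the intended flow, using the "execution_time" registrar key defined earlier in this diff (attaching to a ReLU node is an illustrative assumption):

// Sketch: attach a registered hook by name, then trigger it.
void hookExample() {
    auto relu = Aidge::ReLU("relu1");
    auto op = relu->getOperator();
    op->addHook("execution_time"); // built through Registrar<Hook>::create
    op->runHooks();                // ExecTime::call() records one timestamp
    auto hook = op->getHook("execution_time");
    (void)hook;
}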
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_CORE_OPERATOR_Scaling_H__
#define __AIDGE_CORE_OPERATOR_Scaling_H__
#include <vector>
#include <memory>
#include "aidge/utils/Parameter.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
enum class ScalingParam {
scalingFactor
};
class Scaling_Op : public Operator,
public Registrable<Scaling_Op, std::string, std::unique_ptr<OperatorImpl>(const Scaling_Op&)>,
public Parameterizable<ScalingParam, float> {
public:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char* Type = "Scaling";
Scaling_Op() = delete;
using Parameterizable_ = Parameterizable<ScalingParam, float>;
template <ScalingParam e> using param = typename Parameterizable_::template param<e>;
Scaling_Op(float scalingFactor)
: Operator(Type),
Parameterizable_(
param<ScalingParam::scalingFactor>(scalingFactor))
{
setDatatype(DataType::Float32);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx == 0 && "operator supports only 1 input");
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
(void) inputIdx; //avoid unused warning
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInput->empty())
mOutput->resize(mInput->dims());
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert((inputIdx == 0) && "Scaling Operator has only 1 input");
(void) inputIdx; // avoid unused warning
return *(mInput.get());
}
inline Tensor& output(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Scaling Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return *(mOutput.get());
}
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx == 0) && "Scaling Operator has only 1 input");
(void) inputIdx; // avoid unused warning
return mInput;
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Scaling Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operator supports only 1 input");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInput);
}
    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "operator supports only 1 output");
        (void) outputIdx; // avoid unused warning
        return std::static_pointer_cast<Data>(mOutput); // cast kept consistent with getRawInput()
    }
void setBackend(const std::string& name) {
mImpl = Registrar<Scaling_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType& datatype) {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
mInput->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};
inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
}
}
namespace {
template <>
const char* const EnumStrings<Aidge::ScalingParam>::data[]
= {"scalingFactor"};
}
#endif /* __AIDGE_CORE_OPERATOR_Scaling_H__ */
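A brief usage sketch; reading the factor back through the Parameterizable get<>() accessor mirrors the pattern used by MaxPooling above, and its public visibility is an assumption here:

// Sketch: a Scaling node with factor 0.5, then read the parameter back.
void scalingExample() {
    auto scale = Aidge::Scaling(0.5f, "scale1");
    const float factor = std::static_pointer_cast<Aidge::Scaling_Op>(scale->getOperator())
                             ->get<Aidge::ScalingParam::scalingFactor>();
    (void)factor;
}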
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_ANY_H_
#define AIDGE_ANY_H_
#include <typeinfo> // typeid
#include <type_traits> // std::enable_if_t, std::decay_t, std::is_same, std::is_copy_constructible, std::remove_cv, std::remove_reference
#include <assert.h>
#include <new>
class _any {
private:
/// @brief Operation to perform on the object.
enum _Op { _Op_access, _Op_get_type_info, _Op_clone, _Op_destroy };
union _Arg {
const std::type_info* _M_typeinfo;
_any* _M_any;
};
/// @brief Stored data without type information.
void* _M_data;
/// @brief Member function to perform type-related computations on stored data.
void (*_M_manager)(_Op, const _any*, _Arg*);
public:
/// @brief Class to centralize functions and type information in a memory efficient way.
/// @tparam Tp Decayed stored type.
template <typename Tp>
struct Manager {
static void manage(_Op which, const _any* __any, _Arg* __arg) {
auto ptr = static_cast<const Tp*>(__any->_M_data);
switch (which)
{
case _Op_get_type_info:
__arg->_M_typeinfo = &typeid(Tp);
break;
case _Op_clone:
__arg->_M_any->_M_data = new Tp(*ptr);
__arg->_M_any->_M_manager = __any->_M_manager;
break;
case _Op_destroy:
delete ptr;
break;
}
}
static Tp* access(const _any* __any) {
return static_cast<Tp*>(__any->_M_data);
}
// template <typename Up>
// static void create(void* data, Up&& value) {
// data = new Tp(std::forward<Up>(value));
// }
};
private:
template<typename _Tp, typename _VTp = std::decay_t<_Tp>>
using _Decay_if_not_any = std::enable_if_t<!std::is_same<_VTp, _any>::value, _VTp>;
public:
/// @brief Default constructor
_any() noexcept : _M_manager(nullptr) { }
/// @brief Copy constructor
/// @param __other
_any(const _any& __other)
{
if (!__other._M_manager)
_M_manager = nullptr;
else
{
_Arg __arg;
__arg._M_any = this;
__other._M_manager(_Op_clone, &__other, &__arg);
}
}
/// @brief Move constructor
/// @param __other
_any(_any&& __other)
{
if (!__other._M_manager)
_M_manager = nullptr;
else
{
_M_data = __other._M_data;
_M_manager = __other._M_manager;
const_cast<_any*>(&__other)->_M_manager = nullptr;
}
}
/// @brief By-value constructor.
/// @tparam T Data type.
/// @tparam VT Decayed data type.
/// @param value
    template<typename T, typename VT = _Decay_if_not_any<T>, std::enable_if_t<std::is_copy_constructible<VT>::value, bool> = true>
    explicit _any(T&& value)
        : _M_data(new VT{std::forward<T>(value)}),   // members initialized in declaration order (avoids -Wreorder)
          _M_manager(&Manager<VT>::manage)
    {}
~_any()
{
if(_M_manager) {
_M_manager(_Op_destroy, this, nullptr);
_M_manager = nullptr;
}
}
/// @brief Access type id of the value currently stored
/// @return
const std::type_info& type() const
{
if (!_M_manager)
return typeid(void);
_Arg __arg;
_M_manager(_Op_get_type_info, this, &__arg);
return *__arg._M_typeinfo;
}
};
/// @brief Access value stored in the object converted in the template type if possible.
/// @tparam _ValueType
/// @param __any
/// @return Stored value.
template<typename _ValueType>
inline _ValueType any_cast(const _any& __any)
{
    using _Up = std::remove_cv_t<std::remove_reference_t<_ValueType>>;
    static_assert(std::is_reference<_ValueType>::value || std::is_copy_constructible<_ValueType>::value,
                  "Template argument must be a reference or CopyConstructible type");
    static_assert(std::is_constructible<_ValueType, const _Up&>::value,
                  "Template argument must be constructible from a const value.");
    static_assert(std::is_object<_Up>::value, "Template argument must be an object type.");
    assert(__any.type() == typeid(_Up));
    // Use the public Manager accessor: _M_data is private and this free function
    // is not a friend (std::__or_ was also a non-portable libstdc++ internal).
    auto __p = _any::Manager<_Up>::access(&__any);
    if (__p)
        return static_cast<_ValueType>(*__p);
    throw std::bad_cast();
}
#endif /* AIDGE_ANY_H_ */
\ No newline at end of file
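A minimal sketch of the type-erased container in isolation:

#include <cassert>
#include <string>

// Sketch: box a value, inspect its runtime type, unbox a copy with any_cast.
void anyExample() {
    _any boxed(std::string("hello"));
    assert(boxed.type() == typeid(std::string));
    const std::string s = any_cast<std::string>(boxed); // copies the stored value
    assert(s == "hello");
}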
......@@ -12,23 +12,34 @@
#ifndef AIDGE_CPARAMETER_H_
#define AIDGE_CPARAMETER_H_
-#include <assert.h>
 #include <map>
 #include <vector>
+#include <type_traits>
+#include <typeinfo>
+#include <assert.h>
+
+#include "aidge/utils/Any.hpp"
namespace Aidge {
///\todo store also a fix-sized code that indicates the type
///\todo managing complex types or excluding non-trivial, non-aggregate types
-class CParameter
-{
+class CParameter {
private:
template <typename T>
struct is_vector : std::false_type {};
template <typename T, typename Alloc>
struct is_vector<std::vector<T, Alloc>> : std::true_type {};
template<typename _ValueType>
inline _ValueType& any_cast_ref(const _any& __any)
{
using _Up = std::remove_cv_t<std::remove_reference_t<_ValueType>>;
assert(((std::is_reference<_ValueType>::value || std::is_copy_constructible<_ValueType>::value) && "Template argument must be a reference or CopyConstructible type"));
assert((std::is_constructible<_ValueType, const _Up&>::value && "Template argument must be constructible from a const value."));
assert(std::is_object<_Up>::value);
assert(__any.type() == typeid(_Up));
if (_any::Manager<_Up>::access(&__any)) { // assess if _any object is empty
return *static_cast<_ValueType*>(_any::Manager<_Up>::access(&__any));
}
throw std::bad_cast();
}
public:
// not copyable, not movable
CParameter(CParameter const &) = delete;
......@@ -48,15 +59,16 @@ public:
     * param buffer that will get invalid after the CParam death.
     * \note at() throws if the parameter does not exist; use find() to test for parameter existence.
     */
-    template<class T> T Get(std::string const i_ParamName) const
+    template<class T> T& Get(const std::string i_ParamName)
     {
         assert(m_Params.find(i_ParamName) != m_Params.end());
-        assert(m_Types.find(i_ParamName) != m_Types.end());
-        assert(m_Params.at(i_ParamName) <= m_OffSet);
-        assert(typeid(T).name() == m_Types.at(i_ParamName));
-        return *reinterpret_cast<T *>(m_BeginBuffer + m_Params.at(i_ParamName));
+        return any_cast_ref<T>(m_Buffer[m_Params.at(i_ParamName)]);
     }

+    // template<class T> const T& Get(const std::string i_ParamName) const
+    // {
+    //     return any_cast<T>(m_Buffer[m_Params.at(i_ParamName)]);
+    // }
///\brief Add a parameter value, identified by its name
///\tparam T expected parameter type
///\param i_ParamName Parameter name
......@@ -64,21 +76,15 @@ public:
///\todo Pass i_Value by ref if large or not trivial
///\bug If parameter already exists, its value is changed but written in the
/// internal buffer in a new location (previous value is still in memory at its previous location)
-    template<class T> void Add(std::string const &i_ParamName, T const &i_Value)
+    template<class T> void Add(const std::string &i_ParamName, T&& i_Value)
     {
-        m_Buffer.resize(m_Buffer.size() + (sizeof(T) / sizeof(uint8_t)));
-        m_BeginBuffer = m_Buffer.data(); // Update buffer ptr in case of memory reordering
-        *reinterpret_cast<T *>(m_BeginBuffer + m_OffSet)
-            = i_Value; // Black magic used to add any type into the vector
-        m_Params[i_ParamName] = m_OffSet; // Copy pointer offset
-        m_OffSet += sizeof(T); // Increment offset
-        m_Types[i_ParamName] = typeid(i_Value).name();
+        m_Params[i_ParamName] = m_Buffer.size(); // index of the boxed value in m_Buffer
+        m_Buffer.push_back(_any(std::forward<T>(i_Value)));
     }
     std::string getParamType(std::string const &i_ParamName){
-        return m_Types[i_ParamName];
+        return m_Buffer[m_Params.at(i_ParamName)].type().name();
     }
std::vector<std::string> getParametersName(){
......@@ -91,23 +97,8 @@ public:
private:
     std::map<std::string, std::size_t> m_Params; // { Param name : offset }

-    ///\brief Map to check type error
-    /* Note: I tried `std::map<std::string, std::type_info const *> mTypes;`
-       but it looks like the type_info object gets destroyed.
-       I am not a huge fan of storing a string and doing string comparisons.
-       Maybe we can use a custom enum type (or is there a standard solution?) */
-    std::map<std::string, std::string> m_Types;
-
-    ///\brief All parameters values concatenated in raw binary form.
-    std::vector<uint8_t> m_Buffer = {};
-
-    ///\brief Starting address of the buffer
-    uint8_t *m_BeginBuffer = m_Buffer.data();
-
-    ///\brief Offset, in number of uint8_t, of the next parameter to write
-    std::size_t m_OffSet = 0;
+    ///\brief All parameter values, each boxed in a custom any class compatible with C++14.
+    std::vector<_any> m_Buffer = {};
};
}
......
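To make the new storage model concrete, a sketch of Add()/Get() round-tripping through the _any buffer (assuming the default constructor elided from this hunk):

#include <string>
#include <vector>

// Sketch: each Add() boxes one value; Get<T>() returns a mutable reference.
void cparameterExample() {
    Aidge::CParameter params;
    params.Add<int>("NbChannels", 64);
    params.Add<std::vector<float>>("Means", std::vector<float>{0.5f, 0.4f});
    params.Get<int>("NbChannels") = 128; // in-place update
    const std::string t = params.getParamType("NbChannels"); // typeid(int).name()
    (void)t;
}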
......@@ -136,6 +136,16 @@ void init_Node(py::module& m) {
:rtype: int
)mydelimiter")
.def("get_parents", &Node::getParents,
R"mydelimiter(
Get parents.
)mydelimiter")
.def("get_children", (std::set<std::shared_ptr<Node>> (Node::*)() const) &Node::getChildren,
R"mydelimiter(
Get children.
)mydelimiter")
.def("__call__", &Node::operator(), py::arg("connectors"));
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifdef PYBIND
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <string>
#include <vector>
#include <array>
#include "aidge/utils/Parameter.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/MaxPooling.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp"
namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, PyAbstractParametrizable>(
m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
py::multiple_inheritance())
.def(py::init<const std::array<DimSize_t, DIM> &,
const std::array<DimSize_t, DIM> &,
const std::array<DimSize_t, (DIM<<1)> &>(),
py::arg("kernel_dims"),
py::arg("stride_dims"),
py::arg("padding_dims"));
m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
const std::vector<DimSize_t> &stride_dims,
const std::vector<DimSize_t> &padding_dims) {
            // Lambda function wrapper because pybind11 fails to convert a const array.
            // So we take a vector and convert it in this function to a const DimSize_t [DIM] array.
if (kernel_dims.size() != DIM) {
throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
if (stride_dims.size() != DIM) {
throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
if (padding_dims.size() != (DIM<<1)) {
throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) +"]");
}
DimSize_t tmp_kernel_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_kernel_dims_array[i] = kernel_dims[i];
}
DimSize_t tmp_stride_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_stride_dims_array[i] = stride_dims[i];
}
DimSize_t tmp_padding_dims_array[DIM<<1];
for (size_t i = 0; i < (DIM<<1); ++i) {
tmp_padding_dims_array[i] = padding_dims[i];
}
const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
return MaxPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
}, py::arg("kernel_dims"),
py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
}
void init_MaxPooling(py::module &m) {
declare_MaxPoolingOp<1>(m);
declare_MaxPoolingOp<2>(m);
declare_MaxPoolingOp<3>(m);
// FIXME:
// m.def("MaxPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
// (&)[1])>(&MaxPooling));
}
} // namespace Aidge
#endif
\ No newline at end of file
......@@ -29,6 +29,7 @@ void init_FC(py::module&);
void init_GenericOperator(py::module&);
void init_LeakyReLU(py::module&);
void init_Matmul(py::module&);
void init_MaxPooling(py::module&);
void init_Producer(py::module&);
void init_ReLU(py::module&);
void init_Softmax(py::module&);
......@@ -75,6 +76,7 @@ void init_Aidge(py::module& m){
init_GenericOperator(m);
init_LeakyReLU(m);
init_Matmul(m);
init_MaxPooling(m);
init_ReLU(m);
init_Softmax(m);
......
......@@ -33,13 +33,10 @@ Aidge::Connector Aidge::GraphView::operator()(
     (void)input; // avoid unused warning
   }
-  IOIndex_t inID = 0;
-  for (const Connector &ctor : ctors) {
-    assert((ctor.node() != nullptr) &&
-           "Input Connector must be associated with a node");
-    (void)ctors; // avoid unused warning
-  }
+  IOIndex_t inID = 0;
+  for (const Connector &ctor : ctors) {
     ctor.node()->addChild(shared_from_this(), static_cast<std::size_t>(ctor.index()),
                           {inNode, inID++});
   }
......@@ -197,7 +194,7 @@ void Aidge::GraphView::forwardDims() {
{
assert(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty());
}
-        }
+    }
// Compute dimensions of every node
......@@ -326,7 +323,7 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
// add learnable parameters to the graph
if (includeLearnableParam) {
for (IOIndex_t i = node->nbDataInputs(); i < node->nbInputs(); ++i) {
-      std::shared_ptr<Node> parentNode = node->getParents(static_cast<IOIndex_t>(i));
+      std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
if (parentNode) {
parentNode->addView(shared_from_this());
mNodes.insert(parentNode);
......@@ -522,12 +519,24 @@ void Aidge::GraphView::link(std::string /*name1_inID*/,
   printf("Not implemented yet.\n");
 }

 void Aidge::GraphView::insert(Node & /*newNode*/, Node & /*inNode*/,
                               std::initializer_list<Node> /*outNodes*/,
                               IOIndex_t /*tensorIdx*/) {
   printf("Not implemented yet.\n");
 }

+void Aidge::GraphView::insertParent(NodePtr childNode,
+                                    NodePtr newParentNode,
+                                    IOIndex_t childInputTensorIdx,
+                                    IOIndex_t newParentInputTensorIdx,
+                                    IOIndex_t newParentOutputTensorIdx) {
+  NodePtr currentParentNode = childNode->getParent(childInputTensorIdx);
+  const IOIndex_t currentParentOutputTensorIdx = childNode->input(childInputTensorIdx).second;
+  // detach the child from its current parent (in both directions)
+  currentParentNode->removeChild(childNode, currentParentOutputTensorIdx);
+  // rewire: former parent -> new parent -> child
+  currentParentNode->addChild(newParentNode, currentParentOutputTensorIdx, newParentInputTensorIdx);
+  newParentNode->addChild(childNode, newParentOutputTensorIdx, childInputTensorIdx);
+  add(newParentNode);
+}
bool Aidge::GraphView::replaceWith(std::set<std::shared_ptr<Node>> newNodes) {
// TODO : only supports one input/output node for now
assert(mNodes.size()>0 && "There must be at least one Node to replace");
......@@ -537,7 +546,7 @@ bool Aidge::GraphView::replaceWith(std::set<std::shared_ptr<Node>> newNodes) {
std::shared_ptr<Node> newInputNode;
std::shared_ptr<Node> previousOutputNode;
std::shared_ptr<Node> newOutputNode;
auto gNew = std::make_shared<GraphView>();
gNew->add(newNodes, false);
......
......@@ -226,7 +226,7 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t
}
void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOIndex_t inId) {
-  if (getParents(inId) != nullptr) {
+  if (getParent(inId) != nullptr) {
printf("Warning, you're replacing a Parent.\n");
}
assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Input index out of bound.");
......