Skip to content
Snippets Groups Projects
Commit 4e5ad5e0 authored by Cyril Moineau's avatar Cyril Moineau
Browse files

Initial commit

parent 48f84784
No related branches found
No related tags found
No related merge requests found
Showing
with 2478 additions and 82 deletions
# Standalone build: declare the project ourselves.
if (BUILD_CORE_ALONE)
    # cmake_minimum_required() must be called BEFORE project(): project()
    # relies on the policies set by cmake_minimum_required().
    cmake_minimum_required(VERSION 3.11)
    project(Aidge_Core)
    add_compile_options(-Wall -Wextra -fPIC)
endif()

if (PYBIND)
    add_definitions(-DPYBIND)
    include(FetchContent)
    FetchContent_Declare(
        PyBind11
        GIT_REPOSITORY https://github.com/pybind/pybind11.git
        GIT_TAG        v2.10.4 # or a later release
    )
    FetchContent_MakeAvailable(PyBind11)

    file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
    # NO_EXTRAS: skip pybind11's extra link-time optimization / strip steps.
    pybind11_add_module(aidge_core MODULE ${pybind_src_files} NO_EXTRAS)
    target_include_directories(aidge_core PUBLIC ${pybind11_INCLUDE_DIRS} "python_binding")
    # "core" is declared below; link items may reference not-yet-defined targets.
    target_link_libraries(aidge_core PUBLIC core)
    # generate_python_binding(aidge_core core)
endif()

add_library(core STATIC)

# Add include directories
target_include_directories(core PUBLIC "include")

# Containers module
file(GLOB_RECURSE src_files "src/*.cpp")
target_sources(core PRIVATE ${src_files})
set_property(TARGET core PROPERTY POSITION_INDEPENDENT_CODE ON)

if (PYBIND)
    target_include_directories(core PUBLIC $<BUILD_INTERFACE:${PYTHON_INCLUDE_DIRS}>)
    target_link_libraries(core PRIVATE ${PYTHON_LIBRARIES})
endif()

if (NOT BUILD_CORE_ALONE)
    # Activate compile time reducer for aidge_core
    set_target_properties(core PROPERTIES COTIRE_ADD_UNITY_BUILD FALSE)
    # set_target_properties(n2d2_cpu_lib PROPERTIES COTIRE_CXX_PREFIX_HEADER_INIT "include/utils/Precompiled.hpp")
    cotire(core)
endif()

if (TESTS)
    add_subdirectory(tests)
endif()
\ No newline at end of file
Makefile 0 → 100644
# This makefile does nothing but delegating the actual building to cmake
BUILDDIR := build
MAKEFLAGS := --no-print-directory

# None of these targets produce a file of the same name: declare them phony so
# a stray file/directory named e.g. "clean" cannot shadow them.
.PHONY: all core_only core_tests core_with_pybind core_with_pybind_tests clean

all: core_with_pybind

core_only:
	mkdir -p ${BUILDDIR}; \
	cd ${BUILDDIR}; \
	cmake -DBUILD_CORE_ALONE=ON -DCMAKE_BUILD_TYPE=Release -DPYBIND=OFF -DTESTS=OFF ..; \
	${MAKE} ${MAKEFLAGS};

core_tests:
	mkdir -p ${BUILDDIR}; \
	cd ${BUILDDIR}; \
	cmake -DBUILD_CORE_ALONE=ON -DCMAKE_BUILD_TYPE=Debug -DPYBIND=OFF -DTESTS=ON ..; \
	${MAKE} ${MAKEFLAGS}; \
	cd tests; \
	ctest --output-on-failure || true;

core_with_pybind:
	mkdir -p ${BUILDDIR}; \
	cd ${BUILDDIR}; \
	cmake -DBUILD_CORE_ALONE=ON -DCMAKE_BUILD_TYPE=Release -DPYBIND=ON -DTESTS=OFF ..; \
	${MAKE} ${MAKEFLAGS};

core_with_pybind_tests:
	mkdir -p ${BUILDDIR}; \
	cd ${BUILDDIR}; \
	cmake -DBUILD_CORE_ALONE=ON -DCMAKE_BUILD_TYPE=Debug -DPYBIND=ON -DTESTS=ON ..; \
	${MAKE} ${MAKEFLAGS}; \
	cd tests; \
	ctest --output-on-failure || true;

clean:
	if [ -d "${BUILDDIR}" ]; then rm -rf ${BUILDDIR}; fi
\ No newline at end of file
# aidge_core
# Aidge Core library
You can find here the C++ code of the Core library of Aidge.
## Compilation
## Getting started
To make it easy for you to get started with GitLab, here's a list of recommended next steps.
Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)!
## Add your files
- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files
- [ ] [Add files using the command line](https://docs.gitlab.com/ee/gitlab-basics/add-file.html#add-a-file-using-the-command-line) or push an existing Git repository with the following command:
To only compile the Core library, run
```
cd existing_repo
git remote add origin https://git-dscin.intra.cea.fr/aidge/aidge_core.git
git branch -M main
git push -uf origin main
make core_only
```
## Integrate with your tools
- [ ] [Set up project integrations](https://git-dscin.intra.cea.fr/aidge/aidge_core/-/settings/integrations)
## Collaborate with your team
- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/)
- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html)
- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically)
- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/)
- [ ] [Automatically merge when pipeline succeeds](https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html)
## Test and Deploy
Use the built-in continuous integration in GitLab.
- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/index.html)
- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing(SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)
- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html)
- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/)
- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html)
***
# Editing this README
When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thank you to [makeareadme.com](https://www.makeareadme.com/) for this template.
## Suggestions for a good README
Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information.
## Name
Choose a self-explaining name for your project.
## Description
Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors.
## Badges
On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge.
## Visuals
Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method.
## Installation
Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people to using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection.
## Usage
Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README.
## Support
Tell people where they can go to for help. It can be any combination of an issue tracker, a chat room, an email address, etc.
## Roadmap
If you have ideas for releases in the future, it is a good idea to list them in the README.
## Contributing
State if you are open to contributions and what your requirements are for accepting them.
For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self.
To compile the Core library together with its unit tests, run
```
make core_tests
```
You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser.
To compile the Core library with the python binding, run
```
make core_with_pybind
```
Important: this command can also be run with `make`.
## Authors and acknowledgment
Show your appreciation to those who have contributed to the project.
## License
For open source projects, say how it is licensed.
To compile the Core library with the Python binding together with its unit tests, run
```
make core_with_pybind_tests
```
## Project status
If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers.
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_OPERATORIMPL_H__
#define __AIDGE_OPERATORIMPL_H__
#include <cstddef>
#include <vector>
#include "utils/Types.h"
namespace Aidge {
/**
 * @brief Abstract interface that a backend-specific implementation of an
 * operator must fulfil.
 * @details Exposes the computation entry points (forward/backward) and
 * data-flow accounting queries (amount of data required, consumed, produced).
 */
class OperatorImpl {
public:
    /// Run the operator's computation. Default implementation does nothing.
    virtual void forward(){};
    /// Run the operator's gradient computation. Default implementation does nothing.
    virtual void backward() {}

    /**
     * @brief Minimum amount of data from a specific input required by the
     * implementation to be run.
     *
     * @param inputIdx Index of the input analysed.
     * @return NbElts_t
     */
    virtual NbElts_t getNbRequiredData(IOIndex_t inputIdx) const = 0;

    /// @brief Amount of input data that cannot be overwritten during the execution.
    virtual NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const = 0;

    /// @brief Memory required at an output for a given input size.
    virtual NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const = 0;

    /**
     * @brief Total amount of consumed data from a specific input.
     *
     * @param inputIdx Index of the input analysed.
     * @return NbElts_t
     */
    virtual NbElts_t getNbConsumedData(IOIndex_t inputIdx) const = 0;

    /**
     * @brief Total amount of produced data ready to be used on a specific output.
     *
     * @param outputIdx Index of the output analysed.
     * @return NbElts_t
     */
    virtual NbElts_t getNbProducedData(IOIndex_t outputIdx) const = 0;

    virtual ~OperatorImpl() = default;
};
} // namespace Aidge
#endif /* __AIDGE_OPERATORIMPL_H__ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_TENSORIMPL_H__
#define __AIDGE_TENSORIMPL_H__
#include <cstddef>
#include <cstdio>
#include "utils/Types.h"
namespace Aidge {
/**
 * @brief Abstract interface for backend-specific Tensor storage.
 * @details Concrete backends provide the raw buffer, element copy and
 * comparison. A TensorImpl is identified by the backend name it was built
 * with.
 */
class TensorImpl {
public:
    TensorImpl() = delete;
    /**
     * @param backend Backend name. Only the pointer is stored (non-owning):
     * the pointed-to string must outlive this object.
     * @note `explicit` to forbid accidental implicit conversion from a
     * string literal.
     */
    explicit TensorImpl(const char *backend) : mBackend(backend) {}

    /// Copy `length` elements from `src` into this storage.
    virtual void copy(const void *src, NbElts_t length) = 0;
    /// Raw pointer to the underlying buffer.
    virtual void *rawPtr() = 0;
    /// Adopt an externally-provided buffer. Default: unsupported — report it.
    virtual void setRawPtr(void * /*ptr*/)
    {
        // Diagnostics belong on stderr, not stdout (previously printf).
        fprintf(stderr, "Cannot set raw pointer for backend %s\n", mBackend);
    }
    /// Size of one scalar element (in bytes).
    virtual std::size_t scalarSize() const = 0;
    /// Name of the backend this storage belongs to.
    constexpr const char *backend() const { return mBackend; }
    virtual ~TensorImpl() = default;
    /// Element-wise comparison of two storages.
    virtual bool operator==(const TensorImpl &othImpl) const = 0;

private:
    const char *mBackend;  ///< Non-owning pointer to the backend name.
};
} // namespace Aidge
#endif /* __AIDGE_TENSORIMPL_H__ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_DATA_H__
#define __AIDGE_DATA_H__
#include "utils/Parameter.hpp"
namespace Aidge {
/// Scalar data types a payload can be declared with.
/// @warning The declaration order must stay in sync with the
/// EnumStrings<Aidge::DataType>::data string array defined below in this file.
enum class DataType {
    Float64,
    Float32,
    Float16,
    BFloat16,
    Binary,
    Ternary,
    Int2,
    Int3,
    Int4,
    Int5,
    Int6,
    Int7,
    Int8,
    Int16,
    Int32,
    Int64,
    UInt2,
    UInt3,
    UInt4,
    UInt5,
    UInt6,
    UInt7,
    UInt8,
    UInt16,
    UInt32,
    UInt64
};
/**
 * @brief Lightweight polymorphic base tagging a data payload with a type name.
 * @details Only a pointer to the name is kept (non-owning): callers are
 * expected to pass a string literal, or at least a string that outlives the
 * object.
 */
class Data {
private:
    // Non-owning pointer to the type name supplied at construction.
    const char* mTypeName;

public:
    constexpr Data(const char* type) : mTypeName(type) {}

    /// @return The type name this payload was constructed with.
    constexpr const char* type() const {
        return mTypeName;
    }

    virtual ~Data() = default;
};
}
// NOTE(review): an anonymous namespace in a header gives every translation
// unit its own copy of these definitions. Presumably intentional here (each TU
// gets local specializations), but consider C++17 `inline` variables — confirm.
namespace {
// Compile-time mapping from a native C++ type to the matching Aidge::DataType.
template <typename T> struct NativeType { static const Aidge::DataType type; };
template <> const Aidge::DataType NativeType<double>::type = Aidge::DataType::Float64;
template <> const Aidge::DataType NativeType<float>::type = Aidge::DataType::Float32;
template <> const Aidge::DataType NativeType<long>::type = Aidge::DataType::Int64;
template <> const Aidge::DataType NativeType<int>::type = Aidge::DataType::Int32;

// Printable names for DataType values; order must match the enum declaration.
// EnumStrings is expected to be declared by "utils/Parameter.hpp".
template <>
const char* const EnumStrings<Aidge::DataType>::data[]
    = {"Float64", "Float32", "Float16", "BFloat16", "Binary", "Ternary",
       "Int2", "Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16",
       "Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6",
       "UInt7", "UInt8", "UInt16", "UInt32", "UInt64"};
}
#endif /* __AIDGE_DATA_H__ */
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_CORE_DATA_TENSOR_H__
#define __AIDGE_CORE_DATA_TENSOR_H__
#include <cstring>
#include <set>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include "backend/TensorImpl.hpp"
#include "data/Data.hpp"
#include "utils/Registrar.hpp"
#include "utils/Types.h"
namespace Aidge {
// Helper to create default arrays
/**
 * @brief Implementation detail of create_array(): expands the index pack to
 * repeat `value` once per index.
 */
template <typename T, std::size_t... Is>
constexpr std::array<T, sizeof...(Is)> create_array_impl(T value, std::index_sequence<Is...>) {
    // Each pack element evaluates to `value`; the cast to void discards the
    // index itself and silences the unused-value warning.
    return {{(static_cast<void>(Is), value)...}};
}

/**
 * @brief Build a std::array<T, N> with every element set to `value`.
 */
template <typename T, std::size_t N>
constexpr std::array<T, N> create_array(const T& value) {
    return create_array_impl(value, std::make_index_sequence<N>{});
}
// Helper to convert vector to array
template <typename T, typename Iter, std::size_t... Is>
constexpr auto to_array(Iter &iter, std::index_sequence<Is...>) -> std::array<T, sizeof...(Is)> {
return {{((void)Is, T(*iter++))...}};
}
/**
* @brief Convert an object with an iterator to an std::array.
*/
template <std::size_t N, typename U = void, typename Iter, typename V = typename std::iterator_traits<Iter>::value_type,
typename T = std::conditional_t<std::is_same<U, void>{}, V, U>>
constexpr auto to_array(Iter iter) -> std::array<T, N> {
return to_array<T>(iter, std::make_index_sequence<N>{});
}
namespace detail {
/// Implementation detail: element-wise copy of a C array into a std::array.
template <class T, std::size_t N, std::size_t... I>
constexpr std::array<std::remove_cv_t<T>, N> to_array_impl(T (&a)[N], std::index_sequence<I...>) {
    return {{a[I]...}};
}
}  // namespace detail

/**
 * @brief Convert a C-style array into a C++ std::array.
 *
 * @tparam T Data type.
 * @tparam N Number of elements.
 * @param a C-style array to convert.
 * @return constexpr std::array<std::remove_cv_t<T>, N>
 */
template <class T, std::size_t N>
constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N]) {
    return detail::to_array_impl(a, std::make_index_sequence<N>{});
}
/// Implementation detail of append(array, value): expand then add the tail.
template <typename T, std::size_t N, std::size_t... I>
constexpr std::array<T, N + 1> append(std::array<T, N> a, T t, std::index_sequence<I...>) {
    return {{a[I]..., t}};
}

/// Implementation detail of append(value, array): add the head then expand.
template <typename T, std::size_t N, std::size_t... I>
constexpr std::array<T, N + 1> append(T t, std::array<T, N> a, std::index_sequence<I...>) {
    return {{t, a[I]...}};
}

/**
 * @brief Create a new array that is the initial one with one value appended.
 * @details append([1,2,7], 3) -> [1,2,7,3]
 *
 * @tparam T Data type.
 * @tparam N Number of elements in the initial array.
 * @param a Initial array.
 * @param t Element to append.
 * @return constexpr std::array<T, N + 1>
 */
template <typename T, std::size_t N>
constexpr std::array<T, N + 1> append(std::array<T, N> a, T t) {
    return append(a, t, std::make_index_sequence<N>{});
}

/**
 * @brief Create a new array that is the initial one with one value prepended.
 * @details append(3, [1,2,7]) -> [3,1,2,7]
 */
template <typename T, std::size_t N>
constexpr std::array<T, N + 1> append(T t, std::array<T, N> a) {
    return append(t, a, std::make_index_sequence<N>{});
}
// Generic helper for initializing a Tensor
// Generic helpers for initializing a Tensor from nested brace lists.
// These are passive aggregates: their memory layout (a plain C array) is
// relied upon by the Tensor constructors that memcpy from &data[0]....

/// 1-D initializer aggregate.
template <typename T, std::size_t SIZE_0>
struct Array1D {
    T data[SIZE_0];
};

/// 2-D initializer aggregate.
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
struct Array2D {
    T data[SIZE_0][SIZE_1];
};

/// 3-D initializer aggregate.
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
struct Array3D {
    T data[SIZE_0][SIZE_1][SIZE_2];
};

/// 4-D initializer aggregate.
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
struct Array4D {
    T data[SIZE_0][SIZE_1][SIZE_2][SIZE_3];
};
/**
 * @brief N-dimensional data container.
 * @details A Tensor couples a shape (dims) and a DataType with an optional
 * backend-specific storage (TensorImpl). A Tensor without an implementation
 * carries its shape and type but holds no data.
 */
class Tensor : public Data,
               public Registrable<Tensor, std::tuple<std::string, DataType>, std::unique_ptr<TensorImpl>(const Tensor &)> {
private:
    DataType mDataType;                 ///< Scalar type of the stored elements.
    std::vector<DimSize_t> mDims;       ///< Dimensions of the tensor.
    std::unique_ptr<TensorImpl> mImpl;  ///< Backend storage; null when the Tensor holds no data.
    std::shared_ptr<Tensor> mGrad;      ///< Associated gradient, lazily created by grad().

    // Cached data
    std::size_t mSize;    ///< Number of elements in the tensor.
    std::size_t mSizeM1;  ///< For a tensor of N dimensions, number of elements in the
                          ///< N-1 LAST dimensions (see computeSize()).

public:
    static constexpr const char *Type = "Tensor";

    /// Construct an empty Tensor (no dimensions, no storage).
    Tensor(DataType dataType = DataType::Float32) : Data(Type), mDataType(dataType), mDims({}), mSize(0), mSizeM1(0) {
        // ctor
    }

    /// Deep copy: same dims/type; data is duplicated on the same backend if present.
    Tensor(const Tensor &otherTensor)
        : Data(Type),
          mDataType(otherTensor.mDataType),
          mDims(otherTensor.mDims),
          mSize(otherTensor.mSize),
          mSizeM1(otherTensor.mSizeM1)
    {
        if (otherTensor.hasImpl()) {
            mImpl = Registrar<Tensor>::create({otherTensor.mImpl->backend(), dataType()})(*this);
            mImpl->copy(otherTensor.mImpl->rawPtr(), mSize);
        }
    }

    /// Construct from a 1-D initializer aggregate on the "cpu" backend.
    template <typename T, std::size_t SIZE_0>
    constexpr Tensor(Array1D<T, SIZE_0> &&arr)
        : Data(Type),
          mDataType(NativeType<T>::type),
          mDims({SIZE_0}),
          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
          mSize(SIZE_0),
          mSizeM1(SIZE_0) {
        mImpl->copy(&arr.data[0], SIZE_0);
    }

    /// Assign from a 1-D initializer aggregate (creates "cpu" storage if needed).
    template <typename T, std::size_t SIZE_0>
    constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
        resize({SIZE_0});
        if (!mImpl) {
            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
        }
        mImpl->copy(&arr.data[0], SIZE_0);
        return *this;
    }

    /// Construct from a 2-D initializer aggregate on the "cpu" backend.
    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
    constexpr Tensor(Array2D<T, SIZE_0, SIZE_1> &&arr)
        : Data(Type),
          mDataType(NativeType<T>::type),
          mDims({SIZE_0, SIZE_1}),
          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
          mSize(SIZE_0 * SIZE_1),
          mSizeM1(SIZE_1) {
        mImpl->copy(&arr.data[0][0], SIZE_0 * SIZE_1);
    }

    /// Assign from a 2-D initializer aggregate (creates "cpu" storage if needed).
    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
    constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
        resize({SIZE_0, SIZE_1});
        if (!mImpl) {
            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
        }
        mImpl->copy(&arr.data[0][0], SIZE_0 * SIZE_1);
        return *this;
    }

    /// Construct from a 3-D initializer aggregate on the "cpu" backend.
    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
    constexpr Tensor(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr)
        : Data(Type),
          mDataType(NativeType<T>::type),
          mDims({SIZE_0, SIZE_1, SIZE_2}),
          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
          mSize(SIZE_0 * SIZE_1 * SIZE_2),
          mSizeM1(SIZE_1 * SIZE_2) {
        mImpl->copy(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
    }

    /// Assign from a 3-D initializer aggregate (creates "cpu" storage if needed).
    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
    constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
        resize({SIZE_0, SIZE_1, SIZE_2});
        if (!mImpl) {
            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
        }
        mImpl->copy(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
        return *this;
    }

    /// Construct from a 4-D initializer aggregate on the "cpu" backend.
    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
    constexpr Tensor(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr)
        : Data(Type),
          mDataType(NativeType<T>::type),
          mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
          mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3),
          mSizeM1(SIZE_1 * SIZE_2 * SIZE_3) {
        mImpl->copy(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
    }

    /// Assign from a 4-D initializer aggregate (creates "cpu" storage if needed).
    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
    constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
        resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
        if (!mImpl) {
            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
        }
        mImpl->copy(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
        return *this;
    }

    /**
     * @brief Copy assignment: copies dims, datatype, backend and data.
     * An implementation-less source yields an implementation-less target.
     */
    Tensor &operator=(const Tensor &t) {
        if (this == &t) {
            return *this;  // guard: self-assignment would copy a buffer onto itself
        }
        resize(t.dims());
        setDatatype(t.dataType());
        if (t.hasImpl()) {
            setBackend(t.mImpl->backend());
            mImpl->copy(t.mImpl->rawPtr(), size());
        }
        else {
            mImpl = nullptr;
        }
        return *this;
    }

    /**
     * @brief Compare type, dims, backend and content.
     * @note Tensors without storage never compare equal (as before).
     */
    bool operator==(const Tensor &otherTensor) const {
        // Guard BOTH implementations: the previous version dereferenced
        // mImpl even when only one side was null (undefined behaviour).
        if (!mImpl || !otherTensor.mImpl) {
            return false;
        }
        // Compare backend names by content (consistent with setBackend), not
        // by pointer identity.
        if ((dataType() != otherTensor.dataType()) || (dims() != otherTensor.dims()) ||
            (strcmp(mImpl->backend(), otherTensor.mImpl->backend()) != 0)) {
            return false;
        }
        return *mImpl == *(otherTensor.mImpl);
    }

    /// Move the data to another backend, creating the storage if none exists.
    inline void setBackend(const std::string &name) {
        if (mImpl) {
            if (strcmp(mImpl->backend(), name.c_str()) != 0) {
                // Backend change: create new impl, copy from old to new and
                // replace impl
                std::unique_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(*this);
                newImpl->copy(mImpl->rawPtr(), size());
                mImpl = std::move(newImpl);
            }
        } else
            mImpl = Registrar<Tensor>::create({name, mDataType})(*this);
    }

    /// Names of every backend registered for Tensor.
    static std::set<std::string> getAvailableBackends() {
        std::set<std::string> backendsList;
        for (std::tuple<std::string, DataType> tupleKey : Registrar<Tensor>::getKeys())
            backendsList.insert(std::get<0>(tupleKey));
        return backendsList;
    }

    constexpr DataType dataType() const { return mDataType; }

    /**
     * @brief Set the DataType of the Tensor and converts data
     * if the Tensor has already been initialized.
     * @param dt DataType.
     */
    void setDatatype(const DataType dt) {
        if (mImpl && (dataType() != dt)) {
            // get ptr before changing Tensor backend or the type difference
            // will trigger a warning
            const void *data = mImpl->rawPtr();
            mDataType = dt;
            std::unique_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(*this);
            newImpl->copy(data, size());  // /!\ it does not cast data but reinterpret them
            mImpl = std::move(newImpl);
        }
        mDataType = dt;
    }

    constexpr const std::unique_ptr<TensorImpl> &getImpl() { return mImpl; }

    /// @return true when the Tensor has backend storage attached.
    bool hasImpl() const
    {
        return mImpl != nullptr;
    }

    inline std::size_t nbDims() const { return mDims.size(); }

    /// Dimensions as a fixed-size array; DIM must equal nbDims().
    template <DimIdx_t DIM>
    constexpr std::array<DimSize_t, DIM> dims() const {
        assert(DIM == mDims.size() && "wrong number of dimensions");
        return to_array<DIM>(mDims.cbegin());
    }

    constexpr const std::vector<DimSize_t> &dims() const { return mDims; }
    constexpr std::size_t size() const { return mSize; }
    constexpr std::size_t sizeM1() const { return mSizeM1; }

    /// Change the dimensions (does not reallocate storage).
    // deducing std::array size_type and declaring DIM accordingly
    template <std::array<DimSize_t, 1>::size_type DIM>
    void resize(const std::array<DimSize_t, DIM> &dims) {
        static_assert(DIM <= MaxDim, "Too many tensor dimensions required by resize, not supported");
        mDims.assign(dims.begin(), dims.end());
        computeSize();
    }

    /// Change the dimensions (does not reallocate storage).
    void resize(const std::vector<DimSize_t> &dims) {
        mDims = dims;
        computeSize();
    }

    bool empty() const { return mDims.empty(); }

    /**
     * @brief Typed access to one element by multi-dimensional index.
     * @warning expectedType must match the actual stored type (no conversion).
     */
    template <typename expectedType, std::array<std::size_t, 1>::size_type DIM>
    constexpr expectedType &get(std::array<std::size_t, DIM> idx) {
        assert(DIM == mDims.size());
        assert(mImpl);
        // Row-major flattening of the index.
        std::size_t unfoldedIdx = 0;
        for (std::size_t i = 0; i < DIM - std::size_t(1); ++i) {
            unfoldedIdx = (unfoldedIdx + idx[i]) * mDims[i + 1];
        }
        unfoldedIdx += idx[DIM - 1];
        return static_cast<expectedType *>(mImpl->rawPtr())[unfoldedIdx];
    }

    /**
     * @brief Human-readable nested-brace representation of the content.
     * @note Only Int32 and Float64 are special-cased; every other DataType is
     * printed as float.
     */
    std::string toString() {
        std::string res;
        std::size_t dim = 0;
        // Per-dimension cursor. std::vector instead of the previous raw
        // `new std::size_t[...]`, which was never deleted (memory leak).
        std::vector<std::size_t> dimVals(nbDims(), 0);
        std::size_t counter = 0;
        res += "{\n";
        if (nbDims() >= 2) {
            while (counter < mSize) {
                std::string spaceString = std::string((dim + 1) << 1, ' ');
                if (dim < nbDims() - 2) {
                    // Outer dimensions: open/close brace groups.
                    if (dimVals[dim] == 0) {
                        res += spaceString + "{\n";
                        ++dim;
                    } else if (dimVals[dim] < static_cast<std::size_t>(dims()[dim])) {
                        res += spaceString + "},\n" + spaceString + "{\n";
                        ++dim;
                    } else {
                        res += spaceString + "}\n";
                        dimVals[dim--] = 0;
                        dimVals[dim]++;
                    }
                } else {
                    // Innermost two dimensions: print rows of scalars.
                    for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) {
                        res += spaceString + "{";
                        for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) {
                            switch (mDataType)
                            {
                            case DataType::Int32:
                                res += " " + std::to_string(static_cast<int *>(mImpl->rawPtr())[counter++]) + ",";
                                break;
                            case DataType::Float64:
                                res += " " + std::to_string(static_cast<double *>(mImpl->rawPtr())[counter++]) + ",";
                                break;
                            default:
                                res += " " + std::to_string(static_cast<float *>(mImpl->rawPtr())[counter++]) + ",";
                                break;
                            }
                        }
                        switch (mDataType)
                        {
                        case DataType::Int32:
                            res += " " + std::to_string(static_cast<int *>(mImpl->rawPtr())[counter++]) + "}";
                            break;
                        case DataType::Float64:
                            res += " " + std::to_string(static_cast<double *>(mImpl->rawPtr())[counter++]) + "}";
                            break;
                        default:
                            res += " " + std::to_string(static_cast<float *>(mImpl->rawPtr())[counter++]) + "}";
                            break;
                        }
                        if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) {
                            res += ",";
                        }
                        res += "\n";
                    }
                    dimVals[dim--] = 0;
                    dimVals[dim]++;
                }
            }
            // Close any still-open brace groups.
            for (int i = static_cast<int>(dim); i >= 0; --i) {
                res += std::string((dim + 1) << 1, ' ') + "}\n";
            }
        } else {
            // 0-D / 1-D: a single flat row.
            for (DimSize_t j = 0; j < dims()[0]; ++j) {
                switch (mDataType)
                {
                case DataType::Int32:
                    res += " " + std::to_string(static_cast<int *>(mImpl->rawPtr())[j]) + ((j < dims()[0] - 1) ? "," : "\n");
                    break;
                case DataType::Float64:
                    res += " " + std::to_string(static_cast<double *>(mImpl->rawPtr())[j]) + ((j < dims()[0] - 1) ? "," : "\n");
                    break;
                default:
                    res += " " + std::to_string(static_cast<float *>(mImpl->rawPtr())[j]) + ((j < dims()[0] - 1) ? "," : "\n");
                    break;
                }
            }
        }
        res += "}";
        return res;
    }

    inline void print() { printf("%s\n", toString().c_str()); }

    /// Gradient Tensor, created on first access with this Tensor's shape/type/backend.
    std::shared_ptr<Tensor> grad() {
        if (!mGrad) {
            mGrad = std::make_shared<Tensor>(mDataType);
            mGrad->resize(mDims);
            if (mImpl) mGrad->setBackend(mImpl->backend());
        }
        return mGrad;
    }

private:
    /// Recompute the cached mSize/mSizeM1 from mDims.
    ///\bug not protected against overflow, see ThaliaCommonPack for a solution
    std::size_t computeSize() {
        if (mDims.empty()) {
            mSizeM1 = DimSize_t(0);
            mSize = DimSize_t(0);
        }
        else if (mDims.size() == 1)
        {
            mSizeM1 = mDims[0];
            mSize = mDims[0];
        }
        else {
            // mSizeM1 = product of every dimension except the first.
            mSizeM1 = std::accumulate(++mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
            mSize = static_cast<std::size_t>(mSizeM1 * mDims[0]);
        }
        return mSize;
    }
};
} // namespace Aidge
#endif /* __AIDGE_CORE_DATA_TENSOR_H__ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_CORE_GRAPH_CONNECTOR_H__
#define __AIDGE_CORE_GRAPH_CONNECTOR_H__
#include <cassert>
#include <memory>
#include <vector>
#include "utils/Types.h"
namespace Aidge {
class Node;
class GraphView;
/**
 * @brief Object meant for a simpler and more intuitive user API.
*
* example:
* Connector x();
* x = Conv(...)(x);
* Connector y = Split(3)(x[0]); // Error! Cannot slice a Connector with one output only
* Connector y = Split(3)(x);
* CustomLayer cl(...);
* Connector z = cl(y) // Error! y has multiple outputs, must specify which one to use
* Connector z1 = cl(y[0]);
* Connector z2 = cl(y[1]);
* Connector z3 = cl(y[2]);
* x = Sum(...)(z1, z2, z3);
* GraphView g = x.generateGraph();
*/
class Connector {
private:
    /// Node this Connector refers to; nullptr for a default-constructed Connector.
    std::shared_ptr<Node> mNode;
    ///\brief output id
    ///\details gk_IODefaultIndex is reserved for?
    ///\bug Is negative value pertinent?
    IOIndex_t mOutputId = gk_IODefaultIndex;

public:
    /// Default Connector: refers to no Node, keeps the default output id.
    Connector() : mNode(nullptr) {
        // ctor
    }
    /// Wrap an existing Node (defined out-of-line).
    Connector(std::shared_ptr<Node> node);
    ~Connector() = default;

public:
    /**
     * @brief Select one specific output of the underlying Node.
     * @note Asserts that the Node has more than one output: slicing a
     * single-output Connector is rejected (see the class-level example).
     */
    Connector operator[](IOIndex_t index) {
        assert((size() > 1) && "Cannot refer a slice of the output.");
        return Connector(mNode, index);
    }

public:
    /// Number of outputs of the underlying Node (defined out-of-line).
    IONb_t size() const;
    /// Underlying Node (may be nullptr).
    inline std::shared_ptr<Node> node() const { return mNode; }
    /// Selected output index.
    inline IOIndex_t index() const { return mOutputId; }

private:
    /// Build a Connector on a specific output of `node`.
    /// NOTE(review): if IOIndex_t is an unsigned type, `index >= 0` is always
    /// true and the first assert clause is a no-op — confirm against Types.h.
    Connector(std::shared_ptr<Node> node, IOIndex_t index) : mNode(node) {
        assert((index >= 0) && (static_cast<IONb_t>(index) < size()) &&
               "Non-valid output index.\n");
        mOutputId = index;
    }
};
/**
* @brief Generate a GraphView from a list of output Connectors
*
* @param ctors list of output Connector for the graph to generate.
* @return std::shared_ptr<GraphView>
*/
std::shared_ptr<GraphView> generateGraph(std::vector<Connector> ctors);
} // namespace Aidge
#endif /* __AIDGE_CORE_GRAPH_CONNECTOR_H__ */
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_CORE_GRAPH_GRAPHVIEW_H__
#define __AIDGE_CORE_GRAPH_GRAPHVIEW_H__
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "graph/Node.hpp"
#include "utils/Types.h"
namespace Aidge {
enum class DataType;
class GraphView : public std::enable_shared_from_this<GraphView> {
private:
/// @brief Name of the graphview
std::string mName;
/// @brief Set of nodes included in the GraphView
std::set<NodePtr> mNodes;
/// @brief Set of nodes included in the graphview with names
std::map<std::string, NodePtr> mNodeRegistry;
/// @brief Nodes without input link
std::set<NodePtr> mInputNodes;
/// @brief Nodes without output link
std::set<NodePtr> mOutputNodes;
public:
GraphView(std::string name="")
: mName(name)
{
// ctor
}
GraphView(std::set<NodePtr> nodes, std::string name="")
: mName(name)
{
add(nodes);
}
bool operator==(const GraphView &gv) const
{
return mNodes == gv.mNodes;
}
NodePtr operator[](std::string name)
{
assert(mNodeRegistry.find(name) != mNodeRegistry.end() && "Could not find Node in the GraphView.");
return mNodeRegistry.at(name);
}
///////////////////////////////////////////////////////
// FUNCTIONAL DESCRIPTION
///////////////////////////////////////////////////////
Connector operator()(const std::vector<Connector> ctors);
///////////////////////////////////////////////////////
// INNER
///////////////////////////////////////////////////////
public:
/**
* @brief Name of the node.
* @return std::string
*/
std::string name() const;
/**
* @brief Set the node name.
* @warning Undefined behaviour when several Nodes have the same name.
* @param name New name for the node.
*/
void setName(const std::string &name);
/**
* @brief Save the GraphView as a Mermaid graph in a .md file at the
* specified location.
* @param path
*/
void save(std::string path, bool verbose = false) const;
inline bool inView(NodePtr nodePtr) const {
return mNodes.find(nodePtr) != mNodes.end();
}
///////////////////////////////////////////////////////
// TENSOR MANAGEMENT
///////////////////////////////////////////////////////
public:
inline std::set<NodePtr> inputNodes() const noexcept { return mInputNodes; }
inline std::set<NodePtr> outputNodes() const noexcept { return mOutputNodes; }
inline bool isInputNode(NodePtr nodePtr) const {
return (mInputNodes.find(nodePtr) != mInputNodes.end()) ? true : false;
}
inline bool isOutputNode(NodePtr nodePtr) const {
return (mOutputNodes.find(nodePtr) != mOutputNodes.end()) ? true : false;
}
/**
* @brief List data input Tensors of the graph input nodes.
* @return std::vector<std::pair<NodePtr, IOIndex_t>>
*/
std::vector<std::pair<NodePtr, IOIndex_t>> dataInputs() const;
/**
* @brief List data input Tensors of the graph input nodes.
* @param name Name of the Node.
* @return std::vector<std::pair<NodePtr, IOIndex_t>>
*/
inline auto dataInputs(std::string name) const { return mNodeRegistry.at(name)->dataInputs(); }
/**
* @brief List input Tensors of the graph input nodes.
* @return std::vector<std::pair<NodePtr, IOIndex_t>>
*/
std::vector<std::pair<NodePtr, IOIndex_t>> inputs() const;
std::vector<std::pair<NodePtr, IOIndex_t>> inputs(std::string name) const;
/**
* @brief List output Tensors of the node.
* @return std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>
*/
std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> outputs() const;
/**
* @brief Specific i-th output Tensor of the GraphView.
* @param nodeName Name of the Node of which to show the output.
* @return std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>
*/
std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> outputs(
std::string nodeName) const;
void forwardDims();
void setBackend(const std::string &backend);
void setDatatype(const DataType &datatype);
///////////////////////////////////////////////////////
// TOPOLOGY
///////////////////////////////////////////////////////
public:
/**
* @brief Get the Parents of inputNodes.
* @return std::vector<NodePtr>
*/
std::set<NodePtr> getParents() const;
std::vector<NodePtr> getParents(const std::string nodeName) const;
std::vector<std::vector<NodePtr>> getOrderedParents() const;
/**
* @brief Get the Children of outputNodes.
* @return std::set<NodePtr>
*/
std::set<NodePtr> getChildren() const;
std::vector<std::vector<NodePtr>> getChildren(const std::string nodeName) const;
std::set<NodePtr> getChildren(
const NodePtr otherNode) const; // TODO change it for a vector<vector> ?
/**
* @brief Getter for Operators of the GraphView.
* @return std::set<NodePtr>
*/
inline std::set<NodePtr> getNodes() const { return mNodes; }
/**
* @brief Get the operator with the corresponding name if it is in the
* GraphView.
* @param nodeName name of the node.
* @return NodePtr return a new empty node if the one asked for
* was not found.
*/
NodePtr getNode(const char *nodeName) const;
/**
* @brief Remove a Node from the current GraphView scope without affecting its connections
* @param nodePtr Node to remove
* @param includeLearnableParam Whether learnable parameters should also be removed. Default true.
*/
void remove(NodePtr nodePtr, bool includeLearnableParam = true);
// Surrounding nodes management
void setInputId(IOIndex_t inID, IOIndex_t newNodeOutID);
/**
* @brief Includes a Node to the current GraphView
* @param other_node Node to add.
* @param includeLearnableParam Should non-data inputs, like weights and biases
* be included in the GraphView automatically. Default: true.
*/
void add(NodePtr otherNode, bool includeLearnableParam = true);
void add(std::set<NodePtr> otherNodes,
bool includeLearnableParam = true);
/**
* @brief Include every Node inside another GraphView to the current
* GraphView.
* @param other_graph GraphView containing the Nodes to include.
*/
void add(std::shared_ptr<GraphView> otherGraph);
/**
* @brief Include a Node in the current GraphView and link it to another
* already contained Node.
*
* @param toOtherNode Pointer to the Node to add.
* @param fromOutNode Pointer to the already included Node the new Node will
* be linked to (it will become a parent of the new Node). If the GraphView
* only has one output Node, then default to this Node.
* @param fromTensor Ouput Tensor ID of the already included Node. Default to
* 0.
* @param toTensor Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning
* first available data input for the Node.
*/
void addChild(NodePtr toOtherNode, NodePtr fromOutNode = nullptr,
const IOIndex_t fromTensor = IOIndex_t(0),
IOIndex_t toTensor = gk_IODefaultIndex);
/**
* @brief Include a Node in the current GraphView and link it to another
* already contained Node.
*
* @param toOtherNode Pointer to the Node to add.
* @param fromOutNodeName Name of the already included Node the new Node will
* be linked to (it will become a parent of the new Node). As a name is
* optional, ensure such Node is in the GraphView or it will send back an
* error message.
* @param fromTensor Ouput Tensor ID of the already included Node. Default to
* 0.
* @param toTensor Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning
* first available data input for the Node.
*/
inline void addChild(NodePtr toOtherNode, std::string fromOutNodeName,
const IOIndex_t fromTensor = IOIndex_t(0),
IOIndex_t toTensor = gk_IODefaultIndex) {
assert(mNodeRegistry.find(fromOutNodeName) != mNodeRegistry.end() &&
"No Node with this name found in the GraphView.");
addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor);
}
/**
* @brief Include a GraphView content in the current GraphView and link
* the two sets by linking one Node from each GraphView.
* @param toOtherView Pointer to the GraphView whose content should be added.
* @param fromOutNode Pair of pointer to Node and Tensor ID for specifying the
* connection. If the GraphView including the other one has only one output
* Node, then it defaults to the first output Tensor of this Node.
* @param toNode Pair of pointer to Node and Tensor ID for specifying the
* connection. If the GraphView whose content is included has only one input
* Node, then it defaults to the first available data input Tensor of this
* Node.
*/
void addChild(std::shared_ptr<GraphView> toOtherView,
std::pair<NodePtr, IOIndex_t> fromOutNode =
std::pair<NodePtr, IOIndex_t>(nullptr, IOIndex_t(0)),
std::pair<NodePtr, IOIndex_t> toNode =
std::pair<NodePtr, IOIndex_t>(nullptr, gk_IODefaultIndex));
/**
* @brief Swap two Node instances if possible.
* @param node
* @param otherNode
* @return true
* @return false
*/
bool swap(Node &node, Node &otherNode);
void link(std::string name1_inID, std::string name2_outID);
void insert(Node &newNode, Node &inNode, std::initializer_list<Node> outNodes,
IOIndex_t tensorIdx);
/**
* @brief Replace the current GraphView with the set of given Nodes if possible
* @param newNodes Set of Nodes.
* @return true
* @return false
*/
bool replaceWith(std::set<NodePtr> newNodes);
void updateInputNodes();
/**
* @brief Process from zero the set of output Nodes.
*/
void updateOutputNodes();
private:
///////////////////////////////////////////////////////
// TENSOR MANAGEMENT
///////////////////////////////////////////////////////
IONb_t getNbDataInputs() const;
IONb_t getNbFreeDataInputs() const;
void updateInputNodes(NodePtr node);
/**
* @brief Update the set of output Nodes with a new Node,checking if it can be
* added and removing any Node not part of mOutputNode anymore.
* @param nodePtr
*/
void updateOutputNodes(NodePtr node);
///////////////////////////////////////////////////////
// TOPOLOGY
///////////////////////////////////////////////////////
void _forwardDims(std::set<NodePtr> listNodes);
void removeInputNode(const std::string nodeName);
void removeOutputNode(const std::string nodeName);
};
} // namespace Aidge
#endif /* __AIDGE_CORE_GRAPH_GRAPHVIEW_H__ */
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_CORE_GRAPH_NODE_H__
#define __AIDGE_CORE_GRAPH_NODE_H__
#include <cassert>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include <utility>
#include "graph/Connector.hpp"
#include "operator/Operator.hpp"
#include "utils/Types.h"
namespace Aidge {
using NodePtr = std::shared_ptr<Node>;
class GraphView;
class Node : public std::enable_shared_from_this<Node> {
private:
    std::string mName; // Name of the Node. Should be unique.
    std::set<std::shared_ptr<GraphView>> mViews =
        std::set<std::shared_ptr<GraphView>>(); // Set of pointers to GraphView
                                                // instances including this Node
                                                // instance
    const std::shared_ptr<Operator>
        mOperator; // Pointer to the associated Operator
    std::vector<NodePtr>
        mParents; // List of parent nodes (Parent --> Node --> Child)
    std::vector<std::vector<NodePtr>>
        mChildren; // List of child nodes for each output (Parent --> Node -->
                   // Child)
    std::vector<std::vector<IOIndex_t>> mIdInChildren; // Input ID used by each child Node (mirrors mChildren).
    std::vector<IOIndex_t> mIdOutParents; // Output ID of each parent Node. Default: gk_IODefaultIndex.

public:
    Node() = delete;
    /// @brief Construct a Node wrapping the given Operator, with an optional name.
    Node(std::shared_ptr<Operator> op, const char *name = nullptr);
    virtual ~Node() = default;

    /// @brief Identity comparison: two Nodes are equal when they are the same object.
    friend bool operator==(const Node &lhs, const Node &rhs) {
        return lhs.shared_from_this() == rhs.shared_from_this();
    }

public:
    ///////////////////////////////////////////////////////
    //        FUNCTIONAL DESCRIPTION
    ///////////////////////////////////////////////////////

    Connector operator()(const std::vector<Connector> ctors);

public:
    ///////////////////////////////////////////////////////
    //        INNER
    ///////////////////////////////////////////////////////

    /**
     * @brief Name of the node.
     * @return std::string
     */
    inline std::string name() const noexcept { return mName; }

    /**
     * @brief Set the node name.
     * @warning Undefined behaviour when several Nodes have the same name.
     * @param name New name for the node.
     */
    void setName(const std::string &name);

    /**
     * @brief Type of the node, forwarded from the associated Operator.
     * @return std::string
     */
    inline std::string type() const { return mOperator->type(); }

    ///////////////////////////////////////////////////////
    //        OPERATORS
    ///////////////////////////////////////////////////////

    /**
     * @brief Run forward() function of the associated Operator.
     */
    void forward();

    /**
     * @brief Run backward() function of the associated Operator.
     */
    void backward();

    /**
     * @brief Get the Operator object of the Node.
     * @return std::shared_ptr<Operator>
     */
    inline std::shared_ptr<Operator> getOperator() const { return mOperator; }

    ///////////////////////////////////////////////////////
    //        TENSOR MANAGEMENT
    ///////////////////////////////////////////////////////

    /**
     * @brief Whether or not every input of the Node is linked to a Tensor.
     * If true then the Node is ready to be executed.
     * @return true
     * @return false
     */
    bool valid() const;

    /**
     * @brief List of pair <Parent, ID of the data input>. When an input is not
     * linked to any Parent, the pair is <nullptr, gk_IODefaultIndex>.
     * @return std::vector<std::pair<NodePtr, IOIndex_t>>
     */
    std::vector<std::pair<NodePtr, IOIndex_t>> dataInputs() const;

    /**
     * @brief List of pair <Parent, ID of the input>. When an input is not linked
     * to any Parent, the pair is <nullptr, gk_IODefaultIndex>.
     * @return std::vector<std::pair<NodePtr, IOIndex_t>>
     */
    std::vector<std::pair<NodePtr, IOIndex_t>> inputs() const;

    /**
     * @brief Parent and its output Tensor ID linked to the inID-th input Tensor.
     * If the input is not linked to any Parent, the pair is <nullptr, gk_IODefaultIndex>.
     * @param inID Input index; must be in range (asserted).
     * @return std::pair<NodePtr, IOIndex_t>
     */
    inline std::pair<NodePtr, IOIndex_t> input(IOIndex_t inID) const {
        assert((inID != gk_IODefaultIndex) && (static_cast<IONb_t>(inID) < nbInputs()) && "Input index out of bound.");
        return std::pair<NodePtr, IOIndex_t>(mParents[inID],
                                             mIdOutParents[inID]);
    }

    /**
     * @brief Set fix value for the specified input by creating a Producer wrapping the given Tensor.
     *
     * @param idx input index
     * @param tensor constant tensor to add as parent for specified index.
     */
    void setInput(const IOIndex_t idx, const std::shared_ptr<Tensor> tensor);

    /**
     * @brief Get the lowest index in the input Data Parent list equal to the
     * nullptr.
     * @return IOIndex_t index of the first free data input, or gk_IODefaultIndex if none.
     */
    inline IOIndex_t getFirstFreeDataInput() const {
        IOIndex_t i = 0;
        // NOTE(review): `input(i).second >= 0` assumes an occupied input has a
        // non-negative parent output index (i.e. gk_IODefaultIndex is negative)
        // -- confirm against utils/Types.h.
        for (; (static_cast<IONb_t>(i) < nbDataInputs()) && (input(i).second >= 0); ++i) {}
        // assert((i<nbDataInputs()) && "No free data input for Node");
        return (static_cast<IONb_t>(i) < nbDataInputs()) ? i : gk_IODefaultIndex;
    }

    /// @brief Number of data inputs not linked to any Parent.
    IONb_t getNbFreeDataInputs() const;

    /**
     * @brief List input ids of children linked to outputs of the node.
     * @return std::vector<std::vector<std::pair<NodePtr,
     * IOIndex_t>>>
     */
    std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> outputs() const;

    /**
     * @brief Children and their input Tensor ID linked to the outID-th output
     * Tensor.
     * @param outID Output index.
     * @return std::vector<std::pair<NodePtr, IOIndex_t>>
     */
    std::vector<std::pair<NodePtr, IOIndex_t>>
    output(IOIndex_t outID) const;

    /**
     * @brief Total number of inputs, data and parameters alike.
     * @details [data, data, weight, bias] => 4
     * @return IONb_t
     */
    inline IONb_t nbInputs() const noexcept { return getOperator()->nbInputs(); }

    /**
     * @brief Number of inputs specifically for data.
     * @details [data, data, weight, bias] => 2
     * @return IONb_t
     */
    inline IONb_t nbDataInputs() const noexcept {
        return getOperator()->nbDataInputs();
    }

    /**
     * @brief Number of inputs linked to a Parent's output.
     * @return IONb_t
     */
    IONb_t nbValidInputs() const;

    /**
     * @brief Getter for the number of Output Tensors of the Node.
     * @return IONb_t
     */
    inline IONb_t nbOutputs() const noexcept { return getOperator()->nbOutputs(); }

    // Presumably the number of outputs actually linked to children -- confirm in Node.cpp.
    IONb_t nbValidOutputs() const;

    ///////////////////////////////////////////////////////
    //        TOPOLOGY
    ///////////////////////////////////////////////////////

    /**
     * @brief Set of pointers to each GraphView containing the object.
     * @return std::set<std::shared_ptr<GraphView>>
     */
    inline std::set<std::shared_ptr<GraphView>> views() const noexcept {
        return mViews;
    }

    /**
     * @brief Add a GraphView pointer to the list of GraphView containing
     * the current Node. This feature allows transparent GraphViews.
     * @param graphPtr Pointer to GraphView to add to the list.
     */
    inline void addView(const std::shared_ptr<GraphView> graphPtr) {
        mViews.insert(graphPtr);
    }

    /// @brief Remove a GraphView pointer from the list, if present.
    inline void removeView(const std::shared_ptr<GraphView> graphPtr) {
        if (mViews.find(graphPtr) != mViews.end()) {
            mViews.erase(graphPtr);
        }
    }

    /**
     * @brief Link another Node to an output of the current Node.
     * @param otherNode Pointer to the other Node.
     * @param outId ID of the output Tensor to connect to the other Node.
     * Default to 0.
     * @param otherInId ID of the input Tensor to connect to the current Node.
     * Default to the first available data input.
     */
    void addChild(NodePtr otherNode,
                  const IOIndex_t outId = IOIndex_t(0),
                  IOIndex_t otherInId = gk_IODefaultIndex);

    /**
     * @brief Link a Node from a specific GraphView to the current Node.
     * @param otherView Pointer to the GraphView whose content should be
     * linked to the current Node.
     * @param outId ID of the output Tensor to connect to the other Node.
     * Default to 0.
     * @param otherInId Pair of pointer to Node and Tensor ID for specifying the
     * connection. If the GraphView whose content is linked has only one input
     * Node, then it defaults to the first available data input Tensor of this
     * Node.
     */
    void addChild(std::shared_ptr<GraphView> otherView,
                  const IOIndex_t outId = IOIndex_t(0),
                  std::pair<NodePtr, IOIndex_t> otherInId =
                      std::pair<NodePtr, IOIndex_t>(nullptr, gk_IODefaultIndex));

    /**
     * @brief Get the list of parent Nodes. As an input is linked to a unique Node,
     * if none is linked then the parent is a nullptr.
     * @return std::vector<NodePtr>
     */
    std::vector<NodePtr> getParents() const;

    /// @brief Mutable reference to the parent registered at the given input index.
    inline NodePtr &getParents(IOIndex_t inID) {
        assert(inID != gk_IODefaultIndex);
        return mParents.at(inID);
    }

    // Presumably removes and returns the parent at the given input index -- confirm in Node.cpp.
    NodePtr popParent(const IOIndex_t inID);

    bool removeParent(const IOIndex_t inID);

    /**
     * @brief Get the Children object. Children do not include any nullptr as
     * an output may be linked to nobody and the Node would still work fine.
     * @return std::set<NodePtr>
     */
    std::set<NodePtr> getChildren() const;

    std::vector<std::vector<NodePtr>> getOrderedChildren() const;

    /// @brief Children linked to the outID-th output.
    std::vector<NodePtr> getChildren(IOIndex_t outID) const;

    /**
     * @brief Remove registered child from children lists if possible.
     * @param nodePtr Node to remove.
     * @param outId Output index. Default 0.
     * @return true Child found and removed for given output index.
     * @return false Child not found at given index. Nothing removed.
     */
    bool removeChild(const NodePtr nodePtr, const IOIndex_t outId = 0);

    /**
     * @brief Remove every link of surrounding nodes to it and conversely.
     */
    void resetConnections(bool includeLearnableParam = false);

private:
    ///////////////////////////////////////////////////////
    //        OPERATORS
    ///////////////////////////////////////////////////////

    // void setOperator(const std::shared_ptr<Operator> op_ptr);

    ///////////////////////////////////////////////////////
    //        TENSOR MANAGEMENT
    ///////////////////////////////////////////////////////

    void setInputId(IOIndex_t inID, IOIndex_t newNodeOutID);

    ///////////////////////////////////////////////////////
    //        TOPOLOGY
    ///////////////////////////////////////////////////////

    /**
     * @brief add function specialized in adding Nodes.
     * @param other_node
     * @param outID
     * @param other_inID
     */
    void addChildOp(NodePtr other_node, const IOIndex_t outID,
                    IOIndex_t other_inID);

    /**
     * @brief add function specialized in adding GraphView.
     *
     * @param other_graph
     * @param outID
     * @param other_inID
     */
    void addChildView(std::shared_ptr<GraphView> other_graph,
                      const IOIndex_t outID,
                      std::pair<NodePtr, IOIndex_t> other_inID);

    /**
     * @brief Add a Node to the list of parents.
     * @param other_node Node to add to parents list.
     * @param inID index for adding the parent.
     */
    void addParent(const NodePtr other_node, const IOIndex_t inID);
};
} // namespace Aidge
#endif /* __AIDGE_CORE_GRAPH_NODE_H__ */
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_CORE_GRAPH_OPARGS_H__
#define __AIDGE_CORE_GRAPH_OPARGS_H__
#include <memory>
#include <cassert>
namespace Aidge {
class Node;
class GraphView;
/**
 * @brief Intermediate representation for Structural description.
 *
 * Wraps either a Node or a GraphView (exactly one), so that graph-building
 * helpers such as Sequential/Parallel/Residual can accept both kinds of
 * arguments in a single initializer list.
 */
class OpArgs {
private:
    // Exactly one of the two handles below is non-null, depending on which
    // constructor was used.
    std::shared_ptr<Node> mNodeArg = nullptr;
    std::shared_ptr<GraphView> mViewArg = nullptr;

public:
    /// @brief Wrap a Node. Asserts that the pointer is non-null.
    OpArgs(const std::shared_ptr<Node>& node_)
        : mNodeArg(node_)
    {
        assert(mNodeArg && "The Node provided should not be a nullptr.");
    }

    /// @brief Wrap a GraphView. Asserts that the pointer is non-null.
    OpArgs(const std::shared_ptr<GraphView>& view_)
        : mViewArg(view_)
    {
        assert(mViewArg && "The GraphView provided should not be a nullptr.");
    }

    /// @brief Wrapped Node, or nullptr if this argument wraps a GraphView.
    inline std::shared_ptr<Node> node() const noexcept { return mNodeArg; }

    /// @brief Wrapped GraphView, or nullptr if this argument wraps a Node.
    inline std::shared_ptr<GraphView> view() const noexcept { return mViewArg; }
};
/////////////////////////////
// Sequential
/**
* @brief Create a GraphView by linking every input with the next
* one in a sequential way. Nodes linked with the Sequential graph
* generation instructions must have a single output.
* Sequential(A, B, C) returns A-->B-->C.
* @param inputs List of Node and GraphView to link sequentially.
* @return std::shared_ptr<GraphView> Pointer to the generated view.
*/
std::shared_ptr<GraphView> Sequential(std::initializer_list<OpArgs> inputs);
/////////////////////////////
// Parallel
/**
* @brief Creates a GraphView with provided Nodes without linking them.
* @param inputs List of Node and GraphView to link sequentially.
* @return std::shared_ptr<GraphView> pointer to the generated view.
*/
std::shared_ptr<GraphView> Parallel(std::initializer_list<OpArgs> inputs);
/////////////////////////////
// Residual
/**
* @brief Create a GraphView by linking every input with the next
* one in a sequential way. Finally the first element output is used
 * as another input for the last element. Nodes linked with the Residual graph
 * generation instructions must have a single output.
 * Residual(A, B, C) returns A-->B-->C , A-->C.
* @param inputs List of Node and GraphView to link sequentially.
* @return std::shared_ptr<GraphView> pointer to the generated view.
*/
std::shared_ptr<GraphView> Residual(std::initializer_list<OpArgs> inputs);
}
#endif /* __AIDGE_CORE_GRAPH_OPARGS_H__ */
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_GREGEX_H__
#define __AIDGE_GREGEX_H__
#include <stdexcept> // for exception, runtime_error, out_of_range
#include <regex>
#include <memory> // for shared_ptr
#include <algorithm> // for next_permutation
#include "graphmatching/Utile.hpp"
#include "graphmatching/StmFactory.hpp"
#include "graphmatching/SeqStm.hpp"
#include "graphmatching/NodeRegex.hpp"
#include "graphmatching/Match.hpp"
namespace Aidge{
class GRegex {
    // __init__(self,nodes_regex:dict,seq_regexps:list)
    /// @brief Factory used to build and duplicate the sequence state machines.
    StmFactory mStmFab;
    /// @brief Initial state machines (presumably one per sequence expression -- confirm in GRegex.cpp).
    std::vector<SeqStm*> mStmInit;

public:
    /// @brief Build the matcher from a NodeRegex registry and a list of sequence expressions.
    GRegex(const std::map<std::string,NodeRegex*>& nodesRegex,std::vector<std::string>& seqRegexps );

    /// @brief Run the match starting from the given Nodes of the graph to match.
    std::set<NodeTmp> matchFromStartNodes(const std::vector<NodeTmp> startNodes,const std::shared_ptr<GraphView> graphToMatch);

    /// @brief Walk validation: every stm must be in a valid state.
    bool walk_validation_all_stm_are_valid(const std::vector<std::vector<SeqStm*>> all_stm);
    /// @brief Walk validation: every node read must be validated by at least one stm.
    bool walk_validation_all_node_read_validate_by_one_stm(const std::vector<std::vector<SeqStm*>> all_stm);
    /// @brief Walk validation: common nodes must carry the same tag across all stms.
    bool walk_validation_common_nodes_same_tag_for_all_stm(const std::vector<std::vector<SeqStm*>> all_stm);

    /// @brief Collect the nodes validated by the given stms.
    std::set<NodeTmp> get_all_validate_nodes(const std::vector<std::vector<SeqStm*>> all_stm);

    /// @brief Getter for the initial state machines.
    std::vector<SeqStm*> getStmInit() const {
        return mStmInit;
    }

    /// @brief Getter for the state machine factory (returned by copy).
    StmFactory getStmFab() const {
        return mStmFab;
    }

    //std::set<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>> match(const std::shared_ptr<GraphView> graphToMatch);

    /// @brief Match the whole graph and return the result as a Match object.
    Match match(const std::shared_ptr<GraphView> graphToMatch);
};
}
#endif //__AIDGE_GREGEX_H__
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_MATCH_H__
#define __AIDGE_MATCH_H__
#include <vector>
#include <set>
#include <iostream>
#include <cassert>
#include "graphmatching/Utile.hpp"
namespace Aidge{
class Match {
public:
    Match();

    /// @brief Number of recorded matches.
    size_t getNbMatch();

    /// @brief Record one match: its start Nodes and the set of matched Nodes.
    void insert(std::vector<NodeTmp> startnodes, std::set<NodeTmp> matchnodes);

    /// @brief Start Nodes of every recorded match.
    std::vector<std::vector<NodeTmp>> getStartNodes();

    /// @brief Matched Node sets of every recorded match.
    std::vector<std::set<NodeTmp>> getMatchNodes();

protected:
    // Presumably parallel containers: mStartNodes[i] and mMatchNodes[i]
    // describe the same match -- confirm in Match.cpp.
    std::vector<std::vector<NodeTmp>> mStartNodes;
    std::vector<std::set<NodeTmp>> mMatchNodes;
};
}
#endif //__AIDGE_MATCH_H__
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_NODEREGEX_H__
#define __AIDGE_NODEREGEX_H__
#include <cstdlib>
#include <iostream>
#include <cstring>
#include "graph/Node.hpp"
namespace Aidge {
/**
 * @brief Condition evaluated against a Node.
 *
 * Version 1 - Only tests the type of the node (no need for a lexer).
 */
class NodeRegex
{
    public:
    /// @brief Condition string; currently a Node type name.
    std::string mCondition;

    /// @brief Construct from a condition string.
    // Use the member-initializer list instead of assignment in the constructor
    // body (avoids default-construct-then-assign), and drop the stray `;`.
    NodeRegex(const std::string c) : mCondition(c) {}

    // Input : Node_op
    // Output : bool
    // return mCondition == Node_op.type
    /// @brief Whether the given Node satisfies the condition.
    bool _is(std::shared_ptr<Node> &Node_op);

    /// @brief Whether the condition matches the given type name.
    bool isA(std::string NodeType);
};
}
#endif /* ___AIDGE_NODEREGEX_H___ */
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_SEQSTM_H__
#define __AIDGE_SEQSTM_H__
#include <iostream>
#include <map>
#include <regex>
#include <set>
#include <stdexcept> // for exception, runtime_error, out_of_range
#include <string>
#include <utility>
#include <vector>
#include "graphmatching/NodeRegex.hpp"
#include "graphmatching/Utile.hpp"
namespace Aidge {
class SeqStm {
private:
    /// @brief Index of this state machine instance.
    const int mStmIdx;
    /// @brief Transition matrix driving the state machine.
    const std::vector<std::vector<int>> mTransitionMatrix;
    // string key of a type like 'A' that is used in the A->B .. expression
    const std::map<std::string, NodeRegex *> mNodesRegex;
    // mTypeToIdxTransition.first  = std::pair (node_type, common_tag)
    // mTypeToIdxTransition.second = idx in the transition matrix
    const std::map<NodeTypeKey, int> mTypeToIdxTransition;

    /// @brief Current state; -1 means the stm is blocked.
    int mActSt;
    std::set<NodeTmp> mAllNodeValidated;
    std::set<NodeTmp> mAllNodeTested;
    std::set<std::pair<NodeTmp, std::string>> mAllCommonNode;
    bool mStmIsValid;

    std::pair<NodeRegex *, std::string> getNodeRegexAndCommonAt(int idxType);

    /**
     * @brief Test the stm on a type.
     * @return the common tag
     */
    std::string transitionOnNodeType(NodeType nodeType);

public:
    SeqStm(const int mStmIdx,
           const std::vector<std::vector<int>> &mTransitionMatrix,
           const std::map<std::string, NodeRegex *> &mNodesRegex,
           const std::map<NodeTypeKey, int> &mTypeToIdxTransition, int mActSt,
           std::set<NodeTmp> mAllNodeValidated, std::set<NodeTmp> mAllNodeTested,
           std::set<std::pair<NodeTmp, std::string>> mAllCommonNode,
           bool mStmIsValid);

    //////////////////////////////////////
    // STM test
    /////////////////////////////////////

    /**
     * @brief Whether a state is a valid (accepting) one.
     * @return bool
     */
    bool isAValidSt(int st) {
        std::size_t size = mTransitionMatrix.size();
        // The last state of the transition matrix is the accepting one;
        // return the comparison directly instead of `? true : false`.
        return st == static_cast<int>(size - 1);
    }

    /**
     * @brief True if the stm is blocked in its current state.
     * @return bool
     */
    bool isStmBlocked() { return mActSt == -1; }

    /**
     * @brief True if the stm reached a valid state.
     * @return bool
     */
    bool isValid() { return mStmIsValid; }

    /////////////////////////////////////
    // utils
    /////////////////////////////////////

    /**
     * @brief Extract the type of a node.
     * @return NodeType
     */
    NodeType getTheNodeType(NodeTmp node);

    /// @brief Print a representation of the stm (debug helper).
    void drawStm();

    /////////////////////////////////////
    // getters
    /////////////////////////////////////

    std::set<std::pair<NodeTmp, std::string>> getAllCommonNode() {
        return mAllCommonNode;
    }
    std::set<NodeTmp> getAllNodeTested() { return mAllNodeTested; }
    std::set<NodeTmp> getAllNodeValidated() { return mAllNodeValidated; }

    SeqStm *duplicateStm();

    int getStmIdx() { return mStmIdx; }
    int getState() { return mActSt; }

    //////////////////////////////////////////
    // USE
    //////////////////////////////////////////

    /**
     * @brief Test the stm on a node.
     * @return pair of (new stm state, common tag)
     */
    std::pair<int, std::string> testNode(const NodeTmp node);
};
} // namespace Aidge
#endif /* __AIDGE_SEQSTM_H__ */
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_STMFACTORY_H__
#define __AIDGE_STMFACTORY_H__
#include <map>
#include <utility>
#include <set>
#include <string>
#include <vector>
#include <iostream>
#include <stdexcept> // for exception, runtime_error, out_of_range
#include <regex>
#include "graphmatching/NodeRegex.hpp"
#include "graphmatching/SeqStm.hpp"
#include "graphmatching/Utile.hpp"
namespace Aidge{
class StmFactory {
    /// @brief Shared registry of NodeRegex, keyed by the type string used in expressions.
    const std::map<std::string,NodeRegex*>& mNodesRegex;
    /// @brief Count of state machines created so far.
    std::size_t mCmptStm = 0;

public:
    StmFactory(const std::map<std::string,NodeRegex*>& nodesRegex);
    //StmFactory(){};

    /// @brief Build a new state machine from a sequence regular expression.
    SeqStm* makeNewStm(const std::string& sequRegex);
    /// @brief Duplicate an existing state machine.
    SeqStm* duplicateStm(SeqStm* stm);

    /// @brief Number of state machines created by this factory so far.
    std::size_t getNumberOfStm(){
        return mCmptStm;
    }

private:
    /// @brief Parse a sequence regular expression into transitions.
    ParsingReturn initParsingSequRegex(const std::string& sequRegex);
    /// @brief Build the transition matrix from a parsing result.
    std::vector<std::vector<int>> initTransitionMatrix(ParsingReturn& parsing);
};
}
#endif //__AIDGE_STMFACTORY_H__
\ No newline at end of file
/**
* @file
* @brief
* @version file 1.0.0
* @author vl241552
* @copyright
* Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory.
* All rights reserved.
*/
#ifndef _utile_H_
#define _utile_H_
#include <map>
#include "graph/Node.hpp"
#include <map>
namespace Aidge {
/// @brief Shorthand for a shared Node pointer used throughout the graph-matching code.
using NodeTmp = std::shared_ptr<Node>;
/// @brief A Node type is identified by its type string.
using NodeType = std::string;
/// @brief Tag marking nodes shared between sequence expressions
/// (presumably -- confirm against SeqStm/GRegex usage).
using CommonTag = std::string;
/// @brief Key used in transition tables: (node type, common tag).
using NodeTypeKey = std::pair<NodeType, CommonTag>;

// type def
// struct NodeTypeKey {
//     NodeType nodeType;
//     std::string commonTag;

//     // for map find
//     bool operator<(const NodeTypeKey& other) const {
//         if (nodeType != other.nodeType or commonTag != other.commonTag) {
//             return false;
//         } else {
//             return true;
//         }
//     }
// };

/// @brief Result of parsing a sequence regular expression.
struct ParsingReturn {
    // Maps each (type, tag) key to its index in the transition matrix.
    std::map<NodeTypeKey, int> typeToIdxTransition;
    // Ordered list of transitions; the meaning of the second (string) element
    // is not visible here -- confirm in StmFactory.cpp.
    std::vector<std::pair<NodeTypeKey, std::string>> transition;
};
} // namespace Aidge
#endif //_utile_H_
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_CORE_OPERATOR_ADD_H__
#define __AIDGE_CORE_OPERATOR_ADD_H__
#include <numeric>
#include <vector>
#include <cmath>
#include <memory>
#include <array>
#include "utils/Registrar.hpp"
#include "operator/Operator.hpp"
#include "data/Tensor.hpp"
#include "graph/Node.hpp"
#include "utils/Types.h"
namespace Aidge {
/**
 * @brief Element-wise addition of NUM input Tensors into a single output Tensor.
 *
 * Every input must share the same dimensions; the output is resized to those
 * dimensions once all NUM inputs are non-empty.
 */
template <std::size_t NUM>
class Add_Op : public Operator,
    public Registrable<Add_Op<NUM>, std::string, std::unique_ptr<OperatorImpl>(const Add_Op<NUM>&)> {
public:
    // FIXME: change accessibility
    std::array<std::shared_ptr<Tensor>, NUM> mInputs;
    // NOTE(review): the previous default initializer called shared_from_this(),
    // which is undefined behaviour while the object is still being constructed.
    // A plain empty Tensor matches what the constructor produced anyway and is
    // consistent with the other operators (AvgPooling, BatchNorm).
    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();

public:
    static constexpr const char* Type = "Add";

    /// Creates the operator with NUM fresh (empty) input Tensors, datatype Float32.
    constexpr Add_Op()
        : Operator(Type)
    {
        // NUM is a template parameter: enforce the constraint at compile time.
        static_assert(NUM > 0, "Add should have at least one input");
        for (std::size_t i = 0; i < NUM; ++i) {
            mInputs[i] = std::make_shared<Tensor>();
        }
        setDatatype(DataType::Float32);
    }

    /// Binds @p data (must be a Tensor) to input slot @p inputIdx.
    constexpr void associateInput(IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
    }

    /// Resizes the output to the common input dimensions once every input is
    /// non-empty; asserts that all non-empty inputs agree on their dimensions.
    constexpr void computeOutputDims() override final {
        if (!mInputs[0]->empty()) {
            const auto expectedDims = mInputs[0]->dims();
            std::size_t nonEmptyInputTensor = 1;
            for (; nonEmptyInputTensor < NUM && (!mInputs[nonEmptyInputTensor]->empty()); ++nonEmptyInputTensor) {
                assert(expectedDims == mInputs[nonEmptyInputTensor]->dims());
            }
            if (nonEmptyInputTensor == NUM) {
                mOutput->resize(expectedDims);
            }
        }
    }

    /// @return true once every input is non-empty and the output has been resized.
    bool outputDimsForwarded() const override final {
        std::size_t forwarded = 0;
        for (; forwarded < NUM && (!mInputs[forwarded]->empty()); ++forwarded) {}
        return ((forwarded == NUM) && !(mOutput->empty()));
    }

    inline Tensor& input(const IOIndex_t inputIdx) const override final {
        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
        return *(mInputs[inputIdx].get());
    }
    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }

    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
        return mInputs[inputIdx];
    }
    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "Add Operator has only 1 output");
        return mOutput;
    }

    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
    }
    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "operator supports only 1 output");
        return std::static_pointer_cast<Data>(mOutput);
    }

    /// Selects the backend implementation registered under @p name and
    /// propagates the backend to the output and (workaround) input tensors.
    void setBackend(const std::string& name) {
        mImpl = Registrar<Add_Op<NUM>>::create(name)(*this);
        mOutput->setBackend(name);
        // FIXME: temporary workaround
        for (std::size_t i = 0; i < NUM; ++i) {
            mInputs[i]->setBackend(name);
        }
    }
    /// Propagates @p datatype to the output and (workaround) input tensors.
    void setDatatype(const DataType& datatype) {
        mOutput->setDatatype(datatype);
        // FIXME: temporary workaround
        for (std::size_t i = 0; i < NUM; ++i) {
            mInputs[i]->setDatatype(datatype);
        }
    }

    inline IONb_t nbInputs() const noexcept override final { return NUM; }
    inline IONb_t nbDataInputs() const noexcept override final { return NUM; }
    inline IONb_t nbOutputs() const noexcept override final { return 1; }
};
/**
 * @brief Convenience factory: wraps a freshly created Add_Op<NUM> in a graph Node.
 * @param name Optional node name (nullptr lets the graph pick one).
 */
template <std::size_t NUM>
inline std::shared_ptr<Node> Add(const char* name = nullptr) {
    auto op = std::make_shared<Add_Op<NUM>>();
    return std::make_shared<Node>(op, name);
}
}
#endif /* __AIDGE_CORE_OPERATOR_ADD_H__ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_CORE_OPERATOR_AVGPOOLING_H__
#define __AIDGE_CORE_OPERATOR_AVGPOOLING_H__
#include <array>
#include <numeric>
#include <vector>
#include <cmath>
#include "data/Tensor.hpp"
#include "graph/Node.hpp"
#include "operator/Operator.hpp"
#include "operator/Producer.hpp"
#include "utils/Parameter.hpp"
#include "utils/Registrar.hpp"
#include "utils/Types.h"
namespace Aidge {
enum class AvgPoolingParam { StrideDims, KernelDims, PaddingDims };
/**
 * @brief DIM-dimensional average-pooling operator.
 *
 * Parameters: stride, kernel and (begin/end per axis, hence 2*DIM) padding sizes.
 * Operates on a single (batch, channel, spatial...) input Tensor of rank DIM+2.
 */
template <DimIdx_t DIM>
class AvgPooling_Op : public Operator,
                public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
                public Parameterizable<AvgPoolingParam,
                                       std::array<DimSize_t, DIM>,
                                       std::array<DimSize_t, DIM>,
                                       std::array<DimSize_t, (DIM<<1) >> {
private:
    // FIXME: change accessibility
    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();

public:
    static constexpr const char *Type = "AvgPooling";

    AvgPooling_Op() = delete;

    using Parameterizable_ = Parameterizable<AvgPoolingParam,
                                             std::array<DimSize_t, DIM>,
                                             std::array<DimSize_t, DIM>,
                                             std::array<DimSize_t, (DIM<<1)> >;
    template <AvgPoolingParam e>
    using param = typename Parameterizable_::template param<e>;

    /**
     * @param kernel_dims  Pooling window size per spatial axis.
     * @param stride_dims  Stride per spatial axis (defaults to 1).
     * @param padding_dims Begin/end padding per spatial axis (defaults to 0).
     */
    constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
        : Operator(Type),
          Parameterizable_(param<AvgPoolingParam::StrideDims>(stride_dims),
                           param<AvgPoolingParam::KernelDims>(kernel_dims),
                           param<AvgPoolingParam::PaddingDims>(padding_dims)) {
        // mOutput keeps its in-class default (empty Tensor); re-initializing it
        // in the init list, as before, was redundant.
        setDatatype(DataType::Float32);
    }

    /// Binds @p data (must be a Tensor) to the single input slot.
    constexpr void associateInput(IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
        // Fix: the previous message claimed "3 inputs" while the condition
        // (correctly) allows only one.
        assert(inputIdx < 1 && "AvgPooling operator supports only 1 input");
        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
        mInput = std::dynamic_pointer_cast<Tensor>(data);
    }

    /// Computes the output spatial dims with the usual floor((in - k + padBegin
    /// + padEnd) / stride) + 1 formula; batch and channel dims pass through.
    constexpr void computeOutputDims() override final {
        if (!mInput->empty()) {
            std::array<DimSize_t, DIM + 2> outputDims = {};
            for (std::size_t dim = 0; dim < this->template get<AvgPoolingParam::KernelDims>().size() ; ++dim) {
                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                            std::floor(static_cast<float>(mInput->dims()[dim+2] -
                                                                    this->template get<AvgPoolingParam::KernelDims>()[dim] +
                                                                    this->template get<AvgPoolingParam::PaddingDims>()[dim] +
                                                                    this->template get<AvgPoolingParam::PaddingDims>()[dim+DIM]) /
                                            static_cast<float>(this->template get<AvgPoolingParam::StrideDims>()[dim])));
            }
            outputDims[1] = mInput->dims()[1];
            outputDims[0] = mInput->dims()[0];
            mOutput->resize(outputDims);
        }
    }

    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }

    inline Tensor& input(const IOIndex_t inputIdx) const override final {
        assert(inputIdx == 0 && "AvgPooling operator supports only 1 input");
        return *(mInput.get());
    }
    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }

    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
        assert(inputIdx == 0 && "AvgPooling operator supports only 1 input");
        return mInput;
    }
    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "AvgPooling operator has only 1 output");
        return mOutput;
    }

    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
        assert(inputIdx == 0 && "AvgPooling operator supports only 1 input");
        return std::static_pointer_cast<Data>(mInput);
    }
    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "operator supports only 1 output");
        return std::static_pointer_cast<Data>(mOutput);
    }

    /// Selects the backend implementation registered under @p name and
    /// propagates the backend to the output and (workaround) input tensor.
    void setBackend(const std::string &name) {
        mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
        mOutput->setBackend(name);
        // FIXME: temporary workaround
        mInput->setBackend(name);
    }
    /// Propagates @p datatype to the output and (workaround) input tensor.
    void setDatatype(const DataType &datatype) {
        mOutput->setDatatype(datatype);
        // FIXME: temporary workaround
        mInput->setDatatype(datatype);
    }

    inline IONb_t nbInputs() const noexcept override final { return 1; }
    inline IONb_t nbDataInputs() const noexcept override final { return 1; }
    inline IONb_t nbOutputs() const noexcept override final { return 1; }
};
/**
 * @brief Factory: builds a graph Node holding an AvgPooling_Op<DIM>.
 * @param kernel_dims  Pooling window per spatial axis.
 * @param name         Optional node name.
 * @param stride_dims  Stride per spatial axis (defaults to 1).
 * @param padding_dims Begin/end padding per spatial axis (defaults to 0).
 */
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                           const char *name = nullptr,
                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                           const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
    // FIXME: properly handle default w&b initialization in every cases
    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
    auto op = std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims);
    return std::make_shared<Node>(op, name);
}
/**
 * @brief Overload accepting the kernel dimensions as a C array, e.g.
 *        AvgPooling({2, 2}); forwards to the std::array overload.
 */
template <DimSize_t DIM>
inline std::shared_ptr<Node> AvgPooling(
    DimSize_t const (&kernel_dims)[DIM],
    const char *name = nullptr,
    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
    const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
    const auto kernelArray = to_array(kernel_dims);
    return AvgPooling(kernelArray, name, stride_dims, padding_dims);
}
} // namespace Aidge
namespace {
// Human-readable names for AvgPoolingParam values, indexed by enum value order.
// NOTE(review): an anonymous namespace in a header gives every translation unit
// its own copy of this specialization -- presumably intentional for the
// EnumStrings machinery, but worth confirming.
template <>
const char *const EnumStrings<Aidge::AvgPoolingParam>::data[] = {"StrideDims",
                                                          "KernelDims", "PaddingDims"};
}
#endif /* __AIDGE_CORE_OPERATOR_AVGPOOLING_H__ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef __AIDGE_CORE_OPERATOR_BATCHNORM_H__
#define __AIDGE_CORE_OPERATOR_BATCHNORM_H__
#include <array>
#include <memory>
#include <vector>
#include "utils/Types.h"
#include "data/Tensor.hpp"
#include "graph/Node.hpp"
#include "operator/Operator.hpp"
#include "operator/Producer.hpp"
#include "utils/Parameter.hpp"
#include "utils/Registrar.hpp"
namespace Aidge {
enum class BatchNormParam { Epsilon, Momentum };
/**
 * @brief DIM-dimensional batch-normalization operator.
 *
 * Inputs: [0] data, [1] scale, [2] shift, [3] batch_mean, [4] batch_variance.
 * The four learnable/statistic inputs are resized to the data's channel count
 * (dims()[1]) when dims are computed. Parameters: Epsilon, Momentum.
 */
template <DimIdx_t DIM>
class BatchNorm_Op : public Operator,
    public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
    public Parameterizable<BatchNormParam, float, float> {
public:
    // FIXME: change accessibility
    std::array<std::shared_ptr<Tensor>, 5> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
                                                      std::make_shared<Tensor>(), std::make_shared<Tensor>(),
                                                      std::make_shared<Tensor>()};
    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();

public:
    static constexpr const char *Type = "BatchNorm";

    BatchNorm_Op() = delete;

    using Parameterizable_ = Parameterizable<BatchNormParam, float, float>;
    template <BatchNormParam e>
    using param = typename Parameterizable_::template param<e>;

    /**
     * @param epsilon  Small constant added to the variance for numerical stability.
     * @param momentum Running-statistics momentum.
     */
    constexpr BatchNorm_Op(float epsilon, float momentum)
        : Operator(Type),
          Parameterizable_(param<BatchNormParam::Epsilon>(epsilon),
                           param<BatchNormParam::Momentum>(momentum)),
          mOutput(std::make_shared<Tensor>()) {
        setDatatype(DataType::Float32);
    }

    /// Binds @p data (must be a Tensor) to input slot @p inputIdx.
    constexpr void associateInput(IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
        assert(inputIdx < 5 && "operators supports only 5 inputs");
        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
    }

    /// Resizes the learnable/statistic inputs to the data's channel count and
    /// the output to the data's dimensions.
    constexpr void computeOutputDims() override final {
        if (!mInputs[0]->empty()) {
            for (std::size_t i = nbDataInputs(); i < nbInputs(); ++i) {
                if (mInputs[i]->size() != mInputs[0]->dims()[1]) {
                    // Fix: guard the tensor that is about to be resized
                    // (mInputs[i], the learnable parameter), not the data
                    // input mInputs[0], against an existing implementation.
                    assert(!mInputs[i]->hasImpl() && "Incompatible size with already implemented learnable parameter");
                    mInputs[i]->resize(std::array<DimSize_t, 1>({mInputs[0]->dims()[1]}));
                }
            }
            mOutput->resize(mInputs[0]->dims());
        }
    }

    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }

    inline Tensor& input(const IOIndex_t inputIdx) const override final {
        assert(inputIdx < 5 && "operators supports only 5 inputs");
        return *(mInputs[inputIdx].get()); }
    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }

    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
        assert(inputIdx < 5 && "BatchNorm Operators supports only 5 inputs");
        return mInputs[inputIdx];
    }
    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
        assert((outputIdx == 0) && "BatchNorm Operator has only 1 output");
        return mOutput;
    }

    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
        assert(inputIdx < 5 && "operators supports only 5 inputs");
        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
    }
    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "operator supports only 1 output");
        return std::static_pointer_cast<Data>(mOutput);
    }

    /// Selects the backend implementation registered under @p name and
    /// propagates it to the output and (workaround) the non-data inputs.
    void setBackend(const std::string &name) {
        mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this);
        mOutput->setBackend(name);
        // FIXME: temporary workaround -- the data input (index 0) is skipped.
        mInputs[1]->setBackend(name);
        mInputs[2]->setBackend(name);
        mInputs[3]->setBackend(name);
        mInputs[4]->setBackend(name);
    }
    /// Propagates @p datatype to the output and (workaround) the non-data inputs.
    void setDatatype(const DataType &datatype) {
        mOutput->setDatatype(datatype);
        // FIXME: temporary workaround -- the data input (index 0) is skipped.
        mInputs[1]->setDatatype(datatype);
        mInputs[2]->setDatatype(datatype);
        mInputs[3]->setDatatype(datatype);
        mInputs[4]->setDatatype(datatype);
    }

    inline IONb_t nbInputs() const noexcept override final { return 5; }
    inline IONb_t nbDataInputs() const noexcept override final { return 1; }
    inline IONb_t nbOutputs() const noexcept override final { return 1; }
};
template <DimSize_t DIM>
inline std::shared_ptr<Node> BatchNorm(const float epsilon = 1.0e-5F,
const float momentum = 0.1F,
const char *name = nullptr) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by BatchNorm, not supported");
auto batchNorm = std::make_shared<Node>(std::make_shared<BatchNorm_Op<static_cast<DimIdx_t>(DIM)>>(epsilon, momentum), name);
addProducer(batchNorm, 1, std::array<DimSize_t,0>({}), "scale");
addProducer(batchNorm, 2, std::array<DimSize_t,0>({}), "shift");
addProducer(batchNorm, 3, std::array<DimSize_t,0>({}), "batch_mean");
addProducer(batchNorm, 4, std::array<DimSize_t,0>({}), "batch_variance");
return batchNorm;
}
} // namespace Aidge
namespace {
// Human-readable names for BatchNormParam values, indexed by enum value order.
// NOTE(review): an anonymous namespace in a header gives every translation unit
// its own copy of this specialization -- presumably intentional for the
// EnumStrings machinery, but worth confirming.
template <>
const char *const EnumStrings<Aidge::BatchNormParam>::data[] = { "Epsilon", "Momentum" };
}
#endif // __AIDGE_CORE_OPERATOR_BATCHNORM_H__
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment