Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • eclipse/aidge/aidge_core
  • hrouis/aidge_core
  • mszczep/aidge_core
  • oantoni/aidge_core
  • cguillon/aidge_core
  • jeromeh/aidge_core
  • axelfarr/aidge_core
  • cmoineau/aidge_core
  • noamzerah/aidge_core
  • lrakotoarivony/aidge_core
  • silvanosky/aidge_core
  • maab05/aidge_core
  • mick94/aidge_core
  • lucaslopez/aidge_core_ll
  • wboussella/aidge_core
  • farnez/aidge_core
  • mnewson/aidge_core
17 results
Show changes
Commits on Source (82)
Showing
with 480 additions and 313 deletions
...@@ -95,60 +95,60 @@ build:ubuntu_python: ...@@ -95,60 +95,60 @@ build:ubuntu_python:
paths: paths:
- venv/ - venv/
build:windows_cpp: # build:windows_cpp:
stage: build # stage: build
needs: [] # needs: []
tags: # tags:
- windows # - windows
image: buildtools # image: buildtools
before_script: # before_script:
# Install Chocolatey # # Install Chocolatey
- Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) # - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
# Install dependencies # # Install dependencies
- choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y # - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
- choco install git -Y # - choco install git -Y
- choco install python -Y # - choco install python -Y
# Update PATH # # Update PATH
- $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User") # - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
script: # script:
- mkdir -p build_cpp # - mkdir -p build_cpp
- mkdir -p install_cpp # - mkdir -p install_cpp
- cd build_cpp # - cd build_cpp
- cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug .. # - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug ..
- cmake --build . -j2 # - cmake --build . -j2
- cmake --install . --config Debug # - cmake --install . --config Debug
artifacts: # artifacts:
expire_in: 1 week # expire_in: 1 week
paths: # paths:
- build_cpp/ # - build_cpp/
- install_cpp/ # - install_cpp/
build:windows_python: # build:windows_python:
stage: build # stage: build
needs: [] # needs: []
tags: # tags:
- windows # - windows
image: buildtools # image: buildtools
before_script: # before_script:
# Install Chocolatey # # Install Chocolatey
- Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) # - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
# Install dependencies # # Install dependencies
- choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y # - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
- choco install git -Y # - choco install git -Y
- choco install python -Y # - choco install python -Y
# Update PATH # # Update PATH
- $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User") # - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
script: # script:
- python -m pip install virtualenv # - python -m pip install virtualenv
- virtualenv venv # - virtualenv venv
- venv\Scripts\Activate.ps1 # - venv\Scripts\Activate.ps1
# Numpy dependancy for unit test # # Numpy dependancy for unit test
- python -m pip install -r requirements.txt # - python -m pip install -r requirements.txt
- python -m pip install . # - python -m pip install .
artifacts: # artifacts:
expire_in: 1 week # expire_in: 1 week
paths: # paths:
- venv/ # - venv/
...@@ -26,23 +26,23 @@ test:ubuntu_python: ...@@ -26,23 +26,23 @@ test:ubuntu_python:
reports: reports:
junit: ${CI_PROJECT_NAME}/xmlrunner-results.xml junit: ${CI_PROJECT_NAME}/xmlrunner-results.xml
test:windows_cpp: # test:windows_cpp:
stage: test # stage: test
needs: ["build:windows_cpp"] # needs: ["build:windows_cpp"]
tags: # tags:
- windows # - windows
image: buildtools # image: buildtools
before_script: # before_script:
# Install Chocolatey # # Install Chocolatey
- Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) # - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
# Install dependencies # # Install dependencies
- choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y # - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
- choco install python -Y # - choco install python -Y
# Update PATH # # Update PATH
- $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User") # - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
script: # script:
- cd build_cpp # - cd build_cpp
- ctest --output-junit ctest-results.xml --output-on-failure # - ctest --output-junit ctest-results.xml --output-on-failure
artifacts: # artifacts:
reports: # reports:
junit: build_cpp/ctest-results.xml # junit: build_cpp/ctest-results.xml
# Version 0.1.1 (January 29, 2024)
[Add] Support for a negative value in the Reshape operator's shape attribute.
# Version 0.1.0 (January 23, 2024) # Version 0.1.0 (January 23, 2024)
Initial release Initial release
...@@ -39,12 +39,6 @@ class test_attributes(unittest.TestCase): ...@@ -39,12 +39,6 @@ class test_attributes(unittest.TestCase):
self.assertEqual(fc_op.get_attr("OutChannels"), out_channels) self.assertEqual(fc_op.get_attr("OutChannels"), out_channels)
self.assertEqual(fc_op.get_attr("NoBias"), nb_bias) self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
def test_matmul(self):
in_channels = 4
out_channels = 8
matmul_op = aidge_core.MatMul(in_channels, out_channels).get_operator()
self.assertEqual(matmul_op.get_attr("OutChannels"), out_channels)
def test_producer_1D(self): def test_producer_1D(self):
dims = [5] dims = [5]
producer_op = aidge_core.Producer(dims).get_operator() producer_op = aidge_core.Producer(dims).get_operator()
......
...@@ -45,9 +45,9 @@ class test_recipies(unittest.TestCase): ...@@ -45,9 +45,9 @@ class test_recipies(unittest.TestCase):
self.assertTrue(all([i in old_nodes for i in graph_view.get_nodes()])) self.assertTrue(all([i in old_nodes for i in graph_view.get_nodes()]))
def test_fuse_matmul_add(self): def test_fuse_matmul_add(self):
matmul0 = aidge_core.MatMul(1, 1, name="MatMul0") matmul0 = aidge_core.MatMul(name="MatMul0")
add0 = aidge_core.Add(2, name="Add0") add0 = aidge_core.Add(2, name="Add0")
matmul1 = aidge_core.MatMul(1, 1, name="MatMul1") matmul1 = aidge_core.MatMul(name="MatMul1")
add1 = aidge_core.Add(2, name="Add1") add1 = aidge_core.Add(2, name="Add1")
graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1]) graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
......
...@@ -14,13 +14,15 @@ ...@@ -14,13 +14,15 @@
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/backend/TensorImpl.hpp" #include "aidge/backend/TensorImpl.hpp"
#include "aidge/backend/StimulusImpl.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp" #include "aidge/backend/cpu/data/TensorImpl.hpp"
#include "aidge/backend/cpu/data/GetCPUPtr.h" #include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/data/Data.hpp" #include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/data/Database.hpp"
#include "aidge/data/DataProvider.hpp"
#include "aidge/graph/Connector.hpp" #include "aidge/graph/Connector.hpp"
#include "aidge/graph/GraphView.hpp" #include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
...@@ -61,6 +63,7 @@ ...@@ -61,6 +63,7 @@
#include "aidge/operator/Sub.hpp" #include "aidge/operator/Sub.hpp"
#include "aidge/operator/Transpose.hpp" #include "aidge/operator/Transpose.hpp"
#include "aidge/scheduler/Scheduler.hpp" #include "aidge/scheduler/Scheduler.hpp"
#include "aidge/stimuli/Stimulus.hpp"
#include "aidge/recipies/Recipies.hpp" #include "aidge/recipies/Recipies.hpp"
...@@ -69,7 +72,5 @@ ...@@ -69,7 +72,5 @@
#include "aidge/utils/DynamicAttributes.hpp" #include "aidge/utils/DynamicAttributes.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
//#include "aidge/utilsParsing/AstNode.hpp"
//#include "aidge/utilsParsing/ParsingToken.hpp"
#endif /* AIDGE_IMPORTS_H_ */ #endif /* AIDGE_IMPORTS_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_BACKEND_STIMULUSIMPL_H_
#define AIDGE_CORE_BACKEND_STIMULUSIMPL_H_
#include <memory>
#include "aidge/data/Tensor.hpp"
namespace Aidge {
/**
 * @brief Base class to implement data loading functions.
 *
 * Concrete backend implementations override load() to materialize one
 * data sample as a Tensor on demand (the data source itself — file,
 * memory, etc. — is backend-specific and not fixed by this interface).
 */
class StimulusImpl {
public:
virtual ~StimulusImpl() noexcept = default;
/**
 * @brief Load the stimulus data.
 * @return std::shared_ptr<Tensor> the loaded sample.
 */
virtual std::shared_ptr<Tensor> load() const = 0;
};
} // namespace Aidge
#endif /* AIDGE_CORE_BACKEND_STIMULUSIMPL_H_ */
...@@ -67,7 +67,10 @@ private: ...@@ -67,7 +67,10 @@ private:
class TensorImpl { class TensorImpl {
public: public:
TensorImpl() = delete; TensorImpl() = delete;
TensorImpl(const char *backend, DeviceIdx_t device, NbElts_t length) : mBackend(backend), mDevice(device), mNbElts(length) {}; TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims) : mBackend(backend), mDevice(device)
{
resize(dims);
};
/** /**
* Return the (backend, device) pair for this implementation. * Return the (backend, device) pair for this implementation.
...@@ -147,8 +150,12 @@ public: ...@@ -147,8 +150,12 @@ public:
/** /**
* Set the size, in number of elements, that must be stored. * Set the size, in number of elements, that must be stored.
*/ */
void resize(NbElts_t length) { virtual void resize(std::vector<DimSize_t> dims) {
mNbElts = length; size_t product = 1;
for (size_t num : dims) {
product *= num;
}
mNbElts = product;
} }
/** /**
......
...@@ -12,13 +12,16 @@ ...@@ -12,13 +12,16 @@
#ifndef AIDGE_CPU_DATA_GETCPUPTR_H_ #ifndef AIDGE_CPU_DATA_GETCPUPTR_H_
#define AIDGE_CPU_DATA_GETCPUPTR_H_ #define AIDGE_CPU_DATA_GETCPUPTR_H_
#include <cstddef>
#include <memory>
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
namespace Aidge { namespace Aidge {
inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) { inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data, const std::size_t offset = 0) {
const auto tensor = std::static_pointer_cast<Tensor>(data); const auto tensor = std::static_pointer_cast<Tensor>(data);
return tensor->getImpl()->hostPtr(tensor->getImplOffset()); return tensor->getImpl()->hostPtr(tensor->getImplOffset() + offset);
} }
} // namespace Aidge } // namespace Aidge
#endif // AIDGE_CPU_DATA_GETCPUPTR_H_ #endif // AIDGE_CPU_DATA_GETCPUPTR_H_
\ No newline at end of file
...@@ -33,7 +33,7 @@ private: ...@@ -33,7 +33,7 @@ private:
public: public:
static constexpr const char *Backend = "cpu"; static constexpr const char *Backend = "cpu";
TensorImpl_cpu(DeviceIdx_t device, NbElts_t length) : TensorImpl(Backend, device, length) {} TensorImpl_cpu(DeviceIdx_t device, std::vector<DimSize_t> dims) : TensorImpl(Backend, device, dims) {}
bool operator==(const TensorImpl &otherImpl) const override final { bool operator==(const TensorImpl &otherImpl) const override final {
const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl); const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
...@@ -47,8 +47,8 @@ public: ...@@ -47,8 +47,8 @@ public:
return i == mNbElts; return i == mNbElts;
} }
static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, NbElts_t length) { static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, std::vector<DimSize_t> dims) {
return std::make_shared<TensorImpl_cpu<T>>(device, length); return std::make_shared<TensorImpl_cpu<T>>(device, dims);
} }
inline std::size_t scalarSize() const noexcept override final { return sizeof(T); } inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
...@@ -183,10 +183,18 @@ static Registrar<Tensor> registrarTensorImpl_cpu_Float32( ...@@ -183,10 +183,18 @@ static Registrar<Tensor> registrarTensorImpl_cpu_Float32(
{"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create); {"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Float16( static Registrar<Tensor> registrarTensorImpl_cpu_Float16(
{"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create); {"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
{"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int64( static Registrar<Tensor> registrarTensorImpl_cpu_Int64(
{"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<long>::create); {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<long>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
{"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int16(
{"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
{"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int8(
{"cpu", DataType::Int8}, Aidge::TensorImpl_cpu<int8_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt8(
{"cpu", DataType::UInt8}, Aidge::TensorImpl_cpu<uint8_t>::create);
} // namespace } // namespace
} // namespace Aidge } // namespace Aidge
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_DATA_DATAPROVIDER_H_
#define AIDGE_CORE_DATA_DATAPROVIDER_H_
#include <cstddef> // std::size_t
#include <memory> // std::shared_ptr
#include <string>
#include <vector> // std::vector
#include "aidge/data/Database.hpp"
#include "aidge/data/Data.hpp"
namespace Aidge {
/**
 * @brief Data Provider. Takes in a database and composes batches by fetching data from the given database.
 * @todo Implement a "drop last batch" option. Currently the last batch is returned with fewer elements.
 * @todo Implement readRandomBatch to compose batches from the database with a random sampling strategy. Necessary for training.
 */
class DataProvider {
private:
// Dataset providing the data to the dataProvider
const Database& mDatabase;
// Number of modalities per database item (see Database::getNbModalities)
const std::size_t mNumberModality;
// Per-modality metadata — presumably cached from the database by the
// constructor (ctor body not visible here); confirm against DataProvider.cpp
std::vector<std::vector<std::size_t>> mDataSizes;
std::vector<std::string> mDataBackends;
std::vector<DataType> mDataTypes;
// Desired size of the produced batches
const std::size_t mBatchSize;
public:
/**
 * @brief Constructor of Data Provider.
 * @param database database from which to load the data.
 * @param batchSize number of data samples per batch.
 */
DataProvider(const Database& database, const std::size_t batchSize);
public:
/**
 * @brief Create a batch for each data modality in the database. The returned batch contains the data as sorted in the database.
 * @param startIndex the starting index in the database to start the batch from.
 * @return a vector of tensors. Each tensor is a batch corresponding to one modality.
 */
std::vector<std::shared_ptr<Tensor>> readBatch(const std::size_t startIndex) const;
};
} // namespace Aidge
#endif /* AIDGE_CORE_DATA_DATAPROVIDER_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_DATA_DATABASE_H_
#define AIDGE_CORE_DATA_DATABASE_H_
#include <cstddef>
#include <memory>
#include <vector>
#include "aidge/data/Tensor.hpp"
namespace Aidge {
/**
 * @brief Abstract class representing a map from an index to data.
 * All databases should inherit from this class. All subclasses must override
 * :cpp:function:`Database::getItem` to fetch the data at a given index, plus
 * the two size accessors below.
 */
class Database {
public:
Database() = default;
virtual ~Database() noexcept = default;
/**
 * @brief Fetch an item of the database.
 * @param index index of the item.
 * @return vector of data (one Tensor per modality) mapped to index.
 */
virtual std::vector<std::shared_ptr<Tensor>> getItem(const std::size_t index) const = 0;
/**
 * @brief Get the number of items in the database.
 *
 * @return std::size_t
 */
virtual std::size_t getLen() const noexcept = 0;
/**
 * @brief Get the number of modalities in one database item.
 *
 * @return std::size_t
 */
virtual std::size_t getNbModalities() const noexcept = 0;
};
} // namespace Aidge
#endif /* AIDGE_CORE_DATA_DATABASE_H_ */
...@@ -32,7 +32,7 @@ namespace Aidge { ...@@ -32,7 +32,7 @@ namespace Aidge {
* Contains a pointer to an actual contiguous implementation of data. * Contains a pointer to an actual contiguous implementation of data.
*/ */
class Tensor : public Data, class Tensor : public Data,
public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)> { public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> {
private: private:
DataType mDataType; /** enum to specify data type. */ DataType mDataType; /** enum to specify data type. */
std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */ std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */
...@@ -59,11 +59,25 @@ class Tensor : public Data, ...@@ -59,11 +59,25 @@ class Tensor : public Data,
// ctor // ctor
} }
/**
* @brief Construct a new Tensor object from dimensions.
*
* @param dims dimensions of the tensor
* @param dataType datatype of the tensor (default = DataType::Float32)
*/
Tensor(const std::vector<DimSize_t>& dims, DataType dataType = DataType::Float32)
: Data(Type),
mDataType(dataType),
mDims(dims)
{
computeSize();
}
/** /**
* @brief Construct a new Tensor object from another one (shallow copy). * @brief Construct a new Tensor object from another one (shallow copy).
* Data memory is not copied, but shared between the new Tensor and the * Data memory is not copied, but shared between the new Tensor and the
* initial one. * initial one.
* *
* @param otherTensor * @param otherTensor
*/ */
Tensor(const Tensor&) = default; Tensor(const Tensor&) = default;
...@@ -78,7 +92,7 @@ class Tensor : public Data, ...@@ -78,7 +92,7 @@ class Tensor : public Data,
newTensor.makeContiguous(); newTensor.makeContiguous();
} }
else { else {
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mSize); std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
newImpl->copy(mImpl->rawPtr(mImplOffset), mSize); newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
newTensor.setImpl(newImpl); newTensor.setImpl(newImpl);
} }
...@@ -96,7 +110,7 @@ class Tensor : public Data, ...@@ -96,7 +110,7 @@ class Tensor : public Data,
mDataType(NativeType<T>::type), mDataType(NativeType<T>::type),
mDims({SIZE_0}), mDims({SIZE_0}),
mStrides({1}), mStrides({1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0)), mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
mSize(SIZE_0) { mSize(SIZE_0) {
mImpl->copyFromHost(&arr.data[0], SIZE_0); mImpl->copyFromHost(&arr.data[0], SIZE_0);
} }
...@@ -105,7 +119,7 @@ class Tensor : public Data, ...@@ -105,7 +119,7 @@ class Tensor : public Data,
constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) { constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
resize({SIZE_0}); resize({SIZE_0});
if (!mImpl) { if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0); mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0});
} }
mImpl->copyFromHost(&arr.data[0], SIZE_0, mImplOffset); mImpl->copyFromHost(&arr.data[0], SIZE_0, mImplOffset);
return *this; return *this;
...@@ -123,7 +137,7 @@ class Tensor : public Data, ...@@ -123,7 +137,7 @@ class Tensor : public Data,
mDataType(NativeType<T>::type), mDataType(NativeType<T>::type),
mDims({SIZE_0, SIZE_1}), mDims({SIZE_0, SIZE_1}),
mStrides({SIZE_1, 1}), mStrides({SIZE_1, 1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1)), mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1})),
mSize(SIZE_0 * SIZE_1) { mSize(SIZE_0 * SIZE_1) {
mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1); mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
} }
...@@ -132,7 +146,7 @@ class Tensor : public Data, ...@@ -132,7 +146,7 @@ class Tensor : public Data,
constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) { constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
resize({SIZE_0, SIZE_1}); resize({SIZE_0, SIZE_1});
if (!mImpl) { if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1); mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1});
} }
mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1, mImplOffset); mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1, mImplOffset);
return *this; return *this;
...@@ -151,7 +165,7 @@ class Tensor : public Data, ...@@ -151,7 +165,7 @@ class Tensor : public Data,
mDataType(NativeType<T>::type), mDataType(NativeType<T>::type),
mDims({SIZE_0, SIZE_1, SIZE_2}), mDims({SIZE_0, SIZE_1, SIZE_2}),
mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}), mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2)), mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2})),
mSize(SIZE_0 * SIZE_1 * SIZE_2) { mSize(SIZE_0 * SIZE_1 * SIZE_2) {
mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2); mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
} }
...@@ -160,7 +174,7 @@ class Tensor : public Data, ...@@ -160,7 +174,7 @@ class Tensor : public Data,
constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) { constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
resize({SIZE_0, SIZE_1, SIZE_2}); resize({SIZE_0, SIZE_1, SIZE_2});
if (!mImpl) { if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2); mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2});
} }
mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2, mImplOffset); mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2, mImplOffset);
return *this; return *this;
...@@ -180,7 +194,7 @@ class Tensor : public Data, ...@@ -180,7 +194,7 @@ class Tensor : public Data,
mDataType(NativeType<T>::type), mDataType(NativeType<T>::type),
mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}), mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}), mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3)), mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3) { mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3) {
mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3); mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
} }
...@@ -189,7 +203,7 @@ class Tensor : public Data, ...@@ -189,7 +203,7 @@ class Tensor : public Data,
constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) { constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3}); resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
if (!mImpl) { if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3); mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3});
} }
mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3, mImplOffset); mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3, mImplOffset);
return *this; return *this;
...@@ -250,7 +264,7 @@ class Tensor : public Data, ...@@ -250,7 +264,7 @@ class Tensor : public Data,
if (mImpl->device() != std::make_pair(name, device)) { if (mImpl->device() != std::make_pair(name, device)) {
// Backend change: create new impl, copy from old to new and replace // Backend change: create new impl, copy from old to new and replace
// impl // impl
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mImpl->size()); std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
if (copyFrom) { if (copyFrom) {
newImpl->copyFrom(*mImpl, mImpl->size(), mImplOffset, 0); newImpl->copyFrom(*mImpl, mImpl->size(), mImplOffset, 0);
} }
...@@ -258,7 +272,7 @@ class Tensor : public Data, ...@@ -258,7 +272,7 @@ class Tensor : public Data,
} }
} }
else { else {
mImpl = Registrar<Tensor>::create({name, mDataType})(device, mSize); mImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
} }
} }
...@@ -288,7 +302,7 @@ class Tensor : public Data, ...@@ -288,7 +302,7 @@ class Tensor : public Data,
*/ */
void setDataType(const DataType dt, bool copyCast = true) { void setDataType(const DataType dt, bool copyCast = true) {
if (mImpl && (dataType() != dt)) { if (mImpl && (dataType() != dt)) {
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(mImpl->device().second, mImpl->size()); std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(mImpl->device().second, mDims);
if (copyCast) { if (copyCast) {
newImpl->copyCast(mImpl->rawPtr(mImplOffset), mDataType, mImpl->size()); newImpl->copyCast(mImpl->rawPtr(mImplOffset), mDataType, mImpl->size());
} }
...@@ -306,7 +320,7 @@ class Tensor : public Data, ...@@ -306,7 +320,7 @@ class Tensor : public Data,
/** /**
* @brief Set the Impl object * @brief Set the Impl object
* *
* @param impl New impl shared pointer * @param impl New impl shared pointer
* @param implOffset Storage offset in this new impl for this Tensor * @param implOffset Storage offset in this new impl for this Tensor
*/ */
...@@ -375,7 +389,7 @@ class Tensor : public Data, ...@@ -375,7 +389,7 @@ class Tensor : public Data,
* @param dims New dimensions * @param dims New dimensions
*/ */
template <std::array<DimSize_t, 1>::size_type DIM> // deducing std::array size_type and declaring DIM accordingly template <std::array<DimSize_t, 1>::size_type DIM> // deducing std::array size_type and declaring DIM accordingly
void resize(const std::array<DimSize_t, DIM> &dims) { inline void resize(const std::array<DimSize_t, DIM> &dims) {
resize(std::vector<DimSize_t>(dims.begin(), dims.end())); resize(std::vector<DimSize_t>(dims.begin(), dims.end()));
} }
...@@ -390,48 +404,7 @@ class Tensor : public Data, ...@@ -390,48 +404,7 @@ class Tensor : public Data,
* @param dims New dimensions * @param dims New dimensions
* @param strides Stride of the tensor (if not specified, "nested" stride is used) * @param strides Stride of the tensor (if not specified, "nested" stride is used)
*/ */
void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>()) { void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>());
bool checkContiguous = true;
if (strides.empty()) {
strides.resize(dims.size());
size_t expectedStride = 1;
for (int dim = dims.size() - 1; dim >= 0; --dim) {
strides[dim] = expectedStride;
expectedStride*= dims[dim];
}
checkContiguous = false;
}
else {
AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
}
if (mImpl.use_count() > 1) {
// Here we could also create a new storage for this tensor in this case
// But, is it more likely that the user really wants this, or that he did a mistake?
AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
}
else {
mDims = dims;
mStrides = strides;
mContiguous = true;
if (checkContiguous) {
size_t expectedStride = 1;
for (int dim = dims.size() - 1; dim >= 0; --dim) {
if (strides[dim] != expectedStride) {
mContiguous = false;
break;
}
expectedStride*= dims[dim];
}
}
computeSize();
if (mImpl) {
mImpl->resize(mSize);
}
}
}
/** /**
* @brief Return if the Tensor object has at leastone element. * @brief Return if the Tensor object has at leastone element.
...@@ -465,95 +438,7 @@ class Tensor : public Data, ...@@ -465,95 +438,7 @@ class Tensor : public Data,
set<expectedType>(getStorageIdx(coordIdx), value); set<expectedType>(getStorageIdx(coordIdx), value);
} }
std::string toString() const;
std::string toString() const {
AIDGE_ASSERT(mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) || (mImpl->hostPtr() != nullptr)), "tensor should have a valid host pointer");
// TODO: move lambda elsewhere?
auto ptrToString = [](DataType dt, void* ptr, size_t idx) {
switch (dt) {
case DataType::Float64:
return std::to_string(static_cast<double*>(ptr)[idx]);
case DataType::Float32:
return std::to_string(static_cast<float*>(ptr)[idx]);
case DataType::Float16:
return std::to_string(static_cast<half_float::half*>(ptr)[idx]);
case DataType::Int8:
return std::to_string(static_cast<int8_t*>(ptr)[idx]);
case DataType::Int16:
return std::to_string(static_cast<int16_t*>(ptr)[idx]);
case DataType::Int32:
return std::to_string(static_cast<int32_t*>(ptr)[idx]);
case DataType::Int64:
return std::to_string(static_cast<int64_t*>(ptr)[idx]);
case DataType::UInt8:
return std::to_string(static_cast<uint8_t*>(ptr)[idx]);
case DataType::UInt16:
return std::to_string(static_cast<uint16_t*>(ptr)[idx]);
case DataType::UInt32:
return std::to_string(static_cast<uint32_t*>(ptr)[idx]);
case DataType::UInt64:
return std::to_string(static_cast<uint64_t*>(ptr)[idx]);
default:
AIDGE_ASSERT(true, "unsupported type to convert to string");
}
return std::string("?"); // To make Clang happy
};
if (dims().empty()) { return ptrToString(mDataType, mImpl->hostPtr(), 0); }
std::string res;
std::size_t dim = 0;
std::size_t counter = 0;
if (nbDims()>=2) {
std::vector<std::size_t> dimVals(nbDims(), 0);
res += "{\n";
while (counter < mSize) {
std::string spaceString = std::string((dim+1)<<1,' ');
if (dim < nbDims()-2) {
if (dimVals[dim] == 0) {
res += spaceString + "{\n";
++dim;
} else if (dimVals[dim] < static_cast<std::size_t>(dims()[dim])) {
res += spaceString + "},\n" + spaceString + "{\n";
++dim;
} else {
res += spaceString + "}\n";
dimVals[dim--] = 0;
dimVals[dim]++;
}
} else {
for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) {
res += spaceString + "{";
for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) {
res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + ",";
}
res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + "}";
if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) {
res += ",";
}
res += "\n";
}
if (dim == 0) {
break;
}
dimVals[dim--] = 0;
dimVals[dim]++;
}
}
for(int i = static_cast<int>(dim); i > 0; --i) {
res += std::string((dim+1)<<1,' ') + "}\n";
}
} else {
res += "{";
for (DimSize_t j = 0; j < dims()[0]; ++j) {
res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) + ((j < dims()[0]-1) ? "," : " ");
}
}
res += "}";
return res;
}
inline void print() const { printf("%s\n", toString().c_str()); } inline void print() const { printf("%s\n", toString().c_str()); }
...@@ -621,7 +506,7 @@ class Tensor : public Data, ...@@ -621,7 +506,7 @@ class Tensor : public Data,
} }
/** /**
* Returns a sub-tensor with one or more dimension less. * @brief Returns a sub-tensor with one or more dimension less.
* For instance, t.extract({1}) on a CHW tensor will return the HW tensor * For instance, t.extract({1}) on a CHW tensor will return the HW tensor
* of channel #1. * of channel #1.
* Likewise, t.extract({0, 1}) on a NCHW tensor will return the HW tensor * Likewise, t.extract({0, 1}) on a NCHW tensor will return the HW tensor
...@@ -631,15 +516,15 @@ class Tensor : public Data, ...@@ -631,15 +516,15 @@ class Tensor : public Data,
* tensor is returned. * tensor is returned.
* It current tensor was contiguous, the returned tensor is garanteed to be * It current tensor was contiguous, the returned tensor is garanteed to be
* contiguous as well. * contiguous as well.
* *
* @param coordIdx Coordinates of the sub-tensor to extract * @param coordIdx Coordinates of the sub-tensor to extract
* @return Tensor Sub-tensor. * @return Tensor Sub-tensor.
*/ */
Tensor extract(const std::vector<std::size_t>& coordIdx) const; Tensor extract(const std::vector<std::size_t>& coordIdx) const;
/** /**
* Returns a sub-tensor at some coordinate and with some dimension. * @brief Returns a sub-tensor at some coordinate and with some dimension.
* *
* @param coordIdx First coordinates of the sub-tensor to extract * @param coordIdx First coordinates of the sub-tensor to extract
* @param dims Dimensions of the sub-tensor to extract * @param dims Dimensions of the sub-tensor to extract
* @return Tensor Sub-tensor. * @return Tensor Sub-tensor.
...@@ -647,7 +532,7 @@ class Tensor : public Data, ...@@ -647,7 +532,7 @@ class Tensor : public Data,
Tensor extract(const std::vector<std::size_t>& coordIdx, const std::vector<std::size_t>& dims) const; Tensor extract(const std::vector<std::size_t>& coordIdx, const std::vector<std::size_t>& dims) const;
/** /**
* Make the tensor's storage contiguous, if it is not already the case. * @brief Make the tensor's storage contiguous, if it is not already the case.
* If not contiguous, a new memory space is allocated. * If not contiguous, a new memory space is allocated.
*/ */
void makeContiguous(); void makeContiguous();
...@@ -704,7 +589,7 @@ class Tensor : public Data, ...@@ -704,7 +589,7 @@ class Tensor : public Data,
* The data type, backend and device stay the same. * The data type, backend and device stay the same.
* @param fallback A shared_ptr to Tensor ready to be overwritten if necessary. * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
* The shared_ptr does not need to be initialized. No new memory allocation * The shared_ptr does not need to be initialized. No new memory allocation
* will occur if fallback has already been allocated with the right * will occur if fallback has already been allocated with the right
* type/size/device. * type/size/device.
* @return Reference to either itself or to fallback. * @return Reference to either itself or to fallback.
*/ */
...@@ -782,10 +667,10 @@ class Tensor : public Data, ...@@ -782,10 +667,10 @@ class Tensor : public Data,
} }
/** /**
* Return a reference to a Tensor on desired data type and backend/device: * @brief Return a reference to a Tensor on desired data type and backend/device:
* - itself, if already with the right characteristics; * - itself, if already with the right characteristics;
* - the provided Tensor, overwritten with the right characteristics. * - the provided Tensor, overwritten with the right characteristics.
* NOTE: no data is copy-casted. If it was so in a previous refCastFrom() on * @note no data is copy-casted. If it was so in a previous refCastFrom() on
* the same fallback, it remains valid, otherwise, data is invalid. * the same fallback, it remains valid, otherwise, data is invalid.
* @param fallback A shared_ptr to Tensor ready to be overwritten if necessary. * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
* The shared_ptr does not need to be initialized. No new memory allocation * The shared_ptr does not need to be initialized. No new memory allocation
...@@ -800,11 +685,11 @@ class Tensor : public Data, ...@@ -800,11 +685,11 @@ class Tensor : public Data,
const Tensor& ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device = 0) const; const Tensor& ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device = 0) const;
/** /**
* Return a reference to a Tensor with same characteristics * @brief Return a reference to a Tensor with same characteristics
* (data type, backend/device) as targetReqs Tensor: * (data type, backend/device) as targetReqs Tensor:
* - itself, if already with the right characteristics; * - itself, if already with the right characteristics;
* - the provided Tensor, overwritten with the right characteristics. * - the provided Tensor, overwritten with the right characteristics.
* NOTE: no data is copy-casted. If it was so in a previous refCastFrom() on * @note no data is copy-casted. If it was so in a previous refCastFrom() on
* the same fallback, it remains valid, otherwise, data is invalid. * the same fallback, it remains valid, otherwise, data is invalid.
* @param fallback A shared_ptr to Tensor ready to be overwritten if necessary. * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
* The shared_ptr does not need to be initialized. No new memory allocation * The shared_ptr does not need to be initialized. No new memory allocation
...@@ -819,7 +704,11 @@ class Tensor : public Data, ...@@ -819,7 +704,11 @@ class Tensor : public Data,
} }
private: private:
///\bug not protected against overflow /**
* @brief Compute the number of elements in the Tensor.
* @note If dimensions are not empty, they are multiplied to get the total number
* of elements. Else, the Tensor represents a scalar and contains a single element.
*/
void computeSize() { void computeSize() {
mSize = std::accumulate(mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>()); mSize = std::accumulate(mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
} }
......
...@@ -209,7 +209,7 @@ public: ...@@ -209,7 +209,7 @@ public:
* @brief Compute dimensions of input/output Tensors for each Operator of the * @brief Compute dimensions of input/output Tensors for each Operator of the
* GraphView object's Nodes. * GraphView object's Nodes.
*/ */
void forwardDims(); void forwardDims(const std::vector<std::vector<DimSize_t>> dims = {});
/** @brief Set the same backend for each Operator of the GraphView object's Nodes. */ /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
void setBackend(const std::string &backend, DeviceIdx_t device = 0); void setBackend(const std::string &backend, DeviceIdx_t device = 0);
......
...@@ -59,7 +59,7 @@ public: ...@@ -59,7 +59,7 @@ public:
// Helper functions that can be used with setComputeOutputDims(): // Helper functions that can be used with setComputeOutputDims():
static const ComputeDimsFunc Identity; static const ComputeDimsFunc Identity;
void setComputeOutputDims(ComputeDimsFunc func) { inline void setComputeOutputDims(ComputeDimsFunc func) {
mComputeOutputDims = func; mComputeOutputDims = func;
} }
......
...@@ -12,49 +12,32 @@ ...@@ -12,49 +12,32 @@
#ifndef AIDGE_CORE_OPERATOR_MATMUL_H_ #ifndef AIDGE_CORE_OPERATOR_MATMUL_H_
#define AIDGE_CORE_OPERATOR_MATMUL_H_ #define AIDGE_CORE_OPERATOR_MATMUL_H_
#include <array>
#include <cmath>
#include <numeric>
#include <memory> #include <memory>
#include <string>
#include <vector> #include <vector>
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
namespace Aidge { namespace Aidge {
enum class MatMulAttr { OutChannels };
class MatMul_Op : public OperatorTensor, class MatMul_Op : public OperatorTensor,
public Registrable<MatMul_Op, public Registrable<MatMul_Op,
std::string, std::string,
std::unique_ptr<OperatorImpl>(const MatMul_Op &)>, std::unique_ptr<OperatorImpl>(const MatMul_Op &)> {
public StaticAttributes<MatMulAttr, DimSize_t> {
public: public:
static const std::string Type; static const std::string Type;
MatMul_Op() = delete; MatMul_Op() : OperatorTensor(Type, 2, 0, 1) {}
using Attributes_ = StaticAttributes<MatMulAttr, DimSize_t>;
template <MatMulAttr e> using attr = typename Attributes_::template attr<e>;
MatMul_Op(DimSize_t out_channels)
: OperatorTensor(Type, 1, 1, 1),
Attributes_(
attr<MatMulAttr::OutChannels>(out_channels))
{}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
MatMul_Op(const MatMul_Op& op) MatMul_Op(const MatMul_Op& op) : OperatorTensor(op)
: OperatorTensor(op),
Attributes_(op)
{ {
mImpl = op.mImpl ? Registrar<MatMul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; mImpl = op.mImpl ? Registrar<MatMul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
} }
...@@ -63,50 +46,40 @@ public: ...@@ -63,50 +46,40 @@ public:
* @brief Clone the operator using its copy-constructor. * @brief Clone the operator using its copy-constructor.
* @see Operator::MatMul_Op * @see Operator::MatMul_Op
*/ */
std::shared_ptr<Operator> clone() const override { std::shared_ptr<Operator> clone() const override final {
return std::make_shared<MatMul_Op>(*this); return std::make_shared<MatMul_Op>(*this);
} }
/**
void computeOutputDims() override final { * @brief Compute dimensions for the output Tensor following the same rules as
bool associated = true; * numpy.matmul.
for (IOIndex_t i = 0; i < nbInputs(); ++i) { * @note - Both inputs are 2-D Tensors: classic matrix multiplication
if (!getInput(i)) { * @note - Either input is N-D with N > 2: it is treated as a stack of matrices residing
AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor"); * in the last two indexes and broadcast accordingly.
} * @note - First input is 1-D: it is promoted to a matrix by prepending a 1 to its
associated &= !(getInput(i)->empty()); * dimensions (D) -> (1,D). The prepended 1 is removed after computation.
} * @note - Second input is 1-D: it is promoted to a matrix by appending a 1 to its
if (associated) { * dimensions (D) -> (D,1). The appended 1 is removed after computation.
// <batch, OutChannels> */
mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<MatMulAttr::OutChannels>()}); void computeOutputDims() override final;
}
}
void setBackend(const std::string& name, DeviceIdx_t device = 0) override { void setBackend(const std::string& name, DeviceIdx_t device = 0) override final {
mImpl = Registrar<MatMul_Op>::create(name)(*this); mImpl = Registrar<MatMul_Op>::create(name)(*this);
mOutputs[0]->setBackend(name, device); mOutputs[0]->setBackend(name, device);
} }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName() {
return {"data_input", "weight"}; return {"data_input1", "data_input2"};
} }
static const std::vector<std::string> getOutputsName(){ static const std::vector<std::string> getOutputsName() {
return {"data_output"}; return {"data_output"};
} }
}; };
inline std::shared_ptr<Node> MatMul(DimSize_t inChannels, DimSize_t outChannels, const std::string& name = "") { inline std::shared_ptr<Node> MatMul(const std::string& name = "") {
// FIXME: properly handle default w initialization in every cases return std::make_shared<Node>(std::make_shared<MatMul_Op>(), name);
auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(outChannels), name);
addProducer(matmul, 1, {outChannels, inChannels}, "w");
return matmul;
} }
} // namespace Aidge } // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::MatMulAttr>::data[] = {"OutChannels"};
}
#endif /* AIDGE_CORE_OPERATOR__MATMUL_H_ */ #endif /* AIDGE_CORE_OPERATOR__MATMUL_H_ */
...@@ -118,9 +118,7 @@ public: ...@@ -118,9 +118,7 @@ public:
* @brief Set the a new OperatorImpl to the Operator * @brief Set the a new OperatorImpl to the Operator
* *
*/ */
void setImpl(std::shared_ptr<OperatorImpl> impl){ inline void setImpl(std::shared_ptr<OperatorImpl> impl) { mImpl = impl; }
mImpl = impl;
}
/** /**
* @brief Minimum amount of data from a specific input for one computation pass. * @brief Minimum amount of data from a specific input for one computation pass.
......
...@@ -18,6 +18,8 @@ ...@@ -18,6 +18,8 @@
#include <string> #include <string>
#include <vector> #include <vector>
#include "aidge/data/Tensor.hpp"
namespace Aidge { namespace Aidge {
class Node; class Node;
class GraphView; class GraphView;
...@@ -49,11 +51,17 @@ public: ...@@ -49,11 +51,17 @@ public:
mScheduling.clear(); mScheduling.clear();
mStaticSchedule.clear(); mStaticSchedule.clear();
} }
/**
* @brief Place the data tensors inside in the data input tensor of the graphView. In case of multiple data input tensors, they are mapped to producers in the order given by the graph.
*
* @param data data input tensors
*/
void connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data);
/** /**
* @brief Run the provided Computational Graph with a batch of data * @brief Run the provided Computational Graph with a batch of data
*/ */
void forward(bool forwardDims = true, bool verbose = false); void forward(bool forwardDims = true, bool verbose = false, std::vector<std::shared_ptr<Aidge::Tensor>> data = {});
/** /**
* @brief Save in a Markdown file the order of layers execution. * @brief Save in a Markdown file the order of layers execution.
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_STIMULI_STIMULUS_H_
#define AIDGE_CORE_STIMULI_STIMULUS_H_
#include <string>
#include <memory>
#include <tuple>
#include "aidge/backend/StimulusImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/ErrorHandling.hpp"
namespace Aidge {
/**
 * @brief Stimulus. A class wrapping a data sample. Stimulus has two functioning modes. The first mode enables to load data samples from a dataPath and optionally store the data in-memory. The second mode enables to store a data sample that was already loaded in memory.
 * @details When Stimulus is used in the first mode, the loading function is determined automatically based on the backend and the file extension.
 */
class Stimulus : public Registrable<Stimulus, std::tuple<std::string, std::string>, std::unique_ptr<StimulusImpl>(const std::string&)> {
private:
    /// Stimulus data path (empty when the sample was provided as an in-memory tensor).
    const std::string mDataPath;
    /// File extension of mDataPath; second half of the Registrar key used to pick a loader.
    const std::string mFileExtension;
    /// When true, keep the data in memory once loaded.
    bool mLoadDataInMemory;

    /// Stimulus data ptr (set at construction in in-memory mode, or filled by load()).
    std::shared_ptr<Tensor> mData;

    // Implementation of the Stimulus (loader created by setBackend(), absent in in-memory mode).
    std::unique_ptr<StimulusImpl> mImpl;

public:
    // A Stimulus must either wrap a tensor or point at a data file; no empty state.
    Stimulus() = delete;

    /**
     * @brief Construct a new Stimulus object based on a tensor that is already loaded in memory.
     * @note mDataPath and mFileExtension are left empty: no loader is ever created in this mode.
     * @param data the data tensor.
     */
    Stimulus(const std::shared_ptr<Tensor> data)
    : mLoadDataInMemory(true),
      mData(data)
    {
        // ctor
    }

    /**
     * @brief Construct a new Stimulus object based on a dataPath to load the data.
     * @details The file extension (text after the last '.') is extracted here and later
     * combined with a backend name to select the loading implementation.
     * @param dataPath path to the data to be loaded.
     * @param loadDataInMemory when true, keep the data in memory once loaded
     */
    Stimulus(const std::string& dataPath, bool loadDataInMemory = false)
    : mDataPath(dataPath),
      mFileExtension(dataPath.substr(dataPath.find_last_of(".") + 1)),
      mLoadDataInMemory(loadDataInMemory)
    {
        // A path without an extension cannot be mapped to a loader implementation.
        AIDGE_ASSERT((dataPath.find_last_of(".") != std::string::npos), "Cannot find extension");
    }

    /**
     * @brief Construct a new Stimulus object copied from another one.
     * @param otherStimulus
     */
    Stimulus(const Stimulus& otherStimulus)
    : mDataPath(otherStimulus.mDataPath),
      mFileExtension(otherStimulus.mFileExtension),
      mLoadDataInMemory(otherStimulus.mLoadDataInMemory),
      mData(otherStimulus.mData)
    {
        if (otherStimulus.mImpl) {
            // NOTE(review): the backend name is hardcoded to "opencv" here, while
            // setBackend() accepts any registered backend. Copying a Stimulus whose
            // loader was created with a different backend would silently switch it
            // to "opencv". The backend name is not stored as a member, so it cannot
            // be recovered from otherStimulus.mImpl -- confirm whether it should be
            // kept alongside mFileExtension and reused here.
            mImpl = Registrar<Stimulus>::create({"opencv", mFileExtension})(mDataPath);
        }
    }

    // Out-of-line: Stimulus is a polymorphic base (load() is virtual).
    virtual ~Stimulus();

public:
    /**
     * @brief Set the backend of the stimuli associated load implementation
     * @details Create and initialize an implementation, selected by the
     * (backend name, file extension) pair in the Registrar.
     * @param name name of the backend.
     */
    inline void setBackend(const std::string &name) {
        mImpl = Registrar<Stimulus>::create({name, mFileExtension})(mDataPath);
    }

    /**
     * @brief Get the data tensor associated to the stimuli. The data is either loaded from a datapath or passed from an in-memory tensor.
     *
     * @return std::shared_ptr<Tensor> the data tensor.
     */
    virtual std::shared_ptr<Tensor> load();
};
} // namespace Aidge
#endif // AIDGE_CORE_STIMULI_STIMULUS_H_
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "aidge/data/DataProvider.hpp"
#include "aidge/data/Database.hpp"
namespace py = pybind11;
namespace Aidge {
// Bind Aidge::DataProvider to Python: serves a Database's samples in batches.
void init_DataProvider(py::module& m){

    py::class_<DataProvider, std::shared_ptr<DataProvider>>(m, "DataProvider")
          // DataProvider(database, batchSize): wrap `database` (held by reference)
          // and serve fixed-size batches from it.
          .def(py::init<Database&, std::size_t>(), py::arg("database"), py::arg("batchSize"))
          // read_batch(start_index): assemble and return the batch beginning at
          // `start_index`; the raw string below becomes the Python docstring.
          .def("read_batch", &DataProvider::readBatch, py::arg("start_index"),
          R"mydelimiter(
          Return a batch of each data modality.

          :param start_index: Database starting index to read the batch from
          :type start_index: int
          )mydelimiter");
}
}