Skip to content
Snippets Groups Projects
Commit 89b643dd authored by Cyril Moineau's avatar Cyril Moineau
Browse files

Merge branch 'PyRegistrar' of gitlab.eclipse.org:eclipse/aidge/aidge_core into PyRegistrar

parents 6c5e9c81 5b78b976
No related branches found
No related tags found
No related merge requests found
Showing
with 394 additions and 339 deletions
......@@ -95,60 +95,60 @@ build:ubuntu_python:
paths:
- venv/
build:windows_cpp:
stage: build
needs: []
tags:
- windows
image: buildtools
before_script:
# Install Chocolatey
- Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
# Install dependencies
- choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
- choco install git -Y
- choco install python -Y
# Update PATH
- $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
script:
- mkdir -p build_cpp
- mkdir -p install_cpp
- cd build_cpp
- cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug ..
- cmake --build . -j2
- cmake --install . --config Debug
artifacts:
expire_in: 1 week
paths:
- build_cpp/
- install_cpp/
build:windows_python:
stage: build
needs: []
tags:
- windows
image: buildtools
before_script:
# Install Chocolatey
- Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
# Install dependencies
- choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
- choco install git -Y
- choco install python -Y
# Update PATH
- $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
script:
- python -m pip install virtualenv
- virtualenv venv
- venv\Scripts\Activate.ps1
# NumPy dependency for unit test
- python -m pip install -r requirements.txt
- python -m pip install .
artifacts:
expire_in: 1 week
paths:
- venv/
# build:windows_cpp:
# stage: build
# needs: []
# tags:
# - windows
# image: buildtools
# before_script:
# # Install Chocolatey
# - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
# # Install dependencies
# - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
# - choco install git -Y
# - choco install python -Y
# # Update PATH
# - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
# script:
# - mkdir -p build_cpp
# - mkdir -p install_cpp
# - cd build_cpp
# - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug ..
# - cmake --build . -j2
# - cmake --install . --config Debug
# artifacts:
# expire_in: 1 week
# paths:
# - build_cpp/
# - install_cpp/
# build:windows_python:
# stage: build
# needs: []
# tags:
# - windows
# image: buildtools
# before_script:
# # Install Chocolatey
# - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
# # Install dependencies
# - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
# - choco install git -Y
# - choco install python -Y
# # Update PATH
# - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
# script:
# - python -m pip install virtualenv
# - virtualenv venv
# - venv\Scripts\Activate.ps1
# # NumPy dependency for unit test
# - python -m pip install -r requirements.txt
# - python -m pip install .
# artifacts:
# expire_in: 1 week
# paths:
# - venv/
......@@ -26,23 +26,23 @@ test:ubuntu_python:
reports:
junit: ${CI_PROJECT_NAME}/xmlrunner-results.xml
test:windows_cpp:
stage: test
needs: ["build:windows_cpp"]
tags:
- windows
image: buildtools
before_script:
# Install Chocolatey
- Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
# Install dependencies
- choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
- choco install python -Y
# Update PATH
- $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
script:
- cd build_cpp
- ctest --output-junit ctest-results.xml --output-on-failure
artifacts:
reports:
junit: build_cpp/ctest-results.xml
# test:windows_cpp:
# stage: test
# needs: ["build:windows_cpp"]
# tags:
# - windows
# image: buildtools
# before_script:
# # Install Chocolatey
# - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
# # Install dependencies
# - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
# - choco install python -Y
# # Update PATH
# - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
# script:
# - cd build_cpp
# - ctest --output-junit ctest-results.xml --output-on-failure
# artifacts:
# reports:
# junit: build_cpp/ctest-results.xml
# Version 0.1.1 (January 29, 2024)
[Add] Support of a negative value for Reshape Operator shape attribute.
# Version 0.1.0 (January 23, 2024)
Initial release
......@@ -39,12 +39,6 @@ class test_attributes(unittest.TestCase):
self.assertEqual(fc_op.get_attr("OutChannels"), out_channels)
self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
def test_matmul(self):
in_channels = 4
out_channels = 8
matmul_op = aidge_core.MatMul(in_channels, out_channels).get_operator()
self.assertEqual(matmul_op.get_attr("OutChannels"), out_channels)
def test_producer_1D(self):
dims = [5]
producer_op = aidge_core.Producer(dims).get_operator()
......
......@@ -45,9 +45,9 @@ class test_recipies(unittest.TestCase):
self.assertTrue(all([i in old_nodes for i in graph_view.get_nodes()]))
def test_fuse_matmul_add(self):
matmul0 = aidge_core.MatMul(1, 1, name="MatMul0")
matmul0 = aidge_core.MatMul(name="MatMul0")
add0 = aidge_core.Add(2, name="Add0")
matmul1 = aidge_core.MatMul(1, 1, name="MatMul1")
matmul1 = aidge_core.MatMul(name="MatMul1")
add1 = aidge_core.Add(2, name="Add1")
graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
......
......@@ -14,13 +14,15 @@
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/backend/StimulusImpl.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Database.hpp"
#include "aidge/data/DataProvider.hpp"
#include "aidge/graph/Connector.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Node.hpp"
......@@ -61,6 +63,7 @@
#include "aidge/operator/Sub.hpp"
#include "aidge/operator/Transpose.hpp"
#include "aidge/scheduler/Scheduler.hpp"
#include "aidge/stimuli/Stimulus.hpp"
#include "aidge/recipies/Recipies.hpp"
......@@ -69,7 +72,5 @@
#include "aidge/utils/DynamicAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
//#include "aidge/utilsParsing/AstNode.hpp"
//#include "aidge/utilsParsing/ParsingToken.hpp"
#endif /* AIDGE_IMPORTS_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_BACKEND_STIMULUSIMPL_H_
#define AIDGE_CORE_BACKEND_STIMULUSIMPL_H_
#include <memory>
#include "aidge/data/Tensor.hpp"
namespace Aidge {
/**
 * @brief Base class to implement data loading functions.
 * Concrete, backend-specific stimulus implementations derive from this
 * interface and provide the actual loading logic in load().
 */
class StimulusImpl {
public:
virtual ~StimulusImpl() noexcept = default;
/**
 * @brief Load the stimulus data.
 * @return Shared pointer to a Tensor holding the loaded data.
 */
virtual std::shared_ptr<Tensor> load() const = 0;
};
} // namespace Aidge
#endif /* AIDGE_CORE_BACKEND_STIMULUSIMPL_H_ */
......@@ -12,8 +12,12 @@
#ifndef AIDGE_TENSORIMPL_H_
#define AIDGE_TENSORIMPL_H_
#include <cstddef>
#include <cstdio>
#include <numeric> // std::accumulate
#include <cstddef> // std::size_t
#include <functional> // std::multiplies
#include <vector>
#include <utility> // std::pair, std::make_pair
#include "aidge/data/Data.hpp"
#include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
......@@ -59,20 +63,42 @@ private:
*/
/**
* This class manages the raw data storage of a Tensor and provide generic copy
* @class TensorImpl
* @brief Class to manage the raw data storage of a Tensor and provide generic copy
* primitives from other devices and from/to host.
* It can own the data or not (use setRawPtr() to set an external data owner).
* It only knows the data type and data capacity, but does not handle anything else.
* @note It can own the data or not (use ``setRawPtr()`` to set an external data owner).
* @note It only knows the data type and data capacity, but does not handle anything else.
*/
class TensorImpl {
protected:
const char *mBackend;
/// @brief Device id.
const DeviceIdx_t mDevice;
/// Number of elements (to be) stored.
NbElts_t mNbElts;
public:
TensorImpl() = delete;
TensorImpl(const char *backend, DeviceIdx_t device, NbElts_t length) : mBackend(backend), mDevice(device), mNbElts(length) {};
TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims)
: mBackend(backend),
mDevice(device)
{
resize(dims);
};
virtual ~TensorImpl() = default;
virtual bool operator==(const TensorImpl &othImpl) const = 0;
public:
/**
* Return the (backend, device) pair for this implementation.
*/
std::pair<std::string, DeviceIdx_t> device() const { return std::make_pair(mBackend, mDevice); }
std::pair<std::string, DeviceIdx_t> device() const noexcept {
return std::make_pair(std::string(mBackend), mDevice);
}
/**
* Copy data from the same device.
......@@ -147,8 +173,8 @@ public:
/**
* Set the size, in number of elements, that must be stored.
*/
void resize(NbElts_t length) {
mNbElts = length;
virtual void resize(std::vector<DimSize_t> dims) {
mNbElts = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
}
/**
......@@ -161,23 +187,15 @@ public:
*/
virtual std::size_t scalarSize() const noexcept = 0;
constexpr const char *backend() const { return mBackend; }
virtual ~TensorImpl() = default;
virtual bool operator==(const TensorImpl &othImpl) const = 0;
/**
* Copy from another backend.
* @brief Copy from another backend.
* @param srcImpl Source TensorImpl to copy from.
* @param length Number of elements of size scalarSize() to copy
* @param srcOffset Source offset (in number of elements).
* @param dstOffset Destination offset (in number of elements).
*/
void copyFrom(const TensorImpl& srcImpl, NbElts_t length, NbElts_t srcOffset = 0, NbElts_t dstOffset = 0);
protected:
const char *mBackend;
const DeviceIdx_t mDevice;
/// Number of elements (to be) stored
NbElts_t mNbElts;
};
} // namespace Aidge
......
......@@ -12,13 +12,16 @@
#ifndef AIDGE_CPU_DATA_GETCPUPTR_H_
#define AIDGE_CPU_DATA_GETCPUPTR_H_
#include <cstddef>
#include <memory>
#include "aidge/data/Tensor.hpp"
namespace Aidge {
inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) {
inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data, const std::size_t offset = 0) {
const auto tensor = std::static_pointer_cast<Tensor>(data);
return tensor->getImpl()->hostPtr(tensor->getImplOffset());
return tensor->getImpl()->hostPtr(tensor->getImplOffset() + offset);
}
} // namespace Aidge
#endif // AIDGE_CPU_DATA_GETCPUPTR_H_
\ No newline at end of file
#endif // AIDGE_CPU_DATA_GETCPUPTR_H_
......@@ -33,7 +33,7 @@ private:
public:
static constexpr const char *Backend = "cpu";
TensorImpl_cpu(DeviceIdx_t device, NbElts_t length) : TensorImpl(Backend, device, length) {}
TensorImpl_cpu(DeviceIdx_t device, std::vector<DimSize_t> dims) : TensorImpl(Backend, device, dims) {}
bool operator==(const TensorImpl &otherImpl) const override final {
const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
......@@ -47,8 +47,8 @@ public:
return i == mNbElts;
}
static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, NbElts_t length) {
return std::make_shared<TensorImpl_cpu<T>>(device, length);
static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, std::vector<DimSize_t> dims) {
return std::make_shared<TensorImpl_cpu<T>>(device, dims);
}
inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
......@@ -183,10 +183,18 @@ static Registrar<Tensor> registrarTensorImpl_cpu_Float32(
{"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Float16(
{"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
{"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int64(
{"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<long>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
{"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int16(
{"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
{"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int8(
{"cpu", DataType::Int8}, Aidge::TensorImpl_cpu<int8_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt8(
{"cpu", DataType::UInt8}, Aidge::TensorImpl_cpu<uint8_t>::create);
} // namespace
} // namespace Aidge
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_DATA_DATAPROVIDER_H_
#define AIDGE_CORE_DATA_DATAPROVIDER_H_
#include <cstddef> // std::size_t
#include <memory> // std::shared_ptr
#include <string>
#include <vector> // std::vector
#include "aidge/data/Database.hpp"
#include "aidge/data/Data.hpp"
namespace Aidge {
/**
 * @brief Data Provider. Takes in a database and composes batches by fetching data from the given database.
 * @todo Implement a drop-last-batch option. Currently returns the last batch with fewer elements in the batch.
 * @todo Implement readRandomBatch to compose batches from the database with a random sampling strategy. Necessary for training.
 */
class DataProvider {
private:
// Dataset providing the data to the dataProvider
const Database& mDatabase;
// Number of modalities per database item (queried once from the database)
const std::size_t mNumberModality;
// Per-modality tensor dimensions, backends and data types, used to
// allocate the batch tensors in readBatch()
std::vector<std::vector<std::size_t>> mDataSizes;
std::vector<std::string> mDataBackends;
std::vector<DataType> mDataTypes;
// Desired size of the produced batches
const std::size_t mBatchSize;
public:
/**
 * @brief Constructor of Data Provider.
 * @param database database from which to load the data.
 * @param batchSize number of data samples per batch.
 */
DataProvider(const Database& database, const std::size_t batchSize);
public:
/**
 * @brief Create a batch for each data modality in the database. The returned batch contains the data as sorted in the database.
 * @param startIndex the starting index in the database to start the batch from.
 * @return a vector of tensors. Each tensor is a batch corresponding to one modality.
 */
std::vector<std::shared_ptr<Tensor>> readBatch(const std::size_t startIndex) const;
};
} // namespace Aidge
#endif /* AIDGE_CORE_DATA_DATAPROVIDER_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_DATA_DATABASE_H_
#define AIDGE_CORE_DATA_DATABASE_H_
#include <cstddef>
#include <memory>
#include <vector>
#include "aidge/data/Tensor.hpp"
namespace Aidge {
/**
 * @brief Abstract class representing a map from a key to data.
 * All databases should inherit from this class. All subclasses should overwrite
 * :cpp:function:`Database::getItem` to fetch data from a given index.
 */
class Database {
public:
Database() = default;
virtual ~Database() noexcept = default;
/**
 * @brief Fetch an item of the database.
 * @param index index of the item.
 * @return vector of data mapped to index (one Tensor per modality).
 */
virtual std::vector<std::shared_ptr<Tensor>> getItem(const std::size_t index) const = 0;
/**
 * @brief Get the number of items in the database
 *
 * @return std::size_t
 */
virtual std::size_t getLen() const noexcept = 0;
/**
 * @brief Get the number of modalities in one database item
 *
 * @return std::size_t
 */
virtual std::size_t getNbModalities() const noexcept = 0;
};
} // namespace Aidge
#endif /* AIDGE_CORE_DATA_DATABASE_H_ */
This diff is collapsed.
......@@ -209,7 +209,7 @@ public:
* @brief Compute dimensions of input/output Tensors for each Operator of the
* GraphView object's Nodes.
*/
void forwardDims();
void forwardDims(const std::vector<std::vector<DimSize_t>> dims = {});
/** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
void setBackend(const std::string &backend, DeviceIdx_t device = 0);
......
......@@ -68,13 +68,7 @@ public:
// }
// void checkDims() const override final {
// assert(outputDimsForwarded());
// for (const auto& in : mInputs) {
// assert(in->dims() == mOutputs[0]->dims());
// }
// }
void computeOutputDims() override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
mImpl = Registrar<Add_Op>::create(name)(*this);
......
......@@ -60,7 +60,7 @@ public:
}
static const std::vector<std::string> getInputsName(){
return {"data_input"};
return {"data_input_1", "data_input_2"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
......
......@@ -59,7 +59,7 @@ public:
// Helper functions that can be used with setComputeOutputDims():
static const ComputeDimsFunc Identity;
void setComputeOutputDims(ComputeDimsFunc func) {
inline void setComputeOutputDims(ComputeDimsFunc func) {
mComputeOutputDims = func;
}
......
......@@ -12,49 +12,32 @@
#ifndef AIDGE_CORE_OPERATOR_MATMUL_H_
#define AIDGE_CORE_OPERATOR_MATMUL_H_
#include <array>
#include <cmath>
#include <numeric>
#include <memory>
#include <string>
#include <vector>
#include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
namespace Aidge {
enum class MatMulAttr { OutChannels };
class MatMul_Op : public OperatorTensor,
public Registrable<MatMul_Op,
std::string,
std::unique_ptr<OperatorImpl>(const MatMul_Op &)>,
public StaticAttributes<MatMulAttr, DimSize_t> {
std::unique_ptr<OperatorImpl>(const MatMul_Op &)> {
public:
static const std::string Type;
MatMul_Op() = delete;
using Attributes_ = StaticAttributes<MatMulAttr, DimSize_t>;
template <MatMulAttr e> using attr = typename Attributes_::template attr<e>;
MatMul_Op(DimSize_t out_channels)
: OperatorTensor(Type, 1, 1, 1),
Attributes_(
attr<MatMulAttr::OutChannels>(out_channels))
{}
MatMul_Op() : OperatorTensor(Type, 2, 0, 1) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
MatMul_Op(const MatMul_Op& op)
: OperatorTensor(op),
Attributes_(op)
MatMul_Op(const MatMul_Op& op) : OperatorTensor(op)
{
mImpl = op.mImpl ? Registrar<MatMul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
}
......@@ -63,50 +46,40 @@ public:
* @brief Clone the operator using its copy-constructor.
* @see Operator::MatMul_Op
*/
std::shared_ptr<Operator> clone() const override {
std::shared_ptr<Operator> clone() const override final {
return std::make_shared<MatMul_Op>(*this);
}
void computeOutputDims() override final {
bool associated = true;
for (IOIndex_t i = 0; i < nbInputs(); ++i) {
if (!getInput(i)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
}
associated &= !(getInput(i)->empty());
}
if (associated) {
// <batch, OutChannels>
mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<MatMulAttr::OutChannels>()});
}
}
/**
* @brief Compute dimensions for the output Tensor following the same rules as
* numpy.matmul.
* @note - Both inputs are 2-D Tensors: classic matrix multiplication
* @note - Either input is N-D with N > 2: it is treated as a stack of matrices residing
* in the last two indexes and broadcast accordingly.
* @note - First input is 1-D: it is promoted to a matrix by prepending a 1 to its
* dimensions (D) -> (1,D). The prepended 1 is removed after computation.
* @note - Second input is 1-D: it is promoted to a matrix by appending a 1 to its
* dimensions (D) -> (D,1). The appended 1 is removed after computation.
*/
void computeOutputDims() override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
void setBackend(const std::string& name, DeviceIdx_t device = 0) override final {
mImpl = Registrar<MatMul_Op>::create(name)(*this);
mOutputs[0]->setBackend(name, device);
}
static const std::vector<std::string> getInputsName(){
return {"data_input", "weight"};
static const std::vector<std::string> getInputsName() {
return {"data_input1", "data_input2"};
}
static const std::vector<std::string> getOutputsName(){
static const std::vector<std::string> getOutputsName() {
return {"data_output"};
}
};
inline std::shared_ptr<Node> MatMul(DimSize_t inChannels, DimSize_t outChannels, const std::string& name = "") {
// FIXME: properly handle default w initialization in every cases
auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(outChannels), name);
addProducer(matmul, 1, {outChannels, inChannels}, "w");
return matmul;
inline std::shared_ptr<Node> MatMul(const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<MatMul_Op>(), name);
}
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::MatMulAttr>::data[] = {"OutChannels"};
}
#endif /* AIDGE_CORE_OPERATOR__MATMUL_H_ */
......@@ -62,7 +62,7 @@ public:
}
static const std::vector<std::string> getInputsName(){
return {"data_input"};
return {"data_input_1", "data_input_2"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
......
......@@ -118,8 +118,14 @@ public:
* @brief Set a new OperatorImpl to the Operator
*
*/
void setImpl(std::shared_ptr<OperatorImpl> impl){
mImpl = impl;
inline void setImpl(std::shared_ptr<OperatorImpl> impl) { mImpl = impl; }
/**
* @brief Get the OperatorImpl of the Operator
*
*/
inline std::shared_ptr<OperatorImpl> getImpl() const noexcept {
return mImpl;
}
/**
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment