Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 613 additions and 31 deletions
#include <pybind11/pybind11.h>
#include "aidge/data/Database.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Database(py::module& m){
py::class_<Database, std::shared_ptr<Database>>(m,"Database");
}
}
@@ -30,7 +30,7 @@ void addCtor(py::class_<Tensor,
    Data,
    Registrable<Tensor,
        std::tuple<std::string, DataType>,
-       std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>>& mTensor){
+       std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>>& mTensor){
mTensor.def(py::init([](
py::array_t<T, py::array::c_style | py::array::forcecast> b,
std::string backend = "cpu") {
@@ -60,16 +60,16 @@ void addCtor(py::class_<Tensor,
void init_Tensor(py::module& m){
py::class_<Registrable<Tensor,
    std::tuple<std::string, DataType>,
-   std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>,
+   std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>,
    std::shared_ptr<Registrable<Tensor,
        std::tuple<std::string, DataType>,
-       std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>>>(m,"TensorRegistrable");
+       std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>>>(m,"TensorRegistrable");
py::class_<Tensor, std::shared_ptr<Tensor>,
    Data,
    Registrable<Tensor,
        std::tuple<std::string, DataType>,
-       std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>> pyClassTensor
+       std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>> pyClassTensor
(m,"Tensor", py::multiple_inheritance(), py::buffer_protocol());
pyClassTensor.def(py::init<>())
......
@@ -100,7 +100,7 @@ void init_GraphView(py::module& m) {
.def("get_nodes", &GraphView::getNodes)
.def("get_node", &GraphView::getNode, py::arg("node_name"))
-.def("forward_dims", &GraphView::forwardDims)
+.def("forward_dims", &GraphView::forwardDims, py::arg("dims")=std::vector<std::vector<DimSize_t>>())
.def("compile", &GraphView::compile, py::arg("backend"), py::arg("datatype"), py::arg("device") = 0)
.def("__call__", &GraphView::operator(), py::arg("connectors"))
.def("set_datatype", &GraphView::setDataType, py::arg("datatype"))
......
@@ -19,16 +19,11 @@
namespace py = pybind11;
namespace Aidge {
-void declare_MatMul(py::module &m) {
-  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Attributes, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
+void init_MatMul(py::module &m) {
+  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
   .def("get_inputs_name", &MatMul_Op::getInputsName)
-  .def("get_outputs_name", &MatMul_Op::getOutputsName)
-  .def("attributes_name", &MatMul_Op::staticGetAttrsName);
-  m.def("MatMul", &MatMul, py::arg("in_channels"), py::arg("out_channels"), py::arg("name") = "");
-}
-
-void init_MatMul(py::module &m) {
-  declare_MatMul(m);
+  .def("get_outputs_name", &MatMul_Op::getOutputsName);
+  m.def("MatMul", &MatMul, py::arg("name") = "");
}
} // namespace Aidge
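With this change, MatMul no longer carries in_channels/out_channels attributes: the weight becomes an ordinary second input supplied by the graph. A minimal construction sketch under that assumption (node names and weight dims are illustrative):

// The weight is now fed through input #1 by a Producer node instead of
// being an operator attribute.
auto w0 = Aidge::Producer({5, 5}, "W0");   // hypothetical weight node
auto matmul0 = Aidge::MatMul("matmul0");   // no channel arguments anymore
w0->addChild(matmul0, 0, 1);               // output 0 of w0 -> input 1 of matmul0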
@@ -18,6 +18,8 @@ namespace py = pybind11;
namespace Aidge {
void init_Data(py::module&);
+void init_Database(py::module&);
+void init_DataProvider(py::module&);
void init_Tensor(py::module&);
void init_OperatorImpl(py::module&);
void init_Attributes(py::module&);
@@ -69,6 +71,8 @@ void init_TensorUtils(py::module&);
void init_Aidge(py::module& m){
    init_Data(m);
+    init_Database(m);
+    init_DataProvider(m);
    init_Tensor(m);
    init_Node(m);
......
@@ -13,13 +13,14 @@
#include <pybind11/stl.h>
#include "aidge/scheduler/Scheduler.hpp"
#include "aidge/graph/GraphView.hpp"
+#include "aidge/data/Tensor.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Scheduler(py::module& m){
py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>>(m, "SequentialScheduler")
    .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
-   .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false)
+   .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false, py::arg("data")=std::vector<Tensor>())
.def("save_scheduling_diagram", &SequentialScheduler::saveSchedulingDiagram, py::arg("file_name"))
.def("resetScheduling", &SequentialScheduler::resetScheduling)
.def("generate_scheduling", &SequentialScheduler::generateScheduling, py::arg("verbose")=false)
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <cstddef> // std::size_t
#include <memory>
#include <vector>
#include "aidge/data/Database.hpp"
#include "aidge/data/DataProvider.hpp"
#include "aidge/data/Tensor.hpp"
Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::size_t batchSize)
: mDatabase(database),
mNumberModality(database.getItem(0).size()),
mBatchSize(batchSize)
{
// Iterating on each data modality in the database
// Record the tensor dimensions, data type and backend of each modality so that every item can be checked against them
for (const auto& modality : mDatabase.getItem(0)) {
mDataSizes.push_back(modality->dims());
// assert(std::strcmp(item[i]->getImpl()->backend(), "cpu") == 0 && "DataProvider currently only supports cpu backend tensors");
// mDataBackends.push_back(item[i]->getImpl()->backend());
mDataTypes.push_back(modality->dataType());
}
}
std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch(const std::size_t startIndex) const
{
assert(startIndex <= mDatabase.getLen() && "DataProvider readBatch : database fetch out of bounds");
// Determine the batch size (may differ for the last batch)
const std::size_t current_batch_size = ((startIndex + mBatchSize) > mDatabase.getLen()) ?
mDatabase.getLen()-startIndex :
mBatchSize;
// Create batch tensors (dimensions, backends, datatype) for each modality
std::vector<std::shared_ptr<Tensor>> batchTensors;
auto dataBatchSize = mDataSizes;
for (std::size_t i = 0; i < mNumberModality; ++i) {
dataBatchSize[i].insert(dataBatchSize[i].begin(), current_batch_size);
auto batchData = std::make_shared<Tensor>();
batchData->resize(dataBatchSize[i]);
// batchData->setBackend(mDataBackends[i]);
batchData->setBackend("cpu");
batchData->setDataType(mDataTypes[i]);
batchTensors.push_back(batchData);
}
// Fetch each database item and concatenate each data modality into the batch tensors
for (std::size_t i = 0; i < current_batch_size; ++i){
auto dataItem = mDatabase.getItem(startIndex+i);
// assert same number of modalities
assert(dataItem.size() == mNumberModality && "DataProvider readBatch : item from database has an inconsistent number of modalities.");
// Browse each modality in the database item
for (std::size_t j = 0; j < mNumberModality; ++j) {
auto dataSample = dataItem[j];
// Assert tensor sizes
assert(dataSample->dims() == mDataSizes[j] && "DataProvider readBatch : corrupted Data size");
// Assert implementation backend
// assert(dataSample->getImpl()->backend() == mDataBackends[j] && "DataProvider readBatch : corrupted data backend");
// Assert DataType
assert(dataSample->dataType() == mDataTypes[j] && "DataProvider readBatch : corrupted data DataType");
// Concatenate into the batch tensor
batchTensors[j]->getImpl()->copy(dataSample->getImpl()->rawPtr(), dataSample->size(), i*dataSample->size());
}
}
return batchTensors;
}
\ No newline at end of file
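For context, a minimal usage sketch of the new DataProvider, assuming a concrete Database subclass (MyDataset below is hypothetical):

// MyDataset stands in for any Database implementation providing
// getItem()/getLen() over CPU tensors.
MyDataset dataset;
Aidge::DataProvider provider(dataset, 32);        // batches of 32 items

// readBatch returns one batched Tensor per modality; each has dims
// {batchSize, ...modality dims} and the modality's data type.
const auto batch = provider.readBatch(0);
const std::shared_ptr<Aidge::Tensor>& firstModality = batch[0];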
@@ -9,10 +9,145 @@
*
********************************************************************************/
#include <vector>
#include <cstddef>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
bool checkContiguous = true;
if (strides.empty()) {
strides.resize(dims.size());
size_t expectedStride = 1;
for (int dim = dims.size() - 1; dim >= 0; --dim) {
strides[dim] = expectedStride;
expectedStride*= dims[dim];
}
checkContiguous = false;
}
else {
AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
}
if (mImpl.use_count() > 1) {
// Here we could also create a new storage for this tensor in this case
// But is it more likely that the user really wants this, or that it is a mistake?
AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
}
else {
mDims = dims;
mStrides = strides;
mContiguous = true;
if (checkContiguous) {
std::size_t expectedStride = 1;
for (std::size_t i = dims.size()-1; i > 0; --i) {
if (strides[i] != expectedStride) {
mContiguous = false;
break;
}
expectedStride*= dims[i];
}
mContiguous &= (strides[0] == expectedStride);
}
computeSize();
if (mImpl) {
mImpl->resize(mDims);
}
}
}
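To make the stride handling above concrete, here is a short illustration of the default row-major strides computed when the strides argument is left empty (a sketch, not part of the change):

// For dims {2, 3, 4} the backward loop yields strides {12, 4, 1}: the last
// dim has stride 1 and each earlier stride is the product of the dims to its
// right. Supplying anything else (e.g. {12, 8, 1}) trips the contiguity
// check and marks the tensor non-contiguous.
Aidge::Tensor t;
t.resize({2, 3, 4});              // default strides {12, 4, 1}, contiguous
t.resize({2, 3, 4}, {12, 4, 1});  // explicit equivalent, still contiguous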
std::string Aidge::Tensor::toString() const {
AIDGE_ASSERT(mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) || (mImpl->hostPtr() != nullptr)), "tensor should have a valid host pointer");
// TODO: move lambda elsewhere?
auto ptrToString = [](DataType dt, void* ptr, std::size_t idx) {
switch (dt) {
case DataType::Float64:
return std::to_string(static_cast<double*>(ptr)[idx]);
case DataType::Float32:
return std::to_string(static_cast<float*>(ptr)[idx]);
case DataType::Float16:
return std::to_string(static_cast<half_float::half*>(ptr)[idx]);
case DataType::Int8:
return std::to_string(static_cast<int8_t*>(ptr)[idx]);
case DataType::Int16:
return std::to_string(static_cast<int16_t*>(ptr)[idx]);
case DataType::Int32:
return std::to_string(static_cast<int32_t*>(ptr)[idx]);
case DataType::Int64:
return std::to_string(static_cast<int64_t*>(ptr)[idx]);
case DataType::UInt8:
return std::to_string(static_cast<uint8_t*>(ptr)[idx]);
case DataType::UInt16:
return std::to_string(static_cast<uint16_t*>(ptr)[idx]);
case DataType::UInt32:
return std::to_string(static_cast<uint32_t*>(ptr)[idx]);
case DataType::UInt64:
return std::to_string(static_cast<uint64_t*>(ptr)[idx]);
default:
AIDGE_ASSERT(false, "unsupported type to convert to string");
}
return std::string("?"); // To make Clang happy
};
if (dims().empty()) { return ptrToString(mDataType, mImpl->hostPtr(), 0); }
std::string res;
std::size_t dim = 0;
std::size_t counter = 0;
if (nbDims()>=2) {
std::vector<std::size_t> dimVals(nbDims(), 0);
res += "{\n";
while (counter < mSize) {
std::string spaceString = std::string((dim+1)<<1,' ');
if (dim < nbDims()-2) {
if (dimVals[dim] == 0) {
res += spaceString + "{\n";
++dim;
} else if (dimVals[dim] < static_cast<std::size_t>(dims()[dim])) {
res += spaceString + "},\n" + spaceString + "{\n";
++dim;
} else {
res += spaceString + "}\n";
dimVals[dim--] = 0;
dimVals[dim]++;
}
} else {
for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) {
res += spaceString + "{";
for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) {
res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + ",";
}
res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + "}";
if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) {
res += ",";
}
res += "\n";
}
if (dim == 0) {
break;
}
dimVals[dim--] = 0;
dimVals[dim]++;
}
}
for(int i = static_cast<int>(dim); i > 0; --i) {
res += std::string((dim+1)<<1,' ') + "}\n";
}
} else {
res += "{";
for (DimSize_t j = 0; j < dims()[0]; ++j) {
res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) + ((j < dims()[0]-1) ? "," : " ");
}
}
res += "}";
return res;
}
Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx) const {
AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
@@ -44,7 +179,7 @@ void Aidge::Tensor::makeContiguous() {
// Block so that mImpl ref count is 1 for resize()
{
// Create a new storage that will be contiguous
-std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mSize);
+std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
// Copy elements from old to new storage
size_t idx = 0;
while (idx < mSize) {
@@ -52,7 +187,7 @@ void Aidge::Tensor::makeContiguous() {
// Determine the size of the contiguous chunk
size_t copySize = 1;
while (idx + copySize < mSize &&
    getStorageIdx(getCoord(idx + copySize)) == storageIdx + copySize)
{
++copySize;
......
@@ -265,10 +265,18 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
    forwardDims();
}
-void Aidge::GraphView::forwardDims() {
+void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims) {
    // setInputs
    // Link every tensor to the right pointer
    // following parent - children information
+    if (!dims.empty()){
+        AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of dimensions and graph inputs");
+        for (std::size_t i = 0; i < dims.size(); ++i){
+            auto tensor = std::make_shared<Tensor>(dims[i]);
+            mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
+        }
+    }
for (std::shared_ptr<Node> nodePtr : getNodes()) {
for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
// assess if the input was not already set and is a Tensor then link it to parent output
......
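A short sketch of the new overload, assuming a graph with a single data input (the dims values are illustrative):

#include "aidge/graph/GraphView.hpp"

// forwardDims() now optionally creates one input Tensor per graph input
// from the supplied dims before running the propagation pass.
void inferDims(std::shared_ptr<Aidge::GraphView> graph) {
    graph->forwardDims({{16, 3, 224, 224}});  // one dim vector per graph input
}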
@@ -9,8 +9,64 @@
*
********************************************************************************/
#include <algorithm>
#include <string>
#include <vector>
#include "aidge/operator/MatMul.hpp"
#include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
-const std::string Aidge::MatMul_Op::Type = "MatMul";
\ No newline at end of file
+const std::string Aidge::MatMul_Op::Type = "MatMul";
void Aidge::MatMul_Op::computeOutputDims() {
if (!getInput(0) || !getInput(1)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input. Cannot compute output dimensions for MatMul Operator.");
}
if (getInput(0)->empty() && getInput(1)->empty()) {
// both inputs are scalar
mOutputs[0]->resize({});
}
else if (!getInput(0)->empty() && !getInput(1)->empty())
{
std::vector<std::size_t> dims0 = getInput(0)->dims();
std::vector<std::size_t> dims1 = getInput(1)->dims();
// keep second-to-last dimension of dims0
const bool keepDim0 = dims0.size() > 1;
// keep last dimension of dims1
const bool keepDim1 = dims1.size() > 1;
if (dims0.size() == 1) {
dims0.insert(dims0.cbegin(), 1);
}
if (dims1.size() == 1) {
dims1.push_back(1);
}
const std::size_t dims_size = std::max(dims0.size(), dims1.size());
if (dims0.size() > dims1.size()) {
dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1));
}
else if (dims1.size() > dims0.size()) {
dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
}
AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrix sizes.");
std::vector<std::size_t> outDims = std::vector<std::size_t>(dims_size-2, 1);
for (std::size_t i = 0; i < dims_size-2; ++i) {
AIDGE_ASSERT((dims0[i] == dims1[i]) || (dims0[i] == 1) || (dims1[i] == 1), "Bad vector dimension.");
outDims[i] = std::max(dims0[i], dims1[i]);
}
// use keepDim0 instead of dims0.size() because dims0 has been modified
if (keepDim0)
outDims.push_back(dims0[dims_size-2]);
if (keepDim1)
outDims.push_back(dims1[dims_size-1]);
mOutputs[0]->resize(outDims);
}
}
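A worked example of the dimension rules implemented above (values are illustrative):

// input 0: {7, 1, 4, 3}        input 1: {3, 2}
// pad input 1 to rank 4:       {1, 1, 3, 2}
// contraction check:           dims0[3] == 3 == dims1[2]  -> OK
// broadcast leading dims:      {7, 1} vs {1, 1}           -> {7, 1}
// keepDim0 appends dims0[2] == 4; keepDim1 appends dims1[3] == 2
// output dims:                 {7, 1, 4, 2}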
@@ -41,7 +41,19 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
AIDGE_ASSERT(matmulNode->getParent(1), "No weight detected to produce the fuseMulAdd recipe.");
std::shared_ptr<Node> weight = matmulNode->getParent(1)->cloneSharedOperators();
-const DimSize_t outSize = std::dynamic_pointer_cast<MatMul_Op>(matmulNode->getOperator()) -> getAttr<DimSize_t>("OutChannels");
+// TODO: find another way to get OutChannels for the FC operator.
+// This temporary fix assumes that one of the Add inputs is a constant with the same outChannels as the output
+DimSize_t outSize = 0;
+const auto& op = std::dynamic_pointer_cast<OperatorTensor>(addNode->getOperator());
+for (size_t i = 0; i < op->nbInputs(); i++)
+{
+    const auto& inTensor = op->getInput(i);
+    if(inTensor->nbDims() > 0) {
+        outSize = inTensor->dims()[inTensor->nbDims()-1];
+        break;
+    }
+}
+AIDGE_ASSERT(outSize, "Couldn't get the output number of channels for the FC operator.");
// Instantiate FC
//std::shared_ptr<Node> fc = FC(dim[0], false, "Fc");
......
@@ -174,8 +174,28 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
}
+void Aidge::SequentialScheduler::connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data){
+    // This version of connectInputs only connects tensor inputs to the graph's input data producers.
+    auto inputNodes = mGraphView->getOrderedInputs();
+    // Assert that the number of input data producers matches the number of data inputs
+    assert(data.size() == inputNodes.size() && "Scheduler connectInput error - Inconsistent number of graph inputs and inputs passed to the graph");
+    for (std::size_t i = 0; i < data.size(); ++i){
+        // TODO: maybe a shallow copy instead of a deep copy
+        inputNodes[i].first->getOperator()->setInput(inputNodes[i].second, data[i]);
+    }
+}
// TODO: handle multiple inputs/outputs
-void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
+void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::vector<std::shared_ptr<Aidge::Tensor>> data) {
+    // Collect all data inputs of the graph (that are producers)
+    if (!data.empty()){
+        connectInputs(data);
+    }
    // Forward dims (if allowed)
    if (forwardDims) {mGraphView->forwardDims(); }
......
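A sketch of the extended entry point, assuming `scheduler` wraps the graph built above and `input` is an already-filled Tensor (both hypothetical):

// The data vector is handed to connectInputs() before dimension
// propagation and execution.
std::vector<std::shared_ptr<Aidge::Tensor>> inputs{input};
scheduler.forward(/*forwardDims=*/true, /*verbose=*/false, inputs);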
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/stimuli/Stimulus.hpp"
#include <memory>
#include "aidge/data/Tensor.hpp"
Aidge::Stimulus::~Stimulus() = default;
std::shared_ptr<Aidge::Tensor> Aidge::Stimulus::load() {
AIDGE_ASSERT((mImpl!=nullptr || mData!=nullptr), "No load implementation and no stored data");
if (mLoadDataInMemory){
if (mData == nullptr){
mData = mImpl->load();
}
return mData;
}
return mImpl->load();
}
\ No newline at end of file
@@ -19,7 +19,7 @@
using namespace Aidge;
-TEST_CASE("Tensor creation") {
+TEST_CASE("[core/data] Tensor creation") {
SECTION("from const array") {
Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
@@ -59,7 +59,34 @@ TEST_CASE("Tensor creation") {
}
}
-TEST_CASE("Tensor methods") {
+TEST_CASE("Tensor fill") {
+    SECTION("Instantiate batches independently") {
+        // initialization with 0s
+        std::shared_ptr<Tensor> concatenatedTensor = std::make_shared<Tensor>(Array2D<int, 3, 5>{});
+        //concatenatedTensor->print();
+        std::shared_ptr<Tensor> myTensor1 = std::make_shared<Tensor>(Array1D<int, 5>{{1,2,3,4,5}});
+        std::shared_ptr<Tensor> myTensor2 = std::make_shared<Tensor>(Array1D<int, 5>{{6,7,8,9,10}});
+        std::shared_ptr<Tensor> myTensor3 = std::make_shared<Tensor>(Array1D<int, 5>{{11,12,13,14,15}});
+        // use the copy function from the implementation
+        concatenatedTensor->getImpl()->copy(myTensor1->getImpl()->rawPtr(), 5, 0);
+        concatenatedTensor->getImpl()->copy(myTensor2->getImpl()->rawPtr(), 5, 5);
+        concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 10);
+        // concatenatedTensor->print();
+        std::shared_ptr<Tensor> expectedTensor = std::make_shared<Tensor>(Array2D<int, 3, 5>{
+            {{1,2,3,4,5},
+            {6,7,8,9,10},
+            {11,12,13,14,15}}
+        });
+        // expectedTensor->print();
+        REQUIRE(*concatenatedTensor == *expectedTensor);
+    }
+}
+
+TEST_CASE("[core/data] Tensor methods","[Tensor]") {
Tensor x = Array3D<int, 2, 2, 2>{{
{{1, 2},
{3, 4}},
@@ -89,7 +116,7 @@ TEST_CASE("Tensor methods") {
REQUIRE(y.getImpl() == x.getImpl());
REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
REQUIRE(y.isContiguous());
Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1});
REQUIRE(y2.getImpl() == x.getImpl());
REQUIRE(!y2.isContiguous());
......
@@ -126,9 +126,9 @@ TEST_CASE("GraphRegexUser") {
SECTION("Applied Recipes"){
// generate the original GraphView
-auto matmul0 = MatMul(5, 5, "matmul0");
+auto matmul0 = MatMul("matmul0");
auto add0 = Add(2, "add0");
-auto matmul1 = MatMul(5, 5, "matmul1");
+auto matmul1 = MatMul("matmul1");
auto add1 = Add(2, "add1");
auto b0 = Producer({5}, "B0");
@@ -154,7 +154,7 @@ TEST_CASE("GraphRegexUser") {
auto g = std::make_shared<GraphView>();
-g->add({matmul0, add0, matmul1, add1, b0, b1,fl,fc});
+g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1,fl,fc});
std::shared_ptr<GraphRegex> kitchenBook = std::make_shared<GraphRegex>();
std::shared_ptr<GraphRegex> kitchenBook = std::make_shared<GraphRegex>();
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cstddef> // std::size_t
#include <memory>
#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/MatMul.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace Aidge {
TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutputDims]") {
// Create a random number generator
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dist(1, 10);
// Create MatMul Operator
std::shared_ptr<Node> myMatMul = MatMul();
auto op = std::static_pointer_cast<OperatorTensor>(myMatMul -> getOperator());
/** @todo Special case of scalar Tensor objects.
* Not handled yet.
*/
// SECTION("0-D / 0-D") {
// std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
// T0->resize({});
// op -> associateInput(0,T0);
// // input_1 - right
// std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
// T1->resize({});
// op -> associateInput(1,T1);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()).empty());
// // input_1 - wrong
// T1->resize({dist(gen)});
// REQUIRE_THROWS(op->computeOutputDims());
// }
SECTION("1-D / N-D") {
// input_0
std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
const std::size_t dim0 = dist(gen);
T0->resize({dim0});
op -> associateInput(0,T0);
std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
op -> associateInput(1,T1);
SECTION("1-D / 1-D") {
// input_1 - right
T1->resize({dim0});
REQUIRE_NOTHROW(op -> computeOutputDims());
REQUIRE((op->getOutput(0)->dims()).empty());
// input_1 - wrong
T1->resize({dim0+1});
REQUIRE_THROWS(op -> computeOutputDims());
}
SECTION("1-D / 2-D") {
// input_1 - right
const std::size_t dim1 = dist(gen);
T1->resize({dim0,dim1});
REQUIRE_NOTHROW(op -> computeOutputDims());
REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim1}));
// input_1 - wrong
T1->resize({dim0+1,dim1});
REQUIRE_THROWS(op -> computeOutputDims());
}
SECTION("1-D / +2-D") {
// input_1 - right
const std::size_t dim1 = dist(gen);
const std::size_t dim2 = dist(gen);
const std::size_t dim3 = dist(gen);
T1->resize({dim1,dim2,dim0,dim3});
REQUIRE_NOTHROW(op -> computeOutputDims());
REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim1,dim2,dim3}));
}
}
SECTION("2-D / N-D") {
// input_0
std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
const std::size_t dim0 = dist(gen);
const std::size_t dim1 = dist(gen);
T0->resize({dim0,dim1});
op -> associateInput(0,T0);
// input_1
std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
op -> associateInput(1,T1);
SECTION("2-D / 1-D") {
// input_1 - right
T1->resize({dim1});
REQUIRE_NOTHROW(op -> computeOutputDims());
REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0}));
// input_1 - wrong
T1->resize({dim1+1});
REQUIRE_THROWS(op -> computeOutputDims());
}
SECTION("2-D / 2-D") {
// input_1 - right
const std::size_t dim2 = dist(gen);
T1->resize({dim1, dim2});
REQUIRE_NOTHROW(op -> computeOutputDims());
REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim2}));
// input_1 - wrong
T1->resize({dim1+1,dim2});
REQUIRE_THROWS(op -> computeOutputDims());
}
SECTION("2-D / +2-D") {
// input_1 - right
const std::size_t dim2 = dist(gen);
const std::size_t dim3 = dist(gen);
const std::size_t dim4 = dist(gen);
T1->resize({dim3,dim4,dim1, dim2});
REQUIRE_NOTHROW(op -> computeOutputDims());
REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim3,dim4,dim0,dim2}));
// input_1 - wrong
T1->resize({dim3,dim4,dim1+1,dim2});
REQUIRE_THROWS(op -> computeOutputDims());
}
}
SECTION("+2-D / +2-D") {
// input_0
std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
const std::size_t dim0 = dist(gen) + 1;
const std::size_t dim1 = 1;
const std::size_t dim2 = dist(gen);
const std::size_t dim3 = dist(gen);
T0->resize({dim0,dim1,dim2,dim3});
op -> associateInput(0,T0);
// input_1
std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
op -> associateInput(1,T1);
// input_1 - right
// 1
const std::size_t dim5 = dist(gen);
T1->resize({dim0,dim1,dim3,dim5});
REQUIRE_NOTHROW(op -> computeOutputDims());
REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1,dim2,dim5}));
// 2 - input_1 broadcast
T1->resize({1,dim1,dim3,dim5});
REQUIRE_NOTHROW(op -> computeOutputDims());
REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1,dim2,dim5}));
// 3 - input_0 broadcast
const std::size_t dim1_bigger = dist(gen) + 1;
T1->resize({dim0,dim1_bigger,dim3,dim5});
REQUIRE_NOTHROW(op -> computeOutputDims());
REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1_bigger,dim2,dim5}));
// 4 - input_0+input_1 broadcast
T1->resize({1,dim1_bigger,dim3,dim5});
REQUIRE_NOTHROW(op -> computeOutputDims());
REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1_bigger,dim2,dim5}));
// input_1 - wrong
T1->resize({dim0+1,dim1,dim3,dim5});
REQUIRE_THROWS(op -> computeOutputDims());
}
}
} // namespace Aidge
\ No newline at end of file
@@ -25,9 +25,9 @@ namespace Aidge {
TEST_CASE("[cpu/recipies] FuseMulAdd", "[FuseMulAdd][recipies]") {
// generate the original GraphView
-auto matmul0 = MatMul(5, 5, "matmul0");
+auto matmul0 = MatMul("matmul0");
auto add0 = Add(2, "add0");
-auto matmul1 = MatMul(5, 5, "matmul1");
+auto matmul1 = MatMul("matmul1");
auto add1 = Add(2, "add1");
auto b0 = Producer({5}, "B0");
@@ -49,7 +49,7 @@ TEST_CASE("[cpu/recipies] FuseMulAdd", "[FuseMulAdd][recipies]") {
b1->addChild(add1, 0, 1);
auto g = std::make_shared<GraphView>();
-g->add({matmul0, add0, matmul1, add1, b0, b1});
+g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1});
// Check original graph
REQUIRE(g->getNodes() ==
// Check original graph
REQUIRE(g->getNodes() ==
......
-0.0.1
\ No newline at end of file
+0.1.1