Commit 5cf910bf authored by Maxence Naud

Merge branch 'dev' into broadcasting

parents c67e943c 95077929
2 merge requests: !105 version 0.2.0, !65 [Add] broadcasting for Arithmetic Operators
Showing changed files with 187 additions and 62 deletions
@@ -18,9 +18,10 @@ namespace py = pybind11;
 namespace Aidge {

 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, OperatorTensor, Attributes>(m, "LeakyReLUOp", py::multiple_inheritance())
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Attributes, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
     .def("get_inputs_name", &LeakyReLU_Op::getInputsName)
-    .def("get_outputs_name", &LeakyReLU_Op::getOutputsName);
+    .def("get_outputs_name", &LeakyReLU_Op::getOutputsName)
+    .def("attributes_name", &LeakyReLU_Op::staticGetAttrsName);

     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
...
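Note: the same two changes repeat in every binding hunk below. The pybind11 base-class list swaps from (OperatorTensor, Attributes) to (Attributes, OperatorTensor), presumably so Attributes methods take precedence in the Python method resolution order, and each operator class gains an attributes_name binding to its static staticGetAttrsName. A minimal Python sketch of the resulting API, assuming the module imports as aidge_core and that Node exposes get_operator() (both names are assumptions, not shown in this diff):

    import aidge_core

    node = aidge_core.LeakyReLU(negative_slope=0.01, name="lrelu")
    op = node.get_operator()    # assumed Node accessor
    print(op.get_attrs_name())  # instance-side list of attribute names
    # attributes_name() is the new static counterpart bound from staticGetAttrsName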
@@ -20,9 +20,10 @@ namespace py = pybind11;
 namespace Aidge {

 void declare_MatMul(py::module &m) {
-    py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor, Attributes>(m, "MatMulOp", py::multiple_inheritance())
+    py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Attributes, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
     .def("get_inputs_name", &MatMul_Op::getInputsName)
-    .def("get_outputs_name", &MatMul_Op::getOutputsName);
+    .def("get_outputs_name", &MatMul_Op::getOutputsName)
+    .def("attributes_name", &MatMul_Op::staticGetAttrsName);

     m.def("MatMul", &MatMul, py::arg("in_channels"), py::arg("out_channels"), py::arg("name") = "");
 }
...
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {

 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
-  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor, Attributes>(
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
@@ -36,7 +36,8 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         py::arg("stride_dims"),
         py::arg("ceil_mode"))
   .def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName);
+  .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
+  .def("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName);

   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                const std::string& name,
...
@@ -25,7 +25,7 @@ namespace py = pybind11;
 namespace Aidge {

 template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
-  py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Operator, Attributes>(
+  py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>(
     m, ("PadOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, 2*DIM> &,
@@ -36,6 +36,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
        py::arg("borderValue") = 0.0)
   .def("get_inputs_name", &Pad_Op<DIM>::getInputsName)
   .def("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
+  .def("attributes_name", &Pad_Op<DIM>::staticGetAttrsName)
   ;
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
...
@@ -30,13 +30,14 @@ void declare_Producer(py::module &m) {

 void init_Producer(py::module &m) {
-    py::class_<Producer_Op, std::shared_ptr<Producer_Op>, OperatorTensor, Attributes>(
+    py::class_<Producer_Op, std::shared_ptr<Producer_Op>, Attributes, OperatorTensor>(
         m,
         "ProducerOp",
         py::multiple_inheritance())
     .def("dims", &Producer_Op::dims)
     .def("get_inputs_name", &Producer_Op::getInputsName)
-    .def("get_outputs_name", &Producer_Op::getOutputsName);
+    .def("get_outputs_name", &Producer_Op::getOutputsName)
+    .def("attributes_name", &Producer_Op::staticGetAttrsName);

     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&, bool)>(&Producer), py::arg("tensor"), py::arg("name") = "", py::arg("constant") = false);

     declare_Producer<1>(m);
...
@@ -24,10 +24,11 @@ namespace py = pybind11;
 namespace Aidge {

 template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) {
-  py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, OperatorTensor, Attributes>(
+  py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("ReduceMeanOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
   .def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName)
   .def("get_outputs_name", &ReduceMean_Op<DIM>::getOutputsName)
+  .def("attributes_name", &ReduceMean_Op<DIM>::staticGetAttrsName)
   ;
   m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), [](const std::vector<int>& axes,
...
@@ -19,9 +19,10 @@ namespace py = pybind11;
 namespace Aidge {

 void init_Softmax(py::module& m) {
-    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor, Attributes>(m, "SoftmaxOp", py::multiple_inheritance())
+    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Attributes, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
     .def("get_inputs_name", &Softmax_Op::getInputsName)
-    .def("get_outputs_name", &Softmax_Op::getOutputsName);
+    .def("get_outputs_name", &Softmax_Op::getOutputsName)
+    .def("attributes_name", &Softmax_Op::staticGetAttrsName);

     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
 }
...
@@ -25,12 +25,13 @@
 namespace py = pybind11;
 namespace Aidge {

 template <DimIdx_t DIM>
 void declare_Transpose(py::module &m) {
-  py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, OperatorTensor, Attributes>(
+  py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
   .def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName);
+  .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName)
+  .def("attributes_name", &Transpose_Op<DIM>::staticGetAttrsName);

   m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order,
                                                               const std::string& name) {
...
@@ -11,6 +11,9 @@
 #include <pybind11/pybind11.h>
+
+#include "aidge/backend/cpu/data/TensorImpl.hpp" // This include adds Tensor
+
 namespace py = pybind11;
 namespace Aidge {
...
 #include <pybind11/pybind11.h>
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
+#include "aidge/utils/StaticAttributes.hpp"

 namespace py = pybind11;
 namespace Aidge {
@@ -21,11 +22,13 @@ void init_Attributes(py::module& m){
     .def("has_attr", &Attributes::hasAttr, py::arg("name"))
     .def("get_attr_type", &Attributes::getAttrType, py::arg("name"))
     .def("get_attrs_name", &Attributes::getAttrsName)
-    .def("get_attr", &Attributes::getAttrPy, py::arg("name"));
+    .def("get_attr", &Attributes::getAttrPy, py::arg("name"))
+    .def("__getattr__", &Attributes::getAttrPy, py::arg("name"))
+    .def("set_attr", &Attributes::setAttrPy, py::arg("name"), py::arg("value"))
+    .def("__setattr__", &Attributes::setAttrPy, py::arg("name"), py::arg("value"));

     py::class_<DynamicAttributes, std::shared_ptr<DynamicAttributes>, Attributes>(m, "DynamicAttributes")
     .def("add_attr", &DynamicAttributes::addAttrPy, py::arg("name"), py::arg("value"))
-    .def("set_attr", &DynamicAttributes::setAttrPy, py::arg("name"), py::arg("value"))
     .def("del_attr", &DynamicAttributes::delAttr, py::arg("name"));

     m.def("test_DynamicAttributes_binding", &test_DynamicAttributes_binding);
...
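This hunk moves set_attr from DynamicAttributes up to the Attributes base and adds __getattr__/__setattr__ aliases, so attributes become reachable as plain Python attributes. A hedged sketch of the resulting usage; the attribute spelling "NegativeSlope" and the get_operator() accessor are assumptions for illustration:

    node = aidge_core.LeakyReLU(0.01, "lrelu")
    op = node.get_operator()           # assumed Node accessor

    op.get_attr("NegativeSlope")       # explicit accessor
    v = op.NegativeSlope               # same lookup routed through __getattr__
    op.set_attr("NegativeSlope", 0.2)  # setter, now on the Attributes base
    op.NegativeSlope = 0.2             # same call routed through __setattr__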
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/

-#include <cassert>
 #include <cstddef>
+#include <cstdint>
 #include <string>
 #include <vector>
...
@@ -22,18 +22,26 @@ const std::string Aidge::Gather_Op::Type = "Gather";

 void Aidge::Gather_Op::computeOutputDims() {
     // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
+    if (!getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
     }

-    if (getInput(1)->nbDims()!=2){
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Indices input must be a 2D Tensor");
-    }
+    if (!getInput(0)->empty()) {
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        const std::vector<DimSize_t> gatheredShape = this->template getAttr<GatherAttr::GatheredShape>();
+        // TODO: check indices and gatheredShape

-    std::vector<DimSize_t> outDims = getInput(0)->dims();
-    std::vector<DimSize_t> indexesDims = getInput(1)->dims();
-    int axisIdx = this->template getAttr<GatherAttr::Axis>()>=0?this->template getAttr<GatherAttr::Axis>():this->template getAttr<GatherAttr::Axis>()+outDims.size();
-    outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
-    outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx), indexesDims.begin(),indexesDims.end());
-    mOutputs[0]->resize(outDims);
+        const std::int64_t axisIdx = this->template getAttr<GatherAttr::Axis>() >= 0 ?
+                                        this->template getAttr<GatherAttr::Axis>() :
+                                        this->template getAttr<GatherAttr::Axis>() + outDims.size();
+        outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
+        if (!gatheredShape.empty())
+        {
+            outDims.insert(outDims.cbegin() + static_cast<std::size_t>(axisIdx),
+                           gatheredShape.cbegin(),
+                           gatheredShape.cend());
+        }
+
+        mOutputs[0]->resize(outDims);
+    }
 }
\ No newline at end of file
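The reworked Gather shape inference no longer reads dims from a second indices input; it erases the axis dimension and, when the new GatheredShape attribute is non-empty, splices that shape in at the same position. A small Python model of the logic, with illustrative values:

    def gather_out_dims(in_dims, axis, gathered_shape):
        # negative axes count from the end, mirroring the C++ ternary above
        axis = axis if axis >= 0 else axis + len(in_dims)
        return in_dims[:axis] + list(gathered_shape) + in_dims[axis + 1:]

    assert gather_out_dims([2, 3, 4], 1, [5, 6]) == [2, 5, 6, 4]
    assert gather_out_dims([2, 3, 4], -1, []) == [2, 3]  # empty GatheredShape just drops the axis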
@@ -27,31 +27,32 @@ void Aidge::Reshape_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
     }

-    std::vector<DimSize_t> outDims;
-    // variables to handle a negative dimension
-    bool foundNegativeDimension = false;
-    std::size_t outSize = 1;
-    DimIdx_t negativeIndex = 0;
-
-    for(std::size_t i = 0; i < this->template getAttr<ReshapeAttr::Shape>().size(); ++i)
-    {
-        std::int64_t dimSize = this->template getAttr<ReshapeAttr::Shape>()[i];
-        if (dimSize < 0) {
-            if (foundNegativeDimension) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator.");
-            }
-            foundNegativeDimension = true;
-            dimSize = 1;
-            negativeIndex = static_cast<DimIdx_t>(i);
-        }
-        outDims.push_back(static_cast<DimSize_t>(dimSize));
-        outSize *= static_cast<DimSize_t>(dimSize);
-    }
-
-    if (foundNegativeDimension) {
-        outDims[negativeIndex] = (getInput(0) -> size()) / outSize;
-    }
-
-    mOutputs[0]->resize(outDims);
+    if (!getInput(0)->empty()) {
+        std::vector<DimSize_t> outDims;
+        // variables to handle a negative dimension
+        bool foundNegativeDimension = false;
+        std::size_t outSize = 1;
+        DimIdx_t negativeIndex = 0;
+
+        for(std::size_t i = 0; i < this->template getAttr<ReshapeAttr::Shape>().size(); ++i)
+        {
+            std::int64_t dimSize = this->template getAttr<ReshapeAttr::Shape>()[i];
+            if (dimSize < 0) {
+                if (foundNegativeDimension) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator.");
+                }
+                foundNegativeDimension = true;
+                dimSize = 1;
+                negativeIndex = static_cast<DimIdx_t>(i);
+            }
+            outDims.push_back(static_cast<DimSize_t>(dimSize));
+            outSize *= static_cast<DimSize_t>(dimSize);
+        }
+
+        if (foundNegativeDimension) {
+            outDims[negativeIndex] = (getInput(0)->size()) / outSize;
+        }
+
+        mOutputs[0]->resize(outDims);
+    }
 }
\ No newline at end of file
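The Reshape change only wraps the existing negative-dimension handling in an if (!getInput(0)->empty()) guard, deferring shape inference until the input has dims. For reference, a Python model of the -1 inference itself, with illustrative values:

    def reshape_out_dims(in_size, shape):
        # at most one negative entry; it is inferred from the remaining element count
        neg = [i for i, d in enumerate(shape) if d < 0]
        assert len(neg) <= 1, "found more than one negative dimension"
        out = [1 if d < 0 else d for d in shape]
        if neg:
            known = 1
            for d in out:
                known *= d
            out[neg[0]] = in_size // known
        return out

    assert reshape_out_dims(24, [2, -1, 3]) == [2, 4, 3]  # -1 becomes 24 / (2*3)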
@@ -30,21 +30,23 @@ void Aidge::Slice_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
     }

-    DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size();
+    const DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size();
     std::vector<DimSize_t> outDims = getInput(0)->dims();
     for (std::size_t i = 0; i < nbAxes; ++i) {
         // For each slice operation get the params and cast them to size_t
         const std::int64_t axis_ = this->template getAttr<SliceAttr::Axes>()[i];
         const std::int64_t start_ = this->template getAttr<SliceAttr::Starts>()[i];
         const std::int64_t end_ = this->template getAttr<SliceAttr::Ends>()[i];
-        const std::size_t axis = axis_ >= 0 ? static_cast<std::size_t>(axis_) : axis_ + getInput(0)->nbDims();
-        const std::size_t start = start_ >= 0 ? static_cast<std::size_t>(start_) : start_ + getInput(0)->dims()[axis];
-        const std::size_t end = end_ >= 0 ? static_cast<std::size_t>(end_) : end_ + getInput(0)->dims()[axis];
+        const std::size_t axis = axis_ >= 0 ? static_cast<std::size_t>(axis_) : static_cast<std::size_t>(axis_) + getInput(0)->nbDims();
+        const std::size_t start = start_ >= 0 ? static_cast<std::size_t>(start_) : static_cast<std::size_t>(start_) + getInput(0)->dims()[axis];
+        const std::size_t end = end_ >= 0 ? static_cast<std::size_t>(end_) : static_cast<std::size_t>(end_) + getInput(0)->dims()[axis];
         const std::size_t sliceLength = end - start + 1;
         // Check if slice length is valid
         if (sliceLength > getInput(0)->dims()[axis])
+        {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "ROI of Slice operator out of bounds");
+        }
         outDims[axis] = sliceLength;
     }
     mOutputs[0]->resize(outDims);
...
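The Slice fix casts the negative operand to std::size_t before the addition, making the unsigned wraparound explicit rather than relying on implicit conversion (the wraparound cancels in the addition); note the inclusive-end convention, sliceLength = end - start + 1. A Python model of the normalization, with illustrative values:

    def slice_out_dims(in_dims, axes, starts, ends):
        # negative axes/starts/ends wrap around; both range ends are inclusive
        out = list(in_dims)
        for ax, st, en in zip(axes, starts, ends):
            ax = ax if ax >= 0 else ax + len(in_dims)
            st = st if st >= 0 else st + in_dims[ax]
            en = en if en >= 0 else en + in_dims[ax]
            length = en - st + 1
            assert 0 < length <= in_dims[ax], "ROI of Slice operator out of bounds"
            out[ax] = length
        return out

    assert slice_out_dims([4, 5], [1], [1], [-2]) == [4, 3]  # end -2 wraps to index 3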
@@ -82,16 +82,16 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
             clonedInputs[1] -> addChild(newNode, 0, 1);
             clonedInputs[2] -> addChild(newNode, 0, 2);
             // Slice for input and each parameter
-            std::vector<std::int32_t> inputDimsEnd(inputDims[0].first.size());
+            std::vector<std::int64_t> inputDimsEnd(inputDims[0].first.size());
             for (std::size_t dim = 0; dim < inputDimsEnd.size(); ++dim) {
-                inputDimsEnd[dim] = static_cast<std::int32_t>(inputDims[0].first[dim] + inputDims[0].second[dim]) - 1;
+                inputDimsEnd[dim] = static_cast<std::int64_t>(inputDims[0].first[dim] + inputDims[0].second[dim]) - 1;
             }
-            std::vector<std::int32_t> inputDimsStart(inputDims[0].first.size());
+            std::vector<std::int64_t> inputDimsStart(inputDims[0].first.size());
             for (std::size_t dim = 0; dim < inputDimsStart.size(); ++dim) {
-                inputDimsStart[dim] = static_cast<std::int32_t>(inputDims[0].first[dim]);
+                inputDimsStart[dim] = static_cast<std::int64_t>(inputDims[0].first[dim]);
             }
-            std::vector<std::int32_t> usedDims(inputDimsEnd.size());
-            std::iota(usedDims.begin(), usedDims.end(), static_cast<std::int32_t>(0));
+            std::vector<std::int64_t> usedDims(inputDimsEnd.size());
+            std::iota(usedDims.begin(), usedDims.end(), static_cast<std::int64_t>(0));
             auto slice = Slice(inputDimsStart, inputDimsEnd, usedDims, "Slice_" + std::to_string(currentFirstDims[axis]));
             slice -> addChild(newNode, 0, 0);
             newNode -> addChild(concat, 0, i);
...
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <array>

#include <catch2/catch_test_macros.hpp>

#include "aidge/data/Tensor.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"

using namespace Aidge;

TEST_CASE("Tensor creation") {
    SECTION("from const array") {
        Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
        Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
        Tensor xFloat =
            Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};

        SECTION("Tensor features") {
            REQUIRE(x.nbDims() == 3);
            REQUIRE(x.dims()[0] == 2);
            REQUIRE(x.dims()[1] == 2);
            REQUIRE(x.dims()[2] == 2);
            REQUIRE(x.size() == 8);
        }

        SECTION("Access to array") {
            REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
            REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
        }

        SECTION("get function") {
            REQUIRE(x.get<int>({0, 0, 0}) == 1);
            REQUIRE(x.get<int>({0, 0, 1}) == 2);
            REQUIRE(x.get<int>({0, 1, 1}) == 4);
            REQUIRE(x.get<int>({1, 1, 0}) == 7);
            x.set<int>({1, 1, 1}, 36);
            REQUIRE(x.get<int>({1, 1, 1}) == 36);
        }

        SECTION("Pretty printing for debug") { REQUIRE_NOTHROW(x.print()); }

        SECTION("Tensor (in)equality") {
            REQUIRE(x == xCopy);
            REQUIRE_FALSE(x == xFloat);
        }
    }
}

TEST_CASE("Tensor methods") {
    Tensor x = Array3D<int, 2, 2, 2>{{
        {{1, 2},
         {3, 4}},
        {{5, 6},
         {7, 8}}
    }};

    Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};

    Tensor xFloat =
        Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};

    SECTION("Tensor sharing") {
        Tensor xCopyCtor(x);
        REQUIRE(xCopyCtor.getImpl() == x.getImpl());

        Tensor xEqOp = x;
        REQUIRE(xEqOp.getImpl() == x.getImpl());

        Tensor xCloned = x.clone();
        REQUIRE(xCloned.getImpl() != x.getImpl());
        REQUIRE(xCloned == x);
    }

    SECTION("Tensor extract") {
        Tensor y = x.extract({0, 1});
        REQUIRE(y.getImpl() == x.getImpl());
        REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
        REQUIRE(y.isContiguous());

        Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1});
        REQUIRE(y2.getImpl() == x.getImpl());
        REQUIRE(!y2.isContiguous());

        Tensor y3 = y2.clone();
        REQUIRE(y3.isContiguous());
        REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
    }
}