Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 345 additions and 79 deletions
@@ -21,13 +21,36 @@ namespace py = pybind11;
namespace Aidge {
void init_GenericOperator(py::module& m) {
py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, OperatorTensor, DynamicAttributes>(m, "GenericOperatorOp",
py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, DynamicAttributes, OperatorTensor>(m, "GenericOperatorOp",
py::multiple_inheritance())
.def_readonly_static("identity", &GenericOperator_Op::Identity)
.def("compute_output_dims", &GenericOperator_Op::computeOutputDims)
.def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function"));
m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"),
py::arg("name") = "");
// &GenericOperator
m.def("GenericOperator",
[]( const std::string& type,
IOIndex_t nbData,
IOIndex_t nbParam,
IOIndex_t nbOut,
const std::string& name,
const py::kwargs kwargs){
std::shared_ptr<Node> genericNode = GenericOperator(
type,
nbData,
nbParam,
nbOut,
name
);
if (kwargs){
std::shared_ptr<GenericOperator_Op> gop = std::static_pointer_cast<GenericOperator_Op>(genericNode->getOperator());
for (auto item : kwargs) {
std::string key = py::cast<std::string>(item.first);
py::object value = py::reinterpret_borrow<py::object>(item.second);
gop->setAttrPy(key, std::move(value));
}
}
return genericNode;
}, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"), py::arg("name") = "");
}
} // namespace Aidge
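The new overload reproduces, in a single Python call, what previously took a node creation followed by manual attribute setup: every keyword argument is forwarded to setAttrPy() on the underlying GenericOperator_Op. A minimal C++ sketch of the equivalent, assuming the made-up operator type "Flatten" and attribute "axis":

#include <cstdint>
#include <memory>

#include "aidge/operator/GenericOperator.hpp"

// Python: aidge_core.GenericOperator("Flatten", 1, 0, 1, name="flatten0", axis=1)
std::shared_ptr<Aidge::Node> makeFlatten() {
    auto node = Aidge::GenericOperator("Flatten", 1, 0, 1, "flatten0");
    auto op = std::static_pointer_cast<Aidge::GenericOperator_Op>(node->getOperator());
    op->addAttr<std::int64_t>("axis", 1);  // one dynamic attribute per kwarg
    return node;
}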
@@ -18,9 +18,10 @@ namespace py = pybind11;
namespace Aidge {
void init_LeakyReLU(py::module& m) {
py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, OperatorTensor, Attributes>(m, "LeakyReLUOp", py::multiple_inheritance())
py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Attributes, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
.def("get_inputs_name", &LeakyReLU_Op::getInputsName)
.def("get_outputs_name", &LeakyReLU_Op::getOutputsName);
.def("get_outputs_name", &LeakyReLU_Op::getOutputsName)
.def("attributes_name", &LeakyReLU_Op::staticGetAttrsName);
m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
}
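The same attributes_name entry is added to the MatMul, MaxPooling, Pad, Producer, ReduceMean, Softmax and Transpose bindings below. Since staticGetAttrsName takes no self parameter, Python is expected to call it on the class, e.g. aidge_core.LeakyReLUOp.attributes_name(); the C++ helper it wraps is simply:

#include "aidge/operator/LeakyReLU.hpp"

// Static attribute names, e.g. {"NegativeSlope"} for LeakyReLU (the exact
// spelling is an assumption, taken from the operator's EnumStrings data).
const auto kAttrNames = Aidge::LeakyReLU_Op::staticGetAttrsName();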
@@ -20,9 +20,10 @@ namespace py = pybind11;
namespace Aidge {
void declare_MatMul(py::module &m) {
py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor, Attributes>(m, "MatMulOp", py::multiple_inheritance())
py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Attributes, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
.def("get_inputs_name", &MatMul_Op::getInputsName)
.def("get_outputs_name", &MatMul_Op::getOutputsName);
.def("get_outputs_name", &MatMul_Op::getOutputsName)
.def("attributes_name", &MatMul_Op::staticGetAttrsName);
m.def("MatMul", &MatMul, py::arg("in_channels"), py::arg("out_channels"), py::arg("name") = "");
}
@@ -26,7 +26,7 @@ namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor, Attributes>(
py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>(
m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
py::multiple_inheritance())
.def(py::init<const std::array<DimSize_t, DIM> &,
@@ -36,7 +36,8 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
py::arg("stride_dims"),
py::arg("ceil_mode"))
.def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
.def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName);
.def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
.def("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName);
m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
@@ -25,7 +25,7 @@ namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Operator, Attributes>(
py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>(
m, ("PadOp" + std::to_string(DIM) + "D").c_str(),
py::multiple_inheritance())
.def(py::init<const std::array<DimSize_t, 2*DIM> &,
@@ -36,6 +36,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
py::arg("borderValue") = 0.0)
.def("get_inputs_name", &Pad_Op<DIM>::getInputsName)
.def("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
.def("attributes_name", &Pad_Op<DIM>::staticGetAttrsName)
;
m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
@@ -30,13 +30,14 @@ void declare_Producer(py::module &m) {
void init_Producer(py::module &m) {
py::class_<Producer_Op, std::shared_ptr<Producer_Op>, OperatorTensor, Attributes>(
py::class_<Producer_Op, std::shared_ptr<Producer_Op>, Attributes, OperatorTensor>(
m,
"ProducerOp",
py::multiple_inheritance())
.def("dims", &Producer_Op::dims)
.def("get_inputs_name", &Producer_Op::getInputsName)
.def("get_outputs_name", &Producer_Op::getOutputsName);
.def("get_outputs_name", &Producer_Op::getOutputsName)
.def("attributes_name", &Producer_Op::staticGetAttrsName);
m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&, bool)>(&Producer), py::arg("tensor"), py::arg("name") = "", py::arg("constant") = false);
declare_Producer<1>(m);
@@ -24,10 +24,11 @@ namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) {
py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, OperatorTensor, Attributes>(
py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, Attributes, OperatorTensor>(
m, ("ReduceMeanOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
.def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName)
.def("get_outputs_name", &ReduceMean_Op<DIM>::getOutputsName)
.def("attributes_name", &ReduceMean_Op<DIM>::staticGetAttrsName)
;
m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), [](const std::vector<int>& axes,
@@ -19,9 +19,10 @@ namespace py = pybind11;
namespace Aidge {
void init_Softmax(py::module& m) {
py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor, Attributes>(m, "SoftmaxOp", py::multiple_inheritance())
py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Attributes, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
.def("get_inputs_name", &Softmax_Op::getInputsName)
.def("get_outputs_name", &Softmax_Op::getOutputsName);
.def("get_outputs_name", &Softmax_Op::getOutputsName)
.def("attributes_name", &Softmax_Op::staticGetAttrsName);
m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
}
@@ -25,12 +25,13 @@
namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM>
void declare_Transpose(py::module &m) {
py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, OperatorTensor, Attributes>(
py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Attributes, OperatorTensor>(
m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
.def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
.def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName);
.def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName)
.def("attributes_name", &Transpose_Op<DIM>::staticGetAttrsName);
m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order,
const std::string& name) {
@@ -11,6 +11,9 @@
#include <pybind11/pybind11.h>
#include "aidge/backend/cpu/data/TensorImpl.hpp" // This include add Tensor
namespace py = pybind11;
namespace Aidge {
...
#include <pybind11/pybind11.h>
#include "aidge/utils/Attributes.hpp"
#include "aidge/utils/DynamicAttributes.hpp"
#include "aidge/utils/StaticAttributes.hpp"
namespace py = pybind11;
namespace Aidge {
@@ -21,11 +22,13 @@ void init_Attributes(py::module& m){
.def("has_attr", &Attributes::hasAttr, py::arg("name"))
.def("get_attr_type", &Attributes::getAttrType, py::arg("name"))
.def("get_attrs_name", &Attributes::getAttrsName)
.def("get_attr", &Attributes::getAttrPy, py::arg("name"));
.def("get_attr", &Attributes::getAttrPy, py::arg("name"))
.def("__getattr__", &Attributes::getAttrPy, py::arg("name"))
.def("set_attr", &Attributes::setAttrPy, py::arg("name"), py::arg("value"))
.def("__setattr__", &Attributes::setAttrPy, py::arg("name"), py::arg("value"));
py::class_<DynamicAttributes, std::shared_ptr<DynamicAttributes>, Attributes>(m, "DynamicAttributes")
.def("add_attr", &DynamicAttributes::addAttrPy, py::arg("name"), py::arg("value"))
.def("set_attr", &DynamicAttributes::setAttrPy, py::arg("name"), py::arg("value"))
.def("del_attr", &DynamicAttributes::delAttr, py::arg("name"));
m.def("test_DynamicAttributes_binding", &test_DynamicAttributes_binding);
@@ -14,23 +14,23 @@
#include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
void Aidge::TensorImpl::copyFrom(const TensorImpl& srcImpl, NbElts_t length) {
if (&srcImpl == this) {
void Aidge::TensorImpl::copyFrom(const TensorImpl& srcImpl, NbElts_t length, NbElts_t srcOffset, NbElts_t dstOffset) {
if (&srcImpl == this && srcOffset == dstOffset) {
return;
}
if (srcImpl.device() != device()) {
if (srcImpl.backend() == backend()) {
// Same backend, but different device
copyFromDevice(srcImpl.rawPtr(), length, srcImpl.device());
copyFromDevice(srcImpl.rawPtr(srcOffset), srcImpl.device(), length, dstOffset);
}
else if (srcImpl.hostPtr() != nullptr) {
// Different backend, but input is valid on host
copyFromHost(srcImpl.hostPtr(), length);
copyFromHost(srcImpl.hostPtr(srcOffset), length, dstOffset);
}
else if (hostPtr() != nullptr) {
// Different backend, but dst is valid on host
srcImpl.copyToHost(hostPtr(), length);
srcImpl.copyToHost(hostPtr(srcOffset), length, dstOffset);
}
else {
// No direct link possible from src to dst device
@@ -40,12 +40,12 @@ void Aidge::TensorImpl::copyFrom(const TensorImpl& srcImpl, NbElts_t length) {
// - There is currently no concrete use case
// - Just providing a pointer would be unsafe (risk of buffer overflow...)
auto tmpHostBuffer = std::unique_ptr<char[]>(new char[scalarSize() * length]);
srcImpl.copyToHost(tmpHostBuffer.get(), length);
copyFromHost(tmpHostBuffer.get(), length);
srcImpl.copyToHost(tmpHostBuffer.get(), length, srcOffset);
copyFromHost(tmpHostBuffer.get(), length, dstOffset);
}
}
else {
// Same device: simple copy on device
copy(srcImpl.rawPtr(), length);
copy(srcImpl.rawPtr(srcOffset), length, dstOffset);
}
}
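The new srcOffset/dstOffset parameters let a copy start at the right element when either side is a view into a larger storage. A sketch at the Tensor level, assuming the cpu backend used by the tests below:

#include "aidge/data/Tensor.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"

void copyFromView() {
    Aidge::Tensor x = Aidge::Array2D<int, 2, 2>{{{1, 2}, {3, 4}}};
    Aidge::Tensor row = x.extract({1});    // shares x's storage, element offset 2
    Aidge::Tensor dst(Aidge::DataType::Int32);
    dst.copyFrom(row);                     // row's offset is forwarded as srcOffset -> {3, 4}
}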
@@ -13,11 +13,72 @@
#include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx) const {
AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
Tensor subTensor(mDataType);
subTensor.resize(std::vector<size_t>(mDims.begin() + coordIdx.size(), mDims.end()),
std::vector<size_t>(mStrides.begin() + coordIdx.size(), mStrides.end()));
subTensor.setBackend(mImpl->backend(), mImpl->device().second);
subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx));
return subTensor;
}
Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx, const std::vector<std::size_t>& dims) const {
AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
AIDGE_ASSERT(coordIdx.size() == mDims.size(), "Number of coordinates does not match number of dimensions");
Tensor subTensor(mDataType);
subTensor.resize(dims, mStrides);
subTensor.setBackend(mImpl->backend(), mImpl->device().second);
subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx));
return subTensor;
}
void Aidge::Tensor::makeContiguous() {
if (!mImpl || isContiguous()) {
return;
}
// Block so that mImpl ref count is 1 for resize()
{
// Create a new storage that will be contiguous
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mSize);
// Copy elements from old to new storage
size_t idx = 0;
while (idx < mSize) {
const size_t storageIdx = getStorageIdx(getCoord(idx));
// Determine the size of the contiguous chunk
size_t copySize = 1;
while (idx + copySize < mSize &&
getStorageIdx(getCoord(idx + copySize)) == storageIdx + copySize)
{
++copySize;
}
// Perform a single copy for the contiguous chunk
newImpl->copy(mImpl->rawPtr(mImplOffset + storageIdx), copySize, idx);
// Move to the next index after the contiguous chunk
idx += copySize;
}
// Replace old storage by new, contiguous, storage
setImpl(newImpl);
}
// Resize tensor without strides => tensor is now contiguous
resize(mDims);
}
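makeContiguous() copies in runs rather than element by element: it walks the logical index space and issues one copy() per run of elements that are already adjacent in storage. A sketch of a case that actually repacks, assuming the cpu backend:

#include "aidge/data/Tensor.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"

void repackColumn() {
    Aidge::Tensor x = Aidge::Array2D<int, 2, 2>{{{1, 2}, {3, 4}}};
    Aidge::Tensor col = x.extract({0, 0}, {2, 1});  // strides {2, 1}: not contiguous
    col.makeContiguous();  // two one-element chunks, {1} and {3}, land in fresh storage
    // x keeps its original storage; only col's impl was replaced.
}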
void Aidge::Tensor::copyCast(const Tensor& src) {
if (&src == this) {
return;
}
AIDGE_ASSERT(src.isContiguous(), "cannot copy-cast non-contiguous tensor");
// Current Tensor has necessarily a data type, but may not have backend
if (!getImpl()) {
// If no backend was set for the current tensor, use the same as src
@@ -27,7 +88,7 @@ void Aidge::Tensor::copyCast(const Tensor& src) {
resize(src.dims());
AIDGE_ASSERT(src.getImpl()->device() == getImpl()->device(), "cannot copy-cast from a different backend/device");
getImpl()->copyCast(src.getImpl()->rawPtr(), src.size(), src.dataType());
getImpl()->copyCast(src.getImpl()->rawPtr(src.mImplOffset), src.dataType(), src.size(), mImplOffset);
}
void Aidge::Tensor::copyFrom(const Tensor& src) {
@@ -35,6 +96,8 @@ void Aidge::Tensor::copyFrom(const Tensor& src) {
return;
}
AIDGE_ASSERT(src.isContiguous(), "cannot copy from non-contiguous tensor");
// Current Tensor has necessarily a data type, but may not have backend
if (!getImpl()) {
// If no backend was set for the current tensor, use the same as src
@@ -44,7 +107,7 @@ ...
resize(src.dims());
AIDGE_ASSERT(src.dataType() == dataType(), "cannot copy from a different data type");
getImpl()->copyFrom(*(src.getImpl()), src.size());
getImpl()->copyFrom(*(src.getImpl()), src.size(), src.mImplOffset, mImplOffset);
}
void Aidge::Tensor::copyCastFrom(const Tensor& src, std::shared_ptr<Tensor>& movedSrcPtr) {
@@ -52,6 +115,8 @@ void Aidge::Tensor::copyCastFrom(const Tensor& src, std::shared_ptr<Tensor>& mov
return;
}
AIDGE_ASSERT(src.isContiguous(), "cannot copy-cast from non-contiguous tensor");
// Current Tensor has necessarily a data type, but may not have backend
if (!getImpl()) {
// If no backend was set for the current tensor, use the same as src
@@ -65,12 +130,35 @@ ...
const auto device = getImpl()->device();
const Tensor& movedSrc = src.refFrom(movedSrcPtr, device.first, device.second);
// Second, copy-cast data (necessary)
getImpl()->copyCast(movedSrc.getImpl()->rawPtr(), movedSrc.size(), movedSrc.dataType());
getImpl()->copyCast(movedSrc.getImpl()->rawPtr(movedSrc.mImplOffset), movedSrc.dataType(), movedSrc.size(), mImplOffset);
}
else {
// Directly copy, no conversion necessary
// Avoid making a double copy if both data type and device are the same
getImpl()->copyFrom(*(src.getImpl()), src.size());
getImpl()->copyFrom(*(src.getImpl()), src.size(), src.mImplOffset, mImplOffset);
}
}
Aidge::Tensor& Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) {
// Scott Meyers' solution to avoid code duplication
return const_cast<Tensor&>(static_cast<const Tensor&>(*this).refContiguous(fallback));
}
const Aidge::Tensor& Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) const {
AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot refCast() it");
if (isContiguous()) {
return *this;
}
else {
if (this != fallback.get()) {
// Shallow copy to fallback
*fallback = *this;
}
// Make fallback contiguous
fallback->makeContiguous();
return *fallback;
}
}
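Usage follows the established refCast()/refFrom() pattern: the caller keeps a fallback alive and works on the returned reference, which aliases the tensor itself whenever it is already contiguous. A sketch:

#include <memory>

#include "aidge/data/Tensor.hpp"

void consumeContiguous(const Aidge::Tensor& t) {
    auto fallback = std::make_shared<Aidge::Tensor>();  // shallow-copied into when repacking is needed
    const Aidge::Tensor& packed = t.refContiguous(fallback);
    // packed is t itself if t was contiguous, otherwise *fallback after makeContiguous().
    static_cast<void>(packed);
}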
@@ -91,6 +179,8 @@ const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, c
fallback->setDataType(dt);
}
else {
AIDGE_ASSERT(isContiguous(), "cannot refCast non-contiguous tensor");
if (!fallback) {
fallback = std::make_shared<Tensor>(dt);
}
@@ -101,7 +191,7 @@ const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, c
const auto device = getImpl()->device();
fallback->setBackend(device.first, device.second, false); // don't keep previous data (no copy)
fallback->resize(dims());
fallback->getImpl()->copyCast(getImpl()->rawPtr(), size(), dataType());
fallback->getImpl()->copyCast(getImpl()->rawPtr(mImplOffset), dataType(), size(), fallback->mImplOffset);
}
return *fallback;
}
@@ -124,6 +214,8 @@ const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, c
fallback->setBackend(backend, device);
}
else {
AIDGE_ASSERT(isContiguous(), "cannot refFrom non-contiguous tensor");
if (!fallback) {
fallback = std::make_shared<Tensor>(dataType());
}
@@ -133,8 +225,34 @@ const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, c
fallback->setBackend(backend, device, false); // don't keep previous data (no copy)
fallback->resize(dims());
fallback->getImpl()->copyFrom(*getImpl(), size());
fallback->getImpl()->copyFrom(*getImpl(), size(), mImplOffset, fallback->mImplOffset);
}
return *fallback;
}
}
Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device) {
// Scott Meyers' solution to avoid code duplication
return const_cast<Tensor&>(static_cast<const Tensor&>(*this).ref(fallback, dt, backend, device));
}
const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device) const {
AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot ref() it");
if (dt == dataType() && std::make_pair(backend, device) == getImpl()->device()) {
return *this;
}
else {
// Change fallback type, backend & device, without any data copy
if (!fallback) {
fallback = std::make_shared<Tensor>(dt);
}
else {
fallback->setDataType(dt, false); // don't keep previous data (no copy)
}
fallback->setBackend(backend, device, false); // don't keep previous data (no copy)
fallback->resize(dims());
return *fallback;
}
}
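ref() rounds out the family: unlike refCast()/refFrom() it never copies data, returning either the tensor itself (data type, backend and device already match) or an empty fallback with matching dims, which makes it suitable for preparing a destination buffer. A sketch with illustrative backend/device values:

#include <memory>

#include "aidge/data/Tensor.hpp"

void prepareDestination(const Aidge::Tensor& src) {
    std::shared_ptr<Aidge::Tensor> fallback;
    const Aidge::Tensor& dst = src.ref(fallback, Aidge::DataType::Float32, "cpu", 0);
    // dst is src itself on a match; otherwise an uninitialised tensor with
    // src's dims, ready to be filled by an explicit copy.
    static_cast<void>(dst);
}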
@@ -9,8 +9,8 @@
*
********************************************************************************/
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
@@ -22,18 +22,26 @@ const std::string Aidge::Gather_Op::Type = "Gather";
void Aidge::Gather_Op::computeOutputDims() {
// check inputs have been associated
if (!getInput(0) || !getInput(1)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
if (!getInput(0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
}
if (getInput(1)->nbDims()!=2){
AIDGE_THROW_OR_ABORT(std::runtime_error, "Indices input must be a 2D Tensor");
}
if (!getInput(0)->empty()) {
std::vector<DimSize_t> outDims = getInput(0)->dims();
const std::vector<DimSize_t> gatheredShape = this->template getAttr<GatherAttr::GatheredShape>();
// TODO: check indices and gatheredShape
const std::int64_t axisIdx = this->template getAttr<GatherAttr::Axis>() >= 0 ?
this->template getAttr<GatherAttr::Axis>() :
this->template getAttr<GatherAttr::Axis>() + outDims.size();
outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
if (!gatheredShape.empty())
{
outDims.insert(outDims.cbegin() + static_cast<std::size_t>(axisIdx),
gatheredShape.cbegin(),
gatheredShape.cend());
}
std::vector<DimSize_t> outDims = getInput(0)->dims();
std::vector<DimSize_t> indexesDims = getInput(1)->dims();
int axisIdx = this->template getAttr<GatherAttr::Axis>()>=0?this->template getAttr<GatherAttr::Axis>():this->template getAttr<GatherAttr::Axis>()+outDims.size();
outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx), indexesDims.begin(),indexesDims.end());
mOutputs[0]->resize(outDims);
mOutputs[0]->resize(outDims);
}
}
\ No newline at end of file
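Worked example of the new attribute-driven shape computation, with hypothetical values (input dims {2, 3, 4}, Axis = 1, GatheredShape = {5, 6}):

#include <cstdint>
#include <vector>

#include "aidge/utils/Types.h"

void gatherShapeExample() {
    std::vector<Aidge::DimSize_t> outDims = {2, 3, 4};
    const std::vector<Aidge::DimSize_t> gatheredShape = {5, 6};
    const std::int64_t axisIdx = 1;            // Axis attribute, already non-negative
    outDims.erase(outDims.begin() + axisIdx);  // {2, 4}
    outDims.insert(outDims.begin() + axisIdx, gatheredShape.cbegin(), gatheredShape.cend());
    // outDims == {2, 5, 6, 4}
}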
@@ -27,31 +27,32 @@ void Aidge::Reshape_Op::computeOutputDims() {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
}
std::vector<DimSize_t> outDims;
// variables to handle a negative dimension
bool foundNegativeDimension = false;
std::size_t outSize = 1;
DimIdx_t negativeIndex = 0;
for(std::size_t i = 0; i < this->template getAttr<ReshapeAttr::Shape>().size(); ++i)
{
std::int64_t dimSize = this->template getAttr<ReshapeAttr::Shape>()[i];
if (dimSize < 0) {
if (foundNegativeDimension) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator.");
if (!getInput(0)->empty()) {
std::vector<DimSize_t> outDims;
// variables to handle a negative dimension
bool foundNegativeDimension = false;
std::size_t outSize = 1;
DimIdx_t negativeIndex = 0;
for(std::size_t i = 0; i < this->template getAttr<ReshapeAttr::Shape>().size(); ++i)
{
std::int64_t dimSize = this->template getAttr<ReshapeAttr::Shape>()[i];
if (dimSize < 0) {
if (foundNegativeDimension) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator.");
}
foundNegativeDimension = true;
dimSize = 1;
negativeIndex = static_cast<DimIdx_t>(i);
}
foundNegativeDimension = true;
dimSize = 1;
negativeIndex = static_cast<DimIdx_t>(i);
outDims.push_back(static_cast<DimSize_t>(dimSize));
outSize *= static_cast<DimSize_t>(dimSize);
}
outDims.push_back(static_cast<DimSize_t>(dimSize));
outSize *= static_cast<DimSize_t>(dimSize);
}
if (foundNegativeDimension) {
outDims[negativeIndex] = (getInput(0) -> size()) / outSize;
}
if (foundNegativeDimension) {
outDims[negativeIndex] = (getInput(0) -> size()) / outSize;
}
mOutputs[0]->resize(outDims);
mOutputs[0]->resize(outDims);
}
}
\ No newline at end of file
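Worked example of the negative-dimension fix-up, with hypothetical values (Shape = {2, -1} on an input of size 8):

#include <cstddef>
#include <vector>

#include "aidge/utils/Types.h"

void reshapeShapeExample() {
    std::vector<Aidge::DimSize_t> outDims = {2, 1};  // the -1 is provisionally counted as 1
    const std::size_t outSize = 2;                   // product of the explicit dimensions
    outDims[1] = 8 / outSize;                        // input size 8 -> outDims == {2, 4}
}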
@@ -30,21 +30,23 @@ void Aidge::Slice_Op::computeOutputDims() {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
}
DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size();
const DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size();
std::vector<DimSize_t> outDims = getInput(0)->dims();
for (std::size_t i = 0; i < nbAxes; ++i) {
// For each slice operation get the params and cast them to size_t
const std::int64_t axis_ = this->template getAttr<SliceAttr::Axes>()[i];
const std::int64_t start_ = this->template getAttr<SliceAttr::Starts>()[i];
const std::int64_t end_ = this->template getAttr<SliceAttr::Ends>()[i];
const std::size_t axis = axis_ >= 0 ? static_cast<std::size_t>(axis_) : axis_ + getInput(0)->nbDims();
const std::size_t start = start_ >= 0 ? static_cast<std::size_t>(start_) : start_ + getInput(0)->dims()[axis];
const std::size_t end = end_ >= 0 ? static_cast<std::size_t>(end_) : end_ + getInput(0)->dims()[axis];
const std::size_t axis = axis_ >= 0 ? static_cast<std::size_t>(axis_) : static_cast<std::size_t>(axis_) + getInput(0)->nbDims();
const std::size_t start = start_ >= 0 ? static_cast<std::size_t>(start_) : static_cast<std::size_t>(start_) + getInput(0)->dims()[axis];
const std::size_t end = end_ >= 0 ? static_cast<std::size_t>(end_) : static_cast<std::size_t>(end_) + getInput(0)->dims()[axis];
const std::size_t sliceLength = end - start + 1;
// Check if slice length is valid
if (sliceLength > getInput(0)->dims()[axis])
{
AIDGE_THROW_OR_ABORT(std::runtime_error, "ROI of Slice operator out of bounds");
}
outDims[axis] = sliceLength;
}
mOutputs[0]->resize(outDims);
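Worked example of the negative-index handling and the inclusive end, with hypothetical values (input dims {8, 6}, Axes = {1}, Starts = {-4}, Ends = {-1}):

#include <cstddef>

void sliceShapeExample() {
    const std::size_t dim = 6;                             // getInput(0)->dims()[1]
    const std::size_t start = std::size_t(-4 + int(dim));  // 2
    const std::size_t end = std::size_t(-1 + int(dim));    // 5
    const std::size_t sliceLength = end - start + 1;       // 4 -> output dims {8, 4}
    static_cast<void>(sliceLength);
}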
@@ -82,16 +82,16 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
clonedInputs[1] -> addChild(newNode, 0, 1);
clonedInputs[2] -> addChild(newNode, 0, 2);
// Slice for input and each parameter
std::vector<std::int32_t> inputDimsEnd(inputDims[0].first.size());
std::vector<std::int64_t> inputDimsEnd(inputDims[0].first.size());
for (std::size_t dim = 0; dim < inputDimsEnd.size(); ++dim) {
inputDimsEnd[dim] = static_cast<std::int32_t>(inputDims[0].first[dim] + inputDims[0].second[dim]) - 1;
inputDimsEnd[dim] = static_cast<std::int64_t>(inputDims[0].first[dim] + inputDims[0].second[dim]) - 1;
}
std::vector<std::int32_t> inputDimsStart(inputDims[0].first.size());
std::vector<std::int64_t> inputDimsStart(inputDims[0].first.size());
for (std::size_t dim = 0; dim < inputDimsStart.size(); ++dim) {
inputDimsStart[dim] = static_cast<std::int32_t>(inputDims[0].first[dim]);
inputDimsStart[dim] = static_cast<std::int64_t>(inputDims[0].first[dim]);
}
std::vector<std::int32_t> usedDims(inputDimsEnd.size());
std::iota(usedDims.begin(), usedDims.end(), static_cast<std::int32_t>(0));
std::vector<std::int64_t> usedDims(inputDimsEnd.size());
std::iota(usedDims.begin(), usedDims.end(), static_cast<std::int64_t>(0));
auto slice = Slice(inputDimsStart, inputDimsEnd, usedDims, "Slice_" + std::to_string(currentFirstDims[axis]));
slice -> addChild(newNode, 0, 0);
newNode -> addChild(concat, 0, i);
...
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <array>
#include <catch2/catch_test_macros.hpp>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"
using namespace Aidge;
TEST_CASE("Tensor creation") {
SECTION("from const array") {
Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
Tensor xFloat =
Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
SECTION("Tensor features") {
REQUIRE(x.nbDims() == 3);
REQUIRE(x.dims()[0] == 2);
REQUIRE(x.dims()[1] == 2);
REQUIRE(x.dims()[2] == 2);
REQUIRE(x.size() == 8);
}
SECTION("Access to array") {
REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
}
SECTION("get function") {
REQUIRE(x.get<int>({0, 0, 0}) == 1);
REQUIRE(x.get<int>({0, 0, 1}) == 2);
REQUIRE(x.get<int>({0, 1, 1}) == 4);
REQUIRE(x.get<int>({1, 1, 0}) == 7);
x.set<int>({1, 1, 1}, 36);
REQUIRE(x.get<int>({1, 1, 1}) == 36);
}
SECTION("Pretty printing for debug") { REQUIRE_NOTHROW(x.print()); }
SECTION("Tensor (in)equality") {
REQUIRE(x == xCopy);
REQUIRE_FALSE(x == xFloat);
}
}
}
TEST_CASE("Tensor methods") {
Tensor x = Array3D<int, 2, 2, 2>{{
{{1, 2},
{3, 4}},
{{5, 6},
{7, 8}}
}};
Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
Tensor xFloat =
Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
SECTION("Tensor sharing") {
Tensor xCopyCtor(x);
REQUIRE(xCopyCtor.getImpl() == x.getImpl());
Tensor xEqOp = x;
REQUIRE(xEqOp.getImpl() == x.getImpl());
Tensor xCloned = x.clone();
REQUIRE(xCloned.getImpl() != x.getImpl());
REQUIRE(xCloned == x);
}
SECTION("Tensor extract") {
Tensor y = x.extract({0, 1});
REQUIRE(y.getImpl() == x.getImpl());
REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
REQUIRE(y.isContiguous());
Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1});
REQUIRE(y2.getImpl() == x.getImpl());
REQUIRE(!y2.isContiguous());
Tensor y3 = y2.clone();
REQUIRE(y3.isContiguous());
REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
}
}