Compare revisions

Changes are shown as if the source revision was being merged into the target revision.
Commits on Source (5)
Showing 125 additions and 98 deletions
......@@ -132,10 +132,14 @@ static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
{"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int32_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int16(
{"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
{"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int8(
{"cpu", DataType::Int8}, Aidge::TensorImpl_cpu<int8_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt64(
{"cpu", DataType::UInt64}, Aidge::TensorImpl_cpu<uint64_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt32(
{"cpu", DataType::UInt32}, Aidge::TensorImpl_cpu<uint32_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
{"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt8(
{"cpu", DataType::UInt8}, Aidge::TensorImpl_cpu<uint8_t>::create);
} // namespace
......
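For context on the added registrars: each entry maps a {backend, DataType} pair to a TensorImpl creator, so Tensors of the newly covered unsigned types can now be placed on the cpu backend. A minimal sketch of the expected usage follows; the helper function name and include path are illustrative and not part of the diff.

#include <cstdint>
#include "aidge/data/Tensor.hpp"

// Illustrative sketch: with the {"cpu", DataType::UInt32} registrar in place,
// selecting the cpu backend on a UInt32 tensor is expected to resolve to
// Aidge::TensorImpl_cpu<uint32_t>::create.
void sketch_uint32_cpu_tensor() {
    Aidge::Tensor t(Aidge::DataType::UInt32);
    t.resize({2, 3});     // the tensor becomes defined
    t.setBackend("cpu");  // registrar lookup picks the matching TensorImpl_cpu
}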
......@@ -57,7 +57,8 @@ class Tensor : public Data,
/**
* @brief Construct a new empty Tensor object.
* It has the features of an undefined scalar.
* It is considered undefined, i.e. dims can't be forwarded from such a Tensor.
* @ref undefined() method for details
*/
Tensor(DataType dtype = DataType::Float32, DataFormat dformat = DataFormat::Default)
: Data(Type),
......@@ -65,7 +66,7 @@ class Tensor : public Data,
mDataFormat(dformat),
mDims(std::vector<DimSize_t>({})),
mStrides({1}),
mSize(1)
mSize(0)
{
// ctor
}
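The mSize change above (1 to 0) is what makes a default-constructed Tensor report as undefined until it is resized; the constructor test further down is updated accordingly. A brief sketch of the new behaviour, with an illustrative helper name not taken from the diff:

#include "aidge/data/Tensor.hpp"

// Sketch of the new default-constructor semantics: the Tensor starts with
// mSize == 0 and is therefore undefined until resize() is called.
void sketch_default_constructed_tensor() {
    Aidge::Tensor t;   // DataType::Float32, dims == {}, size() == 0
    // t.undefined() is expected to return true here
    t.resize({3});
    // t.size() == 3: the Tensor is now defined and its dims can be forwarded
}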
......@@ -523,14 +524,30 @@ public:
void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>());
/**
* @brief Return if the Tensor object has at least one element.
* @return true
* @return false
* @brief Return whether the Tensor object has a rank of 0, i.e. dimensions == {}.
* For defined Tensors, this implies that the Tensor is a scalar.
* For backward compatibility reasons, it is valid to call this predicate
* even on undefined Tensors, in which case it returns true.
* Hence, before testing the rank with this method, always check that the
* Tensor is not undefined().
* In particular, for operations such as forwardDims(), one should always
* use undefined() to test whether the Tensor dimensions have been defined.
* In that case, empty() can be used to distinguish scalars from N-D Tensors.
* @return true if the rank is 0 or the Tensor is undefined
*/
bool empty() const { return mDims.empty(); }
// bool newempty() const noexcept {
// return mSize == 0;
// }
/**
* @brief Returns whether the Tensor object is undefined.
* An undefined Tensor is equivalent to a Tensor for which dimensions have not
* been defined yet. Hence, dimension forwarding can't be done from undefined Tensors.
* The only case where a Tensor is undefined is after the default constructor
* and before any call to resize().
* Also, as soon as the resize() method has been called, the Tensor is irreversibly defined.
* @ref empty() method for distinguishing an undefined Tensor from a scalar
* @return true if undefined
*/
bool undefined() const { return mSize == 0; }
/**
* @brief Set each element of the tensor to zero.
......
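To make the documented empty()/undefined() split concrete, here is a short sketch of the intended semantics; the commented assertions are illustrative and not taken from the test suite.

#include "aidge/data/Tensor.hpp"

// Illustrative sketch: undefined() tells whether dimensions have been set at all,
// while empty() only tells whether the rank is 0.
void sketch_empty_vs_undefined() {
    Aidge::Tensor t;      // no resize() yet
    // t.undefined() == true, t.empty() == true (rank 0, but dims never defined)

    t.resize({});         // explicitly make it a scalar
    // t.undefined() == false, t.empty() == true  -> defined scalar

    t.resize({4, 4});
    // t.undefined() == false, t.empty() == false -> defined 4x4 Tensor
}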
......@@ -76,7 +76,7 @@ public:
* @return false Input has no dimensions or is a nullptr.
*/
bool dimsForwarded() const override final {
return mInputs[0] ? (mInputs[0]->empty() ? false : mInputs[0]->dims() == mOutputs[0]->dims()) : false;
return mInputs[0] ? (mInputs[0]->undefined() ? false : mInputs[0]->dims() == mOutputs[0]->dims()) : false;
}
......
......@@ -93,6 +93,7 @@ void init_Tensor(py::module& m){
.def("get_coord", &Tensor::getCoord)
.def("get_idx", &Tensor::getIdx)
.def_static("get_available_backends", &Tensor::getAvailableBackends)
.def("undefined", &Tensor::undefined)
.def("__str__", [](Tensor& b) {
if (b.empty()) {
return std::string("{}");
......
......@@ -29,7 +29,7 @@ Aidge::OperatorImpl::OperatorImpl(const Operator& op, const std::string& backend
Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
if (mOp.getRawInput(inputIdx)) {
const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
if (!input->empty()) {
if (!input->undefined()) {
// Known amount of data: requires the whole tensor by default
return Elts_t::DataElts(input->size());
}
......@@ -46,7 +46,7 @@ Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inpu
Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const {
if (mOp.getRawInput(inputIdx)) {
const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
if (!input->empty()) {
if (!input->undefined()) {
// Known amount of data: protect the whole tensor by default
return Elts_t::DataElts(input->size());
}
......@@ -67,7 +67,7 @@ Aidge::Elts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outp
const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
if (mOp.getRawOutput(outputIdx)) {
const auto output = std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx));
if (!output->empty()) {
if (!output->undefined()) {
// Known amount of data: requires the whole tensor by default,
// regardless of available data on inputs
return Elts_t::DataElts(output->size());
......
......@@ -150,13 +150,12 @@ Aidge::Tensor::~Tensor() noexcept = default;
void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
std::vector<Aidge::DimSize_t> strides) {
// TODO: scalar Tensor not handled
if (dims.empty()) { // scalar
mDims = std::vector<DimSize_t>(0);
mStrides = std::vector<DimSize_t>({1});
mContiguous = true;
computeSize();
computeSize(); // will set mSize to 1
if (mImpl) {
mImpl->resize(mDims);
}
......@@ -214,7 +213,7 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
std::string Aidge::Tensor::toString() const {
AIDGE_ASSERT(
mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) ||
mImpl && (undefined() || (dims() == std::vector<DimSize_t>({0})) ||
(mImpl->hostPtr() != nullptr)),
"tensor should have a valid host pointer");
......
......@@ -152,7 +152,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
// Add-on to display the operator's output dimensions
std::string dims = "";
const auto op = std::dynamic_pointer_cast<OperatorTensor>(node_ptr->getOperator());
if (op && !op->getOutput(outputIdx)->dims().empty()) {
if (op && !op->getOutput(outputIdx)->undefined()) {
dims += " " + fmt::format("{}", op->getOutput(outputIdx)->dims());
}
......@@ -198,7 +198,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
// Add-on to display the operator's output dimensions
std::string dims = "";
const auto op = std::dynamic_pointer_cast<OperatorTensor>(output.first->getOperator());
if (op && op->getOutput(output.second) && !op->getOutput(output.second)->dims().empty()) {
if (op && op->getOutput(output.second) && !op->getOutput(output.second)->undefined()) {
dims += " " + fmt::format("{}", op->getOutput(output.second)->dims());
}
......@@ -441,8 +441,8 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
// Input is missing
AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i),
"Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
AIDGE_ASSERT(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(),
"Empty input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
AIDGE_ASSERT(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->undefined(),
"Undefined input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
}
}
......
......@@ -64,14 +64,8 @@ bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
return false;
}
const std::size_t nbDimsInput0 = getInput(0)->nbDims();
if (nbDimsInput0 == 0) {
return false;
}
AIDGE_ASSERT(nbDimsInput0 > 0, "First input in {} Operator is empty", type());
AIDGE_ASSERT(nbDimsInput0 > 0, "First input in {} Operator is scalar", type());
for (IOIndex_t i = 1; i < nbInputs(); ++i) {
if (getInput(i)->nbDims() == 0) {
return false;
}
AIDGE_ASSERT(nbDimsInput0 == getInput(i)->nbDims(),
"Input 0 and input {} in {} Operator have different number of dimensions: {} / {}",
i, type(), nbDimsInput0, getInput(i)->nbDims());
......
......@@ -51,7 +51,7 @@ void Aidge::Gather_OpImpl::forward() {
const std::string Aidge::Gather_Op::Type = "Gather";
bool Aidge::Gather_Op::dimsForwarded() const {
if (getInput(1) && !getInput(1)->empty()) {
if (getInput(1) && !getInput(1)->undefined()) {
// output dims are data dependent
return false;
}
......
......@@ -69,7 +69,10 @@ bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
mOutputs[0]->resize(outDims);
return true;
} else {
AIDGE_ASSERT(false, "Incompatible scalar and N-D sizes.");
}
}
return false;
......
......@@ -85,12 +85,12 @@ bool Aidge::Memorize_Op::forwardDims(bool /*allowDataDependency*/) {
if (inputsAssociated(false)) {
// Only require one of the input to have dims defined
// Otherwise, forwardDims() won't converge!
if (!(getInput(0)->empty())) {
if (!(getInput(0)->undefined())) {
const auto expectedDims = getInput(0)->dims();
mOutputs[0]->resize(expectedDims);
return true;
}
else if (!(getInput(1)->empty())) {
else if (!(getInput(1)->undefined())) {
const auto expectedDims = getInput(1)->dims();
mOutputs[0]->resize(expectedDims);
return true;
......@@ -105,7 +105,7 @@ bool Aidge::Memorize_Op::dimsForwarded() const {
bool forwarded = true;
// check outputs have been filled
for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
forwarded &= !(getOutput(i)->empty());
forwarded &= !(getOutput(i)->undefined());
}
return forwarded;
}
......
......@@ -123,7 +123,7 @@ bool Aidge::OperatorTensor::inputsAssociated(bool checkNonEmpty) const {
}
if (checkNonEmpty && getInput(i)) {
associated &= !(getInput(i)->empty());
associated &= !(getInput(i)->undefined());
}
}
......@@ -152,13 +152,13 @@ bool Aidge::OperatorTensor::dimsForwarded() const {
// check both inputs and outputs have been filled
for (IOIndex_t i = 0; i < nbInputs(); ++i) {
if (inputCategory(i) != InputCategory::OptionalData && inputCategory(i) != InputCategory::OptionalParam) {
forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
forwarded &= mInputs[i] ? !(getInput(i)->undefined()) : false;
}
}
for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
// If getOutput(i) is nullptr, ignore this output (it may be a dummy
// output in a MetaOperator)
forwarded &= (getOutput(i)) ? !(getOutput(i)->empty()) : true;
forwarded &= (getOutput(i)) ? !(getOutput(i)->undefined()) : true;
}
return forwarded;
}
......
......@@ -31,7 +31,7 @@ void Aidge::Reshape_OpImpl::forward() {
const std::string Aidge::Reshape_Op::Type = "Reshape";
bool Aidge::Reshape_Op::dimsForwarded() const {
if (getInput(1) && !getInput(1)->empty()) {
if (getInput(1) && !getInput(1)->undefined()) {
// output dims are data dependent
return false;
}
......
......@@ -27,9 +27,9 @@ const std::string Aidge::Resize_Op::Type = "Resize";
bool Aidge::Resize_Op::dimsForwarded() const {
// in case of ROI add getInput(1) condition
if ((getInput(1) && !getInput(1)->empty())
|| (getInput(2) && !getInput(2)->empty())
|| (getInput(3) && !getInput(3)->empty())
if ((getInput(1) && !getInput(1)->undefined())
|| (getInput(2) && !getInput(2)->undefined())
|| (getInput(3) && !getInput(3)->undefined())
)
{
// output dims are data dependent
......@@ -44,9 +44,9 @@ bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
AIDGE_ASSERT(getInput(0)->nbDims() == 4,
"input tensor must have dimensions = 4 (batch, channel, height, width).");
const bool input1ROIPresent = getInput(1) && !getInput(1)->empty();
const bool input2ScalesPresent = getInput(2) && !getInput(2)->empty();
const bool input3SizesPresent = getInput(3) && !getInput(3)->empty();
const bool input1ROIPresent = getInput(1) && !getInput(1)->undefined();
const bool input2ScalesPresent = getInput(2) && !getInput(2)->undefined();
const bool input3SizesPresent = getInput(3) && !getInput(3)->undefined();
AIDGE_ASSERT(input2ScalesPresent != input3SizesPresent, "Only one of scales and sizes can be specified.")
......@@ -118,4 +118,4 @@ void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
if(getInput(3)) {
getInput(3)->setBackend(name, device);
}
}
\ No newline at end of file
}
......@@ -29,10 +29,10 @@
const std::string Aidge::Slice_Op::Type = "Slice";
bool Aidge::Slice_Op::dimsForwarded() const {
if ((getInput(1) && !getInput(1)->empty())
|| (getInput(2) && !getInput(2)->empty())
|| (getInput(3) && !getInput(3)->empty())
|| (getInput(4) && !getInput(4)->empty()))
if ((getInput(1) && !getInput(1)->undefined())
|| (getInput(2) && !getInput(2)->undefined())
|| (getInput(3) && !getInput(3)->undefined())
|| (getInput(4) && !getInput(4)->undefined()))
{
// output dims are data dependent
return false;
......
......@@ -55,7 +55,7 @@ void Aidge::Split_OpImpl::forward() {
const std::string Aidge::Split_Op::Type = "Split";
bool Aidge::Split_Op::dimsForwarded() const {
if ((getInput(1) && !getInput(1)->empty()))
if ((getInput(1) && !getInput(1)->undefined()))
{
// output dims are data dependent
return false;
......
......@@ -32,6 +32,7 @@ const std::string Aidge::Transpose_Op::Type = "Transpose";
bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
if (inputsAssociated()) {
AIDGE_ASSERT(!getInput(0)->empty(), "Not applicable on scalars.");
std::vector<DimSize_t> outputDims;
for (std::size_t i = 0; i < outputDimsOrder().size(); ++i) {
outputDims.push_back(getInput(0)->dims()[outputDimsOrder()[i]]);
......
......@@ -36,7 +36,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
Tensor T_default{};
REQUIRE((
(T_default.dataType() == DataType::Float32) &&
(T_default.size() == 1) &&
(T_default.size() == 0) &&
(T_default.dims() == std::vector<DimSize_t>({})) &&
(T_default.strides() == std::vector<DimSize_t>({1})) &&
(T_default.getImpl() == nullptr) &&
......
......@@ -18,6 +18,14 @@
using namespace Aidge;
TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
SECTION("Concat scalar inputs") {
std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(2);
std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(4);
auto myConcat = Concat(2, 0);
myConcat->getOperator()->associateInput(0, input1);
myConcat->getOperator()->associateInput(1, input2);
REQUIRE_THROWS(myConcat->forward());
}
SECTION("Concat 1D inputs") {
std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array1D<int,2>{{ 2, 3 }});
std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array1D<int,3>{{ 4, 5, 6 }});
......@@ -140,4 +148,4 @@ TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
}
}
\ No newline at end of file
}
......@@ -44,54 +44,54 @@ TEST_CASE("[core/operator] Div_Op(forwardDims)", "[Div][forwardDims]") {
* @todo Special case: scalar not handled yet by
* ``OperatorTensor::forwardDims()``
*/
// SECTION("Scalar / Scalar") {
// // input_0
// T0->resize({});
// // input_1
// T1->resize({});
// REQUIRE_NOTHROW(op->forwardDims());
// REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
// }
// SECTION("Scalar / +1-D") {
// // a scalar is compatible with any other Tensor
// // input_0
// T0->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_1
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T1->resize(dims);
// REQUIRE_NOTHROW(op->forwardDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
// SECTION("+1-D / Scalar") {
// // a scalar is compatible with any other Tensor
// // input_1
// T1->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_0
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T0->resize(dims);
// REQUIRE_NOTHROW(op->forwardDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
SECTION("Scalar / Scalar") {
// input_0
T0->resize({});
// input_1
T1->resize({});
REQUIRE_NOTHROW(op->forwardDims());
REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
}
SECTION("Scalar / +1-D") {
// a scalar is compatible with any other Tensor
// input_0
T0->resize({});
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// input_1
const std::size_t nb_dims = nbDimsDist(gen);
std::vector<std::size_t> dims(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims[i] = dimsDist(gen);
}
T1->resize(dims);
REQUIRE_NOTHROW(op->forwardDims());
REQUIRE((op->getOutput(0)->dims()) == dims);
}
}
SECTION("+1-D / Scalar") {
// a scalar is compatible with any other Tensor
// input_1
T1->resize({});
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// input_0
const std::size_t nb_dims = nbDimsDist(gen);
std::vector<std::size_t> dims(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims[i] = dimsDist(gen);
}
T0->resize(dims);
REQUIRE_NOTHROW(op->forwardDims());
REQUIRE((op->getOutput(0)->dims()) == dims);
}
}
SECTION("+1-D / +1-D") {
// same size
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
......