Commit 0b990efd authored by Maxence Naud

Merge branch 'broadcasting' into 'dev'

[Add] broadcasting for Arithmetic Operators

See merge request !65
parents b3f36f6b 97740d0d
Related merge requests: !105 version 0.2.0, !65 [Add] broadcasting for Arithmetic Operators
Pipeline #39777 passed
Showing with 774 additions and 73 deletions
@@ -12,8 +12,12 @@
 #ifndef AIDGE_TENSORIMPL_H_
 #define AIDGE_TENSORIMPL_H_

-#include <cstddef>
-#include <cstdio>
+#include <numeric>    // std::accumulate
+#include <cstddef>    // std::size_t
+#include <functional> // std::multiplies
+#include <vector>
+#include <utility>    // std::pair, std::make_pair

 #include "aidge/data/Data.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
@@ -59,23 +63,42 @@ private:
  */
 /**
- * This class manages the raw data storage of a Tensor and provide generic copy
+ * @class TensorImpl
+ * @brief Class to manage the raw data storage of a Tensor and provide generic copy
  * primitives from other devices and from/to host.
- * It can own the data or not (use setRawPtr() to set an external data owner).
- * It only knows the data type and data capacity, but does not handle anything else.
+ * @note It can own the data or not (use ``setRawPtr()`` to set an external data owner).
+ * @note It only knows the data type and data capacity, but does not handle anything else.
  */
 class TensorImpl {
+protected:
+    const char *mBackend;
+    /// @brief Device id.
+    const DeviceIdx_t mDevice;
+    /// Number of elements (to be) stored.
+    NbElts_t mNbElts;
+
 public:
     TensorImpl() = delete;
-    TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims) : mBackend(backend), mDevice(device)
+
+    TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims)
+        : mBackend(backend),
+          mDevice(device)
     {
         resize(dims);
     };
+
+    virtual ~TensorImpl() = default;
+
+    virtual bool operator==(const TensorImpl &othImpl) const = 0;
+
+public:
     /**
      * Return the (backend, device) pair for this implementation.
      */
-    std::pair<std::string, DeviceIdx_t> device() const { return std::make_pair(mBackend, mDevice); }
+    std::pair<std::string, DeviceIdx_t> device() const noexcept {
+        return std::make_pair(std::string(mBackend), mDevice);
+    }

     /**
      * Copy data from the same device.
@@ -151,11 +174,7 @@ public:
      * Set the size, in number of elements, that must be stored.
      */
     virtual void resize(std::vector<DimSize_t> dims) {
-        size_t product = 1;
-        for (size_t num : dims) {
-            product *= num;
-        }
-        mNbElts = product;
+        mNbElts = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
     }

     /**
@@ -168,23 +187,15 @@ public:
      */
     virtual std::size_t scalarSize() const noexcept = 0;

     constexpr const char *backend() const { return mBackend; }
-    virtual ~TensorImpl() = default;
-    virtual bool operator==(const TensorImpl &othImpl) const = 0;

     /**
-     * Copy from another backend.
+     * @brief Copy from another backend.
      * @param srcImpl Source TensorImpl to copy from.
      * @param length Number of elements of size scalarSize() to copy
      * @param srcOffset Source offset (in number of elements).
      * @param dstOffset Destination offset (in number of elements).
      */
     void copyFrom(const TensorImpl& srcImpl, NbElts_t length, NbElts_t srcOffset = 0, NbElts_t dstOffset = 0);
-
-protected:
-    const char *mBackend;
-    const DeviceIdx_t mDevice;
-    /// Number of elements (to be) stored
-    NbElts_t mNbElts;
 };
 } // namespace Aidge
......
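The new resize() above replaces the hand-written product loop with std::accumulate. Below is a minimal standalone sketch of that element-count rule, using plain std::size_t instead of Aidge's DimSize_t/NbElts_t aliases; the helper name elementCount is illustrative only and not part of the Aidge API.

#include <cstddef>    // std::size_t
#include <functional> // std::multiplies
#include <numeric>    // std::accumulate
#include <vector>

// Number of elements of a tensor with the given dimensions: the product of
// all sizes. An empty dims vector yields 1, the convention for a 0-D tensor.
std::size_t elementCount(const std::vector<std::size_t>& dims) {
    return std::accumulate(dims.cbegin(), dims.cend(),
                           std::size_t(1), std::multiplies<std::size_t>());
}

// elementCount({2, 3, 4}) == 24;  elementCount({}) == 1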
@@ -17,6 +17,7 @@
 #include <memory>
 #include <numeric>   // std::accumulate
 #include <string>
+#include <type_traits>  // std::is_arithmetic
 #include <vector>

 #include "aidge/backend/TensorImpl.hpp"
@@ -99,6 +100,17 @@ class Tensor : public Data,
         return newTensor;
     }

+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    Tensor(T val)
+        : Data(Type),
+          mDataType(NativeType<VT>::type),
+          mDims({}), mStrides({1}),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
+          mSize(1) {
+        *static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
+    }
+
     /**
      * @brief Construct a new Tensor object from the 1-dimension Array helper.
      * @tparam T datatype
@@ -291,7 +303,7 @@ class Tensor : public Data,
      * @brief Get the data type enum.
      * @return constexpr DataType
      */
-    constexpr DataType dataType() const { return mDataType; }
+    constexpr DataType dataType() const noexcept { return mDataType; }

     /**
      * @brief Set the DataType of the Tensor and converts data
@@ -334,7 +346,7 @@ class Tensor : public Data,
      * @return true
      * @return false
      */
-    bool hasImpl() const { return (mImpl) ? true : false; }
+    bool hasImpl() const noexcept { return mImpl ? true : false; }

     /**
      * @brief Get number of dimensions of the Tensor.
@@ -369,13 +381,13 @@ class Tensor : public Data,
      * @brief Return true if Tensor is contiguous in memory.
      * @return bool
      */
-    constexpr bool isContiguous() const { return mContiguous; }
+    constexpr bool isContiguous() const noexcept { return mContiguous; }

     /**
      * @brief Get the number of elements in the Tensor object.
      * @return constexpr std::size_t
      */
-    constexpr std::size_t size() const { return mSize; }
+    constexpr std::size_t size() const noexcept { return mSize; }

     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
......
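The hunk above also introduces a constructor that builds a 0-D Tensor directly from any arithmetic value. A minimal usage sketch follows, assuming a "cpu" TensorImpl implementation has been registered (for instance by linking an Aidge CPU backend module); without such a registration the Registrar call inside the constructor cannot resolve.

#include "aidge/data/Tensor.hpp"

int main() {
    // 0-dimensional Tensor holding a single float:
    // dims() is empty and size() is 1.
    Aidge::Tensor scalar(2.5f);
    return 0;
}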
@@ -68,13 +68,7 @@ public:
     // }

-    // void checkDims() const override final {
-    //     assert(outputDimsForwarded());
-    //     for (const auto& in : mInputs) {
-    //         assert(in->dims() == mOutputs[0]->dims());
-    //     }
-    // }
+    void computeOutputDims() override final;

     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Add_Op>::create(name)(*this);
......
@@ -60,7 +60,7 @@ public:
     }

     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
......
@@ -62,7 +62,7 @@ public:
     }

     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
......
@@ -60,7 +60,7 @@ public:
     }

     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
......
@@ -65,7 +65,7 @@ public:
     }

     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
    }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
......
@@ -9,8 +9,53 @@
  *
  ********************************************************************************/

+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
 #include <string>
+#include <vector>

 #include "aidge/operator/Add.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"

-const std::string Aidge::Add_Op::Type = "Add";
\ No newline at end of file
+const std::string Aidge::Add_Op::Type = "Add";
+
+void Aidge::Add_Op::computeOutputDims() {
+    // check inputs have been associated
+    bool associated = (nbInputs() > 0); // do not compute anything if no input
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
+        associated &= !(getInput(i)->empty());
+    }
+    if (associated) {
+        std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
+        for (std::size_t i = 0; i < nbInputs(); i++) {
+            inputsDims[i] = getInput(i)->dims();
+        }
+        std::size_t outNbDims = 1;
+        for (std::size_t i = 0; i < nbInputs(); ++i) {
+            outNbDims = (inputsDims[i].size() > outNbDims) ? inputsDims[i].size() : outNbDims;
+        }
+        std::vector<std::size_t> outDims(outNbDims, 1);
+        for (auto it = outDims.rbegin(); it != outDims.rend(); ++it) {
+            for (std::size_t i = 0; i < nbInputs(); ++i) {
+                if (!inputsDims[i].empty()) {
+                    const std::size_t dim = inputsDims[i].back();
+                    inputsDims[i].pop_back();
+                    if (*it == 1) {
+                        *it = dim;
+                    }
+                    else if ((dim != *it) && (dim != 1)) {
+                        AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Add operation");
+                    }
+                }
+            }
+        }
+        mOutputs[0]->resize(outDims);
+    }
+}
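Add_Op::computeOutputDims() above implements right-aligned, NumPy-style broadcasting over all inputs: shapes are walked from the last dimension backwards, and a size is accepted at a given position when it equals the current output size or 1. Below is a standalone sketch of the same rule over plain shape vectors; the helper name broadcastShapes is illustrative and not part of the Aidge API.

#include <cstddef>   // std::size_t
#include <stdexcept> // std::runtime_error
#include <vector>

// Right-aligned broadcast of several shapes: each output dimension is the
// (unique) non-1 size found among the inputs at that position, or 1 if every
// input has size 1 there. Incompatible sizes raise an exception.
std::vector<std::size_t> broadcastShapes(std::vector<std::vector<std::size_t>> shapes) {
    std::size_t outRank = 1;
    for (const auto& s : shapes) {
        outRank = (s.size() > outRank) ? s.size() : outRank;
    }
    std::vector<std::size_t> out(outRank, 1);
    for (auto it = out.rbegin(); it != out.rend(); ++it) {
        for (auto& s : shapes) {
            if (!s.empty()) {
                const std::size_t dim = s.back();
                s.pop_back();
                if (*it == 1) {
                    *it = dim;
                } else if ((dim != *it) && (dim != 1)) {
                    throw std::runtime_error("incompatible shapes for broadcasting");
                }
            }
        }
    }
    return out;
}

// broadcastShapes({{3, 1, 5}, {4, 5}}) == {3, 4, 5}
// broadcastShapes({{2, 3}, {4, 3}}) throws: 2 and 4 cannot be broadcast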
@@ -9,11 +9,10 @@
  *
  ********************************************************************************/

-#include <cassert>
-#include <cstddef>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
-#include <utility>

 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Div.hpp"
@@ -28,11 +27,27 @@ void Aidge::Div_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }

-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // div by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // div elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // div by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Div operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
 }
\ No newline at end of file
@@ -9,10 +9,10 @@
  *
  ********************************************************************************/

-#include <cassert>
-#include <cstddef>
-#include <string>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
 #include <vector>
-#include <utility>

 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Mul.hpp"
@@ -27,11 +27,27 @@ void Aidge::Mul_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }

-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // mul by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // mul elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // mul by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Mul operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
 }
\ No newline at end of file
@@ -9,10 +9,10 @@
  *
  ********************************************************************************/

-#include <cassert>
-#include <cstddef>
-#include <string>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
 #include <vector>
-#include <utility>

 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Pow.hpp"
@@ -27,11 +27,27 @@ void Aidge::Pow_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }

-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // pow by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // pow elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // pow by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Pow operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
 }
\ No newline at end of file
@@ -9,10 +9,10 @@
  *
  ********************************************************************************/

-#include <cassert>
-#include <cstddef>
-#include <string>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
 #include <vector>
-#include <utility>

 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Sub.hpp"
@@ -27,11 +27,27 @@ void Aidge::Sub_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }

-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // sub by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // sub elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // sub by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Sub operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
 }
\ No newline at end of file
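Div, Mul, Pow and Sub all use the two-input variant of the broadcasting rule: the output starts from the higher-rank shape and the lower-rank shape is right-aligned against it, each of its sizes having to match the corresponding output size or be 1. A sketch of that pairwise alignment follows; the helper name broadcastPair is illustrative and not part of the Aidge API.

#include <cstddef>   // std::size_t
#include <stdexcept> // std::runtime_error
#include <vector>

// Two-shape broadcast: keep the higher-rank shape and walk both shapes from
// the last dimension backwards, filling size-1 output dimensions from the
// lower-rank shape and rejecting any other mismatch.
std::vector<std::size_t> broadcastPair(const std::vector<std::size_t>& a,
                                       const std::vector<std::size_t>& b) {
    std::vector<std::size_t> out = (a.size() >= b.size()) ? a : b;
    const std::vector<std::size_t>& low = (a.size() < b.size()) ? a : b;
    std::size_t out_id = out.size() - 1;
    std::size_t low_id = low.size() - 1;
    for (std::size_t i = 0; i < low.size(); ++i, --out_id, --low_id) {
        if (out[out_id] == 1) {
            out[out_id] = low[low_id];
        } else if ((low[low_id] != 1) && (low[low_id] != out[out_id])) {
            throw std::runtime_error("incompatible shapes for broadcasting");
        }
    }
    return out;
}

// broadcastPair({2, 3, 4}, {3, 1}) == {2, 3, 4}
// broadcastPair({2, 1, 4}, {3, 4}) == {2, 3, 4}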
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <algorithm>  // std::min
#include <cstddef>    // std::size_t
#include <cstdint>    // std::uint16_t
#include <memory>
#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Div.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace Aidge {
TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]") {
constexpr std::uint16_t NBTRIALS = 10;
// Create a random number generator
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
// Create Div Operator
std::shared_ptr<Node> myDiv = Div();
auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
// input_0
std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
op -> associateInput(0,T0);
// input_1
std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
op -> associateInput(1,T1);
/**
* @todo Special case: scalar not handled yet by
* ``OperatorTensor::computeOutputDims()``
*/
// SECTION("Scalar / Scalar") {
// // input_0
// T0->resize({});
// // input_1
// T1->resize({});
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
// }
// SECTION("Scalar / +1-D") {
// // a scalar is compatible with any other Tensor
// // input_0
// T0->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_1
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T1->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
// SECTION("+1-D / Scalar") {
// // a scalar is compatible with any other Tensor
// // input_1
// T1->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_0
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T0->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
SECTION("+1-D / +1-D") {
// same size
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 1;
}
T0->resize(dims0);
T1->resize(dims0);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dims0);
}
// broadcast
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 2;
}
std::vector<std::size_t> dimsOut = dims0;
std::vector<std::size_t> dims1 = dims0;
for (std::size_t i = 0; i < nb_dims; ++i) {
if (dimsDist(gen) <= 5) {
dims1[i] = 1;
}
}
dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
T0->resize(dims0);
T1->resize(dims1);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dimsOut);
// input_0 - wrong
// T1->resize({dims[0] + 1});
std::vector<std::size_t> dims1_wrong = dims1;
for (std::size_t i = 0; i < dims1.size(); ++i) {
++dims1_wrong[i];
}
T1->resize(dims1_wrong);
REQUIRE(dims0 != dims1_wrong);
REQUIRE_THROWS(op->computeOutputDims());
}
}
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <algorithm>  // std::min
#include <cstddef>    // std::size_t
#include <cstdint>    // std::uint16_t
#include <memory>
#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Mul.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace Aidge {
TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]") {
constexpr std::uint16_t NBTRIALS = 10;
// Create a random number generator
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
// Create Mul Operator
std::shared_ptr<Node> myMul = Mul();
auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
// input_0
std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
op -> associateInput(0,T0);
// input_1
std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
op -> associateInput(1,T1);
/**
* @todo Special case: scalar not handled yet by
* ``OperatorTensor::computeOutputDims()``
*/
// SECTION("Scalar / Scalar") {
// // input_0
// T0->resize({});
// // input_1
// T1->resize({});
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
// }
// SECTION("Scalar / +1-D") {
// // a scalar is compatible with any other Tensor
// // input_0
// T0->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_1
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T1->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
// SECTION("+1-D / Scalar") {
// // a scalar is compatible with any other Tensor
// // input_1
// T1->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_0
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T0->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
SECTION("+1-D / +1-D") {
// same size
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 1;
}
T0->resize(dims0);
T1->resize(dims0);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dims0);
}
// broadcast
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 2;
}
std::vector<std::size_t> dimsOut = dims0;
std::vector<std::size_t> dims1 = dims0;
for (std::size_t i = 0; i < nb_dims; ++i) {
if (dimsDist(gen) <= 5) {
dims1[i] = 1;
}
}
dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
T0->resize(dims0);
T1->resize(dims1);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dimsOut);
// input_0 - wrong
// T1->resize({dims[0] + 1});
std::vector<std::size_t> dims1_wrong = dims1;
for (std::size_t i = 0; i < dims1.size(); ++i) {
++dims1_wrong[i];
}
T1->resize(dims1_wrong);
REQUIRE(dims0 != dims1_wrong);
REQUIRE_THROWS(op->computeOutputDims());
}
}
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <algorithm>  // std::min
#include <cstddef>    // std::size_t
#include <cstdint>    // std::uint16_t
#include <memory>
#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Pow.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace Aidge {
TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]") {
constexpr std::uint16_t NBTRIALS = 10;
// Create a random number generator
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
// Create Pow Operator
std::shared_ptr<Node> myPow = Pow();
auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
// input_0
std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
op -> associateInput(0,T0);
// input_1
std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
op -> associateInput(1,T1);
/**
* @todo Special case: scalar not handled yet by
* ``OperatorTensor::computeOutputDims()``
*/
// SECTION("Scalar / Scalar") {
// // input_0
// T0->resize({});
// // input_1
// T1->resize({});
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
// }
// SECTION("Scalar / +1-D") {
// // a scalar is compatible with any other Tensor
// // input_0
// T0->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_1
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T1->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
// SECTION("+1-D / Scalar") {
// // a scalar is compatible with any other Tensor
// // input_1
// T1->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_0
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T0->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
SECTION("+1-D / +1-D") {
// same size
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 1;
}
T0->resize(dims0);
T1->resize(dims0);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dims0);
}
// broadcast
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 2;
}
std::vector<std::size_t> dimsOut = dims0;
std::vector<std::size_t> dims1 = dims0;
for (std::size_t i = 0; i < nb_dims; ++i) {
if (dimsDist(gen) <= 5) {
dims1[i] = 1;
}
}
dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
T0->resize(dims0);
T1->resize(dims1);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dimsOut);
// input_0 - wrong
// T1->resize({dims[0] + 1});
std::vector<std::size_t> dims1_wrong = dims1;
for (std::size_t i = 0; i < dims1.size(); ++i) {
++dims1_wrong[i];
}
T1->resize(dims1_wrong);
REQUIRE(dims0 != dims1_wrong);
REQUIRE_THROWS(op->computeOutputDims());
}
}
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <algorithm>  // std::min
#include <cstddef>    // std::size_t
#include <cstdint>    // std::uint16_t
#include <memory>
#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Sub.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace Aidge {
TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]") {
constexpr std::uint16_t NBTRIALS = 10;
// Create a random number generator
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
// Create Sub Operator
std::shared_ptr<Node> mySub = Sub();
auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
// input_0
std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
op -> associateInput(0,T0);
// input_1
std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
op -> associateInput(1,T1);
/**
* @todo Special case: scalar not handled yet by
* ``OperatorTensor::computeOutputDims()``
*/
// SECTION("Scalar / Scalar") {
// // input_0
// T0->resize({});
// // input_1
// T1->resize({});
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
// }
// SECTION("Scalar / +1-D") {
// // a scalar is compatible with any other Tensor
// // input_0
// T0->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_1
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T1->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
// SECTION("+1-D / Scalar") {
// // a scalar is compatible with any other Tensor
// // input_1
// T1->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_0
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T0->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
SECTION("+1-D / +1-D") {
// same size
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 1;
}
T0->resize(dims0);
T1->resize(dims0);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dims0);
}
// broadcast
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 2;
}
std::vector<std::size_t> dimsOut = dims0;
std::vector<std::size_t> dims1 = dims0;
for (std::size_t i = 0; i < nb_dims; ++i) {
if (dimsDist(gen) <= 5) {
dims1[i] = 1;
}
}
dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
T0->resize(dims0);
T1->resize(dims1);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dimsOut);
// input_0 - wrong
// T1->resize({dims[0] + 1});
std::vector<std::size_t> dims1_wrong = dims1;
for (std::size_t i = 0; i < dims1.size(); ++i) {
++dims1_wrong[i];
}
T1->resize(dims1_wrong);
REQUIRE(dims0 != dims1_wrong);
REQUIRE_THROWS(op->computeOutputDims());
}
}
}
} // namespace Aidge