Skip to content
Snippets Groups Projects
Commit 698d0028 authored by Maxence Naud's avatar Maxence Naud
Browse files

Standardize and optimize TensorImpl, Tensor and arithmetic operators

parent 2231e352
No related branches found
No related tags found
No related merge requests found
......@@ -12,8 +12,12 @@
#ifndef AIDGE_TENSORIMPL_H_
#define AIDGE_TENSORIMPL_H_
#include <cstddef>
#include <cstdio>
#include <algorithm> // std::accumulate
#include <cstddef> // std::size_t
#include <functional> // std::multiplies
#include <vector>
#include <utility> // std::pair, std::make_pair
#include "aidge/data/Data.hpp"
#include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
......@@ -59,23 +63,42 @@ private:
*/
/**
* This class manages the raw data storage of a Tensor and provide generic copy
* @class TensorImpl
* @brief Class to manage the raw data storage of a Tensor and provide generic copy
* primitives from other devices and from/to host.
* It can own the data or not (use setRawPtr() to set an external data owner).
* It only knows the data type and data capacity, but does not handle anything else.
* @note It can own the data or not (use ``setRawPtr()`` to set an external data owner).
* @note It only knows the data type and data capacity, but does not handle anything else.
*/
class TensorImpl {
protected:
const char *mBackend;
/// @brief Device id.
const DeviceIdx_t mDevice;
/// Number of elements (to be) stored.
NbElts_t mNbElts;
public:
TensorImpl() = delete;

/// @brief Construct an implementation bound to a (backend, device) pair and
/// reserve capacity for the number of elements implied by `dims`.
/// @param backend Name of the backend owning this implementation (not copied; must outlive the object).
/// @param device Device index on that backend.
/// @param dims Tensor dimensions used to compute the initial element count.
TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims)
    : mBackend(backend),
      mDevice(device)
{
    resize(dims);
}
virtual ~TensorImpl() = default;
virtual bool operator==(const TensorImpl &othImpl) const = 0;
public:
/**
 * @brief Return the (backend, device) pair for this implementation.
 * @note Not declared noexcept: building the std::string may allocate and throw.
 */
std::pair<std::string, DeviceIdx_t> device() const {
    return std::make_pair(std::string(mBackend), mDevice);
}
/**
* Copy data from the same device.
......@@ -151,11 +174,7 @@ public:
* Set the size, in number of elements, that must be stored.
*/
virtual void resize(std::vector<DimSize_t> dims) {
    // Element count is the product of all dimensions; an empty dims vector
    // yields 1 (a scalar), which the std::accumulate seed provides.
    mNbElts = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
}
/**
......@@ -168,23 +187,15 @@ public:
*/
virtual std::size_t scalarSize() const noexcept = 0;
constexpr const char *backend() const { return mBackend; }
virtual ~TensorImpl() = default;
virtual bool operator==(const TensorImpl &othImpl) const = 0;
/**
* Copy from another backend.
* @brief Copy from another backend.
* @param srcImpl Source TensorImpl to copy from.
* @param length Number of elements of size scalarSize() to copy
* @param srcOffset Source offset (in number of elements).
* @param dstOffset Destination offset (in number of elements).
*/
void copyFrom(const TensorImpl& srcImpl, NbElts_t length, NbElts_t srcOffset = 0, NbElts_t dstOffset = 0);
protected:
const char *mBackend;
const DeviceIdx_t mDevice;
/// Number of elements (to be) stored
NbElts_t mNbElts;
};
} // namespace Aidge
......
......@@ -106,7 +106,7 @@ class Tensor : public Data,
: Data(Type),
mDataType(NativeType<VT>::type),
mDims({}), mStrides({1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, 1)),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
mSize(1) {
*static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
}
......@@ -303,7 +303,7 @@ class Tensor : public Data,
* @brief Get the data type enum.
* @return constexpr DataType
*/
constexpr DataType dataType() const noexcept { return mDataType; }
/**
* @brief Set the DataType of the Tensor and converts data
......@@ -346,7 +346,7 @@ class Tensor : public Data,
* @return true
* @return false
*/
bool hasImpl() const noexcept { return mImpl ? true : false; }
/**
* @brief Get number of dimensions of the Tensor.
......@@ -381,13 +381,13 @@ class Tensor : public Data,
* @brief Return true if Tensor is contiguous in memory.
* @return bool
*/
constexpr bool isContiguous() const noexcept { return mContiguous; }
/**
* @brief Get the number of elements in the Tensor object.
* @return constexpr std::size_t
*/
constexpr std::size_t size() const noexcept { return mSize; }
/**
* @brief Change the dimensions of the Tensor object according to the given argument.
......
......@@ -9,7 +9,8 @@
*
********************************************************************************/
#include <cstddef> // std::size_t
#include <cstddef> // std::size_t
#include <stdexcept> // std::runtime_error
#include <string>
#include <vector>
......@@ -42,19 +43,15 @@ void Aidge::Add_Op::computeOutputDims() {
std::vector<std::size_t> outDims(outNbDims, 1);
// Right-aligned broadcasting: walk every output dimension from the last one,
// consuming the trailing dimension of each input that still has one.
for (auto it = outDims.rbegin(); it != outDims.rend(); ++it) {
    for (std::size_t i = 0; i < nbInputs(); ++i) {
        if (!inputsDims[i].empty()) {
            const std::size_t dim = inputsDims[i].back();
            inputsDims[i].pop_back();
            if (*it == 1) {
                // Output dim still unset (or broadcastable): adopt the input dim.
                *it = dim;
            }
            else if ((dim != *it) && (dim != 1)) {
                // Mismatching non-1 dimensions cannot be broadcast together.
                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Add operation");
            }
        }
    }
}
......
......@@ -8,12 +8,11 @@
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstddef> // std::size_t
#include <stdexcept> // std::runtime_error
#include <string>
#include <vector>
#include <utility>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Div.hpp"
......@@ -30,30 +29,24 @@ void Aidge::Div_Op::computeOutputDims() {
if (!getInput(0)->empty() && !getInput(1)->empty()) {
    // Output rank is the larger of the two input ranks; lowDims is the shorter shape.
    const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
    const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
    std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
    const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;

    // Compare trailing dimensions right-to-left (NumPy-style broadcasting).
    std::size_t out_id = outDims.size() - 1;
    std::size_t low_id = lowDims.size() - 1;
    std::size_t i = 0;
    while (i++ < lowDims.size()) {
        if (outDims[out_id] == 1) {
            // A dimension of 1 broadcasts to the other operand's dimension.
            outDims[out_id] = lowDims[low_id];
        }
        else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
            AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Div Operation");
        }
        --out_id;
        --low_id;
    }
    mOutputs[0]->resize(outDims);
}
......
......@@ -8,11 +8,11 @@
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstddef> // std::size_t
#include <stdexcept> // std::runtime_error
#include <string>
#include <vector>
#include <utility>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Mul.hpp"
......@@ -29,30 +29,24 @@ void Aidge::Mul_Op::computeOutputDims() {
if (!getInput(0)->empty() && !getInput(1)->empty()) {
    // Output rank is the larger of the two input ranks; lowDims is the shorter shape.
    const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
    const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
    std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
    const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;

    // Compare trailing dimensions right-to-left (NumPy-style broadcasting).
    std::size_t out_id = outDims.size() - 1;
    std::size_t low_id = lowDims.size() - 1;
    std::size_t i = 0;
    while (i++ < lowDims.size()) {
        if (outDims[out_id] == 1) {
            // A dimension of 1 broadcasts to the other operand's dimension.
            outDims[out_id] = lowDims[low_id];
        }
        else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
            // Fixed copy-paste error: message previously said "Div Operation".
            AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Mul Operation");
        }
        --out_id;
        --low_id;
    }
    mOutputs[0]->resize(outDims);
}
......
......@@ -8,11 +8,11 @@
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstddef> // std::size_t
#include <stdexcept> // std::runtime_error
#include <string>
#include <vector>
#include <utility>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Pow.hpp"
......@@ -29,30 +29,24 @@ void Aidge::Pow_Op::computeOutputDims() {
if (!getInput(0)->empty() && !getInput(1)->empty()) {
    // Output rank is the larger of the two input ranks; lowDims is the shorter shape.
    const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
    const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
    std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
    const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;

    // Compare trailing dimensions right-to-left (NumPy-style broadcasting).
    std::size_t out_id = outDims.size() - 1;
    std::size_t low_id = lowDims.size() - 1;
    std::size_t i = 0;
    while (i++ < lowDims.size()) {
        if (outDims[out_id] == 1) {
            // A dimension of 1 broadcasts to the other operand's dimension.
            outDims[out_id] = lowDims[low_id];
        }
        else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
            // Fixed copy-paste error: message previously said "Div Operation".
            AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Pow Operation");
        }
        --out_id;
        --low_id;
    }
    mOutputs[0]->resize(outDims);
}
......
......@@ -8,11 +8,11 @@
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstddef> // std::size_t
#include <stdexcept> // std::runtime_error
#include <string>
#include <vector>
#include <utility>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Sub.hpp"
......@@ -29,30 +29,24 @@ void Aidge::Sub_Op::computeOutputDims() {
if (!getInput(0)->empty() && !getInput(1)->empty()) {
    // Output rank is the larger of the two input ranks; lowDims is the shorter shape.
    const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
    const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
    std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
    const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;

    // Compare trailing dimensions right-to-left (NumPy-style broadcasting).
    std::size_t out_id = outDims.size() - 1;
    std::size_t low_id = lowDims.size() - 1;
    std::size_t i = 0;
    while (i++ < lowDims.size()) {
        if (outDims[out_id] == 1) {
            // A dimension of 1 broadcasts to the other operand's dimension.
            outDims[out_id] = lowDims[low_id];
        }
        else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
            // Fixed copy-paste error: message previously said "Div Operation".
            AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Sub Operation");
        }
        --out_id;
        --low_id;
    }
    mOutputs[0]->resize(outDims);
}
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment