Skip to content
Snippets Groups Projects
Commit ee26ba87 authored by Grégoire Kubler's avatar Grégoire Kubler Committed by Maxence Naud
Browse files

chore: cleaning up tensor.cpp: added namespace Aidge + rm includes

Added namespace Aidge {} to remove extra Aidge:: specification
Removed useless #include
parent 754266ad
No related branches found
No related tags found
No related merge requests found
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include <algorithm>
#include <cstddef> #include <cstddef>
#include <vector> #include <vector>
...@@ -25,14 +24,14 @@ ...@@ -25,14 +24,14 @@
#include "aidge/operator/ReduceMean.hpp" #include "aidge/operator/ReduceMean.hpp"
#include "aidge/operator/Sub.hpp" #include "aidge/operator/Sub.hpp"
#include "aidge/operator/Sqrt.hpp" #include "aidge/operator/Sqrt.hpp"
#include "aidge/operator/Transpose.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
namespace Aidge {
Aidge::Tensor::~Tensor() noexcept = default; Tensor::~Tensor() noexcept = default;
Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const { Tensor Tensor::operator+(const Tensor& other) const {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type"); AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
...@@ -49,7 +48,7 @@ Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const { ...@@ -49,7 +48,7 @@ Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
} }
Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const { Tensor Tensor::operator-(const Tensor& other) const {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type"); AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
...@@ -66,7 +65,7 @@ Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const { ...@@ -66,7 +65,7 @@ Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
} }
Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const { Tensor Tensor::operator*(const Tensor& other) const {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type"); AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
...@@ -83,7 +82,7 @@ Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const { ...@@ -83,7 +82,7 @@ Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
} }
Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const { Tensor Tensor::operator/(const Tensor& other) const {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type"); AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
...@@ -99,7 +98,7 @@ Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const { ...@@ -99,7 +98,7 @@ Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
return div_.getOutput(0)->clone(); return div_.getOutput(0)->clone();
} }
Aidge::Tensor Aidge::Tensor::sqrt() const { Tensor Tensor::sqrt() const {
AIDGE_ASSERT(hasImpl(), "Tensor has no implementation."); AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
auto sqrt_ = Sqrt_Op(); auto sqrt_ = Sqrt_Op();
sqrt_.associateInput(0, std::make_shared<Tensor>(*this)); sqrt_.associateInput(0, std::make_shared<Tensor>(*this));
...@@ -110,7 +109,7 @@ Aidge::Tensor Aidge::Tensor::sqrt() const { ...@@ -110,7 +109,7 @@ Aidge::Tensor Aidge::Tensor::sqrt() const {
return sqrt_.getOutput(0)->clone(); return sqrt_.getOutput(0)->clone();
} }
Aidge::Tensor Aidge::Tensor::abs() const { Tensor Tensor::abs() const {
AIDGE_ASSERT(hasImpl(), "Tensor has no implementation."); AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
auto abs_ = Abs_Op(); auto abs_ = Abs_Op();
abs_.associateInput(0, std::make_shared<Tensor>(*this)); abs_.associateInput(0, std::make_shared<Tensor>(*this));
...@@ -121,7 +120,7 @@ Aidge::Tensor Aidge::Tensor::abs() const { ...@@ -121,7 +120,7 @@ Aidge::Tensor Aidge::Tensor::abs() const {
return abs_.getOutput(0)->clone(); return abs_.getOutput(0)->clone();
} }
Aidge::Tensor Aidge::Tensor::mean() const { Tensor Tensor::mean() const {
AIDGE_ASSERT(hasImpl(), "Tensor has no implementation."); AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
// TODO: should be the default behavior of ReduceMean_Op // TODO: should be the default behavior of ReduceMean_Op
// No need to specify the list of all axes! // No need to specify the list of all axes!
...@@ -136,7 +135,7 @@ Aidge::Tensor Aidge::Tensor::mean() const { ...@@ -136,7 +135,7 @@ Aidge::Tensor Aidge::Tensor::mean() const {
return mean_.getOutput(0)->clone(); return mean_.getOutput(0)->clone();
} }
Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) { Tensor& Tensor::operator=(const Tensor& other) {
if (this == &other) { if (this == &other) {
return *this; return *this;
} }
...@@ -156,7 +155,7 @@ Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) { ...@@ -156,7 +155,7 @@ Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
} }
void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t device, bool copyFrom) { void Tensor::setBackend(const std::string &name, DeviceIdx_t device, bool copyFrom) {
if (mImpl) { if (mImpl) {
if (mImpl->device() != std::make_pair(name, device)) { if (mImpl->device() != std::make_pair(name, device)) {
// Backend change: create new impl, copy from old to new and replace // Backend change: create new impl, copy from old to new and replace
...@@ -173,8 +172,8 @@ void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t devic ...@@ -173,8 +172,8 @@ void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t devic
} }
} }
void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims, void Tensor::resize(const std::vector<DimSize_t>& dims,
std::vector<Aidge::DimSize_t> strides) { std::vector<DimSize_t> strides) {
if (dims.empty()) { // scalar if (dims.empty()) { // scalar
mDims = std::vector<DimSize_t>(0); mDims = std::vector<DimSize_t>(0);
mStrides = std::vector<DimSize_t>({1}); mStrides = std::vector<DimSize_t>({1});
...@@ -236,12 +235,11 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims, ...@@ -236,12 +235,11 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
} }
} }
std::string Aidge::Tensor::toString() const { std::string Tensor::toString() const {
AIDGE_ASSERT(
if (!hasImpl() || undefined()) { mImpl && (undefined() || (dims() == std::vector<DimSize_t>({0})) ||
// Return no value on no implementation or undefined size (mImpl->hostPtr() != nullptr)),
return std::string("{}"); "tensor should have a valid host pointer");
}
// TODO: move lambda elsewhere? // TODO: move lambda elsewhere?
auto ptrToString = [](DataType dt, void* ptr, std::size_t idx) { auto ptrToString = [](DataType dt, void* ptr, std::size_t idx) {
...@@ -345,7 +343,7 @@ std::string Aidge::Tensor::toString() const { ...@@ -345,7 +343,7 @@ std::string Aidge::Tensor::toString() const {
return res; return res;
} }
Aidge::Tensor Aidge::Tensor::extract( Tensor Tensor::extract(
const std::vector<std::size_t>& fixedCoord) const { const std::vector<std::size_t>& fixedCoord) const {
AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous"); AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
AIDGE_ASSERT(fixedCoord.size() <= mDims.size(), AIDGE_ASSERT(fixedCoord.size() <= mDims.size(),
...@@ -361,7 +359,7 @@ Aidge::Tensor Aidge::Tensor::extract( ...@@ -361,7 +359,7 @@ Aidge::Tensor Aidge::Tensor::extract(
return subTensor; return subTensor;
} }
Aidge::Tensor Aidge::Tensor::extract( Tensor Tensor::extract(
const std::vector<std::size_t>& startCoord, const std::vector<std::size_t>& startCoord,
const std::vector<std::size_t>& dims) const { const std::vector<std::size_t>& dims) const {
AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous"); AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
...@@ -375,7 +373,7 @@ Aidge::Tensor Aidge::Tensor::extract( ...@@ -375,7 +373,7 @@ Aidge::Tensor Aidge::Tensor::extract(
return subTensor; return subTensor;
} }
void Aidge::Tensor::makeContiguous() { void Tensor::makeContiguous() {
if (!mImpl || isContiguous()) { if (!mImpl || isContiguous()) {
return; return;
} }
...@@ -413,7 +411,7 @@ void Aidge::Tensor::makeContiguous() { ...@@ -413,7 +411,7 @@ void Aidge::Tensor::makeContiguous() {
resize(mDims); resize(mDims);
} }
void Aidge::Tensor::copyCast(const Tensor& src) { void Tensor::copyCast(const Tensor& src) {
if (&src == this) { if (&src == this) {
return; return;
} }
...@@ -434,7 +432,7 @@ void Aidge::Tensor::copyCast(const Tensor& src) { ...@@ -434,7 +432,7 @@ void Aidge::Tensor::copyCast(const Tensor& src) {
src.size(), mImplOffset); src.size(), mImplOffset);
} }
void Aidge::Tensor::copyFrom(const Tensor& src) { void Tensor::copyFrom(const Tensor& src) {
if (&src == this) { if (&src == this) {
return; return;
} }
...@@ -455,7 +453,7 @@ void Aidge::Tensor::copyFrom(const Tensor& src) { ...@@ -455,7 +453,7 @@ void Aidge::Tensor::copyFrom(const Tensor& src) {
mImplOffset); mImplOffset);
} }
void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose) { void Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose) {
std::vector<DimSize_t> newDims; std::vector<DimSize_t> newDims;
for (std::size_t i = 0; i < src.dims().size(); ++i) { for (std::size_t i = 0; i < src.dims().size(); ++i) {
newDims.push_back(src.dims()[transpose[i]]); newDims.push_back(src.dims()[transpose[i]]);
...@@ -497,11 +495,11 @@ void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t ...@@ -497,11 +495,11 @@ void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t
setImpl(newImpl); setImpl(newImpl);
} }
void Aidge::Tensor::copyTranspose(const Tensor& src, const DataFormatTranspose& transpose) { void Tensor::copyTranspose(const Tensor& src, const DataFormatTranspose& transpose) {
copyTranspose(src, std::vector<DimSize_t>(transpose.begin(), transpose.end())); copyTranspose(src, std::vector<DimSize_t>(transpose.begin(), transpose.end()));
} }
void Aidge::Tensor::copyCastFrom(const Tensor& src, void Tensor::copyCastFrom(const Tensor& src,
std::shared_ptr<Tensor>& movedSrcPtr) { std::shared_ptr<Tensor>& movedSrcPtr) {
if (&src == this) { if (&src == this) {
return; return;
...@@ -534,13 +532,13 @@ void Aidge::Tensor::copyCastFrom(const Tensor& src, ...@@ -534,13 +532,13 @@ void Aidge::Tensor::copyCastFrom(const Tensor& src,
} }
} }
Aidge::Tensor& Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) { Tensor& Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) {
// Scott Meyers' solution to avoid code duplication // Scott Meyers' solution to avoid code duplication
return const_cast<Tensor&>( return const_cast<Tensor&>(
static_cast<const Tensor&>(*this).refContiguous(fallback)); static_cast<const Tensor&>(*this).refContiguous(fallback));
} }
const Aidge::Tensor& Aidge::Tensor::refContiguous( const Tensor& Tensor::refContiguous(
std::shared_ptr<Tensor>& fallback) const { std::shared_ptr<Tensor>& fallback) const {
AIDGE_ASSERT(getImpl(), AIDGE_ASSERT(getImpl(),
"no backend was set for tensor, cannot refCast() it"); "no backend was set for tensor, cannot refCast() it");
...@@ -559,15 +557,15 @@ const Aidge::Tensor& Aidge::Tensor::refContiguous( ...@@ -559,15 +557,15 @@ const Aidge::Tensor& Aidge::Tensor::refContiguous(
} }
} }
Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, Tensor& Tensor::refCast(std::shared_ptr<Tensor>& fallback,
const Aidge::DataType& dt) { const DataType& dt) {
// Scott Meyers' solution to avoid code duplication // Scott Meyers' solution to avoid code duplication
return const_cast<Tensor&>( return const_cast<Tensor&>(
static_cast<const Tensor&>(*this).refCast(fallback, dt)); static_cast<const Tensor&>(*this).refCast(fallback, dt));
} }
const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, const Tensor& Tensor::refCast(std::shared_ptr<Tensor>& fallback,
const Aidge::DataType& dt) const { const DataType& dt) const {
AIDGE_ASSERT(getImpl(), AIDGE_ASSERT(getImpl(),
"no backend was set for tensor, cannot refCast() it"); "no backend was set for tensor, cannot refCast() it");
...@@ -600,7 +598,7 @@ const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, ...@@ -600,7 +598,7 @@ const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
} }
} }
Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, Tensor& Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
const std::string& backend, const std::string& backend,
DeviceIdx_t device) { DeviceIdx_t device) {
// Scott Meyers' solution to avoid code duplication // Scott Meyers' solution to avoid code duplication
...@@ -608,7 +606,7 @@ Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, ...@@ -608,7 +606,7 @@ Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
static_cast<const Tensor&>(*this).refFrom(fallback, backend, device)); static_cast<const Tensor&>(*this).refFrom(fallback, backend, device));
} }
const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, const Tensor& Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
const std::string& backend, const std::string& backend,
DeviceIdx_t device) const { DeviceIdx_t device) const {
AIDGE_ASSERT(getImpl(), AIDGE_ASSERT(getImpl(),
...@@ -641,8 +639,8 @@ const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, ...@@ -641,8 +639,8 @@ const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
} }
} }
Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, Tensor& Tensor::ref(std::shared_ptr<Tensor>& fallback,
const Aidge::DataType& dt, const DataType& dt,
const std::string& backend, const std::string& backend,
DeviceIdx_t device) { DeviceIdx_t device) {
// Scott Meyers' solution to avoid code duplication // Scott Meyers' solution to avoid code duplication
...@@ -650,8 +648,8 @@ Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, ...@@ -650,8 +648,8 @@ Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
static_cast<const Tensor&>(*this).ref(fallback, dt, backend, device)); static_cast<const Tensor&>(*this).ref(fallback, dt, backend, device));
} }
const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const Tensor& Tensor::ref(std::shared_ptr<Tensor>& fallback,
const Aidge::DataType& dt, const DataType& dt,
const std::string& backend, const std::string& backend,
DeviceIdx_t device) const { DeviceIdx_t device) const {
AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot ref() it"); AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot ref() it");
...@@ -675,7 +673,7 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, ...@@ -675,7 +673,7 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
} }
} }
std::set<std::string> Aidge::Tensor::getAvailableBackends() { std::set<std::string> Tensor::getAvailableBackends() {
std::set<std::string> backendsList; std::set<std::string> backendsList;
for (const auto& tupleKey : Registrar<Tensor>::getKeys()) { for (const auto& tupleKey : Registrar<Tensor>::getKeys()) {
backendsList.insert(std::get<0>(tupleKey)); backendsList.insert(std::get<0>(tupleKey));
...@@ -686,7 +684,7 @@ std::set<std::string> Aidge::Tensor::getAvailableBackends() { ...@@ -686,7 +684,7 @@ std::set<std::string> Aidge::Tensor::getAvailableBackends() {
/////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////////////////
// COORDINATES MANIPULATION // COORDINATES MANIPULATION
std::vector<std::size_t> std::vector<std::size_t>
Aidge::Tensor::getCoord(const std::vector<Aidge::DimSize_t> &tensorDims, Tensor::getCoord(const std::vector<DimSize_t> &tensorDims,
std::size_t flatIdx) { std::size_t flatIdx) {
std::vector<std::size_t> coordIdx(tensorDims.size()); std::vector<std::size_t> coordIdx(tensorDims.size());
std::size_t i = tensorDims.size(); std::size_t i = tensorDims.size();
...@@ -699,7 +697,7 @@ Aidge::Tensor::getCoord(const std::vector<Aidge::DimSize_t> &tensorDims, ...@@ -699,7 +697,7 @@ Aidge::Tensor::getCoord(const std::vector<Aidge::DimSize_t> &tensorDims,
} }
std::size_t Aidge::Tensor::getIdx(const std::vector<Aidge::DimSize_t> &tensorDims, const std::vector<std::size_t>& coordIdx) { std::size_t Tensor::getIdx(const std::vector<DimSize_t> &tensorDims, const std::vector<std::size_t>& coordIdx) {
AIDGE_ASSERT(coordIdx.size() <= tensorDims.size(), "Tensor::getIdx(): Coordinates does not match number of dimensions.\n\tCoords : {}\n\tDimensions: {}",coordIdx, tensorDims); AIDGE_ASSERT(coordIdx.size() <= tensorDims.size(), "Tensor::getIdx(): Coordinates does not match number of dimensions.\n\tCoords : {}\n\tDimensions: {}",coordIdx, tensorDims);
std::size_t flatIdx = 0; std::size_t flatIdx = 0;
for(std::size_t i = 0; i < tensorDims.size(); ++i) { for(std::size_t i = 0; i < tensorDims.size(); ++i) {
...@@ -716,7 +714,7 @@ std::size_t Aidge::Tensor::getIdx(const std::vector<Aidge::DimSize_t> &tensorDim ...@@ -716,7 +714,7 @@ std::size_t Aidge::Tensor::getIdx(const std::vector<Aidge::DimSize_t> &tensorDim
} }
template<typename T> template<typename T>
bool Aidge::Tensor::isInBounds(const std::vector<Aidge::DimSize_t>& tensorDims, const std::vector<T>& coords){ bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<T>& coords){
AIDGE_ASSERT(coords.size() == tensorDims.size(), AIDGE_ASSERT(coords.size() == tensorDims.size(),
"Coordinates({}) to compare have not " "Coordinates({}) to compare have not "
"the same number of dimension as tensor dimensions({}), aborting.", "the same number of dimension as tensor dimensions({}), aborting.",
...@@ -736,3 +734,5 @@ template bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const ...@@ -736,3 +734,5 @@ template bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const
template bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<float>& coords); template bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<float>& coords);
template bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<double>& coords); template bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<double>& coords);
} // namespace Aidge
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment