From ee26ba87b5fd7354e74aaf43d7408b52e9b050de Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gr=C3=A9goire=20KUBLER?= <gregoire.kubler@proton.me>
Date: Mon, 21 Oct 2024 16:45:26 +0200
Subject: [PATCH] chore: clean up tensor.cpp: added namespace Aidge + rm
 includes. Added namespace Aidge {} to remove extra Aidge:: qualification.
 Removed unused #include directives. NOTE: also changes toString() behavior:
 it now asserts a valid host pointer instead of returning "{}" when the
 tensor has no implementation or undefined size.

---
 src/data/Tensor.cpp | 88 ++++++++++++++++++++++-----------------------
 1 file changed, 44 insertions(+), 44 deletions(-)

diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 44fce6619..c43ca3fbe 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -11,7 +11,6 @@
 
 #include "aidge/data/Tensor.hpp"
 
-#include <algorithm>
 #include <cstddef>
 #include <vector>
 
@@ -25,14 +24,14 @@
 #include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/Sub.hpp"
 #include "aidge/operator/Sqrt.hpp"
-#include "aidge/operator/Transpose.hpp"
 #include "aidge/utils/Types.h"
 
+namespace Aidge {
 
-Aidge::Tensor::~Tensor() noexcept = default;
+Tensor::~Tensor() noexcept = default;
 
 
-Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
+Tensor Tensor::operator+(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
@@ -49,7 +48,7 @@ Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
 }
 
 
-Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
+Tensor Tensor::operator-(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
@@ -66,7 +65,7 @@ Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
 }
 
 
-Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
+Tensor Tensor::operator*(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
@@ -83,7 +82,7 @@ Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
 }
 
 
-Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
+Tensor Tensor::operator/(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
@@ -99,7 +98,7 @@ Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
     return div_.getOutput(0)->clone();
 }
 
-Aidge::Tensor Aidge::Tensor::sqrt() const {
+Tensor Tensor::sqrt() const {
     AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
     auto sqrt_ = Sqrt_Op();
     sqrt_.associateInput(0, std::make_shared<Tensor>(*this));
@@ -110,7 +109,7 @@ Aidge::Tensor Aidge::Tensor::sqrt() const {
     return sqrt_.getOutput(0)->clone();
 }
 
-Aidge::Tensor Aidge::Tensor::abs() const {
+Tensor Tensor::abs() const {
     AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
     auto abs_ = Abs_Op();
     abs_.associateInput(0, std::make_shared<Tensor>(*this));
@@ -121,7 +120,7 @@ Aidge::Tensor Aidge::Tensor::abs() const {
     return abs_.getOutput(0)->clone();
 }
 
-Aidge::Tensor Aidge::Tensor::mean() const {
+Tensor Tensor::mean() const {
     AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
     // TODO: should be the default behavior of ReduceMean_Op
     // No need to specify the list of all axes!
@@ -136,7 +135,7 @@ Aidge::Tensor Aidge::Tensor::mean() const {
     return mean_.getOutput(0)->clone();
 }
 
-Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
+Tensor& Tensor::operator=(const Tensor& other) {
     if (this == &other) {
         return *this;
     }
@@ -156,7 +155,7 @@ Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
 }
 
 
-void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t device, bool copyFrom) {
+void Tensor::setBackend(const std::string &name, DeviceIdx_t device, bool copyFrom) {
     if (mImpl) {
         if (mImpl->device() != std::make_pair(name, device)) {
             // Backend change: create new impl, copy from old to new and replace
@@ -173,8 +172,8 @@ void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t devic
     }
     }
 
-void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
-                           std::vector<Aidge::DimSize_t> strides) {
+void Tensor::resize(const std::vector<DimSize_t>& dims,
+                           std::vector<DimSize_t> strides) {
     if (dims.empty()) {  // scalar
         mDims = std::vector<DimSize_t>(0);
         mStrides = std::vector<DimSize_t>({1});
@@ -236,12 +235,11 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
     }
 }
 
-std::string Aidge::Tensor::toString() const {
-
-    if (!hasImpl() || undefined()) {
-        // Return no value on no implementation or undefined size
-        return std::string("{}");
-    }
+std::string Tensor::toString() const {
+    AIDGE_ASSERT(
+        mImpl && (undefined() || (dims() == std::vector<DimSize_t>({0})) ||
+                  (mImpl->hostPtr() != nullptr)),
+        "tensor should have a valid host pointer");
 
     // TODO: move lambda elsewhere?
     auto ptrToString = [](DataType dt, void* ptr, std::size_t idx) {
@@ -345,7 +343,7 @@ std::string Aidge::Tensor::toString() const {
     return res;
 }
 
-Aidge::Tensor Aidge::Tensor::extract(
+Tensor Tensor::extract(
     const std::vector<std::size_t>& fixedCoord) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
     AIDGE_ASSERT(fixedCoord.size() <= mDims.size(),
@@ -361,7 +359,7 @@ Aidge::Tensor Aidge::Tensor::extract(
     return subTensor;
 }
 
-Aidge::Tensor Aidge::Tensor::extract(
+Tensor Tensor::extract(
     const std::vector<std::size_t>& startCoord,
     const std::vector<std::size_t>& dims) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
@@ -375,7 +373,7 @@ Aidge::Tensor Aidge::Tensor::extract(
     return subTensor;
 }
 
-void Aidge::Tensor::makeContiguous() {
+void Tensor::makeContiguous() {
     if (!mImpl || isContiguous()) {
         return;
     }
@@ -413,7 +411,7 @@ void Aidge::Tensor::makeContiguous() {
     resize(mDims);
 }
 
-void Aidge::Tensor::copyCast(const Tensor& src) {
+void Tensor::copyCast(const Tensor& src) {
     if (&src == this) {
         return;
     }
@@ -434,7 +432,7 @@ void Aidge::Tensor::copyCast(const Tensor& src) {
                         src.size(), mImplOffset);
 }
 
-void Aidge::Tensor::copyFrom(const Tensor& src) {
+void Tensor::copyFrom(const Tensor& src) {
     if (&src == this) {
         return;
     }
@@ -455,7 +453,7 @@ void Aidge::Tensor::copyFrom(const Tensor& src) {
                         mImplOffset);
 }
 
-void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose) {
+void Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose) {
     std::vector<DimSize_t> newDims;
     for (std::size_t i = 0; i < src.dims().size(); ++i) {
         newDims.push_back(src.dims()[transpose[i]]);
@@ -497,11 +495,11 @@ void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t
     setImpl(newImpl);
 }
 
-void Aidge::Tensor::copyTranspose(const Tensor& src, const DataFormatTranspose& transpose) {
+void Tensor::copyTranspose(const Tensor& src, const DataFormatTranspose& transpose) {
     copyTranspose(src, std::vector<DimSize_t>(transpose.begin(), transpose.end()));
 }
 
-void Aidge::Tensor::copyCastFrom(const Tensor& src,
+void Tensor::copyCastFrom(const Tensor& src,
                                  std::shared_ptr<Tensor>& movedSrcPtr) {
     if (&src == this) {
         return;
@@ -534,13 +532,13 @@ void Aidge::Tensor::copyCastFrom(const Tensor& src,
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) {
+Tensor& Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) {
     // Scott Meyers' solution to avoid code duplication
     return const_cast<Tensor&>(
         static_cast<const Tensor&>(*this).refContiguous(fallback));
 }
 
-const Aidge::Tensor& Aidge::Tensor::refContiguous(
+const Tensor& Tensor::refContiguous(
     std::shared_ptr<Tensor>& fallback) const {
     AIDGE_ASSERT(getImpl(),
                  "no backend was set for tensor, cannot refCast() it");
@@ -559,15 +557,15 @@ const Aidge::Tensor& Aidge::Tensor::refContiguous(
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
-                                      const Aidge::DataType& dt) {
+Tensor& Tensor::refCast(std::shared_ptr<Tensor>& fallback,
+                                      const DataType& dt) {
     // Scott Meyers' solution to avoid code duplication
     return const_cast<Tensor&>(
         static_cast<const Tensor&>(*this).refCast(fallback, dt));
 }
 
-const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
-                                            const Aidge::DataType& dt) const {
+const Tensor& Tensor::refCast(std::shared_ptr<Tensor>& fallback,
+                                            const DataType& dt) const {
     AIDGE_ASSERT(getImpl(),
                  "no backend was set for tensor, cannot refCast() it");
 
@@ -600,7 +598,7 @@ const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
+Tensor& Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
                                       const std::string& backend,
                                       DeviceIdx_t device) {
     // Scott Meyers' solution to avoid code duplication
@@ -608,7 +606,7 @@ Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
         static_cast<const Tensor&>(*this).refFrom(fallback, backend, device));
 }
 
-const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
+const Tensor& Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
                                             const std::string& backend,
                                             DeviceIdx_t device) const {
     AIDGE_ASSERT(getImpl(),
@@ -641,8 +639,8 @@ const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
-                                  const Aidge::DataType& dt,
+Tensor& Tensor::ref(std::shared_ptr<Tensor>& fallback,
+                                  const DataType& dt,
                                   const std::string& backend,
                                   DeviceIdx_t device) {
     // Scott Meyers' solution to avoid code duplication
@@ -650,8 +648,8 @@ Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
         static_cast<const Tensor&>(*this).ref(fallback, dt, backend, device));
 }
 
-const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
-                                        const Aidge::DataType& dt,
+const Tensor& Tensor::ref(std::shared_ptr<Tensor>& fallback,
+                                        const DataType& dt,
                                         const std::string& backend,
                                         DeviceIdx_t device) const {
     AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot ref() it");
@@ -675,7 +673,7 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
     }
 }
 
-std::set<std::string> Aidge::Tensor::getAvailableBackends() {
+std::set<std::string> Tensor::getAvailableBackends() {
     std::set<std::string> backendsList;
     for (const auto& tupleKey : Registrar<Tensor>::getKeys()) {
         backendsList.insert(std::get<0>(tupleKey));
@@ -686,7 +684,7 @@ std::set<std::string> Aidge::Tensor::getAvailableBackends() {
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////
 // COORDINATES MANIPULATION
 std::vector<std::size_t>
-Aidge::Tensor::getCoord(const std::vector<Aidge::DimSize_t> &tensorDims,
+Tensor::getCoord(const std::vector<DimSize_t> &tensorDims,
                   std::size_t flatIdx) {
         std::vector<std::size_t> coordIdx(tensorDims.size());
         std::size_t i = tensorDims.size();
@@ -699,7 +697,7 @@ Aidge::Tensor::getCoord(const std::vector<Aidge::DimSize_t> &tensorDims,
 }
 
 
-std::size_t Aidge::Tensor::getIdx(const std::vector<Aidge::DimSize_t> &tensorDims, const std::vector<std::size_t>& coordIdx) {
+std::size_t Tensor::getIdx(const std::vector<DimSize_t> &tensorDims, const std::vector<std::size_t>& coordIdx) {
    AIDGE_ASSERT(coordIdx.size() <= tensorDims.size(), "Tensor::getIdx(): Coordinates does not match number of dimensions.\n\tCoords : {}\n\tDimensions: {}",coordIdx, tensorDims);
    std::size_t flatIdx = 0;
     for(std::size_t i = 0; i < tensorDims.size(); ++i) {
@@ -716,7 +714,7 @@ std::size_t Aidge::Tensor::getIdx(const std::vector<Aidge::DimSize_t> &tensorDim
 }
 
 template<typename T>
-bool Aidge::Tensor::isInBounds(const std::vector<Aidge::DimSize_t>& tensorDims, const std::vector<T>& coords){
+bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<T>& coords){
     AIDGE_ASSERT(coords.size() == tensorDims.size(),
                  "Coordinates({}) to compare have not "
                  "the same number of dimension as tensor dimensions({}), aborting.",
@@ -736,3 +734,5 @@ template bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const
 template bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<float>& coords);
 template bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<double>& coords);
 
+
+}  // namespace Aidge
-- 
GitLab