From 869b863dfb6bbc032ac12b4e4f6455ba981e164a Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 4 Jan 2024 11:43:40 +0100
Subject: [PATCH] Adapt the CPU backend to aidge_core API changes

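Follow the updated core TensorImpl interface:

- TensorImpl_cpu now overrides size().
- copy() takes an optional element offset and writes to rawPtr() + offset.
- getRaw() becomes getRawPtr(NbElts_t) and overrides the core interface;
  the destructor declaration is moved after setRawPtr().
- Calls to Tensor::sizeM1() are replaced by the equivalent
  size() / dims()[0] in the FC, MatMul and Softmax kernels; FCImpl also
  reuses the local input0 tensor instead of re-casting mOp.getRawInput(0).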
---
 include/aidge/backend/cpu/data/TensorImpl.hpp | 13 +++++++------
 src/operator/FCImpl.cpp                       |  4 ++--
 src/operator/MatMulImpl.cpp                   |  2 +-
 src/operator/SoftmaxImpl.cpp                  |  2 +-
 4 files changed, 11 insertions(+), 10 deletions(-)
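
Reviewer note (not part of the commit): the standalone sketch below only
illustrates the arithmetic that replaces sizeM1() at the kernel call sites;
the dims values are made up for the example and nothing here is applied by
the patch.

    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <numeric>
    #include <vector>

    int main() {
        // Hypothetical {N, C, H, W} dims, for illustration only.
        const std::vector<std::size_t> dims{2, 3, 4, 5};

        // Total element count, i.e. what Tensor::size() returns.
        const std::size_t size = std::accumulate(
            dims.cbegin(), dims.cend(), std::size_t{1}, std::multiplies<>{});

        // Elements per entry of the first dimension: the quantity the
        // kernels now compute as size() / dims()[0], where sizeM1() was
        // used before.
        const std::size_t perBatch = size / dims[0];
        assert(perBatch == 3 * 4 * 5);

        // SoftmaxImpl further divides by the channel count (dims()[1]).
        const std::size_t featureSize = perBatch / dims[1];
        assert(featureSize == 4 * 5);
        return 0;
    }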

diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index dd31088d..9de5210c 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -44,16 +44,17 @@ class TensorImpl_cpu : public TensorImpl {
     // native interface
     const future_std::span<T>& data() const { return mData; }
 
+    std::size_t size() const override { return mData.size(); }
     std::size_t scalarSize() const override { return sizeof(T); }
 
     void setDevice(int device) override {
         AIDGE_ASSERT(device == 0, "device cannot be != 0 for CPU backend");
     }
 
-    void copy(const void *src, NbElts_t length) override {
+    void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override {
         AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
         std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
-                  static_cast<T *>(rawPtr()));
+                  static_cast<T *>(rawPtr()) + offset);
     }
 
     void copyCast(const void *src, NbElts_t length, const DataType srcDt) override {
@@ -148,19 +149,19 @@ class TensorImpl_cpu : public TensorImpl {
         return mData.data();
     };
 
-    void* getRaw(std::size_t idx) {
+    void* getRawPtr(NbElts_t idx) override final {
         AIDGE_ASSERT(idx < mData.size(), "idx out of range");
-        return  static_cast<void*>(static_cast<T *>(rawPtr()) + idx);
+        return static_cast<void*>(static_cast<T*>(rawPtr()) + idx);
     };
 
-    virtual ~TensorImpl_cpu() = default;
-
     void setRawPtr(void *ptr, NbElts_t length) override final {
         AIDGE_ASSERT(length >= mTensor.size(), "trying to set raw pointer of insufficient capacity");
         mData = future_std::span<T>(static_cast<T *>(ptr), length);
         mDataOwner.reset();
     };
 
+    virtual ~TensorImpl_cpu() = default;
+
 private:
     void lazyInit() {
         if (mData.size() < mTensor.size()) {
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index 90cf3ea5..bc4a7a7c 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -58,8 +58,8 @@ void Aidge::FCImpl_cpu::forward()
 
     // Call kernel
     kernelFunc(dynamic_cast<const FC_Op&>(mOp).getStaticAttributes(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0],
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->sizeM1(),
+        input0.dims()[0],
+        input0.size() / input0.dims()[0],
         input0.getImpl()->rawPtr(), input1.getImpl()->rawPtr(), input2.getImpl()->rawPtr(),
         getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/MatMulImpl.cpp b/src/operator/MatMulImpl.cpp
index 1abd75db..f02effb3 100644
--- a/src/operator/MatMulImpl.cpp
+++ b/src/operator/MatMulImpl.cpp
@@ -47,7 +47,7 @@ void Aidge::MatMulImpl_cpu::forward()
     kernelFunc(
         dynamic_cast<const MatMul_Op&>(mOp).getStaticAttributes(),
         std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0],
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->sizeM1(),
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size() / std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0],
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawInput(1)),
         getCPUPtr(mOp.getRawOutput(0)));
diff --git a/src/operator/SoftmaxImpl.cpp b/src/operator/SoftmaxImpl.cpp
index 428d32fc..c3086d8f 100644
--- a/src/operator/SoftmaxImpl.cpp
+++ b/src/operator/SoftmaxImpl.cpp
@@ -38,7 +38,7 @@ void Aidge::SoftmaxImpl_cpu::forward() {
 
     DimSize_t batchSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0];
     DimSize_t channelSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[1];
-    DimSize_t featureSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->sizeM1()/channelSize;
+    DimSize_t featureSize = (std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size()/batchSize)/channelSize;
     // Call kernel
     kernelFunc(batchSize,
                channelSize,
-- 
GitLab