From 7844f1ae76ef10bbfb827d3d929f2f41e93bc752 Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Thu, 5 Dec 2024 14:26:41 +0000
Subject: [PATCH] Replace 1D attribute Tensors with scalar Tensors and use
 compound assignment operators on 'Parameter' to avoid resetting the 'mGrad'
 attribute

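With plain assignment, '*mParameters[i] = *mParameters[i] - update' builds a
new Tensor and overwrites the parameter object, which also resets its 'mGrad'
attribute. The compound form '*mParameters[i] -= update' modifies the values
in place, so the gradient Tensor attached to the parameter is kept.

Minimal sketch of the idea (illustrative only, not part of this patch; the
header path and the free function 'applyUpdate' are assumptions):

    #include <memory>
    #include "aidge/data/Tensor.hpp"

    void applyUpdate(std::shared_ptr<Aidge::Tensor> param,
                     const Aidge::Tensor& update) {
        // '*param = *param - update' would re-assign the parameter Tensor and
        // drop its 'mGrad' attribute; the in-place form below preserves it.
        *param -= update;
    }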
---
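Note (not included in the commit message): the optimizer attributes are now
stored as scalar Tensors built directly from a float instead of 1D Tensors
built from a one-element Array1D. A minimal comparison, assuming the usual
aidge_core headers for Tensor and Array1D:

    #include "aidge/data/Tensor.hpp"
    #include "aidge/utils/ArrayHelpers.hpp"

    // Scalar Tensor, as this patch now constructs the attributes:
    Aidge::Tensor beta1Scalar(0.9f);
    // 1D Tensor of size 1, as the previous code constructed them:
    Aidge::Tensor beta1Vector(Aidge::Array1D<float, 1>{{0.9f}});

Both hold a single float value; the scalar form avoids the one-element
Array1D wrapper and the extra dimension.
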
 include/aidge/learning/optimizer/Adam.hpp | 40 +++++++++++------------
 include/aidge/learning/optimizer/SGD.hpp  | 10 +++---
 2 files changed, 24 insertions(+), 26 deletions(-)

diff --git a/include/aidge/learning/optimizer/Adam.hpp b/include/aidge/learning/optimizer/Adam.hpp
index 125cfd7..a018d6e 100644
--- a/include/aidge/learning/optimizer/Adam.hpp
+++ b/include/aidge/learning/optimizer/Adam.hpp
@@ -35,12 +35,12 @@ class Adam: public Optimizer, public StaticAttributes<AdamAttr, float, float, fl
 private:
     std::vector<Tensor> mMomentum1;
     std::vector<Tensor> mMomentum2;
-    Tensor mLR{std::vector<std::size_t>({1})};
-    Tensor mBeta1{std::vector<std::size_t>({1})};
-    Tensor mReversedBeta1{std::vector<std::size_t>({1})};
-    Tensor mBeta2{std::vector<std::size_t>({1})};
-    Tensor mReversedBeta2{std::vector<std::size_t>({1})};
-    Tensor mEpsilon{std::vector<std::size_t>({1})};
+    Tensor mLR{1.0f};
+    Tensor mBeta1;
+    Tensor mReversedBeta1;
+    Tensor mBeta2;
+    Tensor mReversedBeta2;
+    Tensor mEpsilon;
 
 public:
     using Attributes_ = StaticAttributes<AdamAttr, float, float, float>;
@@ -51,19 +51,17 @@ public:
         : Optimizer(),
           Attributes_(attr<AdamAttr::Beta1>(beta1),
                       attr<AdamAttr::Beta2>(beta2),
-                      attr<AdamAttr::Epsilon>(epsilon))
+                      attr<AdamAttr::Epsilon>(epsilon)),
+          mBeta1(beta1),
+          mReversedBeta1(1.0f - beta1),
+          mBeta2(beta2),
+          mReversedBeta2(1.0f - beta2),
+          mEpsilon(epsilon)
     {
-        mBeta1 = Tensor(Array1D<float, 1>{{beta1}});
-        mReversedBeta1 = Tensor(Array1D<float, 1>{{1.0f - beta1}});
-
-        mBeta2 = Tensor(Array1D<float, 1>{{beta2}});
-        mReversedBeta2 = Tensor(Array1D<float, 1>{{1.0f - beta2}});
-
-        mEpsilon = Tensor(Array1D<float, 1>{{epsilon}});
     }
 
     void update() override final {
-        mLR = Tensor(Array1D<float, 1>{{learningRate()}});
+        mLR = Tensor(learningRate());
         mLR.setBackend(mParameters[0]->getImpl()->backend());
 
         if (mParameters[0]->getImpl()->backend() != mBeta1.getImpl()->backend()) {
@@ -73,11 +71,11 @@ public:
             mReversedBeta2.setBackend(mParameters[0]->getImpl()->backend());
         }
 
-        Tensor alpha = Tensor(Array1D<float, 1>{{ static_cast<float>(learningRate() * std::sqrt(1.0f - std::pow(this->getAttr<AdamAttr::Beta2>(), mLRScheduler.step() + 1))
-                                           / (1.0f - std::pow(this->getAttr<AdamAttr::Beta1>(), mLRScheduler.step() + 1))) }});
+        Tensor alpha = Tensor(learningRate() * std::sqrt(1.0f - std::pow(this->getAttr<AdamAttr::Beta2>(), static_cast<float>(mLRScheduler.step() + 1)))
+                                           / (1.0f - std::pow(this->getAttr<AdamAttr::Beta1>(), static_cast<float>(mLRScheduler.step() + 1))));
         alpha.setBackend(mParameters[0]->getImpl()->backend());
 
-        Tensor epsilon = Tensor(Array1D<float, 1>{{ static_cast<float>(this->getAttr<AdamAttr::Epsilon>() * std::sqrt(1.0f - std::pow(this->getAttr<AdamAttr::Beta2>(), mLRScheduler.step() + 1))) }});
+        Tensor epsilon = Tensor(this->getAttr<AdamAttr::Epsilon>() * std::sqrt(1.0f - std::pow(this->getAttr<AdamAttr::Beta2>(), static_cast<float>(mLRScheduler.step() + 1))));
         epsilon.setBackend(mParameters[0]->getImpl()->backend());
 
         if (mLRScheduler.step() == 0) {
@@ -90,13 +88,13 @@ public:
                 mMomentum2[i].zeros();
             }
         }
-		
+
         for (std::size_t i = 0; i < mParameters.size(); ++i) {
             mMomentum1[i] = mBeta1 * mMomentum1[i] + mReversedBeta1 * (*mParameters[i]->grad());
             mMomentum2[i] = mBeta2 * mMomentum2[i] + mReversedBeta2 * (*mParameters[i]->grad()) * (*mParameters[i]->grad());
-            *mParameters[i] = *mParameters[i] - alpha * mMomentum1[i] / (mMomentum2[i].sqrt() +  epsilon);
+            *mParameters[i] -= alpha * mMomentum1[i] / (mMomentum2[i].sqrt() + epsilon);
         }
-        
+
         mLRScheduler.update();
     }
 
diff --git a/include/aidge/learning/optimizer/SGD.hpp b/include/aidge/learning/optimizer/SGD.hpp
index 2ce6572..768a3d0 100644
--- a/include/aidge/learning/optimizer/SGD.hpp
+++ b/include/aidge/learning/optimizer/SGD.hpp
@@ -47,23 +47,23 @@ public:
           Attributes_(attr<SGDAttr::Momentum>(momentum),
                     attr<SGDAttr::Dampening>(dampening))
     {
-        mMomentum = Tensor(Array1D<float, 1>{{momentum}});
-        mReversedDampening = Tensor(Array1D<float, 1>{{1.0f - dampening}});
+        mMomentum = Tensor(momentum);
+        mReversedDampening = Tensor(1.0f - dampening);
     }
 
     void update() override final {
-        mLR = Tensor(Array1D<float, 1>{{learningRate()}});
+        mLR = Tensor(learningRate());
         mLR.setBackend(mParameters[0]->getImpl()->backend());
 
         if (mLRScheduler.step() == 0) {
             for (std::size_t i = 0; i < mParameters.size(); ++i) {
                 mGradientInertia[i] = mParameters[i]->grad()->clone();
-                *mParameters[i] = *mParameters[i] - mLR*mGradientInertia[i];
+                *mParameters[i] -= mLR*mGradientInertia[i];
             }
         } else {
             for (std::size_t i = 0; i < mParameters.size(); ++i) {
                 mGradientInertia[i] = mMomentum*mGradientInertia[i] + mReversedDampening*(*mParameters[i]->grad());
-                *mParameters[i] = *mParameters[i] - mLR*mGradientInertia[i];
+                *mParameters[i] -= mLR*mGradientInertia[i];
             }
         }
         mLRScheduler.update();
-- 
GitLab