diff --git a/include/aidge/learning/optimizer/Adam.hpp b/include/aidge/learning/optimizer/Adam.hpp
index 125cfd792ca61c94f52c0238229f282d4d7f8e47..a018d6e57faef07bb4ade28ca06ed43c98075508 100644
--- a/include/aidge/learning/optimizer/Adam.hpp
+++ b/include/aidge/learning/optimizer/Adam.hpp
@@ -35,12 +35,12 @@ class Adam: public Optimizer, public StaticAttributes<AdamAttr, float, float, fl
 private:
     std::vector<Tensor> mMomentum1;
     std::vector<Tensor> mMomentum2;
-    Tensor mLR{std::vector<std::size_t>({1})};
-    Tensor mBeta1{std::vector<std::size_t>({1})};
-    Tensor mReversedBeta1{std::vector<std::size_t>({1})};
-    Tensor mBeta2{std::vector<std::size_t>({1})};
-    Tensor mReversedBeta2{std::vector<std::size_t>({1})};
-    Tensor mEpsilon{std::vector<std::size_t>({1})};
+    Tensor mLR{1.0f};
+    Tensor mBeta1;
+    Tensor mReversedBeta1;
+    Tensor mBeta2;
+    Tensor mReversedBeta2;
+    Tensor mEpsilon;
 
 public:
     using Attributes_ = StaticAttributes<AdamAttr, float, float, float>;
@@ -51,19 +51,17 @@ public:
         : Optimizer(),
           Attributes_(attr<AdamAttr::Beta1>(beta1),
                       attr<AdamAttr::Beta2>(beta2),
-                      attr<AdamAttr::Epsilon>(epsilon))
+                      attr<AdamAttr::Epsilon>(epsilon)),
+          mBeta1(beta1),
+          mReversedBeta1(1.0f - beta1),
+          mBeta2(beta2),
+          mReversedBeta2(1.0f - beta2),
+          mEpsilon(epsilon)
     {
-        mBeta1 = Tensor(Array1D<float, 1>{{beta1}});
-        mReversedBeta1 = Tensor(Array1D<float, 1>{{1.0f - beta1}});
-
-        mBeta2 = Tensor(Array1D<float, 1>{{beta2}});
-        mReversedBeta2 = Tensor(Array1D<float, 1>{{1.0f - beta2}});
-
-        mEpsilon = Tensor(Array1D<float, 1>{{epsilon}});
     }
 
     void update() override final {
-        mLR = Tensor(Array1D<float, 1>{{learningRate()}});
+        mLR = Tensor(learningRate());
         mLR.setBackend(mParameters[0]->getImpl()->backend());
 
         if (mParameters[0]->getImpl()->backend() != mBeta1.getImpl()->backend()) {
@@ -73,11 +71,11 @@ public:
             mReversedBeta2.setBackend(mParameters[0]->getImpl()->backend());
         }
 
-        Tensor alpha = Tensor(Array1D<float, 1>{{ static_cast<float>(learningRate() * std::sqrt(1.0f - std::pow(this->getAttr<AdamAttr::Beta2>(), mLRScheduler.step() + 1))
-                                           / (1.0f - std::pow(this->getAttr<AdamAttr::Beta1>(), mLRScheduler.step() + 1))) }});
+        Tensor alpha = Tensor(learningRate() * std::sqrt(1.0f - std::pow(this->getAttr<AdamAttr::Beta2>(), static_cast<float>(mLRScheduler.step() + 1)))
+                                           / (1.0f - std::pow(this->getAttr<AdamAttr::Beta1>(), static_cast<float>(mLRScheduler.step() + 1))));
         alpha.setBackend(mParameters[0]->getImpl()->backend());
 
-        Tensor epsilon = Tensor(Array1D<float, 1>{{ static_cast<float>(this->getAttr<AdamAttr::Epsilon>() * std::sqrt(1.0f - std::pow(this->getAttr<AdamAttr::Beta2>(), mLRScheduler.step() + 1))) }});
+        Tensor epsilon = Tensor(this->getAttr<AdamAttr::Epsilon>() * std::sqrt(1.0f - std::pow(this->getAttr<AdamAttr::Beta2>(), static_cast<float>(mLRScheduler.step() + 1))));
         epsilon.setBackend(mParameters[0]->getImpl()->backend());
 
         if (mLRScheduler.step() == 0) {
@@ -90,13 +88,13 @@ public:
                 mMomentum2[i].zeros();
             }
         }
-		
+
         for (std::size_t i = 0; i < mParameters.size(); ++i) {
             mMomentum1[i] = mBeta1 * mMomentum1[i] + mReversedBeta1 * (*mParameters[i]->grad());
             mMomentum2[i] = mBeta2 * mMomentum2[i] + mReversedBeta2 * (*mParameters[i]->grad()) * (*mParameters[i]->grad());
-            *mParameters[i] = *mParameters[i] - alpha * mMomentum1[i] / (mMomentum2[i].sqrt() +  epsilon);
+            *mParameters[i] -= alpha * mMomentum1[i] / (mMomentum2[i].sqrt() + epsilon);
         }
-        
+
         mLRScheduler.update();
     }
 
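
The rewritten update() above drops the Array1D<float, 1> wrappers and builds the bias-corrected step size and the scaled epsilon as scalar Tensors. In plain floats, each Adam step at scheduler step t performs the arithmetic below (a minimal sketch of the same math; adam_step and its parameter names are illustrative, not part of the Aidge API):

#include <cmath>
#include <cstddef>

// Scalar version of the per-parameter Adam step done in update().
// lr, beta1, beta2, eps mirror the optimizer attributes; t is mLRScheduler.step().
float adam_step(float w, float grad, float& m1, float& m2,
                float lr, float beta1, float beta2, float eps, std::size_t t) {
    m1 = beta1 * m1 + (1.0f - beta1) * grad;                   // mMomentum1 update
    m2 = beta2 * m2 + (1.0f - beta2) * grad * grad;            // mMomentum2 update
    const float corr2 = 1.0f - std::pow(beta2, static_cast<float>(t + 1));
    const float corr1 = 1.0f - std::pow(beta1, static_cast<float>(t + 1));
    const float alpha   = lr  * std::sqrt(corr2) / corr1;      // bias-corrected learning rate
    const float epsilon = eps * std::sqrt(corr2);              // epsilon scaled the same way
    return w - alpha * m1 / (std::sqrt(m2) + epsilon);         // *mParameters[i] -= ...
}
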
diff --git a/include/aidge/learning/optimizer/SGD.hpp b/include/aidge/learning/optimizer/SGD.hpp
index 2ce657282496de90e5410e5e128a715a474d79bc..768a3d05604909c248ac105c444d4cda1aee93c2 100644
--- a/include/aidge/learning/optimizer/SGD.hpp
+++ b/include/aidge/learning/optimizer/SGD.hpp
@@ -47,23 +47,23 @@ public:
           Attributes_(attr<SGDAttr::Momentum>(momentum),
                     attr<SGDAttr::Dampening>(dampening))
     {
-        mMomentum = Tensor(Array1D<float, 1>{{momentum}});
-        mReversedDampening = Tensor(Array1D<float, 1>{{1.0f - dampening}});
+        mMomentum = Tensor(momentum);
+        mReversedDampening = Tensor(1.0f - dampening);
     }
 
     void update() override final {
-        mLR = Tensor(Array1D<float, 1>{{learningRate()}});
+        mLR = Tensor(learningRate());
         mLR.setBackend(mParameters[0]->getImpl()->backend());
 
         if (mLRScheduler.step() == 0) {
             for (std::size_t i = 0; i < mParameters.size(); ++i) {
                 mGradientInertia[i] = mParameters[i]->grad()->clone();
-                *mParameters[i] = *mParameters[i] - mLR*mGradientInertia[i];
+                *mParameters[i] -= mLR*mGradientInertia[i];
             }
         } else {
             for (std::size_t i = 0; i < mParameters.size(); ++i) {
                 mGradientInertia[i] = mMomentum*mGradientInertia[i] + mReversedDampening*(*mParameters[i]->grad());
-                *mParameters[i] = *mParameters[i] - mLR*mGradientInertia[i];
+                *mParameters[i] -= mLR*mGradientInertia[i];
             }
         }
         mLRScheduler.update();
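
The SGD update follows the same pattern: scalar Tensors for the hyper-parameters and a compound -= for the weight update. A minimal scalar sketch of one iteration (sgd_step and its parameters are illustrative names, not the Aidge API):

// Scalar version of one SGD-with-momentum step; v mirrors mGradientInertia.
float sgd_step(float w, float grad, float& v,
               float lr, float momentum, float dampening, bool first_step) {
    if (first_step) {
        v = grad;                                      // step 0: inertia starts as the raw gradient
    } else {
        v = momentum * v + (1.0f - dampening) * grad;  // mMomentum * v + mReversedDampening * grad
    }
    return w - lr * v;                                 // *mParameters[i] -= mLR * v
}
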
diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt
index 07d35e3360735434412ac7bbd659e3d8ad8240de..27dab59f4d65d98604baea968db3ab349538932d 100644
--- a/unit_tests/CMakeLists.txt
+++ b/unit_tests/CMakeLists.txt
@@ -4,7 +4,7 @@ include(FetchContent)
 FetchContent_Declare(
   Catch2
   GIT_REPOSITORY https://github.com/catchorg/Catch2.git
-  GIT_TAG        v3.0.1 # or a later release
+  GIT_TAG        v3.7.1 # or a later release
 )
 FetchContent_MakeAvailable(Catch2)
 
diff --git a/unit_tests/optimizer/Test_Adam.cpp b/unit_tests/optimizer/Test_Adam.cpp
index a3d7c4b15a02fba8897b736d45d3c6bc373209c0..cd171e3e856c9cc11c4575ef2b85207638d7f3af 100644
--- a/unit_tests/optimizer/Test_Adam.cpp
+++ b/unit_tests/optimizer/Test_Adam.cpp
@@ -130,8 +130,8 @@ TEST_CASE("[learning/Adam] update", "[Optimizer][Adam]") {
 
             for (std::size_t step = 0; step < 10; ++step) {
                 // truth
-                float lr2 = lr * std::sqrt(1.0f - std::pow(beta2, step + 1)) / (1.0f - std::pow(beta1, step + 1));
-                float epsilon2 = epsilon * std::sqrt(1.0f - std::pow(beta2, step + 1));
+                float lr2 = lr * std::sqrt(1.0f - std::pow(beta2, static_cast<float>(step + 1))) / (1.0f - std::pow(beta1, static_cast<float>(step + 1)));
+                float epsilon2 = epsilon * std::sqrt(1.0f - std::pow(beta2, static_cast<float>(step + 1)));
                 for (std::size_t t = 0; t < nb_tensors; ++t) {
                     for (std::size_t i = 0; i < size_tensors[t]; ++i) {
                         val_momentum1_tensors[t][i] = beta1 * val_momentum1_tensors[t][i] + (1.0f - beta1) * val_grad_tensors[t][i];
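
The static_cast<float> on the exponent keeps the reference values in single precision: with a float base and an integral exponent, std::pow promotes to double, while two float arguments select the float overload, matching the float arithmetic done on the tensors. A quick stand-alone check of that promotion rule (a sketch, not part of the test):

#include <cmath>
#include <type_traits>

static_assert(std::is_same_v<decltype(std::pow(0.9f, 1)), double>,
              "mixed float/integral arguments promote to double");
static_assert(std::is_same_v<decltype(std::pow(0.9f, 1.0f)), float>,
              "two float arguments stay in single precision");
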
diff --git a/unit_tests/optimizer/Test_SGD.cpp b/unit_tests/optimizer/Test_SGD.cpp
index 3f1380779b90115652fe54249a77a41d58f15b13..14986a71125ca8fafe64ea2496f75ed3dfc010ef 100644
--- a/unit_tests/optimizer/Test_SGD.cpp
+++ b/unit_tests/optimizer/Test_SGD.cpp
@@ -9,13 +9,15 @@
  *
  ********************************************************************************/
 
-#include <catch2/catch_test_macros.hpp>
 #include <cstddef>  // std::size_t
 #include <memory>
 #include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
 #include <set>
 #include <vector>
 
+#include <catch2/catch_test_macros.hpp>
+#include <fmt/core.h>
+
 #include "aidge/data/Tensor.hpp"
 #include "aidge/backend/cpu/data/TensorImpl.hpp"
 #include "aidge/learning/learningRate/LRScheduler.hpp"
@@ -81,9 +83,7 @@ TEST_CASE("[learning/SGD] update", "[Optimizer][SGD]") {
                 tensors[i] = std::make_shared<Tensor>(dims);
                 tensors[i]->setBackend("cpu");
                 tensors[i]->getImpl()->setRawPtr(val_tensors[i].get(), size_tensors[i]);
-                optim_tensors[i] = std::make_shared<Tensor>(dims);
-                optim_tensors[i]->setBackend("cpu");
-                optim_tensors[i]->getImpl()->copy(val_tensors[i].get(), size_tensors[i]);
+                optim_tensors[i] = std::make_shared<Tensor>(tensors[i]->clone());
                 // optim_tensors[i]->initGrad();
 
                 grad_tensors[i] = std::make_shared<Tensor>(dims);
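
The optim_tensors setup now reuses Tensor::clone() instead of allocating a second tensor and copying the raw buffer by hand, so the copy keeps the dims and backend of the source while owning its own storage (unlike tensors[i], which aliases val_tensors[i] through setRawPtr). A minimal sketch of the semantics the test relies on, with buffer and dims as illustrative stand-ins for the test's locals:

#include <cstddef>
#include <memory>
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"

void clone_vs_alias() {
    std::vector<float> buffer(8, 1.0f);
    const std::vector<std::size_t> dims{8};

    auto src = std::make_shared<Aidge::Tensor>(dims);
    src->setBackend("cpu");
    src->getImpl()->setRawPtr(buffer.data(), buffer.size());    // src aliases buffer
    auto copy = std::make_shared<Aidge::Tensor>(src->clone());  // independent storage, same values
    // Optimizer updates applied through copy must leave buffer untouched.
}
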