diff --git a/src/loss/classification/BCE.cpp b/src/loss/classification/BCE.cpp
index d80dd2749ae8e4128e9fc5b07740647854d7f5bb..722184eb5d1aa860d06d4e09e10e6c80dcf1e242 100644
--- a/src/loss/classification/BCE.cpp
+++ b/src/loss/classification/BCE.cpp
@@ -123,7 +123,7 @@ Aidge::Tensor Aidge::loss::BCE(std::shared_ptr<Tensor>& prediction,
     // Define node: gradient
     const std::shared_ptr<Node> gradient_node = Mul("gradient");
     div1_node->addChild(gradient_node, 0, 0);
-    Producer(std::make_shared<Tensor>(Array1D<float, 1>{{-1.0f/float(target->dims()[0])}}))
+    Producer(std::make_shared<Tensor>(Array1D<float, 1>{{-1.0f/float(target->size())}}))
         ->addChild(gradient_node, 0, 1);
 
     // Create GraphView
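
Why the normalisation constant changes: the hunk above shows the gradient branch being scaled by a constant Producer holding -1/N. target->dims()[0] is only the batch dimension, whereas target->size() is the total number of elements, which is consistent with a loss averaged over every element rather than over the batch only. A minimal sketch of the arithmetic (plain C++, not the Aidge API; the shape values are hypothetical):

    // Illustration only: old vs. new normalisation constant for a target
    // of shape [nbBatch, nbChan].
    #include <cstddef>
    #include <iostream>

    int main() {
        const std::size_t nbBatch = 8, nbChan = 10;              // hypothetical shape
        const float oldScale = -1.0f / float(nbBatch);           // dims()[0] -> -1/8
        const float newScale = -1.0f / float(nbBatch * nbChan);  // size()    -> -1/80
        std::cout << oldScale << " vs " << newScale << '\n';
        return 0;
    }

The MSE patch below applies the same reasoning to its 2/N scale factor.
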
diff --git a/src/loss/regression/MSE.cpp b/src/loss/regression/MSE.cpp
index f6ad9cfa2fa67e84494c5566f986902e8073b569..b82eab83209cc06e6cecd812093052dac7647969 100644
--- a/src/loss/regression/MSE.cpp
+++ b/src/loss/regression/MSE.cpp
@@ -83,7 +83,7 @@ Aidge::Tensor Aidge::loss::MSE(std::shared_ptr<Tensor>& prediction,
 
     // Note: this assumes target is [nbBatch, nbChan]
     Producer(std::make_shared<Tensor>(
-                 Array1D<float, 1>{{2 / float(target->dims()[0])}}))
+                 Array1D<float, 1>{{2 / float(target->size())}}))
         ->addChild(mul_node, 0, 1);
     sub_node->addChild(mul_node, 0, 0);  // Error computation branch !
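
For MSE the same argument applies: the mean-squared error averages over every element, so the gradient with respect to each prediction is 2 * (pred_i - target_i) / N with N = target->size(). A reference sketch that the patched graph should agree with (plain C++, no Aidge calls; mseGrad is a hypothetical helper and the input values are made up):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Reference gradient of the mean-squared error:
    // dL/dpred_i = 2 * (pred_i - target_i) / N, where N is the total number
    // of elements (target.size()), not only the batch dimension.
    std::vector<float> mseGrad(const std::vector<float>& pred,
                               const std::vector<float>& target) {
        const float scale = 2.0f / float(target.size());  // matches the new Producer constant
        std::vector<float> grad(pred.size());
        for (std::size_t i = 0; i < pred.size(); ++i) {
            grad[i] = scale * (pred[i] - target[i]);
        }
        return grad;
    }

    int main() {
        const std::vector<float> pred{0.2f, 0.8f, 0.5f, 0.1f};
        const std::vector<float> target{0.0f, 1.0f, 1.0f, 0.0f};
        for (float g : mseGrad(pred, target)) std::cout << g << ' ';
        std::cout << '\n';
        return 0;
    }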