diff --git a/include/aidge/filler/Filler.hpp b/include/aidge/filler/Filler.hpp
index a021e3d10969025bb349c96e163602b7edf94735..c7b12a35c9167feebaa75ffa6f59d96f6476a6ef 100644
--- a/include/aidge/filler/Filler.hpp
+++ b/include/aidge/filler/Filler.hpp
@@ -19,8 +19,8 @@
 
 namespace Aidge {
 
-void calculateFanInFanOut(std::shared_ptr<Tensor> tensor, unsigned int& fanIn,
-                          unsigned int& fanOut) {
+inline void calculateFanInFanOut(std::shared_ptr<Tensor> tensor,
+                                 unsigned int& fanIn, unsigned int& fanOut) {
     AIDGE_ASSERT(
         tensor->nbDims() == 4,
         "Tensor need to have 4 dimensions to compute FanIn and FanOut.");
@@ -39,182 +39,27 @@ void calculateFanInFanOut(std::shared_ptr<Tensor> tensor, unsigned int& fanIn,
 enum VarianceNorm { FanIn, Average, FanOut };
 
 template <typename T>
-void constantFiller(std::shared_ptr<Tensor> tensor, T constantValue) {
-    AIDGE_ASSERT(tensor->getImpl(),
-                 "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+void constantFiller(std::shared_ptr<Tensor> tensor, T constantValue);
 
-    std::shared_ptr<Tensor> cpyTensor;
-    // Create cpy only if tensor not on CPU
-    Tensor& tensorWithValues =
-        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
-
-    // Setting values
-    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
-        tensorWithValues.set<T>(idx, constantValue);
-    }
-
-    // Copy values back to the original tensors (actual copy only if needed)
-    tensor->copyCastFrom(tensorWithValues);
-}
 // TODO: Keep template or use switch case depending on Tensor datatype ?
 template <typename T>
 void normalFiller(std::shared_ptr<Tensor> tensor, double mean = 0.0,
-                  double stdDev = 1.0) {
-    AIDGE_ASSERT(tensor->getImpl(),
-                 "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
-    std::random_device rd;
-    std::mt19937 gen(rd());  // Mersenne Twister pseudo-random number generator
-
-    std::normal_distribution<T> normalDist(mean, stdDev);
-
-    std::shared_ptr<Tensor> cpyTensor;
-    // Create cpy only if tensor not on CPU
-    Tensor& tensorWithValues =
-        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
-
-    // Setting values
-    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
-        tensorWithValues.set<T>(idx, normalDist(gen));
-    }
-
-    // Copy values back to the original tensors (actual copy only if needed)
-    tensor->copyCastFrom(tensorWithValues);
-};
+                  double stdDev = 1.0);
 
 // TODO: Keep template or use switch case depending on Tensor datatype ?
 template <typename T>
-void uniformFiller(std::shared_ptr<Tensor> tensor, T min, T max) {
-    AIDGE_ASSERT(tensor->getImpl(),
-                 "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
-    std::random_device rd;
-    std::mt19937 gen(rd());  // Mersenne Twister pseudo-random number generator
-
-    std::uniform_real_distribution<T> uniformDist(min, max);
-
-    std::shared_ptr<Tensor> cpyTensor;
-    // Create cpy only if tensor not on CPU
-    Tensor& tensorWithValues =
-        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
-
-    // Setting values
-    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
-        tensorWithValues.set<T>(idx, uniformDist(gen));
-    }
-
-    // Copy values back to the original tensors (actual copy only if needed)
-    tensor->copyCastFrom(tensorWithValues);
-};
+void uniformFiller(std::shared_ptr<Tensor> tensor, T min, T max);
 
 template <typename T>
 void xavierUniformFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0,
-                         VarianceNorm varianceNorm = FanIn) {
-    AIDGE_ASSERT(tensor->getImpl(),
-                 "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
-
-    unsigned int fanIn, fanOut = 0;
-    calculateFanInFanOut(tensor, fanIn, fanOut);
-
-    const T n((varianceNorm == FanIn)     ? fanIn
-              : (varianceNorm == Average) ? (fanIn + fanOut) / 2.0
-                                          : fanOut);
-    const T scale(std::sqrt(3.0 / n));
-
-    std::random_device rd;
-    std::mt19937 gen(rd());  // Mersenne Twister pseudo-random number generator
-
-    std::uniform_real_distribution<T> uniformDist(-scale, scale);
-
-    std::shared_ptr<Tensor> cpyTensor;
-    // Create cpy only if tensor not on CPU
-    Tensor& tensorWithValues =
-        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
-    // Setting values
-    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
-        T value = scaling * uniformDist(gen);
-        tensorWithValues.set<T>(idx, value);
-    }
-
-    // Copy values back to the original tensors (actual copy only if needed)
-    tensor->copyCastFrom(tensorWithValues);
-};
+                         VarianceNorm varianceNorm = FanIn);
 template <typename T>
 void xavierNormalFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0,
-                        VarianceNorm varianceNorm = FanIn) {
-    AIDGE_ASSERT(tensor->getImpl(),
-                 "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
-
-    unsigned int fanIn, fanOut = 0;
-    calculateFanInFanOut(tensor, fanIn, fanOut);
-
-    const T n((varianceNorm == FanIn)     ? fanIn
-              : (varianceNorm == Average) ? (fanIn + fanOut) / 2.0
-                                          : fanOut);
-    const double stdDev(std::sqrt(1.0 / n));
-
-    std::random_device rd;
-    std::mt19937 gen(rd());  // Mersenne Twister pseudo-random number generator
-
-    std::normal_distribution<T> normalDist(0.0, stdDev);
-
-    std::shared_ptr<Tensor> cpyTensor;
-    // Create cpy only if tensor not on CPU
-    Tensor& tensorWithValues =
-        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
-
-    // Setting values
-    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
-        tensorWithValues.set<T>(idx, normalDist(gen));
-    }
-
-    // Copy values back to the original tensors (actual copy only if needed)
-    tensor->copyCastFrom(tensorWithValues);
-};
+                        VarianceNorm varianceNorm = FanIn);
 
 template <typename T>
 void heFiller(std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm = FanIn,
-              T meanNorm = 0.0, T scaling = 1.0) {
-    AIDGE_ASSERT(tensor->getImpl(),
-                 "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
-
-    unsigned int fanIn, fanOut = 0;
-    calculateFanInFanOut(tensor, fanIn, fanOut);
-
-    const T n((varianceNorm == FanIn)     ? fanIn
-              : (varianceNorm == Average) ? (fanIn + fanOut) / 2.0
-                                          : fanOut);
-
-    const T stdDev(std::sqrt(2.0 / n));
-
-    const T mean(varianceNorm == FanIn ? meanNorm / fanIn
-                 : (varianceNorm == Average)
-                     ? meanNorm / ((fanIn + fanOut) / 2.0)
-                     : meanNorm / fanOut);
-
-    std::random_device rd;
-    std::mt19937 gen(rd());  // Mersenne Twister pseudo-random number generator
-
-    std::normal_distribution<T> normalDist(mean, stdDev);
-
-    std::shared_ptr<Tensor> cpyTensor;
-    // Create cpy only if tensor not on CPU
-    Tensor& tensorWithValues =
-        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
-
-    // Setting values
-    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
-        tensorWithValues.set<T>(idx, normalDist(gen));
-    }
-
-    // Copy values back to the original tensors (actual copy only if needed)
-    tensor->copyCastFrom(tensorWithValues);
-};
-
+              T meanNorm = 0.0, T scaling = 1.0);
 }  // namespace Aidge
 
 #endif /* AIDGE_CORE_FILLER_H_ */
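
The header now keeps only the inline calculateFanInFanOut helper plus declarations of the filler templates; the definitions move into per-filler translation units and are explicitly instantiated for float and double. A minimal sketch of that declaration/definition/explicit-instantiation split, using a hypothetical fill() helper rather than the real Aidge types:

    // sketch_filler.hpp -- declaration only, what callers see (illustrative, not Aidge's header)
    #include <vector>

    template <typename T>
    void fill(std::vector<T>& v, T value);

    // sketch_filler.cpp -- definition plus the only instantiations that get emitted
    template <typename T>
    void fill(std::vector<T>& v, T value) {
        for (auto& e : v) { e = value; }   // same idea as constantFiller's loop
    }
    template void fill<float>(std::vector<float>&, float);
    template void fill<double>(std::vector<double>&, double);

Any other element type still compiles against the header but fails at link time, which is why the integer branches disappear from the Python binding below.
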
diff --git a/python_binding/filler/pybind_Filler.cpp b/python_binding/filler/pybind_Filler.cpp
index fea7543fa21e625da05493064d1bbf2fa630f4d5..a85c0d6cd6fa0367dfc26328d214c99a4288a3be 100644
--- a/python_binding/filler/pybind_Filler.cpp
+++ b/python_binding/filler/pybind_Filler.cpp
@@ -35,29 +35,6 @@ void init_Filler(py::module &m) {
                  case DataType::Float32:
                      constantFiller<float>(tensor, value.cast<float>());
                      break;
-                 case DataType::Int8:
-                     constantFiller<int8_t>(tensor, value.cast<int8_t>());
-                     break;
-                 case DataType::Int16:
-                     constantFiller<std::int16_t>(tensor,
-                                                  value.cast<std::int16_t>());
-                     break;
-                 case DataType::Int32:
-                     constantFiller<std::int32_t>(tensor,
-                                                  value.cast<std::int32_t>());
-                     break;
-                 case DataType::Int64:
-                     constantFiller<std::int64_t>(tensor,
-                                                  value.cast<std::int64_t>());
-                     break;
-                 case DataType::UInt8:
-                     constantFiller<std::uint8_t>(tensor,
-                                                  value.cast<std::uint8_t>());
-                     break;
-                 case DataType::UInt16:
-                     constantFiller<std::uint16_t>(tensor,
-                                                   value.cast<std::uint16_t>());
-                     break;
                  default:
                      AIDGE_THROW_OR_ABORT(
                          py::value_error,
@@ -163,6 +140,8 @@ void init_Filler(py::module &m) {
                             "Data type is not supported for Uniform filler.");
                 }
             },
-            py::arg("tensor"), py::arg("varianceNorm") = VarianceNorm::FanIn, py::arg("meanNorm") = 0.0, py::arg("scaling") = 1.0);
+            py::arg("tensor"), py::arg("varianceNorm") = VarianceNorm::FanIn,
+            py::arg("meanNorm") = 0.0, py::arg("scaling") = 1.0)
+        ;
 }
 }  // namespace Aidge
diff --git a/src/filler/ConstantFiller.cpp b/src/filler/ConstantFiller.cpp
index 9a67d40cac6fcabd974b17f7e6195921facffdc6..e7db5e4d02b2031e7f5cf6a0203e3c7acbd3b93e 100644
--- a/src/filler/ConstantFiller.cpp
+++ b/src/filler/ConstantFiller.cpp
@@ -1,32 +1,40 @@
-// /********************************************************************************
-//  * Copyright (c) 2023 CEA-List
-//  *
-//  * This program and the accompanying materials are made available under the
-//  * terms of the Eclipse Public License 2.0 which is available at
-//  * http://www.eclipse.org/legal/epl-2.0.
-//  *
-//  * SPDX-License-Identifier: EPL-2.0
-//  *
-//  ********************************************************************************/
-
-// #include "aidge/filler/Filler.hpp"
-
-// template<typename T>
-// void Aidge::constantFiller(std::shared_ptr<Aidge::Tensor> tensor, T constantValue){
-//     AIDGE_ASSERT(tensor->getImpl(), "Tensor got no implementation, cannot fill it.");
-//     AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
-
-//     std::shared_ptr<Tensor> cpyTensor;
-//     // Create cpy only if tensor not on CPU
-//     const Tensor& tensorWithValues = tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
-
-//     // Setting values
-//     for(std::size_t idx = 0; idx<tensorWithValues.size(); ++idx){
-//         tensorWithValues.set<T>(idx, constantValue);
-//     }
-
-//     // Copy values back to the original tensors (actual copy only if needed)
-//     tensor->copyCastFrom(tensorWithValues);
-
-
-// }
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include <cstddef>  // std::size_t
+#include <memory>
+
+#include "aidge/filler/Filler.hpp"
+#include "aidge/data/Tensor.hpp"
+
+
+template <typename T>
+void Aidge::constantFiller(std::shared_ptr<Aidge::Tensor> tensor, T constantValue) {
+    AIDGE_ASSERT(tensor->getImpl(),
+                 "Tensor got no implementation, cannot fill it.");
+    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+
+    std::shared_ptr<Aidge::Tensor> cpyTensor;
+    // Create a copy only if the tensor is not on CPU
+    Aidge::Tensor& tensorWithValues =
+        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
+
+    // Setting values
+    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
+        tensorWithValues.set<T>(idx, constantValue);
+    }
+
+    // Copy values back to the original tensor (actual copy only if needed)
+    tensor->copyCastFrom(tensorWithValues);
+}
+
+
+template void Aidge::constantFiller<float>(std::shared_ptr<Aidge::Tensor>, float);
+template void Aidge::constantFiller<double>(std::shared_ptr<Aidge::Tensor>, double);
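
A consequence of the explicit instantiations above, shown on a hypothetical caller (the example() function and the Float32 CPU tensor are assumptions for illustration only):

    #include <memory>

    #include "aidge/data/Tensor.hpp"
    #include "aidge/filler/Filler.hpp"

    void example(std::shared_ptr<Aidge::Tensor> weights) {   // assume a Float32, CPU-backed tensor
        Aidge::constantFiller<float>(weights, 0.0f);          // fine: instantiated above
        // Aidge::constantFiller<std::int32_t>(weights, 0);   // would compile but no longer link,
        //                                                    // since only float/double are instantiated
    }
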
diff --git a/src/filler/HeFiller.cpp b/src/filler/HeFiller.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e49386b49c3829c5657a155ac5e6fdddf40f9c03
--- /dev/null
+++ b/src/filler/HeFiller.cpp
@@ -0,0 +1,62 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include <cmath>   // sqrt
+#include <memory>
+#include <random>  // normal_distribution
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+
+template <typename T>
+void Aidge::heFiller(std::shared_ptr<Aidge::Tensor> tensor,
+                     Aidge::VarianceNorm varianceNorm, T meanNorm, T scaling) {
+    AIDGE_ASSERT(tensor->getImpl(),
+                 "Tensor got no implementation, cannot fill it.");
+    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+
+    unsigned int fanIn = 0, fanOut = 0;
+    Aidge::calculateFanInFanOut(tensor, fanIn, fanOut);
+
+    const T n((varianceNorm == Aidge::VarianceNorm::FanIn) ? fanIn
+              : (varianceNorm == Aidge::VarianceNorm::Average)
+                  ? (fanIn + fanOut) / 2.0
+                  : fanOut);
+
+    const T stdDev(std::sqrt(2.0 / n));
+
+    const T mean(varianceNorm == Aidge::VarianceNorm::FanIn ? meanNorm / fanIn
+                 : (varianceNorm == Aidge::VarianceNorm::Average)
+                     ? meanNorm / ((fanIn + fanOut) / 2.0)
+                     : meanNorm / fanOut);
+
+    std::random_device rd;
+    std::mt19937 gen(rd());  // Mersenne Twister pseudo-random number generator
+
+    std::normal_distribution<T> normalDist(mean, stdDev);
+
+    std::shared_ptr<Tensor> cpyTensor;
+    // Create a copy only if the tensor is not on CPU
+    Tensor& tensorWithValues =
+        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
+
+    // Setting values
+    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
+        tensorWithValues.set<T>(idx, scaling * normalDist(gen));
+    }
+
+    // Copy values back to the original tensor (actual copy only if needed)
+    tensor->copyCastFrom(tensorWithValues);
+}
+
+template void Aidge::heFiller<float>(std::shared_ptr<Aidge::Tensor>,
+                                     Aidge::VarianceNorm, float, float);
+template void Aidge::heFiller<double>(std::shared_ptr<Aidge::Tensor>,
+                                      Aidge::VarianceNorm, double, double);
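
For reference, this implements He (Kaiming) initialization: n is fanIn, the fanIn/fanOut average, or fanOut depending on varianceNorm, and values are drawn from a normal distribution with mean meanNorm / n and standard deviation sqrt(2 / n), then multiplied by scaling. A standalone sketch of just that parameter computation (HeParams and heParams are illustrative names, not part of Aidge):

    #include <cmath>

    struct HeParams { double mean; double stdDev; };

    // n is the value selected by varianceNorm: fanIn, (fanIn + fanOut) / 2, or fanOut.
    HeParams heParams(double n, double meanNorm) {
        return { meanNorm / n, std::sqrt(2.0 / n) };
    }
    // e.g. n = 64, meanNorm = 0  ->  mean = 0, stdDev = sqrt(2 / 64) ~= 0.177
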
diff --git a/src/filler/NormalFiller.cpp b/src/filler/NormalFiller.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0fadbd134ff9fd4712a57541cfb3f35debdff13d
--- /dev/null
+++ b/src/filler/NormalFiller.cpp
@@ -0,0 +1,45 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include <memory>
+#include <random>  // normal_distribution, uniform_real_distribution
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+
+template <typename T>
+void Aidge::normalFiller(std::shared_ptr<Aidge::Tensor> tensor, double mean,
+                         double stdDev) {
+    AIDGE_ASSERT(tensor->getImpl(),
+                 "Tensor got no implementation, cannot fill it.");
+    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+    std::random_device rd;
+    std::mt19937 gen(rd());  // Mersenne Twister pseudo-random number generator
+
+    std::normal_distribution<T> normalDist(mean, stdDev);
+
+    std::shared_ptr<Tensor> cpyTensor;
+    // Create a copy only if the tensor is not on CPU
+    Tensor& tensorWithValues =
+        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
+
+    // Setting values
+    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
+        tensorWithValues.set<T>(idx, normalDist(gen));
+    }
+
+    // Copy values back to the original tensor (actual copy only if needed)
+    tensor->copyCastFrom(tensorWithValues);
+}
+
+template void Aidge::normalFiller<float>(std::shared_ptr<Aidge::Tensor>, double,
+                                         double);
+template void Aidge::normalFiller<double>(std::shared_ptr<Aidge::Tensor>,
+                                          double, double);
diff --git a/src/filler/UniformFiller.cpp b/src/filler/UniformFiller.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e45d6f13edeadd8261cbe9742fc064bbcd6155c1
--- /dev/null
+++ b/src/filler/UniformFiller.cpp
@@ -0,0 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include <memory>
+#include <random>  // normal_distribution, uniform_real_distribution
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+
+template <typename T>
+void Aidge::uniformFiller(std::shared_ptr<Aidge::Tensor> tensor, T min, T max) {
+    AIDGE_ASSERT(tensor->getImpl(),
+                 "Tensor got no implementation, cannot fill it.");
+    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+    std::random_device rd;
+    std::mt19937 gen(rd());  // Mersenne Twister pseudo-random number generator
+
+    std::uniform_real_distribution<T> uniformDist(min, max);
+
+    std::shared_ptr<Aidge::Tensor> cpyTensor;
+    // Create a copy only if the tensor is not on CPU
+    Aidge::Tensor& tensorWithValues =
+        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
+
+    // Setting values
+    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
+        tensorWithValues.set<T>(idx, uniformDist(gen));
+    }
+
+    // Copy values back to the original tensor (actual copy only if needed)
+    tensor->copyCastFrom(tensorWithValues);
+}
+
+template void Aidge::uniformFiller<float>(std::shared_ptr<Aidge::Tensor>, float,
+                                          float);
+template void Aidge::uniformFiller<double>(std::shared_ptr<Aidge::Tensor>,
+                                           double, double);
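
Both normalFiller and uniformFiller rely on std::normal_distribution<T> and std::uniform_real_distribution<T>, which the standard only defines for floating-point T; that is consistent with instantiating these fillers for float and double only. A minimal sketch of the same sampling pattern on a plain buffer (fillUniform is an illustrative helper, not the Aidge API):

    #include <random>
    #include <type_traits>
    #include <vector>

    template <typename T>
    void fillUniform(std::vector<T>& buf, T min, T max) {
        static_assert(std::is_floating_point<T>::value,
                      "uniform_real_distribution is only defined for floating-point types");
        std::random_device rd;
        std::mt19937 gen(rd());                            // Mersenne Twister, seeded once per call
        std::uniform_real_distribution<T> dist(min, max);
        for (auto& v : buf) { v = dist(gen); }
    }
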
diff --git a/src/filler/XavierFiller.cpp b/src/filler/XavierFiller.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f1c5d17e80d1f683b59a6429407d74f69000e321
--- /dev/null
+++ b/src/filler/XavierFiller.cpp
@@ -0,0 +1,95 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include <cmath>   // sqrt
+#include <memory>
+#include <random>  // normal_distribution, uniform_real_distribution
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+
+template <typename T>
+void Aidge::xavierUniformFiller(std::shared_ptr<Aidge::Tensor> tensor,
+                                T scaling, Aidge::VarianceNorm varianceNorm) {
+    AIDGE_ASSERT(tensor->getImpl(),
+                 "Tensor got no implementation, cannot fill it.");
+    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+
+    unsigned int fanIn = 0, fanOut = 0;
+    Aidge::calculateFanInFanOut(tensor, fanIn, fanOut);
+
+    const T n((varianceNorm == Aidge::VarianceNorm::FanIn) ? fanIn
+              : (varianceNorm == Aidge::VarianceNorm::Average)
+                  ? (fanIn + fanOut) / 2.0
+                  : fanOut);
+    const T scale(std::sqrt(3.0 / n));
+
+    std::random_device rd;
+    std::mt19937 gen(rd());  // Mersenne Twister pseudo-random number generator
+
+    std::uniform_real_distribution<T> uniformDist(-scale, scale);
+
+    std::shared_ptr<Aidge::Tensor> cpyTensor;
+    // Create a copy only if the tensor is not on CPU
+    Aidge::Tensor& tensorWithValues =
+        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
+    // Setting values
+    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
+        T value = scaling * uniformDist(gen);
+        tensorWithValues.set<T>(idx, value);
+    }
+
+    // Copy values back to the original tensor (actual copy only if needed)
+    tensor->copyCastFrom(tensorWithValues);
+}
+template <typename T>
+void Aidge::xavierNormalFiller(std::shared_ptr<Aidge::Tensor> tensor,
+                               T scaling, Aidge::VarianceNorm varianceNorm) {
+    AIDGE_ASSERT(tensor->getImpl(),
+                 "Tensor got no implementation, cannot fill it.");
+    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+
+    unsigned int fanIn = 0, fanOut = 0;
+    Aidge::calculateFanInFanOut(tensor, fanIn, fanOut);
+
+    const T n((varianceNorm == Aidge::VarianceNorm::FanIn) ? fanIn
+              : (varianceNorm == Aidge::VarianceNorm::Average)
+                  ? (fanIn + fanOut) / 2.0
+                  : fanOut);
+    const double stdDev(std::sqrt(1.0 / n));
+
+    std::random_device rd;
+    std::mt19937 gen(rd());  // Mersenne Twister pseudo-random number generator
+
+    std::normal_distribution<T> normalDist(0.0, stdDev);
+
+    std::shared_ptr<Aidge::Tensor> cpyTensor;
+    // Create a copy only if the tensor is not on CPU
+    Aidge::Tensor& tensorWithValues =
+        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
+
+    // Setting values
+    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
+        tensorWithValues.set<T>(idx, scaling * normalDist(gen));
+    }
+
+    // Copy values back to the original tensor (actual copy only if needed)
+    tensor->copyCastFrom(tensorWithValues);
+}
+
+template void Aidge::xavierUniformFiller<float>(std::shared_ptr<Aidge::Tensor>,
+                                                float, Aidge::VarianceNorm);
+template void Aidge::xavierUniformFiller<double>(std::shared_ptr<Aidge::Tensor>,
+                                                 double, Aidge::VarianceNorm);
+
+template void Aidge::xavierNormalFiller<float>(std::shared_ptr<Aidge::Tensor>,
+                                               float, Aidge::VarianceNorm);
+template void Aidge::xavierNormalFiller<double>(std::shared_ptr<Aidge::Tensor>,
+                                                double, Aidge::VarianceNorm);
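
The two Xavier (Glorot) variants differ only in the distribution: the uniform version draws from [-sqrt(3/n), sqrt(3/n)] and the normal version from a zero-mean normal with standard deviation sqrt(1/n), both multiplied by scaling, with n again chosen by varianceNorm. A small numeric illustration of those bounds (standalone, not Aidge code):

    #include <cmath>
    #include <cstdio>

    int main() {
        const double fanIn = 128.0, fanOut = 256.0;
        const double n = (fanIn + fanOut) / 2.0;                        // VarianceNorm::Average
        std::printf("xavier uniform bound : %f\n", std::sqrt(3.0 / n)); // 0.125
        std::printf("xavier normal stdDev : %f\n", std::sqrt(1.0 / n)); // ~0.072
        return 0;
    }
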