diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 9c516575690fbca947496920c7068874bda6bf63..84d77e9f1370977e899331bad27f2ade4b2178f3 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -64,6 +64,7 @@
 #include "aidge/stimuli/Stimulus.hpp"
 
 #include "aidge/recipes/Recipes.hpp"
+#include "aidge/filler/Filler.hpp"
 
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index d8412dbd4ddb4ec371649d180bce10a80dd624f3..a6ff03d36b662f4420424f930401844de25036d2 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -52,6 +52,7 @@ public:
         return mType;
     }
     virtual ~Data() = default;
+    virtual std::string toString() const = 0;
 
 private:
     const std::string mType;
@@ -84,4 +85,4 @@ namespace Aidge {
 inline auto format_as(DataType dt) { return EnumStrings<Aidge::DataType>::data[static_cast<int>(dt)]; }
 }
 
-#endif /* AIDGE_DATA_H_ */
\ No newline at end of file
+#endif /* AIDGE_DATA_H_ */
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index b82ec89d0096d47644e1bb4bd3819536ce7ccd66..1f9c5a5ec14cca4469b0329f2f968cf9dbc7b0de 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -445,7 +445,7 @@ public:
         set<expectedType>(getStorageIdx(coordIdx), value);
     }
 
-    std::string toString() const;
+    std::string toString() const override;
 
     inline void print() const { fmt::print("{}\n", toString()); }
 
diff --git a/include/aidge/filler/Filler.hpp b/include/aidge/filler/Filler.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..51d01d87f338d1c8eb33b7b3ec6194390bfe13bf
--- /dev/null
+++ b/include/aidge/filler/Filler.hpp
@@ -0,0 +1,63 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_FILLER_H_
+#define AIDGE_CORE_FILLER_H_
+
+#include <memory>
+#include <random>  // normal_distribution, uniform_real_distribution
+
+#include "aidge/data/Tensor.hpp"
+
+namespace Aidge {
+
+inline void calculateFanInFanOut(std::shared_ptr<Tensor> tensor,
+                                 unsigned int& fanIn, unsigned int& fanOut) {
+    AIDGE_ASSERT(
+        tensor->nbDims() == 4,
+        "Tensor needs to have 4 dimensions to compute FanIn and FanOut.");
+    // Warning: This function assumes an NCXX data layout.
+    // Aidge currently only supports NCHW, but this may not be true in the
+    // future.
+    DimSize_t batchSize = tensor->dims()[0];
+    DimSize_t channelSize = tensor->dims()[1];
+    AIDGE_ASSERT(batchSize != 0,
+                 "Cannot calculate FanIn if tensor batch size is 0.");
+    AIDGE_ASSERT(channelSize != 0,
+                 "Cannot calculate FanOut if tensor channel size is 0.");
+    fanIn = static_cast<unsigned int>(tensor->size() / batchSize);
+    fanOut = static_cast<unsigned int>(tensor->size() / channelSize);
+}
+enum VarianceNorm { FanIn, Average, FanOut };
+
+template <typename T>
+void constantFiller(std::shared_ptr<Tensor> tensor, T constantValue);
+
+template <typename T>
+void normalFiller(std::shared_ptr<Tensor> tensor, double mean = 0.0,
+                  double stdDev = 1.0);
+
+template <typename T>
+void uniformFiller(std::shared_ptr<Tensor> tensor, T min, T max);
+
+template <typename T>
+void xavierUniformFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0,
+                         VarianceNorm varianceNorm = FanIn);
+template <typename T>
+void xavierNormalFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0,
+                        VarianceNorm varianceNorm = FanIn);
+
+template <typename T>
+void heFiller(std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm = FanIn,
+              T meanNorm = 0.0, T scaling = 1.0);
+}  // namespace Aidge
+
+#endif /* AIDGE_CORE_FILLER_H_ */
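
For illustration (not part of this patch), a minimal C++ usage sketch of the new filler API. It assumes the "cpu" backend (aidge_backend_cpu) is registered and relies on the usual Tensor setup calls (setDataType, setBackend, resize); the fillers assert that getImpl() is set, and calculateFanInFanOut() asserts nbDims() == 4.

    // Sketch only: allocate a 4-D float weight tensor on the CPU backend.
    auto w = std::make_shared<Aidge::Tensor>();
    w->setDataType(Aidge::DataType::Float32);
    w->setBackend("cpu");       // fillers require an implementation
    w->resize({16, 3, 3, 3});   // size 432: fanIn = 432/16 = 27, fanOut = 432/3 = 144
    Aidge::constantFiller<float>(w, 0.0f);                  // every element set to 0
    Aidge::uniformFiller<float>(w, -0.1f, 0.1f);            // uniform in [-0.1, 0.1)
    Aidge::heFiller<float>(w, Aidge::VarianceNorm::FanIn);  // normal, std-dev = sqrt(2 / fanIn)
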
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 46fa56ef0e7d63ce10bb3c96a8d7e1c42b191322..0c6b7f03326491711fd57ed939642d1eec80b0d8 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -98,6 +98,8 @@ public:
      */
     void save(const std::string& path, bool verbose = false, bool showProducers = true) const;
 
+    /**
+     * Log the output tensor(s) of each Node of the GraphView into files
+     * located in the dirName directory.
+     * @param dirName Directory where the log files are written.
+     */
+    void logOutputs(const std::string& dirName) const;
+
     /**
      * Check that a node is in the current GraphView.
      * @param nodePtr Node to check
@@ -283,7 +285,7 @@ public:
      *   added to the list, and so on.
      * - Any remaining nodes have no path to the root node and are added in
      *   arbitrary order. In this case, the ranking is not guaranteed to be unique.
-     * 
+     *
      * If the ranking cannot be guaranteed to be unique, the second item indicates
      * the rank from which uniqueness cannot be guaranteed.
      * @return std::pair<std::vector<NodePtr>, size_t> Pair with the list of ranked
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 82cd5df8e24457bd9f5e07c89826904c7d2283ad..517af5b050daa200e7d608aa71660c86b17701b0 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -27,21 +27,31 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims };
+enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, NoBias };
 
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
                 public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
-                public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
-                                       DimSize_t, std::array<DimSize_t, DIM>> {
+                public StaticAttributes<ConvAttr,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>,
+                                        DimSize_t,
+                                        DimSize_t,
+                                        std::array<DimSize_t, DIM>,
+                                        bool> {
 
 public:
     static const std::string Type;
 
     Conv_Op() = delete;
 
-    using Attributes_ = StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
-                                             DimSize_t, DimSize_t, std::array<DimSize_t, DIM>>;
+    using Attributes_ = StaticAttributes<ConvAttr,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>,
+                                        DimSize_t,
+                                        DimSize_t,
+                                        std::array<DimSize_t, DIM>,
+                                        bool>;
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
 
@@ -49,13 +59,15 @@ public:
                       DimSize_t outChannels,
                       const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
+                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+                      bool noBias = false)
         : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvAttr::StrideDims>(strideDims),
                       attr<ConvAttr::DilationDims>(dilationDims),
                       attr<ConvAttr::InChannels>(inChannels),
                       attr<ConvAttr::OutChannels>(outChannels),
-                      attr<ConvAttr::KernelDims>(kernelDims)) {}
+                      attr<ConvAttr::KernelDims>(kernelDims),
+                      attr<ConvAttr::NoBias>(noBias)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -163,15 +175,17 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> co
             std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
             weightIdxDims[0] = firstEltDims[1];
 
-            // Bias
-            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
-
             // Result
             std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
             res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
             res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+
+            // Bias
+            if (! this->template getAttr<ConvAttr::NoBias>()){
+                const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channels
+                const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
+                res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+            }
             return res;
         }
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
@@ -215,12 +229,14 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
                                   const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+                                  bool noBias = false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims), name);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims, noBias), name);
     addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
-    addProducer(conv, 2, {outChannels}, "b");
+    addProducer(conv, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
+
     return conv;
 }
 
@@ -232,9 +248,10 @@ inline std::shared_ptr<Node> Conv(
     DimSize_t const (&kernelDims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+    bool noBias = false) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims);
+    return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias);
 }
 }  // namespace Aidge
 
@@ -245,7 +262,8 @@ const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
     "DilationDims",
     "InChannels",
     "OutChannels",
-    "KernelDims"
+    "KernelDims",
+    "NoBias"
 };
 }
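
As a hedged illustration (not in the patch), a 2-D convolution Node without a bias Producer could be created as follows; the std::array overload is used so that DIM is deduced:

    // Sketch: 3 -> 16 channels, 3x3 kernel, default stride/dilation, no bias
    // (the "b" Producer is then created with 0 elements, see addProducer above).
    auto conv = Aidge::Conv(3, 16,
                            std::array<Aidge::DimSize_t, 2>{3, 3},  // kernel
                            "conv1",
                            std::array<Aidge::DimSize_t, 2>{1, 1},  // stride
                            std::array<Aidge::DimSize_t, 2>{1, 1},  // dilation
                            true);                                  // noBias
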
 
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 7fa9124d4c750cee53d9c4a402a2fa6196ac8158..035bd84b647bc7b4c57daa14d20ebe60e59e83c2 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -26,7 +26,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims, NoBias };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
@@ -35,7 +35,8 @@ class ConvDepthWise_Op : public OperatorTensor,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        DimSize_t,
-                                       std::array<DimSize_t, DIM>> {
+                                       std::array<DimSize_t, DIM>,
+                                       bool> {
 public:
     static const std::string Type;
 
@@ -45,19 +46,22 @@ public:
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              DimSize_t,
-                                             std::array<DimSize_t, DIM>>;
+                                             std::array<DimSize_t, DIM>,
+                                             bool>;
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
 
     constexpr ConvDepthWise_Op(const DimSize_t nbChannels,
                                const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+                               bool no_bias=false)
         : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
                       attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
                       attr<ConvDepthWiseAttr::Channels>(nbChannels),
-                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {}
+                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
+                      attr<ConvDepthWiseAttr::NoBias>(no_bias)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -157,15 +161,17 @@ public:
             std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
             weightIdxDims[0] = firstEltDims[1];
 
-            // Bias
-            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
 
             // Result
             std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
             res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
             res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+            // Bias
+            if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){
+                const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channels
+                const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
+                res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+            }
             return res;
         }
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
@@ -196,12 +202,13 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            const std::array<DimSize_t, DIM> &kernelDims,
                                            const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                           const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+                                           const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+                                           bool noBias=false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims), name);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims, noBias), name);
     addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
-    addProducer(convDW, 2, {nbChannels}, "b");
+    addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b");
     return convDW;
 }
 
@@ -212,16 +219,17 @@ inline std::shared_ptr<Node> ConvDepthWise(
     DimSize_t const (&kernelDims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+    bool noBias=false) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims);
+    return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias);
 }
 }  // namespace Aidge
 
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims", "Channels",
-                                                          "KernelDims"};
+                                                          "KernelDims", "NoBias"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 8f1de7c0e92558a4b47962c3a375764e1bd1c2ee..fb3aa6384fc703d758cb8753dcf54c4694f96bd4 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -35,11 +35,12 @@ inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+                                  bool no_bias = false)
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
 
     auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
@@ -56,9 +57,10 @@ inline std::shared_ptr<Node> PaddedConv(
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+    bool no_bias = false)
 {
-    return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+    return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
 }
 
 template <std::array<DimSize_t, 1>::size_type DIM>
@@ -67,11 +69,12 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+                                  bool no_bias = false)
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
 
     auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(nb_channels, append(DimSize_t(1), kernel_dims)), "w");
@@ -87,9 +90,10 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+    bool no_bias = false)
 {
-    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
 }
 
 template <std::array<DimSize_t, 1>::size_type DIM>
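
Similarly, a hedged sketch (not in the patch) of the padded depthwise variant with the new no_bias flag; padding_dims takes 2*DIM values:

    // Sketch: 32-channel depthwise conv, 3x3 kernel, 1-pixel zero padding,
    // default stride/dilation, and no bias input.
    auto dw = Aidge::PaddedConvDepthWise(32,
                  std::array<Aidge::DimSize_t, 2>{3, 3},        // kernel
                  "dw1",
                  std::array<Aidge::DimSize_t, 2>{1, 1},        // stride
                  std::array<Aidge::DimSize_t, 4>{1, 1, 1, 1},  // padding (2*DIM)
                  std::array<Aidge::DimSize_t, 2>{1, 1},        // dilation
                  true);                                        // no_bias
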
diff --git a/include/aidge/utils/Directories.hpp b/include/aidge/utils/Directories.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..3bc07b9dd58e472096102c1b0c66971164d632a3
--- /dev/null
+++ b/include/aidge/utils/Directories.hpp
@@ -0,0 +1,83 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+
+#ifndef AIDGE_DIRECTORIES_H_
+#define AIDGE_DIRECTORIES_H_
+
+
+#include <algorithm>  // std::replace_if
+#include <cctype>     // iscntrl
+#include <string>     // std::string
+#include <sstream>    // std::stringstream
+#include <iostream>
+#include <sys/stat.h>
+#include <errno.h>
+
+#ifdef WIN32
+#include <direct.h>
+#else
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+namespace Aidge {
+
+    inline bool isNotValidFilePath(int c) {
+        return (iscntrl(c)
+            || c == '<'
+            || c == '>'
+            || c == ':'
+            || c == '"'
+            || c == '|'
+            || c == '?'
+            || c == '*');
+    }
+
+    inline std::string filePath(const std::string& str) {
+        std::string filePath(str);
+        std::replace_if(filePath.begin(), filePath.end(),
+                        isNotValidFilePath, '_');
+        return filePath;
+    }
+
+
+    inline bool createDirectories(const std::string& dirName)
+    {
+        std::stringstream path(dirName);
+        std::string dir;
+        std::string pathToDir("");
+        int status = 0;
+
+        while (std::getline(path, dir, '/') && status == 0) {
+            pathToDir += dir + '/';
+            struct stat fileStat;
+            if (stat(pathToDir.c_str(), &fileStat) != 0) {
+                // Directory does not exist
+    #ifdef WIN32
+                status = _mkdir(pathToDir.c_str());
+    #else
+    #if defined(S_IRWXU)
+                status = mkdir(pathToDir.c_str(), S_IRWXU | S_IRWXG | S_IRWXO);
+    #else
+                status = mkdir(pathToDir.c_str());
+    #endif
+    #endif
+            } else if (!S_ISDIR(fileStat.st_mode)) {
+                status = -1;
+            }
+        }
+        return (status == 0 || errno == EEXIST);
+    }
+
+
+}
+
+#endif //AIDGE_DIRECTORIES_H_
+
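
For reference, a short sketch (not part of the patch) combining the two helpers to prepare a log directory; createDirectories() expects '/'-separated components:

    // Sketch: sanitize a user-supplied run name, then create the directory tree.
    const std::string dir = Aidge::filePath("logs/run:1");  // ':' is replaced by '_'
    if (!Aidge::createDirectories(dir)) {
        std::cerr << "Could not create directory '" << dir << "'" << std::endl;
    }
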
diff --git a/include/aidge/utils/Random.hpp b/include/aidge/utils/Random.hpp
index 704609c0c778c7065a580b86fc67aea7e9d3525d..73cbd1453b3d840d6da2c58eadd5c5f47e9e9070 100644
--- a/include/aidge/utils/Random.hpp
+++ b/include/aidge/utils/Random.hpp
@@ -9,23 +9,53 @@
  *
  ********************************************************************************/
 
-
 #ifndef AIDGE_RANDOM_H_
 #define AIDGE_RANDOM_H_
 
-
 #include <algorithm>
-#include <vector>
 #include <random>
+#include <vector>
+namespace Aidge {
 
 namespace Random {
 
-    void randShuffle(std::vector<unsigned int>& vec) {
-        std::random_device rd;
-        std::mt19937 g(rd());
-        std::shuffle(vec.begin(), vec.end(), g);
-    }
-
+/**
+ * @brief Generator is a class created to handle only one Mersenne Twister
+ * pseudo-random number generator for the whole Aidge framework.
+ *
+ * All of its methods are static. You can set a random seed and access the
+ * generator.
+ * By default, the stored seed value is 0 but the generator itself is seeded
+ * randomly.
+ *
+ */
+class Generator {
+   public:
+    /**
+     * @brief Set the seed of the pseudo-random number generator.
+     *
+     * @param seed Seed value used to (re)initialize the generator.
+     */
+    static void setSeed(unsigned int seed);
+    static unsigned int getSeed() { return seed; };
+    /**
+     * @brief Return a Mersenne Twister pseudo-random number generator.
+     * You can set the seed of this generator using ``setSeed`` method.
+     *
+     * @return std::mt19937&
+     */
+    static std::mt19937& get() { return generator; };
+
+   private:
+    // Mersenne Twister pseudo-random number generator
+    static std::mt19937 generator;
+    static unsigned int seed;
+};
+
+inline void randShuffle(std::vector<unsigned int>& vec) {
+    std::shuffle(vec.begin(), vec.end(), Aidge::Random::Generator::get());
 }
 
-#endif //AIDGE_RANDOM_H_
\ No newline at end of file
+}  // namespace Random
+}  // namespace Aidge
+
+#endif  // AIDGE_RANDOM_H_
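
A short usage sketch (not part of the patch) of the shared generator; seeding it once makes the fillers and randShuffle() reproducible across runs:

    // Sketch: seed the single framework-wide Mersenne Twister, then draw from it.
    Aidge::Random::Generator::setSeed(42);
    std::uniform_real_distribution<float> dist(0.0f, 1.0f);
    const float sample = dist(Aidge::Random::Generator::get());
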
diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp
index df3792fd784a2ef2b9418628959629ac59c04094..bca246c94434b280a12d070526ad4ffb2c7fbe7b 100644
--- a/python_binding/data/pybind_Data.cpp
+++ b/python_binding/data/pybind_Data.cpp
@@ -26,12 +26,11 @@ void init_Data(py::module& m){
     .value("Int64", DataType::Int64)
     .value("UInt8", DataType::UInt8)
     .value("UInt32", DataType::UInt32)
-    .value("UInt64", DataType::UInt64)   
+    .value("UInt64", DataType::UInt64)
     ;
 
-    py::class_<Data, std::shared_ptr<Data>>(m,"Data")
-    .def(py::init<const std::string&>());
+    py::class_<Data, std::shared_ptr<Data>>(m,"Data");
+
 
-    
 }
 }
diff --git a/python_binding/filler/pybind_Filler.cpp b/python_binding/filler/pybind_Filler.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a85c0d6cd6fa0367dfc26328d214c99a4288a3be
--- /dev/null
+++ b/python_binding/filler/pybind_Filler.cpp
@@ -0,0 +1,147 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+
+namespace py = pybind11;
+
+namespace Aidge {
+
+void init_Filler(py::module &m) {
+    py::enum_<enum VarianceNorm>(m, "VarianceNorm")
+        .value("FanIn", VarianceNorm::FanIn)
+        .value("Average", VarianceNorm::Average)
+        .value("FanOut", VarianceNorm::FanOut)
+        .export_values();
+
+    m.def(
+         "constant_filler",
+         [](std::shared_ptr<Tensor> tensor, py::object value) -> void {
+             switch (tensor->dataType()) {
+                 case DataType::Float64:
+                     constantFiller<double>(tensor, value.cast<double>());
+                     break;
+                 case DataType::Float32:
+                     constantFiller<float>(tensor, value.cast<float>());
+                     break;
+                 default:
+                     AIDGE_THROW_OR_ABORT(
+                         py::value_error,
+                         "Data type is not supported for Constant filler.");
+             }
+         },
+         py::arg("tensor"), py::arg("value"))
+        .def(
+            "normal_filler",
+            [](std::shared_ptr<Tensor> tensor, double mean,
+               double stdDev) -> void {
+                switch (tensor->dataType()) {
+                    case DataType::Float64:
+                        normalFiller<double>(tensor, mean, stdDev);
+                        break;
+                    case DataType::Float32:
+                        normalFiller<float>(tensor, mean, stdDev);
+                        break;
+                    default:
+                        AIDGE_THROW_OR_ABORT(
+                            py::value_error,
+                            "Data type is not supported for Normal filler.");
+                }
+            },
+            py::arg("tensor"), py::arg("mean") = 0.0, py::arg("stdDev") = 1.0)
+        .def(
+            "uniform_filler",
+            [](std::shared_ptr<Tensor> tensor, double min, double max) -> void {
+                switch (tensor->dataType()) {
+                    case DataType::Float64:
+                        uniformFiller<double>(tensor, min, max);
+                        break;
+                    case DataType::Float32:
+                        uniformFiller<float>(tensor, min, max);
+                        break;
+                    default:
+                        AIDGE_THROW_OR_ABORT(
+                            py::value_error,
+                            "Data type is not supported for Uniform filler.");
+                }
+            },
+            py::arg("tensor"), py::arg("min"), py::arg("max"))
+        .def(
+            "xavier_uniform_filler",
+            [](std::shared_ptr<Tensor> tensor, py::object scaling,
+               VarianceNorm varianceNorm) -> void {
+                switch (tensor->dataType()) {
+                    case DataType::Float64:
+                        xavierUniformFiller<double>(
+                            tensor, scaling.cast<double>(), varianceNorm);
+                        break;
+                    case DataType::Float32:
+                        xavierUniformFiller<float>(
+                            tensor, scaling.cast<float>(), varianceNorm);
+                        break;
+                    default:
+                        AIDGE_THROW_OR_ABORT(
+                            py::value_error,
+                            "Data type is not supported for Xavier Uniform filler.");
+                }
+            },
+            py::arg("tensor"), py::arg("scaling") = 1.0,
+            py::arg("varianceNorm") = VarianceNorm::FanIn)
+        .def(
+            "xavier_normal_filler",
+            [](std::shared_ptr<Tensor> tensor, py::object scaling,
+               VarianceNorm varianceNorm) -> void {
+                switch (tensor->dataType()) {
+                    case DataType::Float64:
+                        xavierNormalFiller<double>(
+                            tensor, scaling.cast<double>(), varianceNorm);
+                        break;
+                    case DataType::Float32:
+                        xavierNormalFiller<float>(tensor, scaling.cast<float>(),
+                                                  varianceNorm);
+                        break;
+                    default:
+                        AIDGE_THROW_OR_ABORT(
+                            py::value_error,
+                            "Data type is not supported for Xavier Normal filler.");
+                }
+            },
+            py::arg("tensor"), py::arg("scaling") = 1.0,
+            py::arg("varianceNorm") = VarianceNorm::FanIn)
+        .def(
+            "he_filler",
+            [](std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm,
+               py::object meanNorm, py::object scaling) -> void {
+                switch (tensor->dataType()) {
+                    case DataType::Float64:
+                        heFiller<double>(tensor, varianceNorm,
+                                         meanNorm.cast<double>(),
+                                         scaling.cast<double>());
+                        break;
+                    case DataType::Float32:
+                        heFiller<float>(tensor, varianceNorm,
+                                        meanNorm.cast<float>(),
+                                        scaling.cast<float>());
+                        break;
+                    default:
+                        AIDGE_THROW_OR_ABORT(
+                            py::value_error,
+                            "Data type is not supported for He filler.");
+                }
+            },
+            py::arg("tensor"), py::arg("varianceNorm") = VarianceNorm::FanIn,
+            py::arg("meanNorm") = 0.0, py::arg("scaling") = 1.0)
+        ;
+}
+}  // namespace Aidge
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index a41d0d92835be2b5ef07d30c4a5233da1e3906b7..eae05d8e2c04a877e5942600d7120024f20c4788 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -30,7 +30,7 @@ void init_GraphView(py::module& m) {
           :param path: save location
           :type path: str
           )mydelimiter")
-
+          .def("log_outputs", &GraphView::logOutputs, py::arg("path"))
           .def("get_output_nodes", &GraphView::outputNodes,
           R"mydelimiter(
           Get set of output Nodes.
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 7020c35f63880e77ecd3c2011a1b3c74bed847ed..087c232dc6a2977169e19ce4bdf0807adfc13d93 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -23,6 +23,9 @@ template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
     const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D");
     py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, pyClassName.c_str(), py::multiple_inheritance())
+    .def(py::init<float, float>(),
+        py::arg("epsilon"),
+        py::arg("momentum"))
     .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
     .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
     .def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index aea402017622655a577ac4f9e207141bff01d70d..d1016869c3fec9cbc10f2d2c86f685f8787b1d3b 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -33,12 +33,14 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
                 DimSize_t,
                 const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &>(),
+                const std::array<DimSize_t, DIM> &,
+                bool>(),
         py::arg("in_channels"),
         py::arg("out_channels"),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("dilation_dims"))
+        py::arg("dilation_dims"),
+        py::arg("no_bias"))
     .def("get_inputs_name", &Conv_Op<DIM>::getInputsName)
     .def("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
     .def("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
@@ -51,18 +53,20 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
-                                                         const std::vector<DimSize_t> &dilation_dims) {
+                                                         const std::vector<DimSize_t> &dilation_dims,
+                                                         bool noBias) {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-        return Conv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        return Conv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), noBias);
     }, py::arg("in_channels"),
        py::arg("out_channels"),
        py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("no_bias") = false);
 }
 
 
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 83eac8742628bf2e0921e6a17dd46226c46fbea1..bbb94c3773e825cd5ee852243fa8db7a5bd763da 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -33,11 +33,13 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
   .def(py::init<const DimSize_t,
                 const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &>(),
+                const std::array<DimSize_t, DIM> &,
+                bool>(),
         py::arg("nb_channels"),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("dilation_dims"))
+        py::arg("dilation_dims"),
+        py::arg("no_bias"))
   .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
   .def("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName);
@@ -46,17 +48,19 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
                                                                   const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
-                                                                  const std::vector<DimSize_t> &dilation_dims) {
+                                                                  const std::vector<DimSize_t> &dilation_dims,
+                                                                  bool no_bias) {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-        return ConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        return ConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
     }, py::arg("nb_channels"),
        py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("no_bias") = false);
 
 }
 
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index 20a620cee737db5380ee7641b161cf6296ef7e5b..20cd3f156996c98bb64502a90ab98535f87cc2a3 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -30,21 +30,23 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
                                                          const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
-                                                         const std::vector<DimSize_t> &dilation_dims)
+                                                         const std::vector<DimSize_t> &dilation_dims,
+                                                         bool no_bias)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-        return PaddedConv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        return PaddedConv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
     }, py::arg("in_channels"),
        py::arg("out_channels"),
        py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("no_bias") = false);
 }
 
 template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
@@ -53,20 +55,22 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
                                                          const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
-                                                         const std::vector<DimSize_t> &dilation_dims)
+                                                         const std::vector<DimSize_t> &dilation_dims,
+                                                         bool no_bias)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-        return PaddedConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        return PaddedConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
     }, py::arg("nb_channels"),
        py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("no_bias") = false);
 
 }
 
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 52863735ca431e797fab3426d7e61796a8725dd2..5ffa8f6b460b720581fb8196d45ad84e1ef350f2 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -11,12 +11,12 @@
 
 #include <pybind11/pybind11.h>
 
-#include "aidge/backend/cpu/data/TensorImpl.hpp" // This include add Tensor
-
+#include "aidge/backend/cpu/data/TensorImpl.hpp"  // This include adds Tensor
 
 namespace py = pybind11;
 
 namespace Aidge {
+void init_Random(py::module&);
 void init_Data(py::module&);
 void init_Database(py::module&);
 void init_DataProvider(py::module&);
@@ -71,9 +71,11 @@ void init_Recipes(py::module&);
 
 void init_Scheduler(py::module&);
 void init_TensorUtils(py::module&);
+void init_Filler(py::module&);
 
+void init_Aidge(py::module& m) {
+    init_Random(m);
 
-void init_Aidge(py::module& m){
     init_Data(m);
     init_Database(m);
     init_DataProvider(m);
@@ -129,9 +131,8 @@ void init_Aidge(py::module& m){
     init_Recipes(m);
     init_Scheduler(m);
     init_TensorUtils(m);
+    init_Filler(m);
 }
 
-PYBIND11_MODULE(aidge_core, m) {
-    init_Aidge(m);
-}
-}
+PYBIND11_MODULE(aidge_core, m) { init_Aidge(m); }
+}  // namespace Aidge
diff --git a/python_binding/utils/pybind_Random.cpp b/python_binding/utils/pybind_Random.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a1956d2d1e398cdb81673e7760a92bcde46e2de6
--- /dev/null
+++ b/python_binding/utils/pybind_Random.cpp
@@ -0,0 +1,24 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include "aidge/utils/Random.hpp"
+
+namespace py = pybind11;
+
+namespace Aidge {
+
+void init_Random(py::module &m) {
+    auto mRand = m.def_submodule("random", "Random module.");
+    py::class_<Random::Generator>(mRand, "Generator")
+    .def_static("set_seed", Random::Generator::setSeed);
+}
+}  // namespace Aidge
diff --git a/src/data/DataProvider.cpp b/src/data/DataProvider.cpp
index 7783ed86cf4ae1d8672cc6a35a97ca9a996457b6..5c3d1d7ef3b3dd8c779cf9cda737f1a2b2f6e01f 100644
--- a/src/data/DataProvider.cpp
+++ b/src/data/DataProvider.cpp
@@ -41,8 +41,8 @@ Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::si
     }
 
     // Compute the number of batches depending on the mDropLast boolean
-    mNbBatch = (mDropLast) ? 
-                static_cast<std::size_t>(std::floor(mNbItems / mBatchSize)) : 
+    mNbBatch = (mDropLast) ?
+                static_cast<std::size_t>(std::floor(mNbItems / mBatchSize)) :
                 static_cast<std::size_t>(std::ceil(mNbItems / mBatchSize));
 }
 
@@ -98,7 +98,7 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch() con
 
 
 void Aidge::DataProvider::setBatches(){
-    
+
     mBatches.clear();
     mBatches.resize(mNbItems);
     std::iota(mBatches.begin(),
@@ -106,7 +106,7 @@ void Aidge::DataProvider::setBatches(){
               0U);
 
     if (mShuffle){
-        Random::randShuffle(mBatches);
+        Aidge::Random::randShuffle(mBatches);
     }
 
     if (mNbItems % mBatchSize !=0){ // The last batch is not full
diff --git a/src/filler/ConstantFiller.cpp b/src/filler/ConstantFiller.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e7db5e4d02b2031e7f5cf6a0203e3c7acbd3b93e
--- /dev/null
+++ b/src/filler/ConstantFiller.cpp
@@ -0,0 +1,40 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include <memory>
+#include <random>  // normal_distribution, uniform_real_distribution
+
+#include "aidge/filler/Filler.hpp"
+#include "aidge/data/Tensor.hpp"
+
+
+template<typename T>
+void Aidge::constantFiller(std::shared_ptr<Aidge::Tensor> tensor, T constantValue){
+    AIDGE_ASSERT(tensor->getImpl(),
+                 "Tensor has no implementation, it cannot be filled.");
+    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+
+    std::shared_ptr<Aidge::Tensor> cpyTensor;
+    // Create cpy only if tensor not on CPU
+    Aidge::Tensor& tensorWithValues =
+        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
+
+    // Setting values
+    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
+        tensorWithValues.set<T>(idx, constantValue);
+    }
+
+    // Copy values back to the original tensors (actual copy only if needed)
+    tensor->copyCastFrom(tensorWithValues);
+}
+
+
+template void Aidge::constantFiller<float>(std::shared_ptr<Aidge::Tensor>, float);
+template void Aidge::constantFiller<double>(std::shared_ptr<Aidge::Tensor>, double);
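
Since the fillers are explicitly instantiated, supporting an additional data type requires one more instantiation line per filler. As a hedged sketch (not in the patch), an Int32 constant filler would add:

    // Sketch: extra explicit instantiation, assuming NativeType<std::int32_t> maps to DataType::Int32.
    template void Aidge::constantFiller<std::int32_t>(std::shared_ptr<Aidge::Tensor>, std::int32_t);
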
diff --git a/src/filler/HeFiller.cpp b/src/filler/HeFiller.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..74d681f1a05c15045d27a0fe678aa676d16af077
--- /dev/null
+++ b/src/filler/HeFiller.cpp
@@ -0,0 +1,59 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include <cmath>   // std::sqrt
+#include <memory>
+#include <random>  // normal_distribution, uniform_real_distribution
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+#include "aidge/utils/Random.hpp"
+
+template <typename T>
+void Aidge::heFiller(std::shared_ptr<Aidge::Tensor> tensor,
+                     Aidge::VarianceNorm varianceNorm, T meanNorm, T scaling) {
+    AIDGE_ASSERT(tensor->getImpl(),
+                 "Tensor has no implementation, it cannot be filled.");
+    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+
+    unsigned int fanIn = 0, fanOut = 0;
+    Aidge::calculateFanInFanOut(tensor, fanIn, fanOut);
+
+    const T n((varianceNorm == Aidge::VarianceNorm::FanIn) ? fanIn
+              : (varianceNorm == Aidge::VarianceNorm::Average)
+                  ? (fanIn + fanOut) / 2.0
+                  : fanOut);
+
+    const T stdDev(std::sqrt(2.0 / n));
+
+    const T mean(varianceNorm == Aidge::VarianceNorm::FanIn ? meanNorm / fanIn
+                 : (varianceNorm == Aidge::VarianceNorm::Average)
+                     ? meanNorm / ((fanIn + fanOut) / 2.0)
+                     : meanNorm / fanOut);
+
+    std::normal_distribution<T> normalDist(mean, stdDev);
+
+    std::shared_ptr<Tensor> cpyTensor;
+    // Create cpy only if tensor not on CPU
+    Tensor& tensorWithValues =
+        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
+
+    // Setting values
+    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
+        tensorWithValues.set<T>(idx, scaling*normalDist(Aidge::Random::Generator::get()));
+    }
+
+    // Copy values back to the original tensors (actual copy only if needed)
+    tensor->copyCastFrom(tensorWithValues);
+}
+
+template void Aidge::heFiller<float>(std::shared_ptr<Aidge::Tensor>,
+                                     Aidge::VarianceNorm, float, float);
+template void Aidge::heFiller<double>(std::shared_ptr<Aidge::Tensor>,
+                                      Aidge::VarianceNorm, double, double);
diff --git a/src/filler/NormalFiller.cpp b/src/filler/NormalFiller.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f30b32431cf466b10c1b10df8e0e5ccec9f483b6
--- /dev/null
+++ b/src/filler/NormalFiller.cpp
@@ -0,0 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include <memory>
+#include <random>  // normal_distribution, uniform_real_distribution
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+#include "aidge/utils/Random.hpp"
+
+template <typename T>
+void Aidge::normalFiller(std::shared_ptr<Aidge::Tensor> tensor, double mean,
+                         double stdDev) {
+    AIDGE_ASSERT(tensor->getImpl(),
+                 "Tensor has no implementation, it cannot be filled.");
+    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+
+    std::normal_distribution<T> normalDist(mean, stdDev);
+
+    std::shared_ptr<Tensor> cpyTensor;
+    // Create cpy only if tensor not on CPU
+    Tensor& tensorWithValues =
+        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
+
+    // Setting values
+    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
+        tensorWithValues.set<T>(idx, normalDist(Aidge::Random::Generator::get()));
+    }
+
+    // Copy values back to the original tensors (actual copy only if needed)
+    tensor->copyCastFrom(tensorWithValues);
+}
+
+template void Aidge::normalFiller<float>(std::shared_ptr<Aidge::Tensor>, double,
+                                         double);
+template void Aidge::normalFiller<double>(std::shared_ptr<Aidge::Tensor>,
+                                          double, double);
diff --git a/src/filler/UniformFiller.cpp b/src/filler/UniformFiller.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a942f59d717fd8d7b541ee28868a7fb9f2e7cd95
--- /dev/null
+++ b/src/filler/UniformFiller.cpp
@@ -0,0 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include <memory>
+#include <random>  // normal_distribution, uniform_real_distribution
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+#include "aidge/utils/Random.hpp"
+
+template <typename T>
+void Aidge::uniformFiller(std::shared_ptr<Aidge::Tensor> tensor, T min, T max) {
+    AIDGE_ASSERT(tensor->getImpl(),
+                 "Tensor has no implementation; cannot fill it.");
+    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+
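+    // std::uniform_real_distribution samples from [min, max); it expects
+    // min <= max and a floating-point T (float/double here).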
+    std::uniform_real_distribution<T> uniformDist(min, max);
+
+    std::shared_ptr<Aidge::Tensor> cpyTensor;
+    // Make a CPU copy only if the tensor is not already on CPU
+    Aidge::Tensor& tensorWithValues =
+        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
+
+    // Setting values
+    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
+        tensorWithValues.set<T>(idx, uniformDist(Aidge::Random::Generator::get()));
+    }
+
+    // Copy values back to the original tensor (actual copy only if needed)
+    tensor->copyCastFrom(tensorWithValues);
+}
+
+template void Aidge::uniformFiller<float>(std::shared_ptr<Aidge::Tensor>, float,
+                                          float);
+template void Aidge::uniformFiller<double>(std::shared_ptr<Aidge::Tensor>,
+                                           double, double);
diff --git a/src/filler/XavierFiller.cpp b/src/filler/XavierFiller.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a1de15971ca8063e504e270fa6d2275d93270460
--- /dev/null
+++ b/src/filler/XavierFiller.cpp
@@ -0,0 +1,90 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include <memory>
+#include <random>  // normal_distribution, uniform_real_distribution
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+#include "aidge/utils/Random.hpp"
+
+template <typename T>
+void Aidge::xavierUniformFiller(std::shared_ptr<Aidge::Tensor> tensor,
+                                T scaling, Aidge::VarianceNorm varianceNorm) {
+    AIDGE_ASSERT(tensor->getImpl(),
+                 "Tensor has no implementation; cannot fill it.");
+    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+
+    unsigned int fanIn = 0, fanOut = 0;
+    Aidge::calculateFanInFanOut(tensor, fanIn, fanOut);
+
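+    // Xavier/Glorot uniform: sample from [-a, a] with a = sqrt(3/n), which
+    // gives a variance of a^2/3 = 1/n for the selected fan.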
+    const T n((varianceNorm == Aidge::VarianceNorm::FanIn) ? fanIn
+              : (varianceNorm == Aidge::VarianceNorm::Average)
+                  ? (fanIn + fanOut) / 2.0
+                  : fanOut);
+    const T scale(std::sqrt(3.0 / n));
+
+    std::uniform_real_distribution<T> uniformDist(-scale, scale);
+
+    std::shared_ptr<Aidge::Tensor> cpyTensor;
+    // Make a CPU copy only if the tensor is not already on CPU
+    Aidge::Tensor& tensorWithValues =
+        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
+    // Setting values
+    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
+        tensorWithValues.set<T>(
+            idx, scaling * uniformDist(Aidge::Random::Generator::get()));
+    }
+
+    // Copy values back to the original tensor (actual copy only if needed)
+    tensor->copyCastFrom(tensorWithValues);
+}
+template <typename T>
+void Aidge::xavierNormalFiller(std::shared_ptr<Aidge::Tensor> tensor, T scaling,
+                               Aidge::VarianceNorm varianceNorm) {
+    AIDGE_ASSERT(tensor->getImpl(),
+                 "Tensor has no implementation; cannot fill it.");
+    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+
+    unsigned int fanIn = 0, fanOut = 0;
+    Aidge::calculateFanInFanOut(tensor, fanIn, fanOut);
+
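+    // Xavier/Glorot normal: zero mean with stdDev = sqrt(1/n), i.e. the same
+    // 1/n variance as the uniform variant above.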
+    const T n((varianceNorm == Aidge::VarianceNorm::FanIn) ? fanIn
+              : (varianceNorm == Aidge::VarianceNorm::Average)
+                  ? (fanIn + fanOut) / 2.0
+                  : fanOut);
+    const double stdDev(std::sqrt(1.0 / n));
+
+    std::normal_distribution<T> normalDist(0.0, stdDev);
+
+    std::shared_ptr<Aidge::Tensor> cpyTensor;
+    // Make a CPU copy only if the tensor is not already on CPU
+    Aidge::Tensor& tensorWithValues =
+        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");
+
+    // Setting values
+    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
+        tensorWithValues.set<T>(
+            idx, scaling * normalDist(Aidge::Random::Generator::get()));
+    }
+
+    // Copy values back to the original tensor (actual copy only if needed)
+    tensor->copyCastFrom(tensorWithValues);
+}
+
+template void Aidge::xavierUniformFiller<float>(std::shared_ptr<Aidge::Tensor>,
+                                                float, Aidge::VarianceNorm);
+template void Aidge::xavierUniformFiller<double>(std::shared_ptr<Aidge::Tensor>,
+                                                 double, Aidge::VarianceNorm);
+
+template void Aidge::xavierNormalFiller<float>(std::shared_ptr<Aidge::Tensor>,
+                                               float, Aidge::VarianceNorm);
+template void Aidge::xavierNormalFiller<double>(std::shared_ptr<Aidge::Tensor>,
+                                                double, Aidge::VarianceNorm);
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 9ae6c6450917942bbf335b442d64b97650eb3de0..f80e51d49eff94c16107df16baf3db816beaf1ca 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -26,6 +26,7 @@
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Directories.hpp"
 
 ///////////////////////////////////////////////////////
 //        FUNCTIONAL DESCRIPTION
@@ -193,6 +194,29 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
     fmt::print(fp.get(), "\n");
 }
 
+void Aidge::GraphView::logOutputs(const std::string& dirName) const {
+  if (!Aidge::createDirectories(dirName)){
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Failed to create directory: {}.", dirName);
+  }
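+  // One log file per output tensor, laid out as <dirName>/<node name>/output_<k>.log.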
+  for (std::shared_ptr<Node> nodePtr : getNodes()) {
+    const std::string nodePath = dirName + "/" + Aidge::filePath(nodePtr->name()) + "/";
+    if (!Aidge::createDirectories(nodePath)){
+      AIDGE_THROW_OR_ABORT(std::runtime_error, "Failed to create directory: {}.", nodePath);
+    }
+
+    for (IOIndex_t outIdx = 0; outIdx < nodePtr->nbOutputs(); ++outIdx) {
+      const std::string outputPath = nodePath + "output_" + std::to_string(outIdx) + ".log";
+      auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen(outputPath.c_str(), "w"), &std::fclose);
+      if (!fp) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "Could not create graph view log file: {}", outputPath);
+      }
+      fmt::print(fp.get(), "{}\n", nodePtr->getOperator()->getRawOutput(outIdx)->toString());
+    }
+  }
+}
+
 void Aidge::GraphView::setRootNode(NodePtr node) {
   AIDGE_ASSERT(mNodes.find(node) != mNodes.end(), "Root node is not in the GraphView!");
   mRootNode = node;
@@ -356,7 +380,7 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                 }
             } else {
                 AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i)
-                    && !std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(), 
+                    && !std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(),
                   "Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
             }
 
diff --git a/src/graphRegex/GraphRegex.cpp b/src/graphRegex/GraphRegex.cpp
index 00a031e3fa9b03ff1870446b9ae58e8d3eb65bf7..ca15ff8dec5ff5ebd4ea69141c6e286849162bb5 100644
--- a/src/graphRegex/GraphRegex.cpp
+++ b/src/graphRegex/GraphRegex.cpp
@@ -117,6 +117,8 @@ std::set<std::shared_ptr<MatchSolution>> GraphRegex::match(std::shared_ptr<Graph
             std::vector<std::shared_ptr<MatchSolution>> solution = fsm->test(combination);
             solutions.insert(solutions.end(), solution.begin(), solution.end());
         }
+
+
     }
     return _findLargestCompatibleSet(solutions);
 }
@@ -142,7 +144,10 @@ void GraphRegex::setNodeKey(const std::string key,std::function<bool(NodePtr)> f
         throw std::runtime_error(key + " is define");
     }
     mAllLambda[key] = f;
+
     _majConditionalInterpreterLambda();
+    // Also register the key itself as a default node test: key($)==true.
+    setNodeKey(key, key + "($)==true");
 }
 
 void GraphRegex::_majConditionalInterpreterLambda(){
diff --git a/src/nodeTester/ConditionalLexer.cpp b/src/nodeTester/ConditionalLexer.cpp
index 9379bd8409f8f7ec4bae3e0122f88de79718e9dd..e70772fc1a5d6136fb56f5981d73bf6cb0622991 100644
--- a/src/nodeTester/ConditionalLexer.cpp
+++ b/src/nodeTester/ConditionalLexer.cpp
@@ -120,7 +120,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
             }
 
 
-            if (std::regex_match(currentChars,std::regex("(true|false)"))){
+            if (std::regex_match(currentChars,std::regex("(true|false|True|False)"))){
                 return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::BOOL,currentChars);
 
             } else if (isLambda){
diff --git a/src/utils/Random.cpp b/src/utils/Random.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0c3dc61df54e16d129638c66b4c245d6141e819c
--- /dev/null
+++ b/src/utils/Random.cpp
@@ -0,0 +1,22 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/utils/Random.hpp"
+
+#include <random>  // normal_distribution, uniform_real_distribution
+
+std::mt19937 Aidge::Random::Generator::generator{std::random_device{}()};
+unsigned int Aidge::Random::Generator::seed = 0;
+
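+// The generator is seeded from std::random_device at static initialization;
+// call setSeed() to obtain reproducible sequences (e.g. for the random fillers).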
+void Aidge::Random::Generator::setSeed(unsigned int new_seed) {
+    seed = new_seed;
+    generator.seed(seed);
+}
diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp
index bcd6d0f4cd9ba32ee4318188343b7e6360670d3b..a62b9a8602b494f26fb47061b899eaba41129a1f 100644
--- a/unit_tests/graphRegex/Test_GraphRegex.cpp
+++ b/unit_tests/graphRegex/Test_GraphRegex.cpp
@@ -18,6 +18,32 @@ using namespace Aidge;
 
 TEST_CASE("GraphRegexUser") {
 
+
+    SECTION("Match using custom lambda") {
+
+        std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> fc = GenericOperator("FC", 1, 0, 1, "c1");
+        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
+        std::shared_ptr<Node> fc2 = GenericOperator("FC", 1, 0, 1, "c3");
+
+        g1->add(conv);
+        g1->addChild(fc, "c");
+        g1->addChild(conv2, "c1");
+        g1->addChild(fc2, "c2");
+
+        ///
+        std::shared_ptr<GraphRegex> sut = std::make_shared<GraphRegex>();
+        sut->setNodeKey("C",+[](NodePtr NodeOp){return NodeOp->type() == "FC";});
+        
+        sut->setNodeKey("A","C($)==True");
+        sut->addQuery("A");
+        auto match = sut->match(g1);
+        REQUIRE(match.size() == 2);
+
+    }
+
+
     SECTION("INIT") {
 
         const std::string query = "Conv->FC";
diff --git a/version.txt b/version.txt
index 17e51c385ea382d4f2ef124b7032c1604845622d..0ea3a944b399d25f7e1b8fe684d754eb8da9fe7f 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.1.1
+0.2.0