diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index e7b16963f4c26e5d014ce90fa289c043e2eb0be4..a8143c5e86ec82f6e595136cb2b4fa9175abffd3 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -34,10 +34,8 @@ class test_attributes(unittest.TestCase):
     def test_fc(self):
         in_channels = 4
         out_channels = 8
-        nb_bias = True
-        fc_op = aidge_core.FC(in_channels, out_channels, nb_bias).get_operator()
+        fc_op = aidge_core.FC(in_channels, out_channels).get_operator()
         self.assertEqual(fc_op.out_channels(), out_channels)
-        self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
 
     def test_producer_1D(self):
         dims = [5]
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index c30282f3438889e233f3d9ed22ab7c7e795b2951..9310fdd24b33e419ff6b567a17628b7ec4c99797 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -30,7 +30,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvAttr { StrideDims, DilationDims, KernelDims, NoBias };
+enum class ConvAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
@@ -38,8 +38,7 @@ class Conv_Op : public OperatorTensor,
                 public StaticAttributes<ConvAttr,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        bool> {
+                                        std::array<DimSize_t, DIM>> {
 
 public:
     static const std::string Type;
@@ -49,22 +48,19 @@ public:
     using Attributes_ = StaticAttributes<ConvAttr,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        bool>;
+                                        std::array<DimSize_t, DIM>>;
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
 
     constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                      bool noBias = false)
+                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvAttr::StrideDims>(strideDims),
                       attr<ConvAttr::DilationDims>(dilationDims),
                     //   attr<ConvAttr::InChannels>(inChannels),
                     //   attr<ConvAttr::OutChannels>(outChannels),
-                      attr<ConvAttr::KernelDims>(kernelDims),
-                      attr<ConvAttr::NoBias>(noBias)) {}
+                      attr<ConvAttr::KernelDims>(kernelDims)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -148,10 +144,11 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
                                   bool noBias = false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
     addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
-    addProducer(conv, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
-
+    if (!noBias) {
+        addProducer(conv, 2, {outChannels}, "b"); // already sets bias dims
+    }
     return conv;
 }
 
@@ -177,8 +174,7 @@ template <>
 const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
     "StrideDims",
     "DilationDims",
-    "KernelDims",
-    "NoBias"
+    "KernelDims"
 };
 }
 
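A minimal sketch of the new convention, mirroring the `Conv()` factory above: "no bias" is no longer a `ConvAttr::NoBias` attribute, it is simply the absence of a producer on input #2. Include paths, the `append()` helper and the `addProducer()` overload are taken from this header and assumed to be available as in the patch; this is illustration, not part of the patch.

```cpp
#include "aidge/operator/Conv.hpp"   // also brings Node, addProducer, append

using namespace Aidge;

// Hypothetical helper, equivalent in spirit to the patched Conv() factory.
std::shared_ptr<Node> makeConv2D(DimSize_t inCh, DimSize_t outCh, bool noBias) {
    const std::array<DimSize_t, 2> kernel{3, 3};
    // The operator now only carries stride/dilation/kernel attributes.
    auto node = std::make_shared<Node>(std::make_shared<Conv_Op<2>>(kernel), "conv");
    addProducer(node, 1, append(outCh, append(inCh, kernel)), "w");
    if (!noBias) {
        addProducer(node, 2, {outCh}, "b");   // bias exists only when requested
    }
    // forwardDims()/computeReceptiveField() now test getInput(2) instead of
    // reading a ConvAttr::NoBias attribute.
    return node;
}
```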
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 7091421720aaf4291198823a6d7dcd732a8d9f99..594eab3f0871aebf0eeab5ef545eb1a734555969 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -29,7 +29,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims, NoBias };
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
@@ -37,8 +37,7 @@ class ConvDepthWise_Op : public OperatorTensor,
                 public StaticAttributes<ConvDepthWiseAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       bool> {
+                                       std::array<DimSize_t, DIM>> {
 public:
     static const std::string Type;
 
@@ -47,20 +46,17 @@ public:
     using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
-                                             std::array<DimSize_t, DIM>,
-                                             bool>;
+                                             std::array<DimSize_t, DIM>>;
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
 
     constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                               bool no_bias=false)
+                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
                       attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
-                      attr<ConvDepthWiseAttr::NoBias>(no_bias)) {}
+                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -110,9 +106,11 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            bool noBias=false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
     addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
-    addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b");
+    if (!noBias) {
+        addProducer(convDW, 2, {nbChannels}, "b");
+    }
     return convDW;
 }
 
@@ -135,7 +133,7 @@ extern template class Aidge::ConvDepthWise_Op<2>;
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims",
-                                                          "KernelDims", "NoBias"};
+                                                          "KernelDims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 9f10970c4fd5b21a1cb92b334167d353f066e05b..98c415f70da327291f0653fae6b179f7e1db0f6c 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -24,24 +24,15 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class FCAttr { NoBias };
-
 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
-                                 std::shared_ptr<OperatorImpl>(const FC_Op &)>,
-              public StaticAttributes<FCAttr, bool> {
+                                 std::shared_ptr<OperatorImpl>(const FC_Op &)> {
 public:
     static const std::string Type;
 
-    FC_Op() = delete;
-
-    using Attributes_ = StaticAttributes<FCAttr, bool>;
-    template <FCAttr e> using attr = typename Attributes_::template attr<e>;
-
-    FC_Op(bool noBias)
-    : OperatorTensor(Type, 1, 2, 1),
-      Attributes_(attr<FCAttr::NoBias>(noBias))
+    FC_Op()
+    : OperatorTensor(Type, 1, 2, 1)
     {}
 
     /**
@@ -49,8 +40,7 @@ public:
      * @param op Operator to copy.
      */
     FC_Op(const FC_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
+        : OperatorTensor(op)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(FC_Op, *this, op.backend());
@@ -90,16 +80,13 @@ public:
 
 inline std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), name);
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(), name);
     addProducer(fc, 1, {outChannels, inChannels}, "w");
-    addProducer(fc, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
+    if (!noBias) {
+        addProducer(fc, 2, {outChannels}, "b"); // already sets bias dims
+    }
     return fc;
 }
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::FCAttr>::data[] = {"NoBias"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
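For reference, a short sketch of how an FC node is assembled after this change; it mirrors the patched `FC()` factory above, with the bias producer attached only on request. Names and include path follow this header; the sketch is illustrative only.

```cpp
#include "aidge/operator/FC.hpp"

using namespace Aidge;

std::shared_ptr<Node> makeFC(DimSize_t inCh, DimSize_t outCh, bool noBias) {
    // FC_Op is now default-constructible: no FCAttr::NoBias to carry around.
    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(), "fc");
    addProducer(fc, 1, {outCh, inCh}, "w");
    if (!noBias) {
        addProducer(fc, 2, {outCh}, "b");   // bias is just the optional input #2
    }
    return fc;
}
```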
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index eb57761cc5927cb4eedfb6cb12b1d49a0ee50b9c..51681629cbae215fd529b6e7bb568d07264dd63e 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -44,11 +44,13 @@ inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
 
     auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
-    addProducer(metaOp, 2, {out_channels}, "b");
+    if (!no_bias) {
+        addProducer(metaOp, 2, {out_channels}, "b");
+    }
     return metaOp;
 }
 
@@ -57,11 +59,10 @@ inline std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
 {
     auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), "");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
 
     return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
 }
@@ -94,11 +95,13 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
 
     auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(nb_channels, append(DimSize_t(1), kernel_dims)), "w");
-    addProducer(metaOp, 2, {nb_channels}, "b");
+    if (!no_bias) {
+        addProducer(metaOp, 2, {nb_channels}, "b");
+    }
     return metaOp;
 }
 
@@ -107,11 +110,10 @@ inline std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
 {
     auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), "");
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
 
     return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
 }
@@ -203,8 +205,7 @@ std::shared_ptr<Node> LSTM(DimSize_t in_channels,
                            bool noBias = false,
                            const std::string& name = "");
 
-std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length,
-                                         bool noBias = false);
+std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length);
 
 }  // namespace Aidge
 
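The padded meta-operators follow the same pattern: the operator-level builders lose their `no_bias` parameter, while the node-level factories keep it and simply skip the bias producer. A small sketch of the operator-level call, with argument values assumed for illustration:

```cpp
#include "aidge/operator/MetaOperatorDefs.hpp"

using namespace Aidge;

void paddedConvExample() {
    // Operator-level builder: geometry only, no bias flag any more.
    auto paddedConv = PaddedConv_Op<2>({3, 3},         // kernel_dims
                                       {1, 1},         // stride_dims
                                       {1, 1, 1, 1},   // padding_dims (2*DIM)
                                       {1, 1});        // dilation_dims

    // The node-level PaddedConv()/PaddedConvDepthWise() factories still take
    // no_bias and only call addProducer(metaOp, 2, {out_channels}, "b")
    // when it is false, as in the hunks above.
    (void)paddedConv;
}
```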
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index c1a4f1319e4e715add01417f86d17bddadb992f1..d874e19b73c12b02d600d08c223cd2cb1e8baa11 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -32,17 +32,15 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
     py::multiple_inheritance())
         .def(py::init([](const std::vector<DimSize_t>& kernel_dims,
                          const std::vector<DimSize_t> &stride_dims,
-                         const std::vector<DimSize_t> &dilation_dims,
-                         bool no_bias) {
+                         const std::vector<DimSize_t> &dilation_dims) {
             AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
             AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
             AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-            return new Conv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+            return new Conv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
         }), py::arg("kernel_dims"),
             py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-            py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-            py::arg("no_bias") = false)
+            py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
         .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
         .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
         .def_static("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index ce286094d6606d8b7161acf9e3fb3c6cbcbb88c9..4e63f38304abc49606da4e0f2db661e15b0bd5f0 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -33,12 +33,10 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                bool>(),
+                const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("dilation_dims"),
-        py::arg("no_bias"))
+        py::arg("dilation_dims"))
   .def_static("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
   .def_static("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName)
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 6cff90d0ad3aacf4cf8a465408eb490e3f21abda..4184af039504466a49e1451f708ac2cd329f328e 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -21,11 +21,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, Attributes, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
-    .def(py::init<bool>(), py::arg("no_bias"))
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
+    .def(py::init<>())
     .def_static("get_inputs_name", &FC_Op::getInputsName)
     .def_static("get_outputs_name", &FC_Op::getOutputsName)
-    .def_static("attributes_name", &FC_Op::staticGetAttrsName)
     .def("out_channels", &FC_Op::outChannels);
 
   declare_registrable<FC_Op>(m, "FCOp");
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index ee3f85b6578054512df7b0087d1a972176cd50a3..c8d88db79e6df261062d737df06b2edf3d2555f9 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -51,20 +51,18 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
-                                                         const std::vector<DimSize_t> &dilation_dims,
-                                                         bool no_bias)
+                                                         const std::vector<DimSize_t> &dilation_dims)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-        return PaddedConv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+        return PaddedConv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("kernel_dims"),
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("no_bias")= false);
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
 }
 
 template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
@@ -93,20 +91,18 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
-                                                         const std::vector<DimSize_t> &dilation_dims,
-                                                         bool no_bias)
+                                                         const std::vector<DimSize_t> &dilation_dims)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-        return PaddedConvDepthWise_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+        return PaddedConvDepthWise_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("kernel_dims"),
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("no_bias") = false);
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
 
 }
 
@@ -180,8 +176,7 @@ void declare_LSTMOp(py::module &m) {
        py::arg("nobias") = false,
        py::arg("name") = "");
   m.def("LSTMOp", &LSTM_Op,
-       py::arg("seq_length"),
-       py::arg("nobias") = false);
+       py::arg("seq_length"));
 }
 
 void init_MetaOperatorDefs(py::module &m) {
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 66e1d1f5b25c2b12f73a851d87d9f91aa4940322..24e39e48116963991ec9b18825d12a0ad62e2eb0 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -57,7 +57,7 @@ bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
                     (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
                     "Wrong input size for Conv operator.");
         // check optional bias
-        if(!this->template getAttr<ConvAttr::NoBias>())
+        if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == outChannels()),
                     "Wrong bias size for Conv operator.");
@@ -135,7 +135,7 @@ Aidge::Conv_Op<DIM>::computeReceptiveField(
         res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
 
         // Bias
-        if (! this->template getAttr<ConvAttr::NoBias>()){
+        if (getInput(2)){
             const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
             const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
             res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index 77441be414847c08452c71fc2e35c4e3e5bd3c04..743ce80bc13f5d2748e7efacb5bf463d72239826 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -59,7 +59,7 @@ bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
                     (getInput(0)->template dims<DIM+2>()[1] == nbChannels()),
                     "Wrong input size for Conv operator.");
         // check optional bias
-        if(!this->template getAttr<ConvDepthWiseAttr::NoBias>())
+        if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == nbChannels()),
                     "Wrong bias size for Conv operator.");
@@ -135,7 +135,7 @@ Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
         res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
         res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
         // Bias
-        if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){
+        if (getInput(2)){
             const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
             const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
             res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 1d53893b1e37933ef41540202b76fdcdfca08130..1073411a5ffb34fcf43aca03f4c444bc27e5925c 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -64,7 +64,7 @@ bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
                     nbInputFeatures, inChannels);
         }
         // check optional bias
-        if(!this->template getAttr<FCAttr::NoBias>())
+        if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == 1) &&
                     (getInput(2)->template dims<1>()[0] == outChannels),
                     "Wrong bias size for FC operator.");
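Across Conv, ConvDepthWise and FC, the bias check in `forwardDims()` now reduces to "is the optional third input connected". A hypothetical helper, only to illustrate the convention (the `getInput()` const accessor is assumed to exist as in `OperatorTensor`):

```cpp
#include "aidge/operator/FC.hpp"

// Before this patch: !op.getAttr<FCAttr::NoBias>()
// After this patch:  the bias is simply the optional input #2.
bool hasBias(const Aidge::FC_Op& op) {
    return op.getInput(2) != nullptr;
}
```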
diff --git a/src/operator/MetaOperatorDefs/LSTM.cpp b/src/operator/MetaOperatorDefs/LSTM.cpp
index cd993f9e5cd127a005101284b78c416150b3c99a..910e7c67aad0068679ca2d240b23312add3e42d7 100644
--- a/src/operator/MetaOperatorDefs/LSTM.cpp
+++ b/src/operator/MetaOperatorDefs/LSTM.cpp
@@ -38,9 +38,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     auto add = Add(2, (!name.empty()) ? name + "_add" : "");
 
     // Forget gate
-    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_forgetGateX" : "");
+    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateX" : "");
     input->addChild(forgetGateX, 0, 0);
-    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_forgetGateH" : "");
+    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateH" : "");
     hiddenState->addChild(forgetGateH, 1, 0);
     auto forgetGate = Add(2, (!name.empty()) ? name + "_forgetGate" : "");
     forgetGateX->addChild(forgetGate, 0, 0);
@@ -53,9 +53,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     cellState->addChild(forgetGateMul, 1, 1);
 
     // Input gate
-    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_inputGateX" : "");
+    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_inputGateX" : "");
     input->addChild(inputGateX, 0, 0);
-    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_inputGateH" : "");
+    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_inputGateH" : "");
     hiddenState->addChild(inputGateH, 1, 0);
     auto inputGate = Add(2, (!name.empty()) ? name + "_inputGate" : "");
     inputGateX->addChild(inputGate, 0, 0);
@@ -67,9 +67,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     inputGateMul->addChild(add, 0, 1);
 
     // Candidate for cell update
-    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_cellCandidateX" : "");
+    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_cellCandidateX" : "");
     input->addChild(cellCandidateX, 0, 0);
-    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_cellCandidateH" : "");
+    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_cellCandidateH" : "");
     hiddenState->addChild(cellCandidateH, 1, 0);
     auto cellCandidate = Add(2, (!name.empty()) ? name + "_cellCandidate" : "");
     cellCandidateX->addChild(cellCandidate, 0, 0);
@@ -79,9 +79,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     cellCandidateAct->addChild(inputGateMul, 0, 1);
 
     // Output gate
-    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_outputGateX" : "");
+    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_outputGateX" : "");
     input->addChild(outputGateX, 0, 0);
-    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_outputGateH" : "");
+    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_outputGateH" : "");
     hiddenState->addChild(outputGateH, 1, 0);
     auto outputGate = Add(2, (!name.empty()) ? name + "_outputGate" : "");
     outputGateX->addChild(outputGate, 0, 0);
@@ -124,19 +124,20 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     addProducer(metaOp, 6, {hiddenChannel, hiddenChannel}, "ro");
     addProducer(metaOp, 7, {hiddenChannel, hiddenChannel}, "rf");
     addProducer(metaOp, 8, {hiddenChannel, hiddenChannel}, "rc");
-    addProducer(metaOp, 9, {(noBias ? 0 : hiddenChannel)}, "wbi");
-    addProducer(metaOp, 10, {(noBias ? 0 : hiddenChannel)}, "wbo");
-    addProducer(metaOp, 11, {(noBias ? 0 : hiddenChannel)}, "wbf");
-    addProducer(metaOp, 12, {(noBias ? 0 : hiddenChannel)}, "wbc");
-    addProducer(metaOp, 13, {(noBias ? 0 : hiddenChannel)}, "rbi");
-    addProducer(metaOp, 14, {(noBias ? 0 : hiddenChannel)}, "rbo");
-    addProducer(metaOp, 15, {(noBias ? 0 : hiddenChannel)}, "rbf");
-    addProducer(metaOp, 16, {(noBias ? 0 : hiddenChannel)}, "rbc");
+    if (!noBias) {
+        addProducer(metaOp, 9, {hiddenChannel}, "wbi");
+        addProducer(metaOp, 10, {hiddenChannel}, "wbo");
+        addProducer(metaOp, 11, {hiddenChannel}, "wbf");
+        addProducer(metaOp, 12, {hiddenChannel}, "wbc");
+        addProducer(metaOp, 13, {hiddenChannel}, "rbi");
+        addProducer(metaOp, 14, {hiddenChannel}, "rbo");
+        addProducer(metaOp, 15, {hiddenChannel}, "rbf");
+        addProducer(metaOp, 16, {hiddenChannel}, "rbc");
+    }
     return metaOp;
 }
 
-std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
-                                         bool noBias)
+std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength)
 {
     // Construct micro-graph
     auto input = Identity("");
@@ -145,9 +146,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     auto add = Add(2, "");
 
     // Forget gate
-    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(forgetGateX, 0, 0);
-    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(forgetGateH, 1, 0);
     auto forgetGate = Add(2, "");
     forgetGateX->addChild(forgetGate, 0, 0);
@@ -160,9 +161,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     cellState->addChild(forgetGateMul, 1, 1);
 
     // Input gate
-    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(inputGateX, 0, 0);
-    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(inputGateH, 1, 0);
     auto inputGate = Add(2, "");
     inputGateX->addChild(inputGate, 0, 0);
@@ -174,9 +175,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     inputGateMul->addChild(add, 0, 1);
 
     // Candidate for cell update
-    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(cellCandidateX, 0, 0);
-    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(cellCandidateH, 1, 0);
     auto cellCandidate = Add(2, "");
     cellCandidateX->addChild(cellCandidate, 0, 0);
@@ -186,9 +187,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     cellCandidateAct->addChild(inputGateMul, 0, 1);
 
     // Output gate
-    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(outputGateX, 0, 0);
-    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(outputGateH, 1, 0);
     auto outputGate = Add(2,"");
     outputGateX->addChild(outputGate, 0, 0);
diff --git a/src/recipes/FuseMulAdd.cpp b/src/recipes/FuseMulAdd.cpp
index bb4b0e3db1974ccf106699b25fd71fc9cc09654c..cae8619c478926b4e5ca1541c4b8f74763992fdc 100644
--- a/src/recipes/FuseMulAdd.cpp
+++ b/src/recipes/FuseMulAdd.cpp
@@ -90,7 +90,7 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
         fcName += "_" + addNode->name();
     }
 
-    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(bias ? false : true), fcName);
+    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(), fcName);
 
     // Step 2 : Branch existing producers & create the others
     // link weights & bias