From a50fa2666b7821d947e8a688ffe588dc7e34a12b Mon Sep 17 00:00:00 2001
From: hrouis <houssemeddine.rouis92@gmail.com>
Date: Mon, 10 Feb 2025 15:53:07 +0100
Subject: [PATCH 1/8] add alpha and beta attr to FC

---
 include/aidge/operator/FC.hpp         | 49 ++++++++++++++++++++++++---
 python_binding/operator/pybind_FC.cpp |  6 +++-
 src/operator/FC.cpp                   |  6 ++--
 3 files changed, 53 insertions(+), 8 deletions(-)

diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 393e640d6..463304673 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -25,6 +25,11 @@
 
 namespace Aidge {
 
+enum class FCAttr {
+    Alpha,  // The scalar multiplier for the product of input tensors A * B.
+    Beta,   // The scalar multiplier for the bias.
+};
+
 /**
  * @brief Description of a Fully Connected (FC) operation on an input Tensor.
  *
@@ -54,6 +59,15 @@ class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
                                  std::function<std::shared_ptr<OperatorImpl>(const FC_Op &)>> {
+private:
+    using Attributes_ = StaticAttributes<FCAttr,
+                                        float,
+                                        float>;
+
+    template <FCAttr e>
+    using attr = typename Attributes_::template attr<e>;
+
+    const std::shared_ptr<Attributes_> mAttributes;
 public:
     /**
      * @brief Static type identifier for the FC operator.
@@ -65,8 +79,11 @@ public:
      *
      * Initializes the operator with a type identifier and input categories.
      */
-    FC_Op()
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1)
+    FC_Op(float alpha = 1.0f, float beta = 1.0f)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+                attr<FCAttr::Alpha>(alpha),
+                attr<FCAttr::Beta>(beta)))
     {}
 
     /**
@@ -160,6 +177,24 @@ public:
         return getInput(1)->template dims<2>()[0];
     }
 
+    /**
+     * @brief Get the attributes of the operator.
+     * @return A shared pointer to the operator's attributes.
+     */
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get the alpha coefficient.
+     * @return The alpha coefficient.
+     */
+    inline float& alpha() const { return mAttributes->template getAttr<FCAttr::Alpha>(); }
+
+    /**
+     * @brief Get the beta coefficient.
+     * @return The beta coefficient.
+     */
+    inline float& beta() const { return mAttributes->template getAttr<FCAttr::Beta>(); }
+
     /**
      * @brief Retrieves the input tensor names for the FC operator.
      * @return A vector of input tensor names: `{"data_input", "weight", "bias"}`.
@@ -180,16 +215,20 @@ public:
 /**
  * @brief Creates a Fully Connected operation node.
  *
- * Constructs an FC operator node with the specified input and output channels.
- *
  * @param[in] inChannels Number of input channels.
  * @param[in] outChannels Number of output channels.
+ * @param[in] alpha Scalar multiplier for the product of input tensors A * B.
+ * @param[in] beta Scalar multiplier for the bias.
  * @param[in] noBias Flag indicating whether to use a bias term (default is `false`).
  * @param[in] name Name of the operator (optional).
  * @return A shared pointer to the Node containing the FC operator.
  */
-std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "");
+std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "", float alpha = 1.0f, float beta = 1.0f);
 
 } // namespace Aidge
 
+namespace {
+template <>
+const char *const EnumStrings<Aidge::FCAttr>::data[] = {"alpha", "beta"};
+}
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index c29b6e1d3..3dc2c1a6f 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -40,7 +40,7 @@ void declare_FC(py::module &m) {
 
   declare_registrable<FC_Op>(m, "FCOp");
 
-  m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("no_bias") = false, py::arg("name") = "",
+  m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("no_bias") = false, py::arg("name") = "", py::arg("alpha")=1.0f, py::arg("beta")=1.0f,
     R"mydelimiter(
     Initialize a node containing a Fully Connected (FC) operator.
 
@@ -52,6 +52,10 @@ void declare_FC(py::module &m) {
     :type no_bias : :py:class:`bool`
     :param name : Name of the node.
     :type name : :py:class:`str`
+    :param alpha : The scalar multiplier for the term A*B.
+    :type alpha : :py:class:`float`
+    :param beta : The scalar multiplier for the bias.
+    :type beta : :py:class:`float`
     )mydelimiter");
 }
 
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index dd3ed7aba..dd1d0577e 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -98,9 +98,11 @@ std::set<std::string> Aidge::FC_Op::getAvailableBackends() const {
 std::shared_ptr<Aidge::Node> Aidge::FC(const Aidge::DimSize_t inChannels,
                                        const Aidge::DimSize_t outChannels,
                                        bool noBias,
-                                       const std::string& name) {
+                                       const std::string& name,
+                                       float alpha,
+                                       float beta) {
     // FIXME: properly handle default w&b initialization in every cases
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(), name);
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(alpha, beta), name);
     addProducer(fc, 1, {outChannels, inChannels}, "w");
     if (!noBias) {
         addProducer(fc, 2, {outChannels}, "b"); // already sets bias dims
-- 
GitLab
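
A quick NumPy sketch of the semantics this patch gives FC (illustrative only,
not Aidge code; names and shapes are assumptions based on the docs above):

    import numpy as np

    def fc_forward(x, weight, bias=None, alpha=1.0, beta=1.0):
        """Reference semantics: alpha * (x @ W^T) + beta * bias."""
        y = alpha * (x @ weight.T)      # weight is (outChannels, inChannels)
        if bias is not None:
            y = y + beta * bias         # bias is (outChannels,)
        return y

    x = np.random.rand(64, 128)         # batch of 64, 128 input features
    w = np.random.rand(32, 128)         # 32 output channels
    b = np.random.rand(32)
    assert fc_forward(x, w, b, alpha=2.0, beta=0.5).shape == (64, 32)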


From eaa8ff98debc78b44e6bbef3bf092c046a2723d0 Mon Sep 17 00:00:00 2001
From: hrouis <houssemeddine.rouis92@gmail.com>
Date: Mon, 10 Feb 2025 15:53:32 +0100
Subject: [PATCH 2/8] support 2D bias in FC

---
 src/operator/FC.cpp | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index dd1d0577e..26752a359 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -62,8 +62,13 @@ bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
         }
         // check optional bias
         if(getInput(2))
-            AIDGE_ASSERT((getInput(2)->nbDims() == 1) &&
-                    (getInput(2)->template dims<1>()[0] == outChannels),
+            AIDGE_ASSERT((((getInput(2)->nbDims() == 1) &&
+                    (getInput(2)->template dims<1>()[0] == outChannels)) ||
+                    ((getInput(2)->nbDims() == 2) &&
+                     (getInput(0)->nbDims() == 2) &&
+                     (getInput(2)->template dims<2>()[0] == getInput(0)->template dims<2>()[0]) &&
+                     (getInput(2)->template dims<2>()[1] == outChannels)
+                     )),
                     "Wrong bias size for FC operator.");
         // <batch, OutChannels>
         mOutputs[0]->resize({getInput(0)->dims()[0], outChannels});
-- 
GitLab
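
With this patch the bias may be either 1D of size outChannels or, for a 2D
input, 2D of shape (batchSize, outChannels). A NumPy sketch of the two
accepted layouts (illustrative only, using the reference semantics above):

    import numpy as np

    x = np.random.rand(8, 16)           # (batch, inChannels)
    w = np.random.rand(4, 16)           # (outChannels, inChannels)

    b1 = np.random.rand(4)              # 1D bias, broadcast over the batch
    b2 = np.random.rand(8, 4)           # 2D bias, one row per batch element
    assert ((x @ w.T) + b1).shape == (8, 4)
    assert ((x @ w.T) + b2).shape == (8, 4)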


From e95ac474714691e1d3133ddf9c5891897ff77f91 Mon Sep 17 00:00:00 2001
From: hrouis <houssemeddine.rouis92@gmail.com>
Date: Tue, 11 Feb 2025 14:11:48 +0100
Subject: [PATCH 3/8] add transA and transB attr for FC

---
 include/aidge/operator/FC.hpp         | 37 +++++++++++++++++++++++----
 python_binding/operator/pybind_FC.cpp |  6 ++++-
 src/operator/FC.cpp                   | 24 ++++++++++-------
 3 files changed, 52 insertions(+), 15 deletions(-)

diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 463304673..078ff0fde 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -28,6 +28,8 @@ namespace Aidge {
 enum class FCAttr {
     Alpha,  // The scalar multiplier for the product of input tensors A * B.
     Beta,   // The scalar multiplier for the bias.
+    TransA, // Boolean to store whether we need to transpose input#0
+    TransB  // Boolean to store whether we need to transpose input#1
 };
 
 /**
@@ -62,7 +64,9 @@ class FC_Op : public OperatorTensor,
 private:
     using Attributes_ = StaticAttributes<FCAttr,
                                         float,
-                                        float>;
+                                        float,
+                                        bool,
+                                        bool>;
 
     template <FCAttr e>
     using attr = typename Attributes_::template attr<e>;
@@ -79,11 +83,13 @@ public:
      *
      * Initializes the operator with a type identifier and input categories.
      */
-    FC_Op(float alpha = 1.0f, float beta = 1.0f)
+    FC_Op(float alpha = 1.0f, float beta = 1.0f, bool transA = false, bool transB = false)
     : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
     mAttributes(std::make_shared<Attributes_>(
                 attr<FCAttr::Alpha>(alpha),
-                attr<FCAttr::Beta>(beta)))
+                attr<FCAttr::Beta>(beta),
+                attr<FCAttr::TransA>(transA),
+                attr<FCAttr::TransB>(transB)))
     {}
 
     /**
@@ -195,6 +201,18 @@ public:
      */
     inline float& beta() const { return mAttributes->template getAttr<FCAttr::Beta>(); }
 
+    /**
+     * @brief Get the transA boolean.
+     * @return Whether input#0 needs to be transposed.
+     */
+    inline bool& transA() const { return mAttributes->template getAttr<FCAttr::TransA>(); }
+
+    /**
+     * @brief Get the transB boolean.
+     * @return Whether input#1 needs to be transposed.
+     */
+    inline bool& transB() const { return mAttributes->template getAttr<FCAttr::TransB>(); }
+
     /**
      * @brief Retrieves the input tensor names for the FC operator.
      * @return A vector of input tensor names: `{"data_input", "weight", "bias"}`.
@@ -221,14 +239,23 @@ public:
  * @param[in] beta Scalar multiplier for the bias.
  * @param[in] noBias Flag indicating whether to use a bias term (default is `false`).
  * @param[in] name Name of the operator (optional).
+ * @param[in] transA Flag indicating whether input#0 needs to be transposed (default is `false`).
+ * @param[in] transB Flag indicating whether input#1 needs to be transposed (default is `false`).
  * @return A shared pointer to the Node containing the FC operator.
  */
-std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "", float alpha = 1.0f, float beta = 1.0f);
+std::shared_ptr<Node> FC(const DimSize_t inChannels,
+                         const DimSize_t outChannels,
+                         bool noBias = false,
+                         const std::string& name = "",
+                         float alpha = 1.0f,
+                         float beta = 1.0f,
+                         bool transA = false,
+                         bool transB = false);
 
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::FCAttr>::data[] = {"alpha", "beta"};
+const char *const EnumStrings<Aidge::FCAttr>::data[] = {"alpha", "beta", "transA", "transB"};
 }
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 3dc2c1a6f..447d0b29a 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -40,7 +40,7 @@ void declare_FC(py::module &m) {
 
   declare_registrable<FC_Op>(m, "FCOp");
 
-  m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("no_bias") = false, py::arg("name") = "", py::arg("alpha")=1.0f, py::arg("beta")=1.0f,
+  m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("no_bias") = false, py::arg("name") = "", py::arg("alpha")=1.0f, py::arg("beta")=1.0f, py::arg("transA") = false, py::arg("transB") = false,
     R"mydelimiter(
     Initialize a node containing a Fully Connected (FC) operator.
 
@@ -56,6 +56,10 @@ void declare_FC(py::module &m) {
     :type alpha : :py:class:`float`
     :param beta : The scalar multiplier for the bias.
     :type beta : :py:class:`float`
+    :param transA : Indicates whether the first input needs to be transposed.
+    :type transA : :py:class:`bool`
+    :param transB : Indicates whether the second input needs to be transposed.
+    :type transB : :py:class:`bool`
     )mydelimiter");
 }
 
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 26752a359..d73c8fd8c 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -45,33 +45,37 @@ bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
         // first check weight since it defines inChannels and outChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == 2),
                     "Wrong weight Tensor dimension: {} for FC operator (should have 2 dimensions).", getInput(1)->nbDims());
-        const DimSize_t outChannels = getInput(1)->template dims<2>()[0];
-        const DimSize_t inChannels = getInput(1)->template dims<2>()[1];
+        const DimSize_t outChannels = mAttributes->template getAttr<FCAttr::TransB>() ? getInput(1)->template dims<2>()[1]:getInput(1)->template dims<2>()[0];
+        const DimSize_t inChannels = mAttributes->template getAttr<FCAttr::TransB>() ? getInput(1)->template dims<2>()[0]:getInput(1)->template dims<2>()[1];
         // check data
         const std::vector<DimSize_t>& inputDims = getInput(0)->dims();
+        const DimIdx_t inChannelsIdx = mAttributes->template getAttr<FCAttr::TransA>() ? 1 : 0;
         if (getInput(0)->nbDims() == 1) {
-            AIDGE_ASSERT(inputDims[0] == inChannels,
+            AIDGE_ASSERT(inputDims[inChannelsIdx] == inChannels,
                 "Wrong number of input features for input data ({}), expected {}",
-                inputDims[0], inChannels);
+                inputDims[inChannelsIdx], inChannels);
         } else {
             AIDGE_ASSERT(getInput(0)->nbDims() > 1, "FC input data must have at least one dimension");
-            const DimSize_t nbInputFeatures = std::accumulate(inputDims.cbegin() + 1, inputDims.cend(), DimSize_t(1), std::multiplies<DimSize_t>());
+            const DimSize_t nbInputFeatures = mAttributes->template getAttr<FCAttr::TransA>() ?
+                                                            inputDims[0]:
+                                                            std::accumulate(inputDims.cbegin() + 1, inputDims.cend(), DimSize_t(1), std::multiplies<DimSize_t>());
             AIDGE_ASSERT(nbInputFeatures == inChannels,
                     "Wrong number of input features for input data ({}), expected {}",
                     nbInputFeatures, inChannels);
         }
         // check optional bias
+        const DimSize_t batchSize = static_cast<DimSize_t>(getInput(0)->size() / inChannels);
         if(getInput(2))
             AIDGE_ASSERT((((getInput(2)->nbDims() == 1) &&
                     (getInput(2)->template dims<1>()[0] == outChannels)) ||
                    ((getInput(2)->nbDims() == 2) &&
                     (getInput(0)->nbDims() == 2) &&
-                     (getInput(2)->template dims<2>()[0] == getInput(0)->template dims<2>()[0]) &&
+                     (getInput(2)->template dims<2>()[0] == batchSize) &&
                      (getInput(2)->template dims<2>()[1] == outChannels)
                      )),
                     "Wrong bias size for FC operator.");
         // <batch, OutChannels>
-        mOutputs[0]->resize({getInput(0)->dims()[0], outChannels});
+        mOutputs[0]->resize({batchSize, outChannels});
         return true;
     }
 
@@ -105,9 +109,11 @@ std::shared_ptr<Aidge::Node> Aidge::FC(const Aidge::DimSize_t inChannels,
                                        bool noBias,
                                        const std::string& name,
                                        float alpha,
-                                       float beta) {
+                                       float beta,
+                                       bool transA,
+                                       bool transB) {
     // FIXME: properly handle default w&b initialization in every cases
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(alpha, beta), name);
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(alpha, beta, transA, transB), name);
     addProducer(fc, 1, {outChannels, inChannels}, "w");
     if (!noBias) {
         addProducer(fc, 2, {outChannels}, "b"); // already sets bias dims
-- 
GitLab
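
The transA/transB attributes let FC consume pre-transposed operands; in
particular outChannels becomes W.dims[1] (instead of W.dims[0]) when transB
is set. A minimal NumPy sketch of the intended semantics (illustrative only):

    import numpy as np

    def fc_trans(x, weight, bias=None, alpha=1.0, beta=1.0,
                 transA=False, transB=False):
        """Transpose the operands on demand before the usual FC product."""
        a = x.T if transA else x             # input#0
        wt = weight if transB else weight.T  # weight stored (out, in) unless transB
        y = alpha * (a @ wt)
        return y if bias is None else y + beta * bias

    x = np.random.rand(16, 8)   # (inChannels, batch): needs transA
    w = np.random.rand(16, 4)   # (inChannels, outChannels): needs transB
    assert fc_trans(x, w, transA=True, transB=True).shape == (8, 4)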


From f2de6fbbfca10a58a916e1abef40a05543e93304 Mon Sep 17 00:00:00 2001
From: hrouis <houssemeddine.rouis92@gmail.com>
Date: Tue, 18 Feb 2025 15:55:20 +0100
Subject: [PATCH 4/8] fix python binding of FC_Op

---
 python_binding/operator/pybind_FC.cpp | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 447d0b29a..40433eb51 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -29,7 +29,14 @@ void declare_FC(py::module &m) {
     :param type : The type of the Fully Connected operation.
     :type type : :py:class:`str`
     )mydelimiter")
-    .def(py::init<>())
+    .def(py::init<float,
+      float,
+      bool,
+      bool>(),
+      py::arg("alpha")=1.0,
+      py::arg("beta")=1.0,
+      py::arg("transA")=false,
+      py::arg("transB")=false)
     .def_static("get_inputs_name", &FC_Op::getInputsName)
     .def_static("get_outputs_name", &FC_Op::getOutputsName)
     .def_readonly_static("Type", &FC_Op::Type)
-- 
GitLab
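
With the binding fixed, the operator can be constructed directly from Python
with its attributes. A short sketch, assuming the module is imported as
aidge_core (and reflecting the signature at this point in the series):

    import aidge_core

    op = aidge_core.FCOp(alpha=2.0, beta=0.5, transA=False, transB=True)
    print(aidge_core.FCOp.Type)   # static type identifier of the operator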


From 19e303d541e45624c92ce787cec47d6df36701c4 Mon Sep 17 00:00:00 2001
From: hrouis <houssemeddine.rouis92@gmail.com>
Date: Tue, 18 Feb 2025 15:56:33 +0100
Subject: [PATCH 5/8] support only bias of size outChannels due to factory
 function limitations

---
 src/operator/FC.cpp | 23 ++++++++++-------------
 1 file changed, 10 insertions(+), 13 deletions(-)

diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index d73c8fd8c..54f28507b 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -45,8 +45,12 @@ bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
         // first check weight since it defines inChannels and outChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == 2),
                     "Wrong weight Tensor dimension: {} for FC operator (should have 2 dimensions).", getInput(1)->nbDims());
-        const DimSize_t outChannels = mAttributes->template getAttr<FCAttr::TransB>() ? getInput(1)->template dims<2>()[1]:getInput(1)->template dims<2>()[0];
-        const DimSize_t inChannels = mAttributes->template getAttr<FCAttr::TransB>() ? getInput(1)->template dims<2>()[0]:getInput(1)->template dims<2>()[1];
+        const DimSize_t outChannels = mAttributes->template getAttr<FCAttr::TransB>() ?
+                                      getInput(1)->template dims<2>()[1]:
+                                      getInput(1)->template dims<2>()[0];
+        const DimSize_t inChannels = mAttributes->template getAttr<FCAttr::TransB>() ?
+                                     getInput(1)->template dims<2>()[0]:
+                                     getInput(1)->template dims<2>()[1];
         // check data
         const std::vector<DimSize_t>& inputDims = getInput(0)->dims();
         const DimIdx_t inChannelsIdx = mAttributes->template getAttr<FCAttr::TransA>() ? 1 : 0;
@@ -64,18 +68,11 @@ bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
                     nbInputFeatures, inChannels);
         }
         // check optional bias
-        const DimSize_t batchSize = static_cast<DimSize_t>(getInput(0)->size() / inChannels);
-        if(getInput(2))
-            AIDGE_ASSERT((((getInput(2)->nbDims() == 1) &&
-                    (getInput(2)->template dims<1>()[0] == outChannels)) ||
-                    ((getInput(2)->nbDims() == 2) &&
-                     (getInput(0)->nbDims() == 2) &&
-                     (getInput(2)->template dims<2>()[0] == batchSize) &&
-                     (getInput(2)->template dims<2>()[1] == outChannels)
-                     )),
-                    "Wrong bias size for FC operator.");
+        if(getInput(2)) {
+            AIDGE_ASSERT(getInput(2)->size() == outChannels, "Wrong bias size for FC operator.");
+        }
         // <batch, OutChannels>
-        mOutputs[0]->resize({batchSize, outChannels});
+        mOutputs[0]->resize({static_cast<DimSize_t>(getInput(0)->size() / inChannels), outChannels});
         return true;
     }
 
-- 
GitLab
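
The bias check now reduces to a single element-count comparison:
getInput(2)->size() == outChannels, whatever the bias rank. A sketch of the
rule (plain Python, not Aidge code):

    def bias_ok(bias_shape, out_channels):
        total = 1
        for d in bias_shape:
            total *= d
        return total == out_channels

    assert bias_ok((4,), 4)         # classic 1D bias
    assert bias_ok((1, 4), 4)       # accepted: still 4 elements
    assert not bias_ok((8, 4), 4)   # rejected: 32 elements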


From b901698526721e73eb9c031843d9c84971d0e2fa Mon Sep 17 00:00:00 2001
From: hrouis <houssemeddine.rouis92@gmail.com>
Date: Fri, 21 Feb 2025 15:55:49 +0100
Subject: [PATCH 6/8] reorder FC factory function arguments

---
 include/aidge/operator/FC.hpp             |  8 ++++----
 python_binding/operator/pybind_FC.cpp     | 10 +++++++++-
 src/operator/FC.cpp                       |  6 +++---
 unit_tests/graph/Test_Matching.cpp        |  6 +++---
 unit_tests/recipes/Test_ToGenericOp.cpp   |  4 ++--
 unit_tests/recipes/Test_removeFlatten.cpp |  4 ++--
 6 files changed, 23 insertions(+), 15 deletions(-)

diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 078ff0fde..373c7da29 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -238,19 +238,19 @@ public:
  * @param[in] alpha Scalar multiplier for the product of input tensors A * B.
  * @param[in] beta Scalar multiplier for the bias.
  * @param[in] noBias Flag indicating whether to use a bias term (default is `false`).
- * @param[in] name Name of the operator (optional).
  * @param[in] transA Flag indicating whether input#0 needs to be transposed (default is `false`).
  * @param[in] transB Flag indicating whether input#1 needs to be transposed (default is `false`).
+ * @param[in] name Name of the operator (optional).
  * @return A shared pointer to the Node containing the FC operator.
  */
 std::shared_ptr<Node> FC(const DimSize_t inChannels,
                          const DimSize_t outChannels,
-                         bool noBias = false,
-                         const std::string& name = "",
                          float alpha = 1.0f,
                          float beta = 1.0f,
+                         bool noBias = false,
                          bool transA = false,
-                         bool transB = false);
+                         bool transB = false,
+                         const std::string& name = "");
 
 } // namespace Aidge
 
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 40433eb51..dc3f738fb 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -47,7 +47,15 @@ void declare_FC(py::module &m) {
 
   declare_registrable<FC_Op>(m, "FCOp");
 
-  m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("no_bias") = false, py::arg("name") = "", py::arg("alpha")=1.0f, py::arg("beta")=1.0f, py::arg("transA") = false, py::arg("transB") = false,
+  m.def("FC", &FC, 
+        py::arg("in_channels"),
+        py::arg("out_channels"),
+        py::arg("alpha")=1.0f,
+        py::arg("beta")=1.0f,
+        py::arg("no_bias") = false,
+        py::arg("transA") = false,
+        py::arg("transB") = false,
+        py::arg("name") = "",
     R"mydelimiter(
     Initialize a node containing a Fully Connected (FC) operator.
 
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 54f28507b..13da22423 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -103,12 +103,12 @@ std::set<std::string> Aidge::FC_Op::getAvailableBackends() const {
 
 std::shared_ptr<Aidge::Node> Aidge::FC(const Aidge::DimSize_t inChannels,
                                        const Aidge::DimSize_t outChannels,
-                                       bool noBias,
-                                       const std::string& name,
                                        float alpha,
                                        float beta,
+                                       bool noBias,
                                        bool transA,
-                                       bool transB) {
+                                       bool transB,
+                                       const std::string& name) {
     // FIXME: properly handle default w&b initialization in every cases
     auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(alpha, beta, transA, transB), name);
     addProducer(fc, 1, {outChannels, inChannels}, "w");
diff --git a/unit_tests/graph/Test_Matching.cpp b/unit_tests/graph/Test_Matching.cpp
index 582c73565..bd684f9ea 100644
--- a/unit_tests/graph/Test_Matching.cpp
+++ b/unit_tests/graph/Test_Matching.cpp
@@ -357,9 +357,9 @@ TEST_CASE("[core/graph] Matching") {
         ReLU("relu2"),
         Conv(4, 4, {5, 5}, "conv3"),
         BatchNorm<2>(4, 1.0e-5, 0.1, false, "bn3"),
-        FC(4, 4, false, "fc1"),
-        FC(4, 4, false, "fc2"),
-        FC(4, 4, false, "fc3"),
+        FC(4, 4, 1.0, 1.0, false, false, false, "fc1"),
+        FC(4, 4, 1.0, 1.0, false, false, false, "fc2"),
+        FC(4, 4, 1.0, 1.0, false, false, false, "fc3"),
         ReLU("relu3"),
         Conv(1, 4, {5, 5}, "conv4")
     });
diff --git a/unit_tests/recipes/Test_ToGenericOp.cpp b/unit_tests/recipes/Test_ToGenericOp.cpp
index cb75fdb10..02d784385 100644
--- a/unit_tests/recipes/Test_ToGenericOp.cpp
+++ b/unit_tests/recipes/Test_ToGenericOp.cpp
@@ -32,9 +32,9 @@ TEST_CASE("[graph/convert] toGenericOp", "[toGenericOp][recipies]") {
                     ReLU(),
                     Conv(4, 3, {1, 1}, "conv3"),
                     ReLU(),
-                    FC(2028, 256, false, "fc1"),
+                    FC(2028, 256, 1.0, 1.0, false, false, false, "fc1"),
                     ReLU(),
-                    FC(256, 10, false, "fc2")});
+                    FC(256, 10, 1.0, 1.0, false, false, false, "fc2")});
     
     // NCHW - MNIST DATA like
     g->forwardDims({{5, 1, 28, 28}});
diff --git a/unit_tests/recipes/Test_removeFlatten.cpp b/unit_tests/recipes/Test_removeFlatten.cpp
index 1b5e27838..655f7c7f5 100644
--- a/unit_tests/recipes/Test_removeFlatten.cpp
+++ b/unit_tests/recipes/Test_removeFlatten.cpp
@@ -27,8 +27,8 @@ namespace Aidge {
 TEST_CASE("[cpu/recipes] RemoveFlatten", "[RemoveFlatten][recipes]") {
   std::shared_ptr<Node> flatten =
       GenericOperator("Flatten", 1, 0, 1, "myFlatten");
-  std::shared_ptr<Node> fc0 = FC(10, 10, false, "FC_1");
-  std::shared_ptr<Node> fc1 = FC(10, 10, false, "FC_2");
+  std::shared_ptr<Node> fc0 = FC(10, 10, 1.0, 1.0, false, false, false, "FC_1");
+  std::shared_ptr<Node> fc1 = FC(10, 10, 1.0, 1.0, false, false, false, "FC_2");
   std::shared_ptr<Node> prod = Producer(std::array<DimSize_t, 10>(), "myProd");
 
   SECTION("flatten last layer : nothing removed because pattern searched is "
-- 
GitLab
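
After the reorder, callers pass alpha/beta before no_bias and the name last,
as the updated unit tests show. The equivalent Python call at this point in
the series (module name aidge_core assumed):

    import aidge_core

    fc = aidge_core.FC(4, 4, alpha=1.0, beta=1.0, no_bias=False,
                       transA=False, transB=False, name="fc1")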


From 5c13d82dea1543eaaf669b19869269e2a6afc5b2 Mon Sep 17 00:00:00 2001
From: hrouis <houssemeddine.rouis92@gmail.com>
Date: Tue, 11 Mar 2025 12:00:19 +0100
Subject: [PATCH 7/8] add Sum operator

---
 include/aidge/aidge.hpp                |  1 +
 include/aidge/operator/Sum.hpp         | 90 ++++++++++++++++++++++++
 python_binding/operator/pybind_Sum.cpp | 67 ++++++++++++++++++
 src/operator/Sum.cpp                   | 95 ++++++++++++++++++++++++++
 4 files changed, 253 insertions(+)
 create mode 100644 include/aidge/operator/Sum.hpp
 create mode 100644 python_binding/operator/pybind_Sum.cpp
 create mode 100644 src/operator/Sum.cpp

diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index cd36a6547..5ff1159e6 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -80,6 +80,7 @@
 #include "aidge/operator/Split.hpp"
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/operator/Sub.hpp"
+#include "aidge/operator/Sum.hpp"
 #include "aidge/operator/Transpose.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/stimuli/Stimulus.hpp"
diff --git a/include/aidge/operator/Sum.hpp b/include/aidge/operator/Sum.hpp
new file mode 100644
index 000000000..6718f4179
--- /dev/null
+++ b/include/aidge/operator/Sum.hpp
@@ -0,0 +1,90 @@
+/********************************************************************************
+ * Copyright (c) 2025 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+ #ifndef AIDGE_CORE_OPERATOR_SUM_H_
+ #define AIDGE_CORE_OPERATOR_SUM_H_
+ 
+ #include <memory>
+ #include <string>
+ #include <vector>
+ 
+ #include "aidge/operator/OperatorTensor.hpp"
+ #include "aidge/graph/Node.hpp"
+ #include "aidge/utils/ErrorHandling.hpp"
+ #include "aidge/utils/Types.h"
+ #include "aidge/utils/Registrar.hpp"
+ 
+ namespace Aidge {
+ 
+ /**
+  * @brief Description of an element-wise Sum operation on multiple input Tensors,
+  * supporting NumPy broadcasting.
+  *
+  * For each tuple of elements x0, x1, ..., xN from the input Tensors, the function
+  * is defined as:
+  * `f(x0, ..., xN) = x0 + x1 + ... + xN`
+  *
+  * Broadcasting adjusts shapes of the input Tensors to make them compatible:
+  * - Tensors are aligned from the rightmost dimensions.
+  * - Dimensions are compatible if they are equal, one of them is 1, or missing.
+  *
+  * The output Tensor shape is determined by taking the maximum size along 
+  * each dimension of the input Tensors after broadcasting.
+  *
+  * @example Input 1: (3, 4, 2), Input 2: (2), Output: (3, 4, 2)
+  * @example Input 1: (1, 5, 3), Input 2: (2, 1, 3), Input 3: (3), Output: (2, 5, 3)
+  *
+  * @see OperatorTensor
+  * @see Registrable
+  */
+ class Sum_Op : public OperatorTensor,
+     public Registrable<Sum_Op,
+                        std::string,
+                        std::function<std::shared_ptr<OperatorImpl>(const Sum_Op&)>>
+ {
+ public:
+     static const std::string Type;
+ 
+     Sum_Op() = delete;
+ 
+     /** @brief Constructor taking the number of input Tensors to sum (must be >= 1). */
+     Sum_Op(const IOIndex_t nbIn);
+ 
+     /**
+      * @brief Copy-constructor.
+      * @param op Sum_Op to copy.
+      * @details Copies the operator attributes and its output tensor(s), but not
+      * its input tensors. The new operator has no associated input.
+      */
+     Sum_Op(const Sum_Op& op);
+ 
+     /**
+      * @brief Clone the operator using its copy-constructor.
+      * @see Operator::Sum_Op
+      */
+     std::shared_ptr<Operator> clone() const override;
+ 
+     bool forwardDims(bool allowDataDependency = false) override final;
+ 
+     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+     std::set<std::string> getAvailableBackends() const override;
+ 
+     static const std::vector<std::string> getInputsName() {
+         return {"data_input_0", "data_input_n"};
+     }
+     static const std::vector<std::string> getOutputsName() {
+         return {"data_output"};
+     }
+ };
+ 
+ std::shared_ptr<Node> Sum(const IOIndex_t nbIn, const std::string& name = "");
+ } // namespace Aidge
+ 
+ #endif /* AIDGE_CORE_OPERATOR_SUM_H_ */
+ 
\ No newline at end of file
diff --git a/python_binding/operator/pybind_Sum.cpp b/python_binding/operator/pybind_Sum.cpp
new file mode 100644
index 000000000..2d09d4736
--- /dev/null
+++ b/python_binding/operator/pybind_Sum.cpp
@@ -0,0 +1,67 @@
+/********************************************************************************
+ * Copyright (c) 2025 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+ #include <memory>
+
+ #include <pybind11/pybind11.h>
+ 
+ #include "aidge/operator/Sum.hpp"
+ #include "aidge/operator/OperatorTensor.hpp"
+ #include "aidge/utils/Types.h"
+ 
+ namespace py = pybind11;
+ namespace Aidge {
+ 
+ void declare_Sum(py::module &m) {
+   py::class_<Sum_Op, std::shared_ptr<Sum_Op>, OperatorTensor>(m, "SumOp", py::multiple_inheritance(),
+     R"mydelimiter(
+     Initialize a Sum operator.
+     This operator performs element-wise addition between multiple input tensors.
+     The operation is defined as:
+         Output = Input1 + Input2 + ... + InputN
+     The output tensor shape is determined by taking the maximum size along each dimension of the input tensors after broadcasting.
+     Examples:
+         Input 1: (3, 4, 2), Input 2: (2), Output: (3, 4, 2)
+         Input 1: (1, 5, 3), Input 2: (2, 1, 3), Input 3: (3), Output: (2, 5, 3)
+     :param name : Name of the node (optional).
+     :type name : str
+     )mydelimiter")
+     .def(py::init<const IOIndex_t>(), py::arg("nb_inputs"))
+     .def_static("get_inputs_name", &Sum_Op::getInputsName)
+     .def_static("get_outputs_name", &Sum_Op::getOutputsName)
+     .def_readonly_static("Type", &Sum_Op::Type);
+ 
+   declare_registrable<Sum_Op>(m, "SumOp");
+ 
+   m.def("Sum", &Sum, py::arg("nb_inputs"),  py::arg("name") = "",
+     R"mydelimiter(
+     Initialize a node containing a sum operator that performs element-wise addition between multiple tensors.
+     The operation is defined as:
+         Output = Input1 + Input2 + ... + InputN
+     The output tensor shape is determined by taking the maximum size along each dimension of the input tensors after broadcasting.
+     Examples:
+         Input 1: (3, 4, 2), Input 2: (2), Output: (3, 4, 2)
+         Input 1: (1, 5, 3), Input 2: (2, 1, 3), Input 3: (3), Output: (2, 5, 3)
+     :param nb_inputs : number of inputs to sum.
+     :type nb_inputs : int
+     :param name : Name of the node (optional).
+     :type name : str
+     :return: A node containing the Sum operator.
+     :rtype: :py:class:`SumOp`
+     )mydelimiter");
+ }
+ 
+ void init_Sum(py::module &m) {
+   declare_Sum(m);
+ }
+ 
+ } // namespace Aidge
+ 
\ No newline at end of file
diff --git a/src/operator/Sum.cpp b/src/operator/Sum.cpp
new file mode 100644
index 000000000..6c6e5fe29
--- /dev/null
+++ b/src/operator/Sum.cpp
@@ -0,0 +1,95 @@
+/********************************************************************************
+ * Copyright (c) 2025 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+ #include <cstddef>    // std::size_t
+ #include <stdexcept>  // std::runtime_error
+ #include <string>
+ #include <vector>
+ 
+ #include "aidge/data/Tensor.hpp"
+ #include "aidge/operator/Sum.hpp"
+ #include "aidge/utils/Types.h"
+ #include "aidge/utils/ErrorHandling.hpp"
+ #include "aidge/utils/Registrar.hpp"
+ 
+ const std::string Aidge::Sum_Op::Type = "Sum";
+ 
+ Aidge::Sum_Op::Sum_Op(const IOIndex_t nbIn)
+ : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1) {
+     if (nbIn == 0) {
+         AIDGE_THROW_OR_ABORT(std::runtime_error, "Sum operator should have at least one input.");
+     }
+ }
+ 
+ Aidge::Sum_Op::Sum_Op(const Sum_Op& op)
+     : OperatorTensor(op)
+ {
+     if (op.mImpl) {
+         SET_IMPL_MACRO(Sum_Op, *this, op.backend());
+     } else {
+         mImpl = nullptr;
+     }
+ }
+ 
+ std::shared_ptr<Aidge::Operator> Aidge::Sum_Op::clone() const {
+     return std::make_shared<Sum_Op>(*this);
+ }
+ 
+ bool Aidge::Sum_Op::forwardDims(bool /*allowDataDependency*/) {
+     if (inputsAssociated()) {
+         std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
+         for (std::size_t i = 0; i < nbInputs(); i++) {
+             inputsDims[i] = getInput(i)->dims();
+         }
+ 
+         std::size_t outNbDims = 1;
+         for(std::size_t i = 0; i < nbInputs(); ++i) {
+             outNbDims = (inputsDims[i].size() > outNbDims) ? inputsDims[i].size() : outNbDims;
+         }
+ 
+         std::vector<std::size_t> outDims(outNbDims, 1);
+ 
+         for (auto it = outDims.rbegin(); it != outDims.rend(); ++it) {
+             for (std::size_t i = 0; i < nbInputs(); ++i) {
+                 if(!inputsDims[i].empty()) {
+                     const std::size_t dim = inputsDims[i].back();
+                     inputsDims[i].pop_back();
+                     if (*it == 1) {
+                         *it = dim;
+                     }
+                     else if ((dim != *it) && (dim != 1)) {
+                         AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Sum Operation: {} for previous inputs vs {} for input#{}",
+                             outDims, getInput(i)->dims(), i);
+                     }
+                 }
+             }
+         }
+         mOutputs[0]->resize(outDims);
+         return true;
+     }
+ 
+     return false;
+ }
+ 
+ void Aidge::Sum_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+     SET_IMPL_MACRO(Sum_Op, *this, name);
+     mOutputs[0]->setBackend(name, device);
+ }
+ 
+ std::set<std::string> Aidge::Sum_Op::getAvailableBackends() const {
+     return Registrar<Sum_Op>::getKeys();
+ }
+ 
+ ////////////////////////////////////////////////////////////////////////////////
+ 
+ std::shared_ptr<Aidge::Node> Aidge::Sum(const IOIndex_t nbIn, const std::string& name) {
+     return std::make_shared<Node>(std::make_shared<Sum_Op>(nbIn), name);
+ }
\ No newline at end of file
-- 
GitLab
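
The broadcasting rule implemented by Sum_Op::forwardDims matches NumPy's; the
docstring examples can be checked directly (illustrative sketch only):

    import numpy as np

    a, b, c = np.ones((1, 5, 3)), np.ones((2, 1, 3)), np.ones((3,))
    assert (a + b + c).shape == (2, 5, 3)

    x, y = np.ones((3, 4, 2)), np.ones((2,))
    assert (x + y).shape == (3, 4, 2)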


From 312cdbf78fac0da0b29b4537de9f9899a35104f6 Mon Sep 17 00:00:00 2001
From: hrouis <houssemeddine.rouis92@gmail.com>
Date: Mon, 24 Mar 2025 13:35:56 +0100
Subject: [PATCH 8/8] add TransposeFC MetaOp and remove transA and transB attr
 from FC

---
 include/aidge/operator/FC.hpp                 | 41 ++-------
 include/aidge/operator/MetaOperatorDefs.hpp   | 37 ++++++++
 python_binding/operator/pybind_FC.cpp         | 14 +--
 .../operator/pybind_MetaOperatorDefs.cpp      | 52 +++++++++++
 src/operator/FC.cpp                           | 21 ++---
 src/operator/MetaOperatorDefs/TransposeFC.cpp | 86 +++++++++++++++++++
 unit_tests/graph/Test_Matching.cpp            |  6 +-
 unit_tests/recipes/Test_ToGenericOp.cpp       |  4 +-
 unit_tests/recipes/Test_removeFlatten.cpp     |  4 +-
 9 files changed, 196 insertions(+), 69 deletions(-)
 create mode 100644 src/operator/MetaOperatorDefs/TransposeFC.cpp

diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 373c7da29..3d056f5f1 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -28,8 +28,6 @@ namespace Aidge {
 enum class FCAttr {
     Alpha,  // The scalar multiplier for the product of input tensors A * B.
     Beta,   // The scalar multiplier for the bias.
-    TransA, // Boolean to store whether we need to transpose input#0
-    TransB  // Boolean to store whether we need to transpose input#1
 };
 
 /**
@@ -38,16 +36,9 @@ enum class FCAttr {
  * The Fully Connected (FC) operation applies a linear transformation to the input Tensor
  * by multiplying it with a weight matrix and optionally adding a bias vector: 
  * - If `bias` is included:
- *   f(x) = x × weights^T + bias
+ *   f(x) = alpha * x * weights^T + beta * bias
  * - If `bias` is omitted:
- *   f(x) = x × weights^T
- *
- * Attributes:
- * - `inChannels`: The number of input features (or channels). Determined from the dimensions
- *   of the weight Tensor. This represents the size of the input vector.
- * - `outChannels`: The number of output features (or channels). Determined from the dimensions
- *   of the weight Tensor. This represents the size of the output vector.
- * - `noBias`: A boolean value indicating whether the bias vector is omitted in the operation.
+ *   f(x) = alpha * x * weights^T
  *
  * @example:
  * - Input Tensor: Shape (64, 128)  // Batch size of 64, 128 input features
@@ -64,9 +55,7 @@ class FC_Op : public OperatorTensor,
 private:
     using Attributes_ = StaticAttributes<FCAttr,
                                         float,
-                                        float,
-                                        bool,
-                                        bool>;
+                                        float>;
 
     template <FCAttr e>
     using attr = typename Attributes_::template attr<e>;
@@ -83,13 +72,11 @@ public:
      *
      * Initializes the operator with a type identifier and input categories.
      */
-    FC_Op(float alpha = 1.0f, float beta = 1.0f, bool transA = false, bool transB = false)
+    FC_Op(float alpha = 1.0f, float beta = 1.0f)
     : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
     mAttributes(std::make_shared<Attributes_>(
                 attr<FCAttr::Alpha>(alpha),
-                attr<FCAttr::Beta>(beta),
-                attr<FCAttr::TransA>(transA),
-                attr<FCAttr::TransB>(transB)))
+                attr<FCAttr::Beta>(beta)))
     {}
 
     /**
@@ -201,18 +188,6 @@ public:
      */
     inline float& beta() const { return mAttributes->template getAttr<FCAttr::Beta>(); }
 
-    /**
-     * @brief Get the transA boolean.
-     * @return Whether input#0 needs to be transposed.
-     */
-    inline bool& transA() const { return mAttributes->template getAttr<FCAttr::TransA>(); }
-
-    /**
-     * @brief Get the transB boolean.
-     * @return Whether input#1 needs to be transposed.
-     */
-    inline bool& transB() const { return mAttributes->template getAttr<FCAttr::TransB>(); }
-
     /**
      * @brief Retrieves the input tensor names for the FC operator.
      * @return A vector of input tensor names: `{"data_input", "weight", "bias"}`.
@@ -238,8 +213,6 @@ public:
  * @param[in] alpha Scalar multiplier for the product of input tensors A * B.
  * @param[in] beta Scalar multiplier for the bias.
  * @param[in] noBias Flag indicating whether to use a bias term (default is `false`).
- * @param[in] transA Flag indicating whether input#0 needs to be transposed (default is `false`).
- * @param[in] transB Flag indicating whether input#1 needs to be transposed (default is `false`).
  * @param[in] name Name of the operator (optional).
  * @return A shared pointer to the Node containing the FC operator.
  */
@@ -248,14 +221,12 @@ std::shared_ptr<Node> FC(const DimSize_t inChannels,
                          float alpha = 1.0f,
                          float beta = 1.0f,
                          bool noBias = false,
-                         bool transA = false,
-                         bool transB = false,
                          const std::string& name = "");
 
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::FCAttr>::data[] = {"alpha", "beta", "transA", "transB"};
+const char *const EnumStrings<Aidge::FCAttr>::data[] = {"alpha", "beta"};
 }
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index ef0879268..57cb56ea0 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -360,6 +360,43 @@ std::shared_ptr<Node> Leaky(const int nbTimeSteps,
                             const LeakyReset resetType = LeakyReset::Subtraction,
                             const std::string &name = "");
 
+
+/**
+ * @brief Creates a FC operation with transposed inputs.
+ *
+ * This function creates a Fully Connected operation with a Transpose operation on one or both inputs.
+ *
+ * @param[in] in_channels Number of input channels.
+ * @param[in] out_channels Number of output channels.
+ * @param[in] alpha Scalar multiplier for the product of input tensors A * B.
+ * @param[in] beta Scalar multiplier for the bias.
+ * @param[in] name Optional name for the operation.
+ * @param[in] no_bias Flag indicating whether to omit the bias term (default is `false`).
+ * @param[in] transposeA Flag indicating whether input#0 needs to be transposed (default is `false`).
+ * @param[in] transposeB Flag indicating whether input#1 needs to be transposed (default is `false`).
+ * @return A shared pointer to the Node representing the TransposeFC operation.
+ */
+extern std::shared_ptr<Node> TransposeFC(DimSize_t in_channels,
+                                        DimSize_t out_channels,
+                                        float alpha=1.0f,
+                                        float beta=1.0f,
+                                        const std::string& name = "",
+                                        bool no_bias = false,
+                                        bool transposeA = false,
+                                        bool transposeB = false);
+
+/**
+ * @brief Creates a TransposeFC operation as a MetaOperator.
+ *
+ * This function creates a graph-based MetaOperator wrapping an FC operation with an optional Transpose on one or both inputs.
+ *
+ * @param[in] alpha Scalar multiplier for the product of input tensors A * B.
+ * @param[in] beta Scalar multiplier for the bias.
+ * @param[in] transposeA Flag indicating whether input#0 needs to be transposed (default is `false`).
+ * @param[in] transposeB Flag indicating whether input#1 needs to be transposed (default is `false`).
+ * @return A shared pointer to the MetaOperator_Op representing the TransposeFC operation.
+ */
+extern std::shared_ptr<MetaOperator_Op> TransposeFC_Op(float alpha = 1.0f, float beta = 1.0f, bool transposeA = false, bool transposeB = false);
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index dc3f738fb..f915f1fda 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -30,13 +30,9 @@ void declare_FC(py::module &m) {
     :type type : :py:class:`str`
     )mydelimiter")
     .def(py::init<float,
-      float,
-      bool,
-      bool>(),
+      float>(),
       py::arg("alpha")=1.0,
-      py::arg("beta")=1.0,
-      py::arg("transA")=false,
-      py::arg("transB")=false)
+      py::arg("beta")=1.0)
     .def_static("get_inputs_name", &FC_Op::getInputsName)
     .def_static("get_outputs_name", &FC_Op::getOutputsName)
     .def_readonly_static("Type", &FC_Op::Type)
@@ -53,8 +49,6 @@ void declare_FC(py::module &m) {
         py::arg("alpha")=1.0f,
         py::arg("beta")=1.0f,
         py::arg("no_bias") = false,
-        py::arg("transA") = false,
-        py::arg("transB") = false,
         py::arg("name") = "",
     R"mydelimiter(
     Initialize a node containing a Fully Connected (FC) operator.
@@ -71,10 +65,6 @@ void declare_FC(py::module &m) {
     :type alpha : :py:class:`float`
     :param beta : The scalar multiplier for the bias.
     :type beta : :py:class:`float`
-    :param transA : Indicates whether the first input needs to be transposed.
-    :type transA : :py:class:`bool`
-    :param transB : Indicates whether the second input needs to be transposed.
-    :type transB : :py:class:`bool`
     )mydelimiter");
 }
 
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index 182a5edaa..75e04d6cd 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -502,6 +502,57 @@ void declare_LeakyOp(py::module &m) {
     )mydelimiter");
 }
 
+void declare_TransposeFCOp(py::module &m) {
+    m.def("TransposeFC", [](DimSize_t in_channels,
+                            DimSize_t out_channels,
+                            float alpha,
+                            float beta,
+                            const std::string& name,
+                            bool no_bias,
+                            bool transA,
+                            bool transB)
+      {
+          return TransposeFC(in_channels, out_channels, alpha, beta, name, no_bias, transA, transB);
+      }, py::arg("in_channels"),
+         py::arg("out_channels"),
+         py::arg("alpha") = 1.0f,
+         py::arg("beta") = 1.0f,
+         py::arg("name") = "",
+         py::arg("no_bias")= false,
+         py::arg("transA")= false,
+         py::arg("transB")= false,
+      R"mydelimiter(
+          Initialize a node containing an FC operator with Transpose on one or both inputs.
+
+          :param in_channels: Number of input channels.
+          :type in_channels: int
+          :param out_channels: Number of output channels.
+          :type out_channels: int
+          :param alpha: The scalar multiplier for the product of input tensors A * B.
+          :type alpha: float
+          :param beta: The scalar multiplier for the bias.
+          :type beta: float
+          :param name: Name of the node (optional).
+          :type name: str
+          :param no_bias: Whether to disable bias addition in the FC.
+          :type no_bias: bool
+          :param transA: Whether the first input needs to be transposed.
+          :type transA: bool
+          :param transB: Whether the second input needs to be transposed.
+          :type transB: bool
+          :return: A node containing the FC operator with a Transpose node on one or both inputs.
+          :rtype: :py:class:`TransposeFCOp`
+      )mydelimiter");
+
+      m.def("TransposeFCOp", [](float alpha, float beta, bool transA, bool transB)
+      {
+        return TransposeFC_Op(alpha, beta, transA, transB);
+      },
+        py::arg("alpha") = 1.0f,
+        py::arg("beta") = 1.0f,
+        py::arg("transA")= false,
+        py::arg("transB")= false,
+      R"mydelimiter(
+          Initialize an FC operator with Transpose on one or both inputs.
+
+          :return: An FC operator with Transpose operators on one or both inputs.
+          :rtype: :py:class:`TransposeFCOp`
+      )mydelimiter");
+}
+
 void init_MetaOperatorDefs(py::module &m) {
   declare_PaddedConvOp<1>(m);
   declare_PaddedConvOp<2>(m);
@@ -520,6 +571,7 @@ void init_MetaOperatorDefs(py::module &m) {
   declare_LSTMOp(m);
   declare_LeakyResetEnum(m);
   declare_LeakyOp(m);
+  declare_TransposeFCOp(m);
 
   py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperatorOp", py::multiple_inheritance())
   .def(py::init<const char *, const std::shared_ptr<GraphView>&, const std::vector<InputCategory>&>(),
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 13da22423..abe94d92e 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -45,24 +45,17 @@ bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
         // first check weight since it defines inChannels and outChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == 2),
                     "Wrong weight Tensor dimension: {} for FC operator (should have 2 dimensions).", getInput(1)->nbDims());
-        const DimSize_t outChannels = mAttributes->template getAttr<FCAttr::TransB>() ?
-                                      getInput(1)->template dims<2>()[1]:
-                                      getInput(1)->template dims<2>()[0];
-        const DimSize_t inChannels = mAttributes->template getAttr<FCAttr::TransB>() ?
-                                     getInput(1)->template dims<2>()[0]:
-                                     getInput(1)->template dims<2>()[1];
+        const DimSize_t outChannels = getInput(1)->template dims<2>()[0];
+        const DimSize_t inChannels = getInput(1)->template dims<2>()[1];
         // check data
         const std::vector<DimSize_t>& inputDims = getInput(0)->dims();
-        const DimIdx_t inChannelsIdx = mAttributes->template getAttr<FCAttr::TransA>() ? 1 : 0;
         if (getInput(0)->nbDims() == 1) {
-            AIDGE_ASSERT(inputDims[inChannelsIdx] == inChannels,
+            AIDGE_ASSERT(inputDims[0] == inChannels,
                 "Wrong number of input features for input data ({}), expected {}",
-                inputDims[inChannelsIdx], inChannels);
+                inputDims[0], inChannels);
         } else {
             AIDGE_ASSERT(getInput(0)->nbDims() > 1, "FC input data must have at least one dimension");
-            const DimSize_t nbInputFeatures = mAttributes->template getAttr<FCAttr::TransA>() ?
-                                                            inputDims[0]:
-                                                            std::accumulate(inputDims.cbegin() + 1, inputDims.cend(), DimSize_t(1), std::multiplies<DimSize_t>());
+            const DimSize_t nbInputFeatures = std::accumulate(inputDims.cbegin() + 1, inputDims.cend(), DimSize_t(1), std::multiplies<DimSize_t>());
             AIDGE_ASSERT(nbInputFeatures == inChannels,
                     "Wrong number of input features for input data ({}), expected {}",
                     nbInputFeatures, inChannels);
@@ -106,11 +99,9 @@ std::shared_ptr<Aidge::Node> Aidge::FC(const Aidge::DimSize_t inChannels,
                                        float alpha,
                                        float beta,
                                        bool noBias,
-                                       bool transA,
-                                       bool transB,
                                        const std::string& name) {
     // FIXME: properly handle default w&b initialization in every cases
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(alpha, beta, transA, transB), name);
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(alpha, beta), name);
     addProducer(fc, 1, {outChannels, inChannels}, "w");
     if (!noBias) {
         addProducer(fc, 2, {outChannels}, "b"); // already sets bias dims
diff --git a/src/operator/MetaOperatorDefs/TransposeFC.cpp b/src/operator/MetaOperatorDefs/TransposeFC.cpp
new file mode 100644
index 000000000..5dc3a5b3d
--- /dev/null
+++ b/src/operator/MetaOperatorDefs/TransposeFC.cpp
@@ -0,0 +1,86 @@
+/********************************************************************************
+ * Copyright (c) 2025 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+ #include "aidge/operator/MetaOperatorDefs.hpp"
+
+ #include <array>
+ #include <memory>
+ #include <vector>
+ 
+ #include "aidge/graph/Node.hpp"
+ #include "aidge/graph/OpArgs.hpp"
+ #include "aidge/operator/FC.hpp"
+ #include "aidge/operator/MetaOperator.hpp"
+ #include "aidge/operator/Producer.hpp"
+ #include "aidge/operator/Transpose.hpp"
+ #include "aidge/utils/ArrayHelpers.hpp"
+ #include "aidge/utils/Types.h"
+ 
+ std::shared_ptr<Aidge::Node> Aidge::TransposeFC(Aidge::DimSize_t in_channels,
+                                   Aidge::DimSize_t out_channels,
+                                   float alpha,
+                                   float beta,
+                                   const std::string& name,
+                                   bool no_bias,
+                                   bool transposeA,
+                                   bool transposeB)
+ {
+    auto graph = std::make_shared<GraphView>();
+    auto fc = FC(in_channels, out_channels, alpha, beta, no_bias, name);
+    graph->add(fc);
+    if (transposeA) {
+        auto transA = Transpose(std::vector<DimSize_t>{}, name + "_transposeA");
+        transA->addChild(graph->getOrderedInputs()[0].first,0,0);
+        graph->add(transA);
+    }
+    if (transposeB) {
+        auto transB = Transpose(std::vector<DimSize_t>{}, name + "_transposeB");
+        transB->addChild(graph->getOrderedInputs()[1].first,0,1);
+        graph->add(transB);
+    }
+
+    auto metaOpNode = MetaOperator("TransposeFC", graph, {}, name);
+
+    addProducer(metaOpNode, 1, {out_channels, in_channels}, "w");
+    if (!no_bias) {
+        addProducer(metaOpNode, 2, {out_channels}, "b");
+    }
+
+     return metaOpNode;
+ }
+
+ std::shared_ptr<Aidge::MetaOperator_Op> Aidge::TransposeFC_Op(float alpha,
+    float beta,
+    bool transposeA,
+    bool transposeB)
+ {
+    auto graph = std::make_shared<GraphView>();
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(alpha, beta), "");
+    graph->add(fc);
+
+    std::vector<std::pair<NodePtr, IOIndex_t>> orderedInputs = {{fc,0}, {fc,1}, {fc,2}};
+
+    if (transposeA) {
+        auto transA = Transpose(std::vector<DimSize_t>{}, "");
+        transA->addChild(graph->getOrderedInputs()[0].first,0,0);
+        graph->add(transA);
+        orderedInputs[0] = {transA, 0};
+    }
+    if (transposeB) {
+        auto transB = Transpose(std::vector<DimSize_t>{}, "");
+        transB->addChild(graph->getOrderedInputs()[1].first,0,1);
+        graph->add(transB);
+        orderedInputs[1] = {transB, 0};
+    }
+    graph->setOrderedInputs(orderedInputs);
+    graph->setOrderedOutputs({{fc, 0}});
+    return std::make_shared<MetaOperator_Op>("TransposeFC", graph);
+ }
diff --git a/unit_tests/graph/Test_Matching.cpp b/unit_tests/graph/Test_Matching.cpp
index bd684f9ea..ae8ae6786 100644
--- a/unit_tests/graph/Test_Matching.cpp
+++ b/unit_tests/graph/Test_Matching.cpp
@@ -357,9 +357,9 @@ TEST_CASE("[core/graph] Matching") {
         ReLU("relu2"),
         Conv(4, 4, {5, 5}, "conv3"),
         BatchNorm<2>(4, 1.0e-5, 0.1, false, "bn3"),
-        FC(4, 4, 1.0, 1.0, false, false, false, "fc1"),
-        FC(4, 4, 1.0, 1.0, false, false, false, "fc2"),
-        FC(4, 4, 1.0, 1.0, false, false, false, "fc3"),
+        FC(4, 4, 1.0, 1.0, false, "fc1"),
+        FC(4, 4, 1.0, 1.0, false, "fc2"),
+        FC(4, 4, 1.0, 1.0, false, "fc3"),
         ReLU("relu3"),
         Conv(1, 4, {5, 5}, "conv4")
     });
diff --git a/unit_tests/recipes/Test_ToGenericOp.cpp b/unit_tests/recipes/Test_ToGenericOp.cpp
index 02d784385..4ff2bd72d 100644
--- a/unit_tests/recipes/Test_ToGenericOp.cpp
+++ b/unit_tests/recipes/Test_ToGenericOp.cpp
@@ -32,9 +32,9 @@ TEST_CASE("[graph/convert] toGenericOp", "[toGenericOp][recipies]") {
                     ReLU(),
                     Conv(4, 3, {1, 1}, "conv3"),
                     ReLU(),
-                    FC(2028, 256, 1.0, 1.0, false, false, false, "fc1"),
+                    FC(2028, 256, 1.0, 1.0, false, "fc1"),
                     ReLU(),
-                    FC(256, 10, 1.0, 1.0, false, false, false, "fc2")});
+                    FC(256, 10, 1.0, 1.0, false, "fc2")});
     
     // NCHW - MNIST DATA like
     g->forwardDims({{5, 1, 28, 28}});
diff --git a/unit_tests/recipes/Test_removeFlatten.cpp b/unit_tests/recipes/Test_removeFlatten.cpp
index 655f7c7f5..2e12e0532 100644
--- a/unit_tests/recipes/Test_removeFlatten.cpp
+++ b/unit_tests/recipes/Test_removeFlatten.cpp
@@ -27,8 +27,8 @@ namespace Aidge {
 TEST_CASE("[cpu/recipes] RemoveFlatten", "[RemoveFlatten][recipes]") {
   std::shared_ptr<Node> flatten =
       GenericOperator("Flatten", 1, 0, 1, "myFlatten");
-  std::shared_ptr<Node> fc0 = FC(10, 10, 1.0, 1.0, false, false, false, "FC_1");
-  std::shared_ptr<Node> fc1 = FC(10, 10, 1.0, 1.0, false, false, false, "FC_2");
+  std::shared_ptr<Node> fc0 = FC(10, 10, 1.0, 1.0, false, "FC_1");
+  std::shared_ptr<Node> fc1 = FC(10, 10, 1.0, 1.0, false, "FC_2");
   std::shared_ptr<Node> prod = Producer(std::array<DimSize_t, 10>(), "myProd");
 
   SECTION("flatten last layer : nothing removed because pattern searched is "
-- 
GitLab
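
With the final patch, transposition lives in a TransposeFC MetaOperator that
wraps plain FC with optional Transpose nodes. A usage sketch of the new
bindings (module name aidge_core assumed; illustrative only):

    import aidge_core

    # Node with weight/bias producers attached:
    node = aidge_core.TransposeFC(16, 4, alpha=1.0, beta=1.0, name="tfc",
                                  no_bias=False, transA=False, transB=True)

    # Bare meta-operator, without producers:
    op = aidge_core.TransposeFCOp(alpha=1.0, beta=1.0, transA=False,
                                  transB=True)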