diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index e73387ce1daf7b08d087faecf5a30edaffc6d54d..367435fe7dd62a071d701ef69c36d56ce2f7a940 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -290,5 +290,6 @@ extern template class Aidge::AvgPooling_Op<2>;
 extern template class Aidge::AvgPooling_Op<3>;
 extern template class Aidge::AvgPooling_Op<4>;
 
+
 
 #endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index 8058cd2a23c6c1bf91b44b347af9df57aac0635a..2b2cdea12fee04e88ccb715abebf9da768758de3 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -50,7 +50,7 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
     R"mydelimiter(
         Initialize a node containing a Padded Convolution operator.
 
-        This operator performs a convolution operation with explicit padding. It applies a 
+        This operator performs a convolution operation with explicit padding. It applies a
         kernel filter over an input tensor with specified stride and dilation settings.
 
         :param in_channels: Number of input channels.
@@ -92,8 +92,8 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
     R"mydelimiter(
         Initialize a Padded Convolution operator.
 
-        This function defines a convolution operation that includes explicit padding before 
-        applying the kernel. The padding allows control over output dimensions while maintaining 
+        This function defines a convolution operation that includes explicit padding before
+        applying the kernel. The padding allows control over output dimensions while maintaining
         receptive field properties.
 
         :param kernel_dims: The size of the convolutional kernel for each dimension.
@@ -135,8 +135,8 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
     R"mydelimiter(
         Initialize a node containing a Depthwise Padded Convolution operator.
 
-        This operator performs a depthwise convolution operation, where each input channel is 
-        convolved separately with a different kernel. The operation includes explicit padding, 
+        This operator performs a depthwise convolution operation, where each input channel is
+        convolved separately with a different kernel. The operation includes explicit padding,
         stride control, and dilation options.
 
         :param nb_channels: Number of input channels (also the number of output channels since depthwise convolution does not mix channels).
@@ -176,8 +176,8 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
     R"mydelimiter(
         Initialize a Depthwise Padded Convolution operator.
 
-        This function defines a depthwise convolution operation that includes explicit padding 
-        before applying the kernel. Depthwise convolution applies a separate filter to each 
+        This function defines a depthwise convolution operation that includes explicit padding
+        before applying the kernel. Depthwise convolution applies a separate filter to each
         input channel, preserving channel independence.
 
         :param kernel_dims: The size of the convolutional kernel for each dimension.
@@ -216,7 +216,7 @@ template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
     R"mydelimiter(
         Initialize a node containing a Padded Average Pooling operator.
 
-        This operator performs an average pooling operation with explicit padding. The output value 
+        This operator performs an average pooling operation with explicit padding. The output value
         is computed as the average of input values within a defined kernel window.
 
         :param kernel_dims: The size of the pooling kernel for each dimension.
@@ -255,7 +255,7 @@ template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
     R"mydelimiter(
         Initialize a Padded Average Pooling operator.
 
-        This function defines an average pooling operation with explicit padding before pooling is applied. 
+        This function defines an average pooling operation with explicit padding before pooling is applied.
         The operation computes the average of the elements inside each kernel window.
 
         :param kernel_dims: The size of the pooling kernel for each dimension.
@@ -296,7 +296,7 @@ template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
     R"mydelimiter(
         Initialize a node containing a Padded Max Pooling operator.
 
-        This operator performs a max pooling operation with explicit padding before pooling is applied. 
+        This operator performs a max pooling operation with explicit padding before pooling is applied.
         The output value is computed as the maximum of input values within a defined kernel window.
 
         :param kernel_dims: The size of the pooling kernel for each dimension.
@@ -335,7 +335,7 @@ template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
     R"mydelimiter(
         Initialize a Padded Max Pooling operator.
 
-        This function defines a max pooling operation with explicit padding before pooling is applied. 
+        This function defines a max pooling operation with explicit padding before pooling is applied.
         The operation computes the maximum of the elements inside each kernel window.
 
         :param kernel_dims: The size of the pooling kernel for each dimension.
@@ -364,8 +364,8 @@ void declare_LSTMOp(py::module &m) {
     R"mydelimiter(
         Initialize a node containing an LSTM (Long Short-Term Memory) operator.
 
-        The LSTM operator is a recurrent neural network (RNN) variant designed to model sequential data 
-        while addressing the vanishing gradient problem. It includes gating mechanisms to control 
+        The LSTM operator is a recurrent neural network (RNN) variant designed to model sequential data
+        while addressing the vanishing gradient problem. It includes gating mechanisms to control
         information flow through time.
 
         :param in_channels: The number of input features per time step.
@@ -388,7 +388,7 @@ void declare_LSTMOp(py::module &m) {
     R"mydelimiter(
         Initialize an LSTM operation.
 
-        This function sets up an LSTM operator to process sequential data. The LSTM maintains hidden 
+        This function sets up an LSTM operator to process sequential data. The LSTM maintains hidden
         states over time steps, allowing it to learn long-range dependencies.
 
         :param seq_length: The length of the input sequence.
@@ -402,7 +402,7 @@ void declare_LSTMOp(py::module &m) {
 
 
 void declare_LeakyOp(py::module &m) {
-    m.def("Leaky", &Leaky, 
+    m.def("Leaky", &Leaky,
           py::arg("nb_timesteps"),
           py::arg("beta"),
           py::arg("threshold") = 1.0,
@@ -410,7 +410,7 @@ void declare_LeakyOp(py::module &m) {
     R"mydelimiter(
         Initialize a Leaky neuron operator.
 
-        The Leaky operator introduces a decay factor, allowing neuron states to "leak" over time instead of resetting 
+        The Leaky operator introduces a decay factor, allowing neuron states to "leak" over time instead of resetting
         abruptly. This helps in maintaining temporal memory.
 
         :param nb_timesteps: The number of time steps for the operation.