diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 3031fc19b335f6e77bb7999f8b3a2b107e3f5323..cd36a654772d2d641b9af32bb74b1336f4a9742d 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -47,6 +47,7 @@
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/operator/Div.hpp"
+#include "aidge/operator/Equal.hpp"
 #include "aidge/operator/Erf.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/Gather.hpp"
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 981f71762757795461226f2b052bda7f4bc9cd89..ab9e111f2adcb1d09635924184c89800900d0635 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -33,12 +33,21 @@ enum class AvgPoolingAttr {
      * Specifies the step size of the sliding window along each spatial dimension.
      */
     StrideDims,
-
+    /**
+     * @brief Dilation along each spatial axis. Default value is 1.
+     */
+    Dilations,
     /**
      * @brief Kernel dimensions for the pooling operation.
      * Specifies the size of the pooling window along each spatial dimension.
      */
-    KernelDims
+    KernelDims,
+    /**
+     * @brief Flag indicating whether to use ceil or floor when calculating output size.
+     * - `true`: Use `ceil` for output size calculation.
+     * - `false`: Use `floor` for output size calculation.
+     */
+    CeilMode
 };
 
 /**
@@ -46,11 +55,30 @@ enum class AvgPoolingAttr {
  *
  * The AvgPooling operation computes the average value within sliding windows of specified size
  * (kernel dimensions) over the input tensor. The stride dimensions determine how the window
- * moves across the input. This operation is commonly used in neural networks to reduce the spatial
- * dimensions while preserving features.
+ * moves across the input. The dilation parameter allows spacing between kernel elements, and 
+ * `ceil_mode` determines whether to use ceiling instead of floor when computing the output shape.
+ * This operation is commonly used in neural networks to reduce spatial dimensions while preserving features.
  *
  * @tparam DIM Number of dimensions for the pooling operation.
+ *
+ * ### Output Shape Calculation
+ * - If `ceil_mode` is false:
+ *   `output_size = floor((input_size - dilation * (kernel_size - 1) - 1) / stride + 1)`
+ * - If `ceil_mode` is true:
+ *   `output_size = ceil((input_size - dilation * (kernel_size - 1) - 1) / stride + 1)`
+ *
+ * @example Example usage:
+ * - Input shape: (1, 3, 32, 32) // Batch size 1, 3 channels, 32x32 spatial dimensions
+ * - KernelDims: (2, 2)
+ * - StrideDims: (2, 2)
+ * - Dilations: (1, 1)
+ * - CeilMode: false
+ * - Output shape: (1, 3, 16, 16)
+ *
+ * @see OperatorTensor
+ * @see Registrable
  */
+
 template <DimIdx_t DIM>
 class AvgPooling_Op : public OperatorTensor,
                 public Registrable<AvgPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>> {
@@ -67,7 +95,9 @@ private:
      */
     using Attributes_ = StaticAttributes<AvgPoolingAttr,
                                              std::array<DimSize_t, DIM>,
-                                             std::array<DimSize_t, DIM>>;
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, DIM>,
+                                             bool>;
     template <AvgPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
 
@@ -84,21 +114,27 @@ public:
 
     /**
      * @brief Constructs an AvgPooling operation with specified kernel and stride dimensions.
-     * @param kernel_dims Size of the pooling window for each spatial dimension.
-     * @param stride_dims Step size (stride) for sliding the pooling window across the input dimensions.
+     * @param[in] kernel_dims Size of the pooling window for each spatial dimension.
+     * @param[in] stride_dims Step size (stride) for sliding the pooling window across the input dimensions.
      * Defaults to 1 for each dimension.
+     * @param[in] dilations Spatial dilations for the pooling operation.
+     * @param[in] ceil_mode Indicates whether to use ceil mode for output size calculation.
      */
     constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
-                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1))
+                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
+                            const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t, DIM>(1),
+                            bool ceil_mode = false)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
                         attr<AvgPoolingAttr::StrideDims>(stride_dims),
-                        attr<AvgPoolingAttr::KernelDims>(kernel_dims)))
+                        attr<AvgPoolingAttr::Dilations>(dilations),
+                        attr<AvgPoolingAttr::KernelDims>(kernel_dims),
+                        attr<AvgPoolingAttr::CeilMode>(ceil_mode)))
     {}
 
     /**
      * @brief Copy-constructor.
-     * @param op AvgPooling_Op to copy.
+     * @param[in] op AvgPooling_Op to copy.
      * @details Copies the operator attributes and its output tensor(s), but not
      * its input tensors. The new operator has no associated input.
      */
@@ -112,16 +148,16 @@ public:
 
     /**
      * @brief Calculates the output dimensions based on the input dimensions and operator attributes.
-     * @param allowDataDependency If true, considers data-dependent operations. Defaults to false.
+     * @param[in] allowDataDependency If true, considers data-dependent operations. Defaults to false.
      * @return True if the dimensions are successfully calculated.
      */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     /**
      * @brief Computes the receptive field of the operator.
-     * @param firstEltDims Dimensions of the first element.
-     * @param outputDims Dimensions of the output tensor.
-     * @param outputIdx Index of the output tensor. Defaults to 0.
+     * @param[in] firstEltDims Dimensions of the first element.
+     * @param[in] outputDims Dimensions of the output tensor.
+     * @param[in] outputIdx Index of the output tensor. Defaults to 0.
      * @return A vector of pairs representing the receptive fields.
      */
     std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
@@ -131,8 +167,8 @@ public:
 
     /**
      * @brief Sets the backend for the operation.
-     * @param name Name of the backend.
-     * @param device Device index. Defaults to 0.
+     * @param[in] name Name of the backend.
+     * @param[in] device Device index. Defaults to 0.
      */
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
@@ -155,11 +191,23 @@ public:
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<AvgPoolingAttr::StrideDims>(); }
 
     /**
-     * @brief Accessor for the kernel dimensions.
-     * @return An array representing the kernel dimensions.
+     * @brief Accessor for dilations.
+     * @return An array representing spatial dilations.
+     */
+    inline std::array<DimSize_t, DIM>& dilations() const { return mAttributes->template getAttr<AvgPoolingAttr::Dilations>(); }
+
+    /**
+     * @brief Accessor for kernel dimensions.
+     * @return An array representing kernel dimensions.
      */
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<AvgPoolingAttr::KernelDims>(); }
 
+    /**
+     * @brief Accessor for ceil mode flag.
+     * @return Boolean value indicating whether ceil mode is enabled.
+     */
+    inline bool& ceilMode() const { return mAttributes->template getAttr<AvgPoolingAttr::CeilMode>(); }
+
     /**
      * @brief Retrieves the names of the input tensors.
      * @return A vector of strings representing the input tensors names.
@@ -180,31 +228,39 @@ public:
 /**
  * @brief Creates an AvgPooling operator node.
  * @tparam DIM Number of dimensions for the pooling operation.
- * @param kernel_dims Size of the pooling window for each spatial dimension.
- * @param name Name of the operator node. Defaults to an empty string.
- * @param stride_dims Step size (stride) for sliding the pooling window across the input dimensions. Defaults to 1 for each dimension.
+ * @param[in] kernel_dims Size of the pooling window for each spatial dimension.
+ * @param[in] name Name of the operator node. Defaults to an empty string.
+ * @param[in] stride_dims Step size (stride) for sliding the pooling window across the input dimensions. Defaults to 1 for each dimension.
+ * @param[in] dilations Spatial dilations for the pooling operation.
+ * @param[in] ceil_mode Indicates whether to use ceil mode for output size calculation.
  * @return A shared pointer to the created operator node.
  */
 template <std::array<DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                  const std::string& name = "",
-                                 const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1));
+                                 const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                 const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
+                                 bool ceil_mode=false);
 
 /**
  * @brief Overload of AvgPooling for C-style arrays.
  * @tparam DIM Number of dimensions for the pooling operation.
- * @param kernel_dims C-style array specifying the kernel dimensions.
- * @param name Name of the operator node. Defaults to an empty string.
- * @param stride_dims Step size (stride) for sliding the pooling window across the input dimensions. Defaults to 1 for each dimension.
+ * @param[in] kernel_dims C-style array specifying the kernel dimensions.
+ * @param[in] name Name of the operator node. Defaults to an empty string.
+ * @param[in] stride_dims Step size (stride) for sliding the pooling window across the input dimensions. Defaults to 1 for each dimension.
+ * @param[in] dilations Spatial dilations for the pooling operation.
+ * @param[in] ceil_mode Indicates whether to use ceil mode for output size calculation.
  * @return A shared pointer to the created operator node.
  */
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> AvgPooling(
     DimSize_t const (&kernel_dims)[DIM],
     const std::string& name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
+    bool ceil_mode=false) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
-    return AvgPooling(to_array(kernel_dims), name, stride_dims);
+    return AvgPooling(to_array(kernel_dims), name, stride_dims, dilations, ceil_mode);
 }
 }  // namespace Aidge
 
@@ -221,10 +277,7 @@ namespace {
  * @brief String representation of the AvgPooling attributes.
  */
 template <>
-const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
-    "stride_dims",
-    "kernel_dims"
-};
+const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = { "stride_dims", "dilations", "kernel_dims", "ceil_mode" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
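
The floor/ceil expressions documented above can be sanity-checked with a small standalone sketch. This is illustrative Python with no Aidge dependency; `pooled_size` is a hypothetical helper that simply mirrors the documented formula.

    import math

    def pooled_size(input_size: int, kernel_size: int, stride: int = 1,
                    dilation: int = 1, ceil_mode: bool = False) -> int:
        # floor_or_ceil((input_size - dilation*(kernel_size-1) - 1) / stride) + 1
        span = input_size - dilation * (kernel_size - 1) - 1
        rounder = math.ceil if ceil_mode else math.floor
        return rounder(span / stride) + 1

    assert pooled_size(32, 2, stride=2) == 16                  # matches the doc example
    assert pooled_size(32, 3, stride=2) == 15                  # floor
    assert pooled_size(32, 3, stride=2, ceil_mode=True) == 16  # ceil
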
diff --git a/include/aidge/operator/Equal.hpp b/include/aidge/operator/Equal.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..12bc9af7812aedf52a4502f270e136c65a4a9756
--- /dev/null
+++ b/include/aidge/operator/Equal.hpp
@@ -0,0 +1,82 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_EQUAL_H_
+#define AIDGE_CORE_OPERATOR_EQUAL_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/**
+ * @brief Tensor element-wise logical equal operation.
+ */
+class Equal_Op : public OperatorTensor,
+    public Registrable<Equal_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Equal_Op&)>> {
+public:
+    static const std::string Type;
+
+    /**
+     * @brief Compute element-wise Equal operation on two given inputs.
+     * @details Supports broadcasting of both operands.
+     */
+    Equal_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Equal_Op(const Equal_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Equal_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Equal_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Equal_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input_1", "data_input_2"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Equal(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Equal_Op>(), name);
+}
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_EQUAL_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 8503b1be15df24056d61885af27d0d1778990015..9063fb88b4e018826bff82e0e09e6dbfdbd48421 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -41,7 +41,10 @@ enum class MaxPoolingAttr {
    * Must be positive integers.
    */
   StrideDims,
-
+  /**
+   * @brief Dilation along each spatial axis. Default value is 1.
+   */
+  Dilations,
   /**
    * @brief Kernel dimensions specifying the size of the pooling window for each spatial dimension.
    * For example, common kernel dimensions include 2x2 or 3x3.
@@ -63,24 +66,28 @@ enum class MaxPoolingAttr {
  * @brief Implements the MaxPooling operation over a specified input tensor.
  *
  * MaxPooling reduces spatial dimensions by applying a max filter over a sliding window.
- * The resulting output tensor contains the maximum value within each window.
+ * The stride dimensions determine how the window moves across the input. The dilation 
+ * parameter allows spacing between kernel elements, and `ceil_mode` determines whether 
+ * to use ceiling instead of floor when computing the output shape.
  *
  * ### Output Shape Calculation
- * - If `CeilMode` is false:
- *   `output_size = floor((input_size - kernel_size) / stride + 1)`
- * - If `CeilMode` is true:
- *   `output_size = ceil((input_size - kernel_size) / stride + 1)`
+ * - If `ceil_mode` is false:
+ *   `output_size = floor((input_size - dilation * (kernel_size - 1) - 1) / stride + 1)`
+ * - If `ceil_mode` is true:
+ *   `output_size = ceil((input_size - dilation * (kernel_size - 1) - 1) / stride + 1)`
  *
  * @example Example usage:
  * - Input shape: (1, 3, 32, 32) // Batch size 1, 3 channels, 32x32 spatial dimensions
  * - KernelDims: (2, 2)
  * - StrideDims: (2, 2)
+ * - Dilations: (1, 1)
  * - CeilMode: false
  * - Output shape: (1, 3, 16, 16)
  *
  * @see OperatorTensor
  * @see Registrable
  */
+
 template <DimIdx_t DIM>
 class MaxPooling_Op : public OperatorTensor,
                 public Registrable<MaxPooling_Op<DIM>,
@@ -91,6 +98,7 @@ public:
     static const std::string Type; ///< Static identifier for this operator type.
 
     using Attributes_ = StaticAttributes<MaxPoolingAttr,
+                                         std::array<DimSize_t, DIM>,
                                          std::array<DimSize_t, DIM>,
                                          std::array<DimSize_t, DIM>,
                                          bool>;
@@ -107,15 +115,17 @@ public:
      * @brief Constructor.
      * @param[in] kernel_dims Size of the pooling window for each spatial dimension.
      * @param[in] stride_dims Step size (stride) for sliding the pooling window across input dimensions.
+     * @param[in] dilations Spatial dilations for the pooling operation.
      * @param[in] ceil_mode Indicates whether to use ceil or floor for output size calculation.
      */
     MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                  const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t, DIM>(1),
                   bool ceil_mode = false);
 
     /**
      * @brief Copy-constructor.
-     * @param op MaxPooling_Op to copy.
+     * @param[in] op MaxPooling_Op to copy.
      * @details Copies the operator attributes and its output tensor(s), but not
      * its input tensors. The new operator has no associated input.
      */
@@ -159,6 +169,12 @@ public:
      */
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); }
 
+    /**
+     * @brief Accessor for dilations.
+     * @return An array representing spatial dilations.
+     */
+    inline std::array<DimSize_t, DIM>& dilations() const { return mAttributes->template getAttr<MaxPoolingAttr::Dilations>(); }
+
     /**
      * @brief Accessor for kernel dimensions.
      * @return An array representing kernel dimensions.
@@ -197,6 +213,7 @@ extern template class Aidge::MaxPooling_Op<3>;
  * @param[in] kernel_dims Kernel dimensions specifying the size of the pooling window.
  * @param[in] name Optional name for the operation.
  * @param[in] stride_dims Stride dimensions specifying the step size for the pooling window.
+ * @param[in] dilations Spatial dilations for the pooling operation.
  * @param[in] ceil_mode Indicates whether to use ceil mode for output size calculation.
  * @return A shared pointer to a Node representing the MaxPooling operation.
  */
@@ -204,6 +221,7 @@ template <std::array<DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                  const std::string& name = "",
                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                 const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
                                  bool ceil_mode=false);
 
 /**
@@ -212,6 +230,7 @@ std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
  * @param[in] kernel_dims C-style array of kernel dimensions.
  * @param[in] name Optional name for the operation.
  * @param[in] stride_dims Stride dimensions specifying the step size for the pooling window.
+ * @param[in] dilations Spatial dilations for the pooling operation.
  * @param[in] ceil_mode Indicates whether to use ceil mode for output size calculation.
  * @return A shared pointer to a Node representing the MaxPooling operation.
  */
@@ -220,9 +239,10 @@ inline std::shared_ptr<Node> MaxPooling(
     DimSize_t const (&kernel_dims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
     bool ceil_mode = false) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
-    return MaxPooling(to_array(kernel_dims), name, stride_dims, ceil_mode);
+    return MaxPooling(to_array(kernel_dims), name, stride_dims, dilations, ceil_mode);
 }
 
 }  // namespace Aidge
@@ -232,7 +252,7 @@ namespace {
  * @brief String representations of MaxPooling attributes for debugging and logging.
  */
 template <>
-const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "kernel_dims", "ceil_mode"};
+const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "dilations", "kernel_dims", "ceil_mode"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 9597b533c14b27d282985b13cd8e1199ed5360a8..c4ceccf530e7fca8939aeec92067b94f7a6bde80 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -164,14 +164,18 @@ PaddedConvDepthWise(const DimSize_t nb_channels,
  * @param[in] kernel_dims The dimensions of the pooling window.
  * @param[in] name Optional name for the operation.
  * @param[in] stride_dims The stride dimensions for pooling (default is 1).
+ * @param[in] dilations The spatial dilations for pooling (default is 1).
  * @param[in] padding_dims Padding dimensions before pooling (default is 0).
+ * @param[in] ceil_mode Whether to use ceiling mode for pooling (default is false).
  * @return A shared pointer to the Node representing the padded average pooling operation.
  */
 template <std::array<DimSize_t, 1>::size_type DIM>
 extern std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
+                                  const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
+                                  bool ceil_mode = false);
 
 /**
  * @brief Creates a padded average pooling operation as a MetaOperator.
@@ -180,13 +184,17 @@ extern std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &
  *
  * @param[in] kernel_dims The dimensions of the pooling window.
  * @param[in] stride_dims The stride dimensions for pooling (default is 1).
+ * @param[in] dilations The spatial dilations for pooling (default is 1).
  * @param[in] padding_dims Padding dimensions before pooling (default is 0).
+ * @param[in] ceil_mode Whether to use ceiling mode for pooling (default is false).
  * @return A shared pointer to the MetaOperator_Op representing the padded average pooling operation.
  */
 template <std::array<DimSize_t, 1>::size_type DIM>
 extern std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
+                                  const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
+                                  bool ceil_mode = false);
 
 // Helper function for average pooling with C-style array for kernel_dims, enabling automatic DIM deduction.
 template <DimSize_t DIM>
@@ -195,8 +203,11 @@ PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
                  const std::string &name = "",
                  const std::array<DimSize_t, DIM> &stride_dims =
                      create_array<DimSize_t, DIM>(1),
+                 const std::array<DimSize_t, DIM> &dilations =
+                     create_array<DimSize_t, DIM>(1),
                  const std::array<DimSize_t, 2 * DIM> &padding_dims =
-                     create_array<DimSize_t, 2 * DIM>(0));
+                     create_array<DimSize_t, 2 * DIM>(0),
+                bool ceil_mode = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 
@@ -208,6 +219,7 @@ PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
  * @param[in] kernel_dims The dimensions of the pooling window.
  * @param[in] name Optional name for the operation.
  * @param[in] stride_dims The stride dimensions for pooling (default is 1).
+ * @param[in] dilations The spatial dilations for pooling (default is 1).
  * @param[in] padding_dims Padding dimensions before pooling (default is 0).
  * @param[in] ceil_mode Whether to use ceiling mode for pooling (default is false).
  * @return A shared pointer to the Node representing the padded max pooling operation.
@@ -216,11 +228,12 @@ template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
                                   bool ceil_mode = false) {
     auto graph = Sequential({
         Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
-        MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims, ceil_mode)
+        MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims, dilations, ceil_mode)
     });
 
     return MetaOperator(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph, {}, name);
@@ -233,6 +246,7 @@ inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &
  *
  * @param[in] kernel_dims The dimensions of the pooling window.
  * @param[in] stride_dims The stride dimensions for pooling (default is 1).
+ * @param[in] dilations The spatial dilations for pooling (default is 1).
  * @param[in] padding_dims Padding dimensions before pooling (default is 0).
  * @param[in] ceil_mode Whether to use ceiling mode for pooling (default is false).
  * @return A shared pointer to the MetaOperator_Op representing the padded max pooling operation.
@@ -240,11 +254,12 @@ inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<MetaOperator_Op> PaddedMaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
                                   bool ceil_mode = false) {
     auto graph = Sequential({
         Pad<DIM>(padding_dims, ""),
-        MaxPooling(kernel_dims, "", stride_dims, ceil_mode)
+        MaxPooling(kernel_dims, "", stride_dims, dilations, ceil_mode)
     });
     return std::make_shared<MetaOperator_Op>(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph);
 }
@@ -255,9 +270,10 @@ inline std::shared_ptr<Node> PaddedMaxPooling(
     DimSize_t const (&kernel_dims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
     bool ceil_mode= false) {
-    return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims, ceil_mode);
+    return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, dilations, padding_dims, ceil_mode);
 }
 
 /**
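
The padded pooling helpers above now accept `dilations` and `ceil_mode` and forward them to the inner pooling node. A minimal usage sketch through the Python factories declared later in this patch, assuming the bindings are exposed in the usual `aidge_core` module and the 2D specialization is instantiated:

    import aidge_core  # assumed module name

    pool = aidge_core.PaddedMaxPooling2D(
        kernel_dims=[3, 3],
        name="pool1",
        stride_dims=[2, 2],
        dilations=[1, 1],           # new argument, defaults to 1 per axis
        padding_dims=[1, 1, 1, 1],
        ceil_mode=False,
    )
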
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 24549e3f4f331ee1170a07e61a6190a607274fe3..e376bcffb6ee10449a8bca8cc89f26528865d291 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -47,11 +47,19 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
         :param stride_dims: The stride of the pooling operation. Specifies how much the kernel moves in each step.
                              By default, the stride is set to 1 for all dimensions.
         :type stride_dims: List[int], optional
+        :param dilations: The dilation value along each spatial axis of the filter.
+        :type dilations: List[int], optional
+        :param ceil_mode: Whether to use ceil or floor when calculating the output dimensions.
+        :type ceil_mode: bool, optional
         )mydelimiter")
     .def(py::init<const std::array<DimSize_t, DIM> &,
-                  const std::array<DimSize_t, DIM> &>(),
+                  const std::array<DimSize_t, DIM> &,
+                  const std::array<DimSize_t, DIM> &,
+                  bool>(),
             py::arg("kernel_dims"),
-            py::arg("stride_dims") = create_array<DimSize_t, DIM>(1))
+            py::arg("stride_dims") = create_array<DimSize_t, DIM>(1),
+            py::arg("dilations") = create_array<DimSize_t, DIM>(1),
+            py::arg("ceil_mode") = false)
     .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
     .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
     .def_readonly_static("Type", &AvgPooling_Op<DIM>::Type);
@@ -60,14 +68,19 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
 
   m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
-                                                                  const std::vector<DimSize_t>& stride_dims) {
+                                                                  const std::vector<DimSize_t>& stride_dims,
+                                                                  const std::vector<DimSize_t>& dilations,
+                                                                  bool ceil_mode) {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilations.size() == DIM, "dilations size [{}] does not match DIM [{}]", dilations.size(), DIM);
 
-        return AvgPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()));
+        return AvgPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilations.begin()), ceil_mode);
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM, 1),
+       py::arg("dilations") = std::vector<DimSize_t>(DIM, 1),
+       py::arg("ceil_mode") = false,
        R"mydelimiter(
         Initialize a node containing an AvgPooling operator.
 
@@ -75,6 +88,10 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
 
         :param kernel_dims: Size of the kernel applied during pooling.
         :type kernel_dims: List[int]
+        :param dilations: The dilation value along each spatial axis of the filter.
+        :type dilations: List[int]
+        :param ceil_mode: Whether to use ceil or floor when calculating the output dimensions.
+        :type ceil_mode: bool
         :param name: Name of the operator node (optional).
         :type name: str
         :param stride_dims: Stride dimensions for the pooling operation.
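
A short usage sketch for the extended factory, assuming the bindings are exposed in the usual `aidge_core` module and the 2D instantiation is declared; keyword names follow the `py::arg` declarations above:

    import aidge_core  # assumed module name

    avg = aidge_core.AvgPooling2D(
        kernel_dims=[2, 2],
        name="avgpool",
        stride_dims=[2, 2],
        dilations=[1, 1],   # new keyword
        ceil_mode=False,    # new keyword
    )
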
diff --git a/python_binding/operator/pybind_Equal.cpp b/python_binding/operator/pybind_Equal.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ef4488edce3c096c368f43a07de6b0d65f368013
--- /dev/null
+++ b/python_binding/operator/pybind_Equal.cpp
@@ -0,0 +1,34 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Equal.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Equal(py::module& m) {
+    py::class_<Equal_Op, std::shared_ptr<Equal_Op>, OperatorTensor>(m, "Equal_Op", py::multiple_inheritance(),
+          R"mydelimiter( Initialize an Equal operator.)mydelimiter")
+    .def(py::init<>())
+    .def_static("get_inputs_name", &Equal_Op::getInputsName)
+    .def_static("get_outputs_name", &Equal_Op::getOutputsName);
+    declare_registrable<Equal_Op>(m, "EqualOp");
+    m.def("Equal", &Equal, py::arg("name") = "",
+          R"mydelimiter(
+        Initialize a node containing an Equal operator.
+        :param name: Name of the node (optional).
+        )mydelimiter");
+}
+}  // namespace Aidge
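
A minimal sketch of the new binding, assuming the usual `aidge_core` module name; apart from the names bound in this file, only the standard `Node.get_operator()` accessor is used:

    import aidge_core  # assumed module name

    eq = aidge_core.Equal("eq")                     # node with two data inputs, one output
    op = eq.get_operator()                          # the Equal_Op registered above
    print(aidge_core.Equal_Op.get_inputs_name())    # ['data_input_1', 'data_input_2']
    print(aidge_core.Equal_Op.get_outputs_name())   # ['data_output']
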
diff --git a/python_binding/operator/pybind_Flatten.cpp b/python_binding/operator/pybind_Flatten.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..899e5d7758d6e4737f89b4308872bb0926f1f98f
--- /dev/null
+++ b/python_binding/operator/pybind_Flatten.cpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+#include <pybind11/pybind11.h>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Flatten.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Flatten(py::module &m) {
+  py::class_<Flatten_Op, std::shared_ptr<Flatten_Op>, OperatorTensor>(
+      m, "FlattenOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Flatten operator.
+
+        :param axis: Input dimensions up to axis (exclusive) are flattened into the outer dimension of the output; must be in [-r, r-1] with r = input_tensor.nbDims().
+        :type axis: int
+        )mydelimiter")
+      .def("get_inputs_name", &Flatten_Op::getInputsName)
+      .def("get_outputs_name", &Flatten_Op::getOutputsName)
+      .def("axis", &Flatten_Op::axis);
+  // Here we bind the constructor of the Flatten Node. We add an argument
+  // for each attribute of the operator (here we only have 'axis') and
+  // the last argument is the node's name.
+  m.def("Flatten", &Flatten, py::arg("axis") = 1,
+        py::arg("name") = "",
+        R"mydelimiter(
+    Initialize a node containing a flatten operator.
+	:param axis :   up to which input dimensions (exclusive) should be flattened to the outer dimension of the output
+                    between [-r;r-1] with r = input_tensor.nbDims()
+	:type axes : :py:class: List[Int]
+    :param name : name of the node.
+)mydelimiter");
+}
+} // namespace Aidge
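
A minimal usage sketch, assuming the usual `aidge_core` module name; keyword names follow the `py::arg` declarations above:

    import aidge_core  # assumed module name

    flat = aidge_core.Flatten(axis=1, name="flatten")
    # For an input of shape (N, C, H, W), axis=1 flattens everything past the
    # batch dimension, giving an output of shape (N, C*H*W).
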
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 8834625a8b5790e146861274cf82d0608b637148..bdbc1edd3cba67f6a7d703692a50f33355a8909e 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -37,14 +37,18 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         :type kernel_dims: List[int]
         :param stride_dims: The stride (step size) to move the kernel over the input.
         :type stride_dims: List[int]
+        :param dilations: The dilation value along each spatial axis of the filter.
+        :type dilations: List[int]
         :param ceil_mode: Whether to use ceil or floor when calculating the output dimensions.
         :type ceil_mode: bool
     )mydelimiter")
   .def(py::init<const std::array<DimSize_t, DIM> &,
+                const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
                 bool>(),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
+        py::arg("dilations"),
         py::arg("ceil_mode"))
   .def_static("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
@@ -55,14 +59,17 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
+                                                                  const std::vector<DimSize_t> &dilations,
                                                                   bool ceil_mode) {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilations.size() == DIM, "dilations size [{}] does not match DIM [{}]", dilations.size(), DIM);
 
-        return MaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), ceil_mode);
+        return MaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilations.begin()), ceil_mode);
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM, 1),
+       py::arg("dilations") = std::vector<DimSize_t>(DIM, 1),
        py::arg("ceil_mode") = false,
     R"mydelimiter(
         Initialize a node containing a MaxPooling operator.
@@ -75,6 +82,8 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         :type kernel_dims: List[int]
         :param stride_dims: The stride (step size) to move the kernel over the input.
         :type stride_dims: List[int]
+        :param dilations: The dilation value along each spatial axis of the filter.
+        :type dilations: List[int]
         :param ceil_mode: Whether to use ceil or floor when calculating the output dimensions.
         :type ceil_mode: bool
         :param name: Name of the node (optional).
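
As with AvgPooling, the MaxPooling factory gains a `dilations` keyword; a brief sketch assuming the usual `aidge_core` module name and the 2D instantiation:

    import aidge_core  # assumed module name

    maxp = aidge_core.MaxPooling2D(
        kernel_dims=[3, 3],
        name="maxpool",
        stride_dims=[2, 2],
        dilations=[2, 2],   # effective kernel span becomes dilation*(k-1)+1 = 5
        ceil_mode=True,
    )
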
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index 35f3d21341fbb529d692a71e597c3b2b76c8426e..8058cd2a23c6c1bf91b44b347af9df57aac0635a 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -46,7 +46,33 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("no_bias")= false);
+       py::arg("no_bias")= false,
+    R"mydelimiter(
+        Initialize a node containing a Padded Convolution operator.
+
+        This operator performs a convolution operation with explicit padding. It applies a 
+        kernel filter over an input tensor with specified stride and dilation settings.
+
+        :param in_channels: Number of input channels.
+        :type in_channels: int
+        :param out_channels: Number of output channels.
+        :type out_channels: int
+        :param kernel_dims: The size of the convolutional kernel for each dimension.
+        :type kernel_dims: List[int]
+        :param stride_dims: The stride (step size) for kernel movement.
+        :type stride_dims: List[int]
+        :param padding_dims: Explicit padding to apply before convolution.
+        :type padding_dims: List[int]
+        :param dilation_dims: The dilation factor for kernel spacing.
+        :type dilation_dims: List[int]
+        :param no_bias: Whether to disable bias addition in the convolution.
+        :type no_bias: bool
+        :param name: Name of the node (optional).
+        :type name: str
+        :return: A node containing the Padded Convolution operator.
+        :rtype: :py:class:`PaddedConvOp`
+    )mydelimiter");
+
     m.def(("PaddedConv" + std::to_string(DIM) + "DOp").c_str(), [](
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
@@ -62,9 +88,28 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
     }, py::arg("kernel_dims"),
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+    R"mydelimiter(
+        Initialize a Padded Convolution operator.
+
+        This function defines a convolution operation that includes explicit padding before 
+        applying the kernel. The padding allows control over output dimensions while maintaining 
+        receptive field properties.
+
+        :param kernel_dims: The size of the convolutional kernel for each dimension.
+        :type kernel_dims: List[int]
+        :param stride_dims: The stride (step size) for kernel movement.
+        :type stride_dims: List[int]
+        :param padding_dims: Padding applied before convolution.
+        :type padding_dims: List[int]
+        :param dilation_dims: The dilation factor for kernel spacing.
+        :type dilation_dims: List[int]
+        :return: A Padded Convolution operator.
+        :rtype: :py:class:`PaddedConvOp`
+    )mydelimiter");
 }
 
+
 template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
   m.def(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
                                                          const std::vector<DimSize_t>& kernel_dims,
@@ -86,7 +131,32 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("no_bias") = false);
+       py::arg("no_bias") = false,
+    R"mydelimiter(
+        Initialize a node containing a Depthwise Padded Convolution operator.
+
+        This operator performs a depthwise convolution operation, where each input channel is 
+        convolved separately with a different kernel. The operation includes explicit padding, 
+        stride control, and dilation options.
+
+        :param nb_channels: Number of input channels (also the number of output channels since depthwise convolution does not mix channels).
+        :type nb_channels: int
+        :param kernel_dims: The size of the convolutional kernel for each dimension.
+        :type kernel_dims: List[int]
+        :param stride_dims: The stride (step size) for kernel movement.
+        :type stride_dims: List[int]
+        :param padding_dims: Explicit padding to apply before convolution.
+        :type padding_dims: List[int]
+        :param dilation_dims: The dilation factor for kernel spacing.
+        :type dilation_dims: List[int]
+        :param no_bias: Whether to disable bias addition in the convolution.
+        :type no_bias: bool
+        :param name: Name of the node (optional).
+        :type name: str
+        :return: A node containing the Depthwise Padded Convolution operator.
+        :rtype: :py:class:`PaddedConvDepthWiseOp`
+    )mydelimiter");
+
   m.def(("PaddedConvDepthWise" + std::to_string(DIM) + "DOp").c_str(), [](
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
@@ -102,90 +172,258 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
     }, py::arg("kernel_dims"),
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+    R"mydelimiter(
+        Initialize a Depthwise Padded Convolution operator.
+
+        This function defines a depthwise convolution operation that includes explicit padding 
+        before applying the kernel. Depthwise convolution applies a separate filter to each 
+        input channel, preserving channel independence.
 
+        :param kernel_dims: The size of the convolutional kernel for each dimension.
+        :type kernel_dims: List[int]
+        :param stride_dims: The stride (step size) for kernel movement.
+        :type stride_dims: List[int]
+        :param padding_dims: Padding applied before convolution.
+        :type padding_dims: List[int]
+        :param dilation_dims: The dilation factor for kernel spacing.
+        :type dilation_dims: List[int]
+        :return: A Depthwise Padded Convolution operator.
+        :rtype: :py:class:`PaddedConvDepthWiseOp`
+    )mydelimiter");
 }
 
 template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
   m.def(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                          const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
-                                                         const std::vector<DimSize_t> &padding_dims)
+                                                         const std::vector<DimSize_t> &dilations,
+                                                         const std::vector<DimSize_t> &padding_dims,
+                                                         bool ceil_mode)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilations.size() == DIM, "dilations size [{}] does not match DIM [{}]", dilations.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
 
-        return PaddedAvgPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()));
+        return PaddedAvgPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilations.begin()), to_array<2*DIM>(padding_dims.begin()), ceil_mode);
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0));
+       py::arg("dilations") = std::vector<DimSize_t>(DIM,1),
+       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
+       py::arg("ceil_mode") = false,
+    R"mydelimiter(
+        Initialize a node containing a Padded Average Pooling operator.
+
+        This operator performs an average pooling operation with explicit padding. The output value 
+        is computed as the average of input values within a defined kernel window.
+
+        :param kernel_dims: The size of the pooling kernel for each dimension.
+        :type kernel_dims: List[int]
+        :param stride_dims: The stride (step size) for kernel movement.
+        :type stride_dims: List[int]
+        :param dilations: The dilation factor for the kernel, which increases the spacing between elements.
+        :type dilations: List[int]
+        :param padding_dims: Explicit padding to apply before pooling.
+        :type padding_dims: List[int]
+        :param ceil_mode: If set to True, the output shape is computed using ceil instead of floor.
+        :type ceil_mode: bool
+        :param name: Name of the node (optional).
+        :type name: str
+        :return: A node containing the Padded Average Pooling operator.
+        :rtype: :py:class:`PaddedAvgPoolingOp`
+    )mydelimiter");
+
   m.def(("PaddedAvgPooling" + std::to_string(DIM) + "DOp").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
-                                                         const std::vector<DimSize_t> &padding_dims)
+                                                         const std::vector<DimSize_t> &dilations,
+                                                         const std::vector<DimSize_t> &padding_dims,
+                                                         bool ceil_mode)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilations.size() == DIM, "dilations size [{}] does not match DIM [{}]", dilations.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
 
-        return PaddedAvgPooling_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()));
+        return PaddedAvgPooling_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilations.begin()), to_array<2*DIM>(padding_dims.begin()), ceil_mode);
     }, py::arg("kernel_dims"),
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0));
+       py::arg("dilations") = std::vector<DimSize_t>(DIM,1),
+       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
+       py::arg("ceil_mode") = false,
+    R"mydelimiter(
+        Initialize a Padded Average Pooling operator.
+
+        This function defines an average pooling operation with explicit padding before pooling is applied. 
+        The operation computes the average of the elements inside each kernel window.
+
+        :param kernel_dims: The size of the pooling kernel for each dimension.
+        :type kernel_dims: List[int]
+        :param stride_dims: The stride (step size) for kernel movement.
+        :type stride_dims: List[int]
+        :param dilations: The dilation factor for the kernel, which increases the spacing between elements.
+        :type dilations: List[int]
+        :param padding_dims: Padding applied before pooling.
+        :type padding_dims: List[int]
+        :param ceil_mode: If set to True, the output shape is computed using ceil instead of floor.
+        :type ceil_mode: bool
+        :return: A Padded Average Pooling operator.
+        :rtype: :py:class:`PaddedAvgPoolingOp`
+    )mydelimiter");
 }
 
 template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
   m.def(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                          const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &dilations,
                                                          const std::vector<DimSize_t> &padding_dims,
                                                          bool ceil_mode)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilations.size() == DIM, "dilations size [{}] does not match DIM [{}]", dilations.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
 
-        return PaddedMaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), ceil_mode);
+        return PaddedMaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilations.begin()), to_array<2*DIM>(padding_dims.begin()), ceil_mode);
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("dilations") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("ceil_mode") = false);
+       py::arg("ceil_mode") = false,
+    R"mydelimiter(
+        Initialize a node containing a Padded Max Pooling operator.
+
+        This operator performs a max pooling operation with explicit padding before pooling is applied. 
+        The output value is computed as the maximum of input values within a defined kernel window.
+
+        :param kernel_dims: The size of the pooling kernel for each dimension.
+        :type kernel_dims: List[int]
+        :param stride_dims: The stride (step size) for kernel movement.
+        :type stride_dims: List[int]
+        :param dilations: The dilation factor for the kernel, which increases the spacing between elements.
+        :type dilations: List[int]
+        :param padding_dims: Explicit padding to apply before pooling.
+        :type padding_dims: List[int]
+        :param ceil_mode: If set to True, the output shape is computed using ceil instead of floor.
+        :type ceil_mode: bool
+        :param name: Name of the node (optional).
+        :type name: str
+        :return: A node containing the Padded Max Pooling operator.
+        :rtype: :py:class:`PaddedMaxPoolingOp`
+    )mydelimiter");
+
   m.def(("PaddedMaxPooling" + std::to_string(DIM) + "DOp").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &dilations,
                                                          const std::vector<DimSize_t> &padding_dims,
                                                          bool ceil_mode)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilations.size() == DIM, "dilations size [{}] does not match DIM [{}]", dilations.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
 
-        return PaddedMaxPooling_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), ceil_mode);
+        return PaddedMaxPooling_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilations.begin()), to_array<2*DIM>(padding_dims.begin()), ceil_mode);
     }, py::arg("kernel_dims"),
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("dilations") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("ceil_mode") = false);
+       py::arg("ceil_mode") = false,
+    R"mydelimiter(
+        Initialize a Padded Max Pooling operator.
+
+        This function creates a max pooling operator that applies explicit padding to its input before pooling.
+        Each output value is the maximum of the elements inside the corresponding kernel window.
 
+        :param kernel_dims: The size of the pooling kernel for each dimension.
+        :type kernel_dims: List[int]
+        :param stride_dims: The stride (step size) for kernel movement.
+        :type stride_dims: List[int]
+        :param dilations: The spacing between kernel elements along each dimension (1 means no dilation).
+        :type dilations: List[int]
+        :param padding_dims: Explicit padding to apply before pooling (two values per spatial dimension: begin and end).
+        :type padding_dims: List[int]
+        :param ceil_mode: If set to True, the output shape is computed using ceil instead of floor.
+        :type ceil_mode: bool
+        :return: A Padded Max Pooling operator.
+        :rtype: :py:class:`PaddedMaxPoolingOp`
+    )mydelimiter");
 }
 
+
 void declare_LSTMOp(py::module &m) {
-  m.def("LSTM", &LSTM, py::arg("in_channels"),
+  m.def("LSTM", &LSTM,
+       py::arg("in_channels"),
        py::arg("hidden_channels"),
        py::arg("seq_length"),
        py::arg("nobias") = false,
-       py::arg("name") = "");
+       py::arg("name") = "",
+    R"mydelimiter(
+        Initialize a node containing an LSTM (Long Short-Term Memory) operator.
+
+        The LSTM operator is a recurrent neural network (RNN) variant designed to model sequential data 
+        while addressing the vanishing gradient problem. It includes gating mechanisms to control 
+        information flow through time.
+
+        :param in_channels: The number of input features per time step.
+        :type in_channels: int
+        :param hidden_channels: The number of hidden units in the LSTM.
+        :type hidden_channels: int
+        :param seq_length: The number of time steps in the input sequence.
+        :type seq_length: int
+        :param nobias: If set to True, no bias terms are included in the LSTM computation.
+        :type nobias: bool
+        :param name: Name of the node (optional).
+        :type name: str
+        :return: A node containing the LSTM operator.
+        :rtype: :py:class:`Node`
+    )mydelimiter");
+
   m.def("LSTMOp", &LSTM_Op,
        py::arg("seq_length"),
-       py::arg("name") = "");
+       py::arg("name") = "",
+    R"mydelimiter(
+        Initialize an LSTM operation.
+
+        This function sets up an LSTM operator to process sequential data. The LSTM maintains hidden 
+        states over time steps, allowing it to learn long-range dependencies.
+
+        :param seq_length: The length of the input sequence.
+        :type seq_length: int
+        :param name: Name of the node (optional).
+        :type name: str
+        :return: An LSTM operator.
+        :rtype: :py:class:`LSTMOp`
+    )mydelimiter");
 }
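
As with the pooling factory above, a hedged usage sketch of the LSTM binding (the `aidge_core` module name is an assumption; argument names are taken from the `py::arg` declarations):

```python
# Hypothetical usage sketch -- the module name is an assumption.
import aidge_core

# LSTM node: 32 input features per time step, 64 hidden units, unrolled over 10 steps.
lstm_node = aidge_core.LSTM(
    in_channels=32,
    hidden_channels=64,
    seq_length=10,
    nobias=False,
    name="lstm0",
)
```
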
 
+
 void declare_LeakyOp(py::module &m) {
     m.def("Leaky", &Leaky, 
           py::arg("nb_timesteps"),
           py::arg("beta"),
           py::arg("threshold") = 1.0,
-          py::arg("name") = "");
+          py::arg("name") = "",
+    R"mydelimiter(
+        Initialize a Leaky neuron operator.
+
+        The Leaky operator introduces a decay factor, allowing neuron states to "leak" over time instead of resetting 
+        abruptly. This helps in maintaining temporal memory.
+
+        :param nb_timesteps: The number of time steps for the operation.
+        :type nb_timesteps: int
+        :param beta: The leakage factor controlling decay over time.
+        :type beta: float
+        :param threshold: The activation threshold (default is 1.0).
+        :type threshold: float
+        :param name: Name of the node (optional).
+        :type name: str
+        :return: A node containing the Leaky operator.
+        :rtype: :py:class:`Node`
+    )mydelimiter");
 }
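
And the corresponding sketch for the Leaky binding (same assumption about the module name):

```python
# Hypothetical usage sketch -- the module name is an assumption.
import aidge_core

# Leaky neuron unrolled over 16 time steps, decay factor 0.9, default threshold 1.0.
leaky_node = aidge_core.Leaky(
    nb_timesteps=16,
    beta=0.9,
    threshold=1.0,
    name="leaky0",
)
```
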
 
 void init_MetaOperatorDefs(py::module &m) {
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index cc6f0bf2502027fea467b9db39561769fcebbd2b..ef1111b39a2f6fff3153dfb7441543ff5c3956c2 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -50,9 +50,11 @@ void init_Conv(py::module&);
 void init_ConvDepthWise(py::module&);
 void init_DepthToSpace(py::module&);
 void init_Div(py::module&);
+void init_Equal(py::module&);
 void init_Erf(py::module&);
 void init_Expand(py::module&);
 void init_FC(py::module&);
+void init_Flatten(py::module&);
 void init_Gather(py::module&);
 void init_GenericOperator(py::module&);
 void init_GlobalAveragePooling(py::module&);
@@ -149,9 +151,11 @@ void init_Aidge(py::module& m) {
     init_ConstantOfShape(m);
     init_DepthToSpace(m);
     init_Div(m);
+    init_Equal(m);
     init_Erf(m);
     init_Expand(m);
     init_FC(m);
+    init_Flatten(m);
     init_Gather(m);
     init_GenericOperator(m);
     init_GlobalAveragePooling(m);
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 78266e3fb391d6f33da9e65b2125dd57885ac89e..79341687c6190505ca41eafee3c1ee24f7b6088c 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -47,17 +47,28 @@ std::shared_ptr<Aidge::Operator> Aidge::AvgPooling_Op<DIM>::clone() const {
 template <Aidge::DimIdx_t DIM>
 bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
-        std::array<DimSize_t, DIM + 2> outputDims;
+        std::array<DimSize_t, DIM + 2> outputDims{};
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-        outputDims[0] = inputDims[0];
-        outputDims[1] = inputDims[1];
 
-        for (std::size_t dim = 0; dim < mAttributes->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
+        std::function<float(float)> roundingFunction;
+        if (mAttributes->template getAttr<AvgPoolingAttr::CeilMode>()) {
+            roundingFunction = [](float x) { return std::ceil(x); };
+        } else {
+            roundingFunction = [](float x) { return std::floor(x); };
+        }
+
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<AvgPoolingAttr::KernelDims>().size(); ++dim) {
+            const auto kernelDim = mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[dim];
+            const auto strideDim = mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[dim];
+            const auto dilationDim = mAttributes->template getAttr<AvgPoolingAttr::Dilations>()[dim];
+
             outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                                        std::floor(static_cast<float>(inputDims[dim+2] -
-                                                            mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
-                                        static_cast<float>(mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
+                                            roundingFunction(static_cast<float>(inputDims[dim+2] -
+                                                                    (kernelDim - 1) * dilationDim - 1) /
+                                            static_cast<float>(strideDim)));
         }
+        outputDims[1] = inputDims[1];
+        outputDims[0] = inputDims[0];
         getOutput(0)->resize(outputDims);
         return true;
     }
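
The rewritten loop implements the dilation-aware output-size formula documented in the header; a minimal Python sketch of the same arithmetic, checked against the 32 -> 16 example from the class documentation:

```python
import math

def pool_output_size(input_size: int, kernel: int, stride: int,
                     dilation: int = 1, ceil_mode: bool = False) -> int:
    # Mirrors forwardDims: 1 + round((in - dilation*(kernel-1) - 1) / stride)
    rounding = math.ceil if ceil_mode else math.floor
    return 1 + rounding((input_size - (kernel - 1) * dilation - 1) / stride)

# 32 inputs, 2x2 kernel, stride 2, unit dilation, floor rounding -> 16 outputs per axis.
assert pool_output_size(32, kernel=2, stride=2) == 16
```
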
@@ -96,7 +107,8 @@ Aidge::AvgPooling_Op<DIM>::computeReceptiveField(const std::vector<Aidge::DimSiz
             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
                         * mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
                         + 1
-                        + (mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
+                        + (mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * mAttributes->template getAttr<AvgPoolingAttr::Dilations>()[static_cast<std::size_t>(i)]);
             inputIdxDims[2+i] *= mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
         }
         std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
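
The receptive-field update is the inverse mapping: the input extent covered by a block of output positions now accounts for dilation. A small sketch of the per-axis arithmetic:

```python
def receptive_extent(output_extent: int, kernel: int, stride: int, dilation: int = 1) -> int:
    # Mirrors computeReceptiveField: (out - 1) * stride + 1 + (kernel - 1) * dilation
    return (output_extent - 1) * stride + 1 + (kernel - 1) * dilation

# One output of a 3-wide kernel with dilation 2 spans 5 input positions per axis.
assert receptive_extent(1, kernel=3, stride=2, dilation=2) == 5
```
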
@@ -128,10 +140,12 @@ template class Aidge::AvgPooling_Op<4>;
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::AvgPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
                                            const std::string& name,
-                                           const std::array<Aidge::DimSize_t, DIM> &stride_dims) {
+                                           const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                           const std::array<Aidge::DimSize_t, DIM> &dilations,
+                                           bool ceil_mode) {
     AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", AvgPooling_Op<DIM>::Type);
-    return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
+    return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilations, ceil_mode), name);
 }
-template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&);
-template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&);
-template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&);
+template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&, const std::array<Aidge::DimSize_t, 3>&, bool);
diff --git a/src/operator/Equal.cpp b/src/operator/Equal.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cc0fcd984062baeac3da47d03a3d64cda63eada3
--- /dev/null
+++ b/src/operator/Equal.cpp
@@ -0,0 +1,62 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>    // std::size_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Equal.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Equal_Op::Type = "Equal";
+
+bool Aidge::Equal_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Equal Operation: {} for input#0 vs {} for input#1",
+                    inputsDims0, inputsDims1);
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
+        return true;
+    }
+
+    return false;
+}
+
+void Aidge::Equal_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Equal_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Equal_Op::getAvailableBackends() const {
+    return Registrar<Equal_Op>::getKeys();
+}
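
The broadcasting in Equal_Op::forwardDims aligns the two shapes from their trailing dimensions, letting size-1 dimensions expand and rejecting any other mismatch; a Python sketch of the same rule:

```python
def equal_output_shape(dims0, dims1):
    # Trailing-dimension broadcast, as in Equal_Op::forwardDims.
    out = list(dims0 if len(dims0) >= len(dims1) else dims1)
    low = dims0 if len(dims0) < len(dims1) else dims1
    for offset in range(1, len(low) + 1):
        if out[-offset] == 1:
            out[-offset] = low[-offset]
        elif low[-offset] != 1 and low[-offset] != out[-offset]:
            raise RuntimeError(
                f"Incompatible Tensor shape for Equal Operation: {dims0} vs {dims1}")
    return out

assert equal_output_shape([2, 3, 4], [3, 1]) == [2, 3, 4]
assert equal_output_shape([1, 4], [2, 5, 1]) == [2, 5, 4]
```
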
diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp
index 535b53749caeffca34eb0bf541f06dee30a3a333..afd8e00cc07b9ecaf28fcb7d7b28fa3422446429 100644
--- a/src/operator/MaxPooling.cpp
+++ b/src/operator/MaxPooling.cpp
@@ -25,11 +25,13 @@ const std::string Aidge::MaxPooling_Op<DIM>::Type = "MaxPooling" + std::to_strin
 template <Aidge::DimIdx_t DIM>
 Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
                             const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                            const std::array<Aidge::DimSize_t, DIM> &dilations,
                             bool ceil_mode)
     : OperatorTensor(Type, {InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(
     attr<MaxPoolingAttr::StrideDims>(stride_dims),
     attr<MaxPoolingAttr::KernelDims>(kernel_dims),
+    attr<MaxPoolingAttr::Dilations>(dilations),
     attr<MaxPoolingAttr::CeilMode>(ceil_mode)))
 {}
 
@@ -63,11 +65,15 @@ bool Aidge::MaxPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
             roundingFunction = [](float x) { return std::floor(x); };
         }
 
-        for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size(); ++dim) {
+            const auto kernelDim = mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim];
+            const auto strideDim = mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim];
+            const auto dilationDim = mAttributes->template getAttr<MaxPoolingAttr::Dilations>()[dim];
+
             outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                                        roundingFunction(static_cast<float>(inputDims[dim+2] -
-                                                                mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
-                                        static_cast<float>(mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
+                                            roundingFunction(static_cast<float>(inputDims[dim+2] -
+                                                                    (kernelDim - 1) * dilationDim - 1) /
+                                            static_cast<float>(strideDim)));
         }
         outputDims[1] = inputDims[1];
         outputDims[0] = inputDims[0];
@@ -98,12 +104,13 @@ template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::MaxPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
                                            const std::string& name,
                                            const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                           const std::array<Aidge::DimSize_t, DIM> &dilations,
                                            bool ceil_mode)
 {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
-    return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, ceil_mode), name);
+    return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilations, ceil_mode), name);
 }
 
-template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, bool);
-template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, bool);
-template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&, const std::array<Aidge::DimSize_t, 3>&, bool);
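
MaxPooling_Op now uses the same dilation-aware formula as AvgPooling_Op; the only behavioural switch is the rounding function. A quick numeric check of a case where ceil_mode gains one output position:

```python
import math

# 32 inputs, 3-wide kernel with dilation 2 (effective span 5), stride 2:
# (32 - 2*2 - 1) / 2 = 13.5, so floor and ceil differ by one output.
floor_out = 1 + math.floor((32 - (3 - 1) * 2 - 1) / 2)
ceil_out = 1 + math.ceil((32 - (3 - 1) * 2 - 1) / 2)
assert (floor_out, ceil_out) == (14, 15)
```
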
diff --git a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
index bcda67d0ce4c43e4936739affb9d681942062cb1..8b86c5512a60c29cfd198e027dc524c2970fc743 100644
--- a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
@@ -34,31 +34,35 @@ template <std::array<DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name,
                                   const std::array<DimSize_t, DIM> &stride_dims,
-                                  const std::array<DimSize_t, 2*DIM> &padding_dims)
+                                  const std::array<DimSize_t, DIM> &dilations,
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims,
+                                  bool ceil_mode)
 {
     auto graph = Sequential({
         Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
-        AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
+        AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims, dilations, ceil_mode)
     });
 
     return MetaOperator(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), graph, {}, name);
 }
 
-template std::shared_ptr<Node> PaddedAvgPooling<1>(const std::array<DimSize_t,1>&, const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
-template std::shared_ptr<Node> PaddedAvgPooling<2>(const std::array<DimSize_t,2>&, const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
+template std::shared_ptr<Node> PaddedAvgPooling<1>(const std::array<DimSize_t,1>&, const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&, bool);
+template std::shared_ptr<Node> PaddedAvgPooling<2>(const std::array<DimSize_t,2>&, const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&, bool);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 std::shared_ptr<Node> PaddedAvgPooling(const DimSize_t (&kernel_dims)[DIM],
                                        const std::string& name,
                                        const std::array<DimSize_t, DIM> &stride_dims,
-                                       const std::array<DimSize_t, 2*DIM> &padding_dims)
+                                       const std::array<DimSize_t, DIM> &dilations,
+                                       const std::array<DimSize_t, 2*DIM> &padding_dims,
+                                       bool ceil_mode)
 {
-    return PaddedAvgPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+    return PaddedAvgPooling(to_array(kernel_dims), name, stride_dims, dilations, padding_dims, ceil_mode);
 }
 
-template std::shared_ptr<Node> PaddedAvgPooling<1>(const DimSize_t (&kernel_dims)[1], const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
-template std::shared_ptr<Node> PaddedAvgPooling<2>(const DimSize_t (&kernel_dims)[2], const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
+template std::shared_ptr<Node> PaddedAvgPooling<1>(const DimSize_t (&kernel_dims)[1], const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&, bool);
+template std::shared_ptr<Node> PaddedAvgPooling<2>(const DimSize_t (&kernel_dims)[2], const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&, bool);
 
 
 //////////////////////////////////
@@ -68,17 +72,19 @@ template std::shared_ptr<Node> PaddedAvgPooling<2>(const DimSize_t (&kernel_dims
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims,
-                                  const std::array<DimSize_t, 2*DIM> &padding_dims)
+                                  const std::array<DimSize_t, DIM> &dilations,
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims,
+                                  bool ceil_mode)
 {
     auto graph = Sequential({
         Pad<DIM>(padding_dims, ""),
-        AvgPooling(kernel_dims, "", stride_dims)
+        AvgPooling(kernel_dims, "", stride_dims, dilations, ceil_mode)
     });
 
     return std::make_shared<MetaOperator_Op>(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), graph);
 }
 
-template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<1>(const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
-template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<2>(const std::array<DimSize_t,2>&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
+template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<1>(const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&, bool);
+template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<2>(const std::array<DimSize_t,2>&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&, bool);
 
 } // namespace Aidge
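
Since PaddedAvgPooling only chains Pad and the (now dilation-aware) AvgPooling, its output size is the pooling formula applied to the padded extent; a short arithmetic sketch with illustrative values:

```python
import math

# 32 inputs padded by 1 on each side (Pad), then 3x3 average pooling with stride 2.
padded = 32 + 1 + 1
out = 1 + math.floor((padded - (3 - 1) * 1 - 1) / 2)
assert out == 16
```
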