diff --git a/aidge_core/__init__.py b/aidge_core/__init__.py
index 8544c5647befe4d5aa4aa362d016787131c36692..32042125ed6ecb1d935e240837afe6516706dbcb 100644
--- a/aidge_core/__init__.py
+++ b/aidge_core/__init__.py
@@ -11,4 +11,6 @@ from aidge_core.aidge_core import * # import so generated by PyBind
 import aidge_core.export_utils
 import aidge_core.utils
 from aidge_core.aidge_export_aidge import serialize_to_cpp
+from aidge_core.show_graphview import gview_to_json
+from aidge_core.mem_info import *
 from ._version import *
diff --git a/aidge_core/export_utils/code_generation.py b/aidge_core/export_utils/code_generation.py
index 995df18b52d5701af5259d571e6a0a91a83ba665..4f0f4634dd8ac09c8c0a86506dc52d420889b22a 100644
--- a/aidge_core/export_utils/code_generation.py
+++ b/aidge_core/export_utils/code_generation.py
@@ -19,6 +19,8 @@ def generate_file(file_path: Union[Path, str], template_path: Union[Path, str],
         file_path = Path(file_path)
     if isinstance(template_path, str):
         template_path = Path(template_path)
+    if not template_path.exists():
+        raise ValueError(f"Path to template {template_path} is not valid !")
     # Make dir
     file_path.parent.mkdir(parents=True, exist_ok=True)
 
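The new guard surfaces a bad template path immediately instead of failing later inside Jinja. A minimal sketch of the resulting behavior (the paths are hypothetical):

```python
from aidge_core.export_utils import generate_file

try:
    # The template path does not exist, so the new check raises before rendering.
    generate_file("export/include/layer.hpp", "templates/missing.jinja", name="conv1")
except ValueError as err:
    print(err)  # Path to template templates/missing.jinja is not valid!
```
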
diff --git a/aidge_core/export_utils/export_registry.py b/aidge_core/export_utils/export_registry.py
index eabc6eb6b656b0c0b0c8381b665d6a6d8f3805ff..fd24008a6de6c58c1e78f088e086817e2a769373 100644
--- a/aidge_core/export_utils/export_registry.py
+++ b/aidge_core/export_utils/export_registry.py
@@ -28,6 +28,10 @@ class ExportLib(aidge_core.OperatorImpl):
     # key: Path where static file is
     # Value: Path where to copy the file relative to the export root
     static_files: Dict[str, str] = {}
+    # Custom main generation jinja file
+    main_jinja_path = None
+    # Main memory section
+    mem_section = None
     # PRIVATE
     # Registry of exportNode, class level dictionary, shared across all ExportLib
     _cls_export_node_registry = {}
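A hedged sketch of how an export backend could set the two new class-level hooks (the subclass name, section name, and template path are hypothetical; other registration fields of ExportLib are omitted):

```python
from aidge_core.export_utils import ExportLib

class MyBoardLib(ExportLib):
    # Custom template used by scheduler_export() instead of the default main.jinja
    main_jinja_path = "my_board/templates/main.jinja"
    # Linker section receiving the static memory block emitted by forward.jinja
    mem_section = ".ccmram"
```
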
diff --git a/aidge_core/export_utils/node_export.py b/aidge_core/export_utils/node_export.py
index d22cc65d1eb247d09a48318d83274acfc3757d3d..479eaf01ff8c8e85a3bf83adac88f5ee7fe86857 100644
--- a/aidge_core/export_utils/node_export.py
+++ b/aidge_core/export_utils/node_export.py
@@ -299,11 +299,15 @@ class ExportNodeCpp(ExportNode):
 
         if self.config_template != "":
             path_to_definition = f"{self.config_path}/{self.attributes['name']}.{self.config_extension}"
-            code_generation.generate_file(
-                str(export_folder / path_to_definition),
-                self.config_template,
-                **self.attributes
-            )
+
+            try:
+                code_generation.generate_file(
+                    str(export_folder / path_to_definition),
+                    self.config_template,
+                    **self.attributes
+                )
+            except Exception as e:
+                raise RuntimeError(f"Error when creating config file for {self.node.name()}[{self.node.type()}].") from e
             kernel_include_list.append(path_to_definition)
 
         return self.include_list + kernel_include_list
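The wrapper keeps the original Jinja/IO error reachable through `__cause__`; a self-contained sketch of the same chaining pattern:

```python
try:
    try:
        raise FileNotFoundError("missing.jinja")  # stand-in for the Jinja failure
    except Exception as e:
        raise RuntimeError("Error when creating config file for conv1[Conv].") from e
except RuntimeError as err:
    print(err)            # the node-level context
    print(err.__cause__)  # the underlying cause: missing.jinja
```
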
diff --git a/aidge_core/export_utils/scheduler_export.py b/aidge_core/export_utils/scheduler_export.py
index 6829832feda7af7b2c808df4cd430fc77b37b3cb..df0b4a385327e4bdccd6fe4de46043d151658dbd 100644
--- a/aidge_core/export_utils/scheduler_export.py
+++ b/aidge_core/export_utils/scheduler_export.py
@@ -6,7 +6,7 @@ from aidge_core.export_utils import ExportLib, generate_file, copy_file
 from typing import List, Tuple
 
 
-def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib = None, memory_manager=None, memory_manager_args=None) -> None:
+def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib = None, memory_manager=None, memory_manager_args=None, labels: bool = False) -> None:
         graphview = scheduler.graph_view()
         export_folder = Path().absolute() / export_folder_path
 
@@ -114,6 +114,7 @@ def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib =
             headers=set(list_configs),
             actions=list_actions,
             mem_ctype=inputs_dtype[0],  # Legacy behavior ...
+            mem_section=export_lib.mem_section,
             peak_mem=peak_mem,
             inputs_name=inputs_name,
             inputs_dtype=inputs_dtype,
@@ -136,14 +137,20 @@ def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib =
         if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
             raise RuntimeError("FATAL: Output args list does not have the same length this is an internal bug.")
 
+        if export_lib is not None and export_lib.main_jinja_path is not None:
+            main_jinja_path = export_lib.main_jinja_path
+        else:
+            main_jinja_path = str(ROOT / "templates" / "main.jinja")
+
         generate_file(
             str(export_folder / "main.cpp"),
-            str(ROOT / "templates" / "main.jinja"),
+            main_jinja_path,
             func_name=func_name,
             inputs_name=inputs_name,
             outputs_name=outputs_name,
             outputs_dtype=outputs_dtype,
-            outputs_size=outputs_size
+            outputs_size=outputs_size,
+            labels=labels
         )
 
         if export_lib is not None:
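A hypothetical call site for the new `labels` flag (the one-operator model is a placeholder; a real export also needs an installed backend such as aidge_backend_cpu and an ExportLib with registered export nodes):

```python
import aidge_core
from aidge_core.export_utils import scheduler_export

model = aidge_core.sequential([aidge_core.ReLU(name="relu0")])
model.compile("cpu", aidge_core.dtype.float32, dims=[[1, 8]])

scheduler = aidge_core.SequentialScheduler(model)
scheduler.generate_scheduling()

scheduler_export(
    scheduler,
    "my_export",
    export_lib=None,  # None falls back to the default ROOT/templates/main.jinja
    labels=True,      # forwarded to the main template as the new `labels` variable
)
```
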
diff --git a/aidge_core/export_utils/templates/forward.jinja b/aidge_core/export_utils/templates/forward.jinja
index aec5867b132e0ece9a8a39d9cf5daadeb25ea24a..fde4b2a1392c4ada353af06246951e26c6236df6 100644
--- a/aidge_core/export_utils/templates/forward.jinja
+++ b/aidge_core/export_utils/templates/forward.jinja
@@ -14,7 +14,11 @@
 {%- endfor %}
 
 // Memory block
+{%- if mem_section is none %}
 static {{mem_ctype}} mem[{{peak_mem}}];
+{%- else %}
+static {{mem_ctype}} mem[{{peak_mem}}] __attribute__((section("{{ mem_section }}")));
+{%- endif %}
 
 {# Forward function #}
 {#- Support multiple inputs with different datatypes and multiple outputs with different datatypes -#}
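A quick way to see what the new branch emits, rendering an inline copy of the memory-block snippet with jinja2 (a standalone sketch, not the actual export flow):

```python
from jinja2 import Template

snippet = Template(
    '{%- if mem_section is none %}\n'
    'static {{mem_ctype}} mem[{{peak_mem}}];\n'
    '{%- else %}\n'
    'static {{mem_ctype}} mem[{{peak_mem}}] __attribute__((section("{{ mem_section }}")));\n'
    '{%- endif %}'
)

print(snippet.render(mem_ctype="float", peak_mem=1024, mem_section=None))
# static float mem[1024];
print(snippet.render(mem_ctype="float", peak_mem=1024, mem_section=".ccmram"))
# static float mem[1024] __attribute__((section(".ccmram")));
```
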
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index dc0a12c76c8c72d656229ec90a81f1724f88faf7..6088cf31c91cdd1b939bb4508850a9d0f798e5d2 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -41,6 +41,7 @@
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/BitShift.hpp"
+#include "aidge/operator/Clip.hpp"
 #include "aidge/operator/Concat.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
@@ -51,6 +52,7 @@
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/GlobalAveragePooling.hpp"
 #include "aidge/operator/GridSample.hpp"
+#include "aidge/operator/Heaviside.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/MetaOperator.hpp"
diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..aa37b50320e9deaa94e83ff7cc3578745b560228
--- /dev/null
+++ b/include/aidge/operator/Clip.hpp
@@ -0,0 +1,122 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_CLIP_H_
+#define AIDGE_CORE_OPERATOR_CLIP_H_
+
+#include <memory>
+#include <vector>
+#include <limits>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+enum class ClipAttr { Min, Max };
+
+
+class Clip_Op : public OperatorTensor,
+    public Registrable<Clip_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Clip_Op&)>> {
+
+public:
+    static const std::string Type;
+private:
+    using Attributes_ = StaticAttributes<ClipAttr, float, float>;
+    template <ClipAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Clip_Op() = delete;
+    Clip_Op(float min, float max)
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData}, 1),
+          mAttributes(std::make_shared<Attributes_>(attr<ClipAttr::Min>(min), attr<ClipAttr::Max>(max)))
+    {}
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Clip_Op(const Clip_Op& op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
+    {
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Clip_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::clone()
+     */
+    std::shared_ptr<Operator> clone() const override
+    {
+        return std::make_shared<Clip_Op>(*this);
+    }
+    bool dimsForwarded() const override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    /**
+     * @brief Setter to specify the backend to use
+     */
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    /**
+     * @brief Getter/setter for the min clip value (mutable reference).
+     * @return float&
+     */
+    inline float& min() const noexcept { return mAttributes->getAttr<ClipAttr::Min>(); }
+    /**
+     * @brief Getter/setter for the max clip value (mutable reference).
+     * @return float&
+     */
+    inline float& max() const noexcept { return mAttributes->getAttr<ClipAttr::Max>(); }
+
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input","min_empty_tensor","max_empty_tensor"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+/**
+ * @brief The Clip operator limits tensor values to a specified range, defined by the min
+ * and max parameters (scalar Tensors of empty shape). Values outside this range are
+ * replaced by the corresponding limit values. When 'min' is greater than 'max', the Clip
+ * operator sets all the input values to the value of 'max'.
+ * @param[in] name Name of the node.
+ * @param[in] min Min clip value as attribute.
+ * @param[in] max Max clip value as attribute.
+ * @return std::shared_ptr<Node>
+ */
+std::shared_ptr<Aidge::Node> Clip(
+    const std::string &name = "",
+    float min = std::numeric_limits<float>::lowest(),
+    float max = std::numeric_limits<float>::max()
+    );
+
+}  // namespace Aidge
+namespace {
+template <>
+const char* const EnumStrings<Aidge::ClipAttr>::data[]
+    = {"min", "max"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_CLIP_H_ */
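A hedged Python-side sketch of the operator this header declares (the binding itself is added below in pybind_Clip.cpp); Clip clamps every element of input#0 into [min, max]:

```python
import aidge_core

clip = aidge_core.Clip(name="clip0", min=0.0, max=6.0)  # a ReLU6-style clamp
op = clip.get_operator()
print(op.min(), op.max())  # 0.0 6.0 (mutable references on the C++ side)
```
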
diff --git a/include/aidge/operator/Heaviside.hpp b/include/aidge/operator/Heaviside.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..65c7f341a1c5aa12eeada2165e459cc1c933e327
--- /dev/null
+++ b/include/aidge/operator/Heaviside.hpp
@@ -0,0 +1,107 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_HEAVISIDE_H_
+#define AIDGE_CORE_OPERATOR_HEAVISIDE_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class HeavisideAttr {
+    /**
+     * @brief The value used in the output tensor when the input is 0.
+     */
+    Value
+};
+
+class Heaviside_Op
+    : public OperatorTensor,
+      public Registrable<
+          Heaviside_Op,
+          std::string,
+          std::function<std::shared_ptr<OperatorImpl>(const Heaviside_Op &)>> {
+  private:
+    using Attributes_ = StaticAttributes<HeavisideAttr, float>;
+    template <HeavisideAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+  public:
+    static const std::string Type;
+
+    /**
+     * @brief Compute the Heaviside step function for each element of the first input.
+     *
+     * The Heaviside step function is defined as:
+     *
+     * \f[
+     * heaviside(input, values) = \begin{cases}
+     *     0      & \text{if input } < 0 \\
+     *     values & \text{if input } = 0 \\
+     *     1      & \text{if input } > 0
+     * \end{cases}
+     * \f]
+     */
+    Heaviside_Op(float value);
+
+    Heaviside_Op(const Heaviside_Op &op);
+
+    std::shared_ptr<Operator> clone() const override;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input", "data_values"};
+    }
+
+    static const std::vector<std::string> getOutputsName() {
+        return {"output"};
+    }
+
+    inline std::shared_ptr<Attributes> attributes() const override {
+        return mAttributes;
+    }
+    inline float &value() const {
+        return mAttributes->template getAttr<HeavisideAttr::Value>();
+    }
+};
+
+/**
+ * @brief Create a Heaviside node.
+ *
+ * Initializes a Heaviside node that computes the Heaviside step function for each element 
+ * of the input tensor, using the specified value for inputs equal to zero.
+ *
+ * @param value The value used in the output tensor when the input is 0.
+ * @param name  Optional. The name of the node.
+ * 
+ * @return A shared pointer to a Node representing the Heaviside operation.
+ */
+std::shared_ptr<Node> Heaviside(float value, const std::string &name = "");
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::HeavisideAttr>::data[] = {"value"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_HEAVISIDE_H_ */
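A short usage sketch of the matching factory (bound below in pybind_Heaviside.cpp); outputs are 0 for negative inputs, `value` for zeros, and 1 for positive inputs:

```python
import aidge_core

step = aidge_core.Heaviside(value=0.5, name="step0")
print(step.type())  # Heaviside
```
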
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 155627f2cfd3173ccfbbe2a1ce8c23784cd06d71..beeca8d72a2067ed2dfcd98cf3d9ff0cb7b6ff3a 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -25,6 +25,11 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+/**
+ * @brief implementation of the operator Transpose.
+ * @note Since this operator implementation is agnostic to the backend it is
+ * located here instead of in aidge_backend.
+ */
 class TransposeImpl : public OperatorImpl {
 public:
     TransposeImpl(const Operator& op, const std::string& backend = "")
@@ -33,8 +38,22 @@ public:
     void forward() override;
 };
 
-enum class TransposeAttr { OutputDimsOrder };
+enum class TransposeAttr {
+    /**
+     * @brief Order of the output dims from the input dims. If left empty,
+     * the dimensions of the input will be reversed.
+     */
+    OutputDimsOrder
+};
 
+/**
+ * @brief This operator transposes the axes of a given tensor.
+ * input#0 : Tensor to transpose
+ * @example Calling transpose() on a tensor of dimensions [1, 2, 3] with OutputDimsOrder=(1,0,2) results
+ * in a tensor of dims [2, 1, 3].
+ * @example Calling transpose() on a tensor of dimensions [1,2,3,4] with an empty OutputDimsOrder vector
+ * results in a tensor of dims [4,3,2,1].
+ */
 class Transpose_Op : public OperatorTensor,
                 public Registrable<Transpose_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Transpose_Op&)>> {
 
@@ -50,6 +69,10 @@ private:
 public:
     Transpose_Op() = delete;
 
+    /**
+     * @brief Constructor for the Transpose operator.
+     * @param[in] outputDimsOrder Axes permutation order. By default (empty), the axes are reversed.
+     */
     Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder);
 
     /**
@@ -70,6 +93,9 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    /**
+     * @brief New order of the axes. If left empty, the axes are reversed.
+     */
     inline std::vector<DimSize_t>& outputDimsOrder() const noexcept { return mAttributes -> getAttr<TransposeAttr::OutputDimsOrder>(); }
 
     static const std::vector<std::string> getInputsName(){
@@ -80,8 +106,8 @@ public:
     }
 };
 
-std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder,
-                                           const std::string& name = "");
+std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder = {},
+                                const std::string& name = "");
 }  // namespace Aidge
 
 namespace {
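With the defaulted argument, an empty permutation now mirrors numpy.transpose() with no axes given; a hedged sketch via the Python binding:

```python
import aidge_core

t_default = aidge_core.Transpose(name="t0")         # [] -> reverse all axes
t_explicit = aidge_core.Transpose([1, 0, 2], "t1")  # explicit permutation
# For an input of dims [1, 2, 3]: t_default yields [3, 2, 1], t_explicit [2, 1, 3].
```
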
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 7e29cbb04f63bf99d86f63004dfede452a7a8ce0..fe606cfb557042d581e09da7419d80841d1dc2d4 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -315,6 +315,7 @@ void init_Tensor(py::module& m){
     .def(py::self - py::self)
     .def(py::self * py::self)
     .def(py::self / py::self)
+    .def("clone", &Tensor::clone)
 	.def("sqrt", &Tensor::sqrt)
     .def("set_datatype", &Tensor::setDataType, py::arg("datatype"), py::arg("copyCast") = true)
     .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true)
@@ -334,8 +335,8 @@ void init_Tensor(py::module& m){
     .def("cpy_transpose", (void (Tensor::*)(const Tensor& src, const std::vector<DimSize_t>& transpose)) &Tensor::copyTranspose, py::arg("src"), py::arg("transpose"))
 
     .def("__str__", [](Tensor& b) {
-        if (b.empty()) {
-            return std::string("{}");
+        if (b.empty() && b.undefined()) {
+                return std::string("{}");
         } else {
             return b.toString();
         }
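A minimal sketch of the newly bound clone(), which yields an independent copy instead of a view (numpy-backed construction assumed available):

```python
import numpy as np
import aidge_core

t = aidge_core.Tensor(np.array([1, 2, 3], dtype=np.int32))
c = t.clone()  # deep copy; later writes to t do not affect c
print(c)       # prints the values; "{}" is now reserved for undefined tensors
```
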
diff --git a/python_binding/filler/pybind_Filler.cpp b/python_binding/filler/pybind_Filler.cpp
index a85c0d6cd6fa0367dfc26328d214c99a4288a3be..cb0a823639fd7a2f5b9f47be2bd78b9a7e41f2d4 100644
--- a/python_binding/filler/pybind_Filler.cpp
+++ b/python_binding/filler/pybind_Filler.cpp
@@ -30,11 +30,17 @@ void init_Filler(py::module &m) {
          [](std::shared_ptr<Tensor> tensor, py::object value) -> void {
              switch (tensor->dataType()) {
                  case DataType::Float64:
-                     constantFiller<double>(tensor, value.cast<double>());
+                     constantFiller<cpptype_t<DataType::Float64>>(tensor, value.cast<cpptype_t<DataType::Float64>>());
                      break;
                  case DataType::Float32:
-                     constantFiller<float>(tensor, value.cast<float>());
+                     constantFiller<cpptype_t<DataType::Float32>>(tensor, value.cast<cpptype_t<DataType::Float32>>());
                      break;
+                case DataType::Int64:
+                    constantFiller<cpptype_t<DataType::Int64>>(tensor, value.cast<cpptype_t<DataType::Int64>>());
+                    break;
+                case DataType::Int32:
+                    constantFiller<cpptype_t<DataType::Int32>>(tensor, value.cast<cpptype_t<DataType::Int32>>());
+                    break;
                  default:
                      AIDGE_THROW_OR_ABORT(
                          py::value_error,
@@ -44,14 +50,14 @@ void init_Filler(py::module &m) {
          py::arg("tensor"), py::arg("value"))
         .def(
             "normal_filler",
-            [](std::shared_ptr<Tensor> tensor, double mean,
-               double stdDev) -> void {
+            [](std::shared_ptr<Tensor> tensor, py::object mean,
+               py::object stdDev) -> void {
                 switch (tensor->dataType()) {
                     case DataType::Float64:
-                        normalFiller<double>(tensor, mean, stdDev);
+                        normalFiller<cpptype_t<DataType::Float64>>(tensor, mean.cast<cpptype_t<DataType::Float64>>(), stdDev.cast<cpptype_t<DataType::Float64>>());
                         break;
                     case DataType::Float32:
-                        normalFiller<float>(tensor, mean, stdDev);
+                        normalFiller<cpptype_t<DataType::Float32>>(tensor, mean.cast<cpptype_t<DataType::Float32>>(), stdDev.cast<cpptype_t<DataType::Float32>>());
                         break;
                     default:
                         AIDGE_THROW_OR_ABORT(
@@ -60,23 +66,39 @@ void init_Filler(py::module &m) {
                 }
             },
             py::arg("tensor"), py::arg("mean") = 0.0, py::arg("stdDev") = 1.0)
-        .def(
-            "uniform_filler",
-            [](std::shared_ptr<Tensor> tensor, double min, double max) -> void {
+        .def("uniform_filler", [] (std::shared_ptr<Tensor> tensor, py::object min, py::object max) -> void {
+            if (py::isinstance<py::int_>(min) && py::isinstance<py::int_>(max)) {
                 switch (tensor->dataType()) {
-                    case DataType::Float64:
-                        uniformFiller<double>(tensor, min, max);
+                    case DataType::Int32:
+                        uniformFiller<std::int32_t>(tensor, min.cast<std::int32_t>(), max.cast<std::int32_t>());
+                        break;
+                    case DataType::Int64:
+                        uniformFiller<std::int64_t>(tensor, min.cast<std::int64_t>(), max.cast<std::int64_t>());
+                        break;
+                    default:
+                        AIDGE_THROW_OR_ABORT(
+                            py::value_error,
+                            "Data type is not supported for Uniform filler.");
                         break;
+                }
+            } else if (py::isinstance<py::float_>(min) && py::isinstance<py::float_>(max)) {
+                switch (tensor->dataType()) {
                     case DataType::Float32:
-                        uniformFiller<float>(tensor, min, max);
+                        uniformFiller<float>(tensor, min.cast<float>(), max.cast<float>());
+                        break;
+                    case DataType::Float64:
+                        uniformFiller<double>(tensor, min.cast<double>(), max.cast<double>());
                         break;
                     default:
                         AIDGE_THROW_OR_ABORT(
                             py::value_error,
                             "Data type is not supported for Uniform filler.");
+                        break;
                 }
-            },
-            py::arg("tensor"), py::arg("min"), py::arg("max"))
+            } else {
+                AIDGE_THROW_OR_ABORT(py::value_error, "min and max must both be ints or both be floats.");
+            }
+        }, py::arg("tensor"), py::arg("min"), py::arg("max"))
         .def(
             "xavier_uniform_filler",
             [](std::shared_ptr<Tensor> tensor, py::object scaling,
diff --git a/python_binding/graph/pybind_Matching.cpp b/python_binding/graph/pybind_Matching.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..94f2471c3f234c1e401484c099a3815dd26d3c30
--- /dev/null
+++ b/python_binding/graph/pybind_Matching.cpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <memory>
+#include <string>
+#include "aidge/graph/Matching.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+//#include "aidge/data/Data.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+void init_SinglePassGraphMatching(py::module& m) {
+    py::class_<Aidge::SinglePassGraphMatching::MatchingResult>(m,"MatchingResult")
+        .def(py::init<>())
+        .def_readwrite("graph", &Aidge::SinglePassGraphMatching::MatchingResult::graph)
+        .def_readwrite("anchors", &Aidge::SinglePassGraphMatching::MatchingResult::anchors)
+        .def_readwrite("startNode", &Aidge::SinglePassGraphMatching::MatchingResult::startNode);
+
+    py::class_<Aidge::SinglePassGraphMatching>(m, "SinglePassGraphMatching")
+        .def(py::init<std::shared_ptr<GraphView>>(), py::arg("graph"))
+        .def("match", 
+        [](Aidge::SinglePassGraphMatching& self, const std::string& query, bool disjoint){
+            // Note: Need to convert set to vector has MatchingResult is not hashable and 
+            // set<MatchingResult> cannot be binded
+            std::set<Aidge::SinglePassGraphMatching::MatchingResult> set_res = self.match(query, disjoint);
+            std::vector<Aidge::SinglePassGraphMatching::MatchingResult> vec_res(set_res.begin(), set_res.end());
+            return vec_res;
+        },
+        py::arg("query"), py::arg("disjoint") = false, 
+        R"mydelimiter( Matches a query by direct, single-pass parse and match.
+        :param query: The query string to search.
+        :param disjoint: If true, only keep the longest disjoint matches.
+        :return: A set of MatchingResult instances.
+        )mydelimiter");
+}
+}  // namespace Aidge
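A hedged usage sketch of the new binding (the query grammar is SinglePassGraphMatching's, e.g. "Conv2D->ReLU" for a Conv2D feeding a ReLU; the operator type strings are assumptions):

```python
import aidge_core

model = aidge_core.sequential([
    aidge_core.Conv2D(3, 8, [3, 3], name="conv0"),
    aidge_core.ReLU(name="relu0"),
])
matches = aidge_core.SinglePassGraphMatching(model).match("Conv2D->ReLU")
for res in matches:  # a list, since MatchingResult cannot be bound as a set
    print(res.graph.get_nodes())
```
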
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index d8e77bb259cbcbae7940a09dc405bb8f50b5b79b..35f6327444a874d8f5c2e94da6520244e095263a 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -34,6 +34,11 @@ void init_Node(py::module& m) {
     Type of the node.
     )mydelimiter")
 
+    .def("attributes", &Node::attributes,
+    R"mydelimiter(
+    Get attributes.
+    )mydelimiter")
+
     .def("get_operator", &Node::getOperator,
     R"mydelimiter(
     Get the Operator object of the Node.
@@ -48,7 +53,7 @@ void init_Node(py::module& m) {
     :rtype: str
     )mydelimiter")
 
-    .def("create_unique_name", &Node::createUniqueName, py::arg("base_name"), 
+    .def("create_unique_name", &Node::createUniqueName, py::arg("base_name"),
     R"mydelimiter(
     Given a base name, generate a new name which is unique in all the GraphViews containing this node.
 
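A small sketch of the new accessor (using the Clip node added elsewhere in this patch; the attribute names are those declared by each operator):

```python
import aidge_core

node = aidge_core.Clip(name="clip0", min=0.0, max=6.0)
attrs = node.attributes()  # attributes object for this node's operator
```
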
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 9a1bdacd169beebc843448d23bdaf8502de437b4..43b44eb7300072e501d33829b88537850beef37a 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -23,7 +23,7 @@ namespace Aidge {
 
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D");
+    const std::string pyClassName("BatchNorm" + std::to_string(DIM) + "DOp");
     py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(), py::multiple_inheritance())
         .def(py::init<float, float>(),
diff --git a/python_binding/operator/pybind_Clip.cpp b/python_binding/operator/pybind_Clip.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..27c47811a3c192f1f657f339fe4caf047a944224
--- /dev/null
+++ b/python_binding/operator/pybind_Clip.cpp
@@ -0,0 +1,48 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Clip.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Clip(py::module& m) {
+    py::class_<Clip_Op, std::shared_ptr<Clip_Op>, OperatorTensor>(m, "ClipOp", py::multiple_inheritance())
+    .def(py::init<float, float>(), py::arg("min") = std::numeric_limits<float>::lowest(), py::arg("max") = std::numeric_limits<float>::max())
+    .def_static("get_inputs_name", &Clip_Op::getInputsName)
+    .def_static("get_outputs_name", &Clip_Op::getOutputsName)
+    .def("min",&Clip_Op::min,py::return_value_policy::reference_internal)
+    .def("max",&Clip_Op::max,py::return_value_policy::reference_internal);
+    
+    
+    declare_registrable<Clip_Op>(m, "ClipOp");
+   
+    m.def("Clip", &Clip,py::arg("name") = "",
+    py::arg("min")= std::numeric_limits<float>::lowest(),
+    py::arg("max")= std::numeric_limits<float>::max(),
+    R"mydelimiter(ClipOp is a tensor operator that performs a clipping operation on tensor elements.
+        This class allows limiting tensor values to a specified range, defined by the min 
+        and max parameters. Values outside this range are replaced by the corresponding 
+        limit values. When 'min' is greater than 'max', the clip operator sets all the 'input' values to the value of 'max'
+        :param min: minimum clipping value.
+        :type min: float
+        :param max: maximum clipping value.
+        :type max: float
+        :param name: name of the node.
+    )mydelimiter");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_DepthToSpace.cpp b/python_binding/operator/pybind_DepthToSpace.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..efb8a7406774a5b071e8ebc3bda69d6ec773b50a
--- /dev/null
+++ b/python_binding/operator/pybind_DepthToSpace.cpp
@@ -0,0 +1,59 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/DepthToSpace.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/Types.h"
+
+static typename Aidge::DepthToSpace_Op::Mode stringToMode(const std::string& mode) {
+    static const std::unordered_map<std::string, typename Aidge::DepthToSpace_Op::Mode> map = {
+        {"DCR", Aidge::DepthToSpace_Op::Mode::DCR},
+        {"CRD", Aidge::DepthToSpace_Op::Mode::CRD}
+    };
+    // at() throws for an unknown mode instead of silently default-inserting DCR
+    return map.at(mode);
+}
+
+namespace py = pybind11;
+namespace Aidge {
+
+void declare_DepthToSpace(py::module &m) {
+
+    py::class_<DepthToSpace_Op, std::shared_ptr<DepthToSpace_Op>, OperatorTensor> (m, "DepthToSpaceOp", py::multiple_inheritance())
+    .def(py::init([](const std::uint32_t blockSize, const std::string& mode) {
+            return new DepthToSpace_Op(blockSize, stringToMode(mode));
+        }), py::arg("block_size"), py::arg("mode") = "CRD")
+    .def_static("get_inputs_name", &DepthToSpace_Op::getInputsName)
+    .def_static("get_outputs_name", &DepthToSpace_Op::getOutputsName)
+    .def_readonly_static("Type", &DepthToSpace_Op::Type)
+    .def("__repr__", [](DepthToSpace_Op& b) {
+        return fmt::format("Operator(type='{}')", b.Type);
+    });
+
+  declare_registrable<DepthToSpace_Op>(m, "DepthToSpaceOp");
+
+  m.def("DepthToSpace", [](
+            const std::uint32_t blockSize,
+            const std::string& mode,
+            const std::string& name) {
+        return DepthToSpace(blockSize, stringToMode(mode), name);
+    }, py::arg("block_size"), py::arg("mode") = "CRD", py::arg("name") = "");
+}
+
+void init_DepthToSpace(py::module &m) {
+  declare_DepthToSpace(m);
+}
+
+} // namespace Aidge
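Usage sketch for the new binding; block_size must divide the channel count, and mode picks the DCR/CRD reordering (an unknown mode now raises, via map.at above):

```python
import aidge_core

d2s = aidge_core.DepthToSpace(block_size=2, mode="CRD", name="d2s0")
print(aidge_core.DepthToSpaceOp.Type)  # DepthToSpace
```
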
diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp
index 6d6c03b82ad4f905c41bb0cf849fc4e05fda4cb2..69454c2abfa56a16dbc3f3638d68e98ce93d2538 100644
--- a/python_binding/operator/pybind_GridSample.cpp
+++ b/python_binding/operator/pybind_GridSample.cpp
@@ -55,7 +55,7 @@ void declare_GridSampleOp(py::module &m) {
             return new GridSample_Op(stringToInterpolationMode(mode), stringToPaddingMode(padding_mode), align_corners);
         }), py::arg("mode") = "linear",
             py::arg("padding_mode") = "zeros",
-            py::arg("alogn_corners") = false)
+            py::arg("align_corners") = false)
         .def_static("get_inputs_name", &GridSample_Op::getInputsName)
         .def_static("get_outputs_name", &GridSample_Op::getOutputsName)
         .def_readonly_static("Type", &GridSample_Op::Type)
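With the keyword fixed, the flag can finally be passed by name (the Python class name GridSampleOp is assumed from the declare function):

```python
import aidge_core

gs = aidge_core.GridSampleOp(mode="linear", padding_mode="zeros", align_corners=True)
```
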
diff --git a/python_binding/operator/pybind_Heaviside.cpp b/python_binding/operator/pybind_Heaviside.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cbc2502aac018927c544a57f343a6305ee2bd86f
--- /dev/null
+++ b/python_binding/operator/pybind_Heaviside.cpp
@@ -0,0 +1,56 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Heaviside.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+
+namespace Aidge {
+
+void init_Heaviside(py::module &m) {
+    py::class_<Heaviside_Op, std::shared_ptr<Heaviside_Op>, OperatorTensor>(
+        m,
+        "HeavisideOp",
+        py::multiple_inheritance(),
+         R"mydelimiter(
+          Initialize an Heaviside node. This node will compute a heaviside step function 
+          on each element of the input tensor.
+          heaviside(input, values) = { 0  if input < 0 
+                                     { values if input == 0
+                                     { 1 if input > 0
+
+          :param value : The value use for the output tensor when input is 0.
+          :type value : float
+          :param name : Name of the node.
+          )mydelimiter")
+        .def(py::init<float>(), py::arg("value"))
+        .def_static("get_inputs_name", &Heaviside_Op::getInputsName)
+        .def_static("get_outputs_name", &Heaviside_Op::getOutputsName)
+        .def_readonly_static("Type", &Heaviside_Op::Type);
+
+    declare_registrable<Heaviside_Op>(m, "HeavisideOp");
+    m.def("Heaviside", &Heaviside, py::arg("value"), py::arg("name") = "",
+            R"mydelimiter(
+          Initialize an Heaviside node. This node will compute a heaviside step function 
+          on each element of the input tensor.
+          heaviside(input, values) = { 0  if input < 0 
+                                     { values if input == 0
+                                     { 1 if input > 0
+
+          :param value : The value use for the output tensor when input is 0.
+          :type value : float
+          :param name : Name of the node.
+          )mydelimiter");
+}
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index 930dd95f3c3e4b10d2b4f8b496dfbbbcc6822050..20794a15585529e10a83d2bf6fa7c18edfbde3fa 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -28,13 +28,26 @@ namespace Aidge {
 void declare_Transpose(py::module &m) {
   const std::string pyClassName("TransposeOp");
   py::class_<Transpose_Op, std::shared_ptr<Transpose_Op>, OperatorTensor>(
-    m, "TransposeOp", py::multiple_inheritance())
-    .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order"))
+    m, "TransposeOp", py::multiple_inheritance(),
+      R"mydelimiter(
+		      Initialize transpose operator
+		          :param output_dims_order : axes permutation order, must be of rank = r and values between [0;r-1]
+					with r = input_tensor.nbDims()
+		:type output_dims_order : :py:class: List[Int]
+		)mydelimiter")
+    .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order")=std::vector<std::size_t>())
     .def_static("get_inputs_name", &Transpose_Op::getInputsName)
     .def_static("get_outputs_name", &Transpose_Op::getOutputsName)
     .def_readonly_static("Type", &Transpose_Op::Type);
   declare_registrable<Transpose_Op>(m, pyClassName);
-  m.def("Transpose", &Transpose, py::arg("output_dims_order"), py::arg("name") = "");
+  m.def("Transpose", &Transpose, py::arg("output_dims_order")=std::vector<std::size_t>(), py::arg("name") = "",
+  R"mydelimiter(
+    Initialize a node containing a transpose operator.
+	:param output_dims_order : axes permutation order, must be of rank = r and values between [0;r-1]
+					with r = input_tensor.nbDims()
+	:type output_dims_order : :py:class: List[Int]
+    :param name : name of the node.
+)mydelimiter");
 }
 
 void init_Transpose(py::module &m) {
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 229aba25366e729dbd0dd5aefe7b7562fb7a41cf..c287314f25c90e6bf8962e31637c9c2990d4db9b 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -36,10 +36,12 @@ void init_Atan(py::module&);
 void init_AvgPooling(py::module&);
 void init_BatchNorm(py::module&);
 void init_BitShift(py::module&);
+void init_Clip(py::module&);
 void init_Concat(py::module&);
 void init_ConstantOfShape(py::module&);
 void init_Conv(py::module&);
 void init_ConvDepthWise(py::module&);
+void init_DepthToSpace(py::module&);
 void init_Div(py::module&);
 void init_Erf(py::module&);
 void init_FC(py::module&);
@@ -47,6 +49,7 @@ void init_Gather(py::module&);
 void init_GenericOperator(py::module&);
 void init_GlobalAveragePooling(py::module&);
 void init_GridSample(py::module&);
+void init_Heaviside(py::module&);
 void init_Identity(py::module&);
 void init_LeakyReLU(py::module&);
 void init_MatMul(py::module&);
@@ -81,6 +84,7 @@ void init_Node(py::module&);
 void init_GraphView(py::module&);
 void init_OpArgs(py::module&);
 void init_Connector(py::module&);
+void init_SinglePassGraphMatching(py::module&);
 
 void init_GraphRegex(py::module&);
 void init_MatchSolution(py::module&);
@@ -108,6 +112,7 @@ void init_Aidge(py::module& m) {
     init_GraphView(m);
     init_OpArgs(m);
     init_Connector(m);
+    init_SinglePassGraphMatching(m);
 
     init_OperatorImpl(m);
     init_Log(m);
@@ -122,10 +127,12 @@ void init_Aidge(py::module& m) {
     init_AvgPooling(m);
     init_BatchNorm(m);
     init_BitShift(m);
+    init_Clip(m);
     init_Concat(m);
     init_Conv(m);
     init_ConvDepthWise(m);
     init_ConstantOfShape(m);
+    init_DepthToSpace(m);
     init_Div(m);
     init_Erf(m);
     init_FC(m);
@@ -133,6 +140,7 @@ void init_Aidge(py::module& m) {
     init_GenericOperator(m);
     init_GlobalAveragePooling(m);
     init_GridSample(m);
+    init_Heaviside(m);
     init_Identity(m);
     init_LeakyReLU(m);
     init_MatMul(m);
diff --git a/src/filler/ConstantFiller.cpp b/src/filler/ConstantFiller.cpp
index 1e992f4a192c2fd629b2c813c902b127f29a2b02..b2118866f92290103d50290085c7675215a4d997 100644
--- a/src/filler/ConstantFiller.cpp
+++ b/src/filler/ConstantFiller.cpp
@@ -39,6 +39,7 @@ void Aidge::constantFiller(std::shared_ptr<Aidge::Tensor> tensor, T constantValu
     tensor->copyCastFrom(tensorWithValues);
 }
 
-
+template void Aidge::constantFiller<std::int32_t>(std::shared_ptr<Aidge::Tensor>, std::int32_t);
+template void Aidge::constantFiller<std::int64_t>(std::shared_ptr<Aidge::Tensor>, std::int64_t);
 template void Aidge::constantFiller<float>(std::shared_ptr<Aidge::Tensor>, float);
 template void Aidge::constantFiller<double>(std::shared_ptr<Aidge::Tensor>, double);
diff --git a/src/filler/UniformFiller.cpp b/src/filler/UniformFiller.cpp
index a942f59d717fd8d7b541ee28868a7fb9f2e7cd95..1951fcc623612bd688048fcc5fb71526032b2a4a 100644
--- a/src/filler/UniformFiller.cpp
+++ b/src/filler/UniformFiller.cpp
@@ -8,8 +8,9 @@
  * SPDX-License-Identifier: EPL-2.0
  *
  ********************************************************************************/
+#include <cstdint>  // std::int32_t
 #include <memory>
-#include <random>  // normal_distribution, uniform_real_distribution
+#include <random>   // normal_distribution, uniform_real_distribution
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/filler/Filler.hpp"
@@ -19,10 +20,16 @@ template <typename T>
 void Aidge::uniformFiller(std::shared_ptr<Aidge::Tensor> tensor, T min, T max) {
     AIDGE_ASSERT(tensor->getImpl(),
                  "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type {} and {}",NativeType<T>::type, tensor->dataType());
 
 
-    std::uniform_real_distribution<T> uniformDist(min, max);
+     using DistType = typename std::conditional<
+        std::is_integral<T>::value,
+        std::uniform_int_distribution<T>,
+        std::uniform_real_distribution<T>
+    >::type;
+
+    DistType uniformDist(min, max);
 
     std::shared_ptr<Aidge::Tensor> cpyTensor;
     // Create cpy only if tensor not on CPU
@@ -42,3 +49,7 @@ template void Aidge::uniformFiller<float>(std::shared_ptr<Aidge::Tensor>, float,
                                           float);
 template void Aidge::uniformFiller<double>(std::shared_ptr<Aidge::Tensor>,
                                            double, double);
+template void Aidge::uniformFiller<std::int32_t>(std::shared_ptr<Aidge::Tensor>,
+                                                 std::int32_t, std::int32_t);
+template void Aidge::uniformFiller<std::int64_t>(std::shared_ptr<Aidge::Tensor>,
+                                                 std::int64_t, std::int64_t);
\ No newline at end of file
diff --git a/src/operator/Clip.cpp b/src/operator/Clip.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..10b864b54594c86ed1486611fdd91fd916f2291b
--- /dev/null
+++ b/src/operator/Clip.cpp
@@ -0,0 +1,93 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Clip.hpp"
+
+#include <memory>
+#include <string>
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/operator/Clip.hpp"
+
+const std::string Aidge::Clip_Op::Type = "Clip";
+
+bool Aidge::Clip_Op::dimsForwarded() const {
+    if ((getInput(1) && !getInput(1)->undefined())
+        || (getInput(2) && !getInput(2)->undefined()))
+    {
+        // output dims are data dependent
+        return false;
+    }
+
+    return OperatorTensor::dimsForwarded();
+}
+
+
+bool Aidge::Clip_Op::forwardDims(bool allowDataDependency)
+{
+    if (getInput(1))
+    {
+        if (this->min() != std::numeric_limits<float>::lowest())
+        {
+            Log::notice("{} : ignoring non-default min attribute because input#1 "
+                        "takes precedence",
+                        type());
+        }
+        if (!allowDataDependency) {
+            Log::warn("{} : unable to forwardDims() because output dims are data "
+                      "dependent on input#1",
+                      type());
+            return false;
+        }
+        std::shared_ptr<Tensor> fallback;
+        const auto& minV = mInputs[1]->refCastFrom(fallback, NativeType<float>::type, "cpu");
+        this->min() = *(static_cast<float*>(minV.getImpl()->hostPtr()));
+    }
+    if (getInput(2))
+    {
+        if (this->max() != std::numeric_limits<float>::max())
+        {
+            Log::notice("{} : ignoring non-default max attribute because input#2 "
+                        "takes precedence",
+                        type());
+        }
+        if (!allowDataDependency) {
+            Log::warn("{} : unable to forwardDims() because output dims are data "
+                      "dependent on input#2",
+                      type());
+            return false;
+        }
+        std::shared_ptr<Tensor> fallback;
+        const auto& maxV = mInputs[2]->refCastFrom(fallback, NativeType<float>::type, "cpu");
+        this->max() = *(static_cast<float*>(maxV.getImpl()->hostPtr()));
+    }
+    if (!inputsAssociated(false)) {
+        return false;
+    }
+    else if ((getInput(1) && !getInput(1)->empty()) || (getInput(2) && !getInput(2)->empty()))
+    {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Expected Input#1 and Input#2 to be scalar (Tensors of empty shapes)");
+    }
+    mOutputs[0]->resize(getInput(0)->dims());
+    return true;
+}
+
+void Aidge::Clip_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    mImpl = Registrar<Clip_Op>::create(name)(*this);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Clip_Op::getAvailableBackends() const {
+    return Registrar<Clip_Op>::getKeys();
+}
+
+std::shared_ptr<Aidge::Node> Aidge::Clip(const std::string &name, float min, float max)
+{
+    return std::make_shared<Node>(std::make_shared<Clip_Op>(min, max), name);
+}
\ No newline at end of file
diff --git a/src/operator/Heaviside.cpp b/src/operator/Heaviside.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9ecb3b436d8312ef479d6bc0592cfe372235fa25
--- /dev/null
+++ b/src/operator/Heaviside.cpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>   // std::size_t
+#include <memory>
+#include <stdexcept> // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Heaviside.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+// ----------------------------------------------------------- Heaviside_Op
+// class
+
+const std::string Heaviside_Op::Type = "Heaviside";
+
+Heaviside_Op::Heaviside_Op(float value)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+      mAttributes(
+          std::make_shared<Attributes_>(attr<HeavisideAttr::Value>(value))) {}
+
+Heaviside_Op::Heaviside_Op(const Heaviside_Op &op)
+    : OperatorTensor(op), mAttributes(op.mAttributes) {
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Heaviside_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Heaviside_Op::clone() const {
+    return std::make_shared<Heaviside_Op>(*this);
+}
+
+void Heaviside_Op::setBackend(const std::string &name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(Heaviside_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Heaviside_Op::getAvailableBackends() const {
+    return Registrar<Heaviside_Op>::getKeys();
+}
+
+// --------------------------------------------------------------- Free
+// functions
+
+NodePtr Heaviside(float value, const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Heaviside_Op>(value), name);
+}
+
+} // namespace Aidge
diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp
index ba762da5737e986941e0c72196503415f7af29b7..a0b5f2df52e373bd92dd57cc621318f2abbb45c9 100644
--- a/src/operator/Pad.cpp
+++ b/src/operator/Pad.cpp
@@ -34,9 +34,9 @@ bool Aidge::Pad_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
         const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
 
         for (std::size_t dim = 0; dim < DIM; ++dim) {
-            outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
+            outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[dim]
                                 + inputDims[dim+2]
-                                + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
+                                + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[DIM+dim];
         }
         outputDims[1] = inputDims[1];
         outputDims[0] = inputDims[0];
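The fix assumes borders stored as all begin values followed by all end values (ONNX-style) rather than interleaved begin/end pairs; a worked sketch of the output-dimension arithmetic under that assumption:

```python
DIM = 2
begin_end_borders = [1, 2, 3, 4]  # [begin_h, begin_w, end_h, end_w]
input_dims = [1, 3, 32, 32]       # N, C, H, W

output_dims = list(input_dims)
for dim in range(DIM):
    output_dims[dim + 2] = (begin_end_borders[dim]           # begin (was [2*dim])
                            + input_dims[dim + 2]
                            + begin_end_borders[DIM + dim])  # end (was [2*dim+1])
print(output_dims)  # [1, 3, 36, 38]
```
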
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 0cb1717f1c96c393b8845db129eee1429966cd98..d24b9c90927830db6f1c1256d133d871ba3dc37f 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -59,6 +59,15 @@ std::shared_ptr<Aidge::Operator> Aidge::Transpose_Op::clone() const {
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         AIDGE_ASSERT(!getInput(0)->empty(), "Not applicable on scalars.");
+        // If permutation vector is not given, reverse the dims of input tensor
+        if (outputDimsOrder().empty())
+        {
+            this->outputDimsOrder().resize(getInput(0)->nbDims());
+            std::iota(this->outputDimsOrder().rbegin(), this->outputDimsOrder().rend(), 0);
+        }
+
+        AIDGE_ASSERT(outputDimsOrder().size() == getInput(0)->nbDims(),
+                     "Permutation vector must have the same rank as input tensor.");
         std::vector<DimSize_t> outputDims;
         for (std::size_t i = 0; i < outputDimsOrder().size(); ++i) {
             outputDims.push_back(getInput(0)->dims()[outputDimsOrder()[i]]);
@@ -86,6 +95,6 @@ std::set<std::string> Aidge::Transpose_Op::getAvailableBackends() const {
 //////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Transpose(const std::vector<Aidge::DimSize_t> &outputDimsOrder,
-                                           const std::string& name) {
+                                              const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name);
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_Clip_Op.cpp b/unit_tests/operator/Test_Clip_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..51d6d248067c89be9703fd4d48f02347f285330a
--- /dev/null
+++ b/unit_tests/operator/Test_Clip_Op.cpp
@@ -0,0 +1,110 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Clip.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Clip_Op(forwardDims)", "[Clip][forwardDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    auto rd = Catch::Generators::Detail::getSeed;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Clip Operator
+    std::shared_ptr<Node> myClip = Clip();
+    auto op = std::static_pointer_cast<OperatorTensor>(myClip -> getOperator());
+
+    // Input tensor
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(Array2D<int,2,2> {
+            {
+                {1, 2},
+                {3, 4}
+            }
+        });
+    op -> associateInput(0,T0);
+    // Tensor representing the min value
+    std::shared_ptr<Tensor> Tmin = std::make_shared<Tensor>(2.0);
+    op -> associateInput(1,Tmin);
+
+    // Tensor representing the max value
+    std::shared_ptr<Tensor> Tmax = std::make_shared<Tensor>(4.0);
+    op -> associateInput(2,Tmax);
+    /**
+     * @todo Special case: scalar not handled yet by
+     * ``OperatorTensor::forwardDims()``
+     */
+    SECTION("Scalar") {
+        //We set every Input as a Scalar
+        T0->resize({});
+        Tmin->resize({});
+        Tmax->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims(true));
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Normal Input") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(Array2D<int,2,2> {
+        {
+            {1, 2},
+            {3, 4}
+        }
+        });
+        const std::size_t nb_dims = nbDimsDist(gen);
+        std::vector<std::size_t> dims(nb_dims);
+        for (std::size_t i = 0; i < nb_dims; ++i)
+        {
+            dims[i] = dimsDist(gen);
+        }
+        T0->resize(dims);
+        op->associateInput(0,T0);
+        REQUIRE_NOTHROW(op->forwardDims(true));
+        REQUIRE((op->getOutput(0)->dims()) == dims);
+    }
+
+    SECTION("Min and max attributes")
+    {
+        std::shared_ptr<Node> clip_attr = Clip("",-1.0,2.0);
+        auto opc = std::static_pointer_cast<OperatorTensor>(clip_attr -> getOperator());
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        opc -> associateInput(0,T0);
+        std::shared_ptr<Tensor> Tmin = std::make_shared<Tensor>(7);
+        opc-> associateInput(1,Tmin);
+        std::shared_ptr<Tensor> Tmax = std::make_shared<Tensor>(4);
+        opc -> associateInput(2,Tmax);
+
+        REQUIRE_NOTHROW(opc->forwardDims(true));
+        REQUIRE((opc->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Min and max attributes (No Input for min and max)")
+    {
+        std::shared_ptr<Node> clip = Clip("",-1.0,2.0);
+        auto opcl = std::static_pointer_cast<OperatorTensor>(clip -> getOperator());
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        opcl -> associateInput(0,T0);
+        REQUIRE_NOTHROW(opcl->forwardDims());
+        REQUIRE((opcl->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Heaviside_Op.cpp b/unit_tests/operator/Test_Heaviside_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d53268d1213730dc6202193d26b0531e781518f7
--- /dev/null
+++ b/unit_tests/operator/Test_Heaviside_Op.cpp
@@ -0,0 +1,70 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+#include <cstddef> // std::size_t
+#include <memory>
+#include <random>  // std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Heaviside.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+
+TEST_CASE("[core/operator] Heaviside_Op(forwardDims)",
+          "[Heaviside][forwardDims]") {
+
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    auto rd = Catch::Generators::Detail::getSeed;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Heaviside Operator
+    std::shared_ptr<Node> myHeaviside = Heaviside(0.5);
+    auto op =
+        std::static_pointer_cast<OperatorTensor>(myHeaviside->getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op->associateInput(0, T0);
+
+    SECTION("Scalar") {
+        // input 0
+        T0->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+
+    SECTION("+1-D Tensor") {
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_TransposeImpl.cpp b/unit_tests/operator/Test_TransposeImpl.cpp
index 18f0d68d87ac1ee66ffb1f24c4c130f9b020d56e..02b338dd265ceb4d9e03bbf86398cc8db38045df 100644
--- a/unit_tests/operator/Test_TransposeImpl.cpp
+++ b/unit_tests/operator/Test_TransposeImpl.cpp
@@ -128,6 +128,75 @@ TEST_CASE("[cpu/operator] Transpose(forward)") {
         op->setBackend("cpu");
         myTranspose->forward();
 
+        REQUIRE(*(op->getOutput(0)) == *output);
+    }
+    SECTION("Default permutation") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,2,3,1,4> {
+            {
+                {
+                    {
+                        {1, 2, 3, 4}
+                    },
+                    {
+                        {5, 6, 7, 8}
+                    },
+                    {
+                        {9, 10, 11, 12}
+                    }
+                },
+                {
+                    {
+                        {13, 14, 15, 16}
+                    },
+                    {
+                        {17, 18, 19, 20}
+                    },
+                    {
+                        {21, 22, 23, 24}
+                    }
+                }
+            }
+        });
+
+        std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array4D<int,4,1,3,2> {
+            {
+                {
+                        {
+                            { 1, 13},
+                            { 5, 17},
+                            { 9, 21}
+                        }
+                    },
+                    {
+                        {
+                            { 2, 14},
+                            { 6, 18},
+                            {10, 22}
+                        }
+                    },
+                    {
+                        {
+                            { 3, 15},
+                            { 7, 19},
+                            {11, 23}
+                        }
+                    },
+                    {
+                        {
+                            { 4, 16},
+                            { 8, 20},
+                            {12, 24}
+                        }
+                    }
+            }
+        });
+        std::shared_ptr<Node> myTranspose = Transpose({});
+        auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator());
+        op->associateInput(0,input);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myTranspose->forward();
+
         REQUIRE(*(op->getOutput(0)) == *output);
     }
 }