diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml
index da0d23c9de978ebcdbb370a6f4a92262829e05b9..73b85c8a409e675c849b9ca66557c63b5acf6359 100644
--- a/.gitlab/ci/build.gitlab-ci.yml
+++ b/.gitlab/ci/build.gitlab-ci.yml
@@ -12,6 +12,7 @@ build:ubuntu_cpp:
     - make -j4 all install
 
   artifacts:
+    expire_in: 1 week
     paths:
       - build_cpp/
       - install_cpp/
@@ -29,6 +30,7 @@ build:ubuntu_python:
     - export AIDGE_INSTALL=`pwd`/install
     - python3 -m pip install .
   artifacts:
+    expire_in: 1 week
     paths:
       - venv/
 
@@ -57,6 +59,7 @@ build:windows_cpp:
     - cmake --install . --config Debug
 
   artifacts:
+    expire_in: 1 week
     paths:
       - build_cpp/
       - install_cpp/
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 67ad9304bc3e682a9436fb52306b3ca8120c1c4b..b764086c8e974dc53aadd345cdd287918d599afb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -52,9 +52,9 @@ target_include_directories(${module_name}
 )
 
 # PYTHON BINDING
-generate_python_binding(${project} ${module_name})
-
 if (PYBIND)
+    generate_python_binding(${project} ${module_name})
+
     # Handles Python + pybind11 headers dependencies
     target_link_libraries(${module_name}
         PUBLIC 
@@ -66,22 +66,12 @@ endif()
 
 target_compile_features(${module_name} PRIVATE cxx_std_14)
 
-
-if(WERROR)
-    target_compile_options(${module_name} PRIVATE
+target_compile_options(${module_name} PRIVATE
     $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-    -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Werror>)
-    target_compile_options(${module_name} PRIVATE
+    -Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror>>)
+target_compile_options(${module_name} PRIVATE
     $<$<CXX_COMPILER_ID:MSVC>:
     /W4>)
-else()
-    target_compile_options(${module_name} PRIVATE
-        $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-        -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Wpedantic>)
-        target_compile_options(${module_name} PRIVATE
-        $<$<CXX_COMPILER_ID:MSVC>:
-        /W4>)
-endif()
 
 if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
     append_coverage_compiler_flags()
diff --git a/cmake/PybindModuleCreation.cmake b/cmake/PybindModuleCreation.cmake
index 18f4abc38e2537c3f4d949f08772a57b90758cb0..8030c1a8639e4b7ae0c5fb865e928a4260c6ae7d 100644
--- a/cmake/PybindModuleCreation.cmake
+++ b/cmake/PybindModuleCreation.cmake
@@ -1,23 +1,21 @@
-function(generate_python_binding name target_to_bind) 
-    if (PYBIND)
-        add_definitions(-DPYBIND)
-        Include(FetchContent)
+function(generate_python_binding name target_to_bind)
+    add_definitions(-DPYBIND)
+    Include(FetchContent)
 
-        FetchContent_Declare(
-        PyBind11
-        GIT_REPOSITORY https://github.com/pybind/pybind11.git
-        GIT_TAG        v2.10.4 # or a later release
-        )
+    FetchContent_Declare(
+    PyBind11
+    GIT_REPOSITORY https://github.com/pybind/pybind11.git
+    GIT_TAG        v2.10.4 # or a later release
+    )
 
-        # Use the New FindPython mode, recommanded. Requires CMake 3.15+
-        find_package(Python COMPONENTS Interpreter Development)
-        FetchContent_MakeAvailable(PyBind11)
+    # Use the new FindPython mode (recommended). Requires CMake 3.15+
+    find_package(Python COMPONENTS Interpreter Development)
+    FetchContent_MakeAvailable(PyBind11)
 
-        message(STATUS "Creating binding for module ${name}")
-        file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
+    message(STATUS "Creating binding for module ${name}")
+    file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
 
-        pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO EXTRA recquired for pip install
-        target_include_directories(${name} PUBLIC "python_binding")
-        target_link_libraries(${name} PUBLIC ${target_to_bind})        
-    endif()
+    pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO_EXTRAS required for pip install
+    target_include_directories(${name} PUBLIC "python_binding")
+    target_link_libraries(${name} PUBLIC ${target_to_bind})
 endfunction()
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index ff6601c487ea97294019a12ba899d251b08077e7..cfda3ac7fa024f8cf80b4589d978b9b5bff5b4f0 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -34,11 +34,13 @@
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/Matmul.hpp"
-#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+//#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Softmax.hpp"
+#include "aidge/operator/Scaling.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/utils/CParameter.hpp"
 #include "aidge/utils/Parameter.hpp"
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index f11136adaaa3d23fa9d3dc5749dd5d6771cbc42c..7a2b4bac008a82d0454a6dd057d8bf78c7605926 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -320,8 +320,20 @@ public:
 
     void link(std::string name1_inID, std::string name2_outID);
 
-    void insert(Node &newNode, Node &inNode, std::initializer_list<Node> outNodes,
-                IOIndex_t tensorIdx);
+    /**
+     * @brief Insert a node (newParentNode) as a parent of the passed node (childNode).
+     * 
+     * @param childNode Node that gets a new parent.
+     * @param newParentNode Inserted Node.
+     * @param childInputTensorIdx Index of the input Tensor for the childNode linked to the inserted Node output.
+     * @param newParentInputTensorIdx Index of the input Tensor for the newParentNode linked to the former parent of childNode.
+     * @param newParentOutputTensorIdx Index of the output Tensor for the newParentNode linked to the childNode's input Tensor.
+     */
+    void insertParent(NodePtr childNode,
+                      NodePtr newParentNode,
+                      IOIndex_t childInputTensorIdx,
+                      IOIndex_t newParentInputTensorIdx,
+                      IOIndex_t newParentOutputTensorIdx);
 
     /**
      * @brief Replace the current GraphView with the set of given Nodes if possible
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 11def52dbab30159e9e882fb19d16f1549aa3887..340a8318cbd0d59b7710bce7d46b7acd1670dd5b 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -303,7 +303,7 @@ public:
    * @param inId Input index.
    * @return std::shared_ptr<Node>&
    */
-  inline NodePtr &getParents(const IOIndex_t inId) {
+  inline NodePtr &getParent(const IOIndex_t inId) {
     assert(inId != gk_IODefaultIndex);
     return mParents.at(inId);
   }
diff --git a/include/aidge/hook/execTime.hpp b/include/aidge/hook/execTime.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..212fef58696be702e89c8ad973dcc0dd0fc389ae
--- /dev/null
+++ b/include/aidge/hook/execTime.hpp
@@ -0,0 +1,59 @@
+/**
+ * \file execTime.hpp
+ * \brief execTime structure
+ * \version file 1.0.0
+ * \date Creation 27 June 2023
+ * \date 27 June 2023
+ * \par ChangeLog
+ * \par
+ *  v1.0.0, 27 June 2023<br>
+ *  - Initial version.
+ * \author mn271187, ik243221
+ * \copyright
+ *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
+ *  rights reserved.
+ */
+
+#ifndef AIDGE_CORE_HOOK_EXECTIME_H_
+#define AIDGE_CORE_HOOK_EXECTIME_H_
+
+#include "aidge/operator/Operator.hpp"
+#include "aidge/hook/hook.hpp"
+#include <memory>
+#include <chrono>
+#include <vector>
+
+namespace Aidge {
+
+class ExecTime : public Hook {
+private:
+    std::vector<std::chrono::high_resolution_clock::time_point> registeredTimes;
+public:
+    ExecTime(const std::shared_ptr<Operator> op) : Hook(op) {}
+    ~ExecTime() = default;
+
+    void call() override final {
+        registeredTimes.push_back(std::chrono::high_resolution_clock::now());
+    }
+
+    static std::shared_ptr<ExecTime> create(const std::shared_ptr<Operator> op)
+    {
+        return std::make_shared<ExecTime>(op);
+    }
+
+    std::vector<std::chrono::high_resolution_clock::time_point> getTimes() {
+        return  registeredTimes;
+    }
+
+    std::chrono::high_resolution_clock::time_point getTime(size_t idx) {
+        return registeredTimes[idx];
+    }
+
+};
+
+namespace {
+    static Registrar<Hook> registrarHook_ExecTime({"execution_time"}, Aidge::ExecTime::create);
+}
+}
+
+#endif /* AIDGE_CORE_HOOK_EXECTIME_H_ */
\ No newline at end of file
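
A minimal usage sketch for this hook (addHook, getHook and the forward()-time dispatch come from the Operator changes further down in this patch; the operator is assumed to already have a backend and valid inputs):

```cpp
#include <chrono>
#include <memory>

#include "aidge/hook/execTime.hpp"

// Attach the "execution_time" hook (registered name above), run the operator
// once, then read back the timestamp recorded by call().
void timeOneForward(std::shared_ptr<Aidge::Operator> op) {
    op->addHook("execution_time");
    op->forward(); // forward() now triggers runHooks() after the implementation runs
    auto hook = std::static_pointer_cast<Aidge::ExecTime>(op->getHook("execution_time"));
    std::chrono::high_resolution_clock::time_point t0 = hook->getTime(0);
    (void)t0; // diff two such timestamps to measure latency between calls
}
```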
diff --git a/include/aidge/hook/hook.hpp b/include/aidge/hook/hook.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0448659b937c3498f57cae9935196ef2f38ecf6d
--- /dev/null
+++ b/include/aidge/hook/hook.hpp
@@ -0,0 +1,41 @@
+/**
+ * \file Hook.hpp
+ * \brief Hook structure
+ * \version file 1.0.0
+ * \date Creation 27 June 2023
+ * \date 27 June 2023
+ * \par ChangeLog
+ * \par
+ *  v1.0.0, 27 June 2023<br>
+ *  - Initial version.
+ * \author mn271187, ik243221
+ * \copyright
+ *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
+ *  rights reserved.
+ */
+
+#ifndef AIDGE_CORE_HOOK_HOOK_H_
+#define AIDGE_CORE_HOOK_HOOK_H_
+
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include <memory>
+
+namespace Aidge {
+
+class Operator;
+class Hook : public Registrable<Hook, std::tuple<std::string>, std::shared_ptr<Hook>(const std::shared_ptr<Operator>)> {
+protected:
+    const std::shared_ptr<Operator> mOperator;
+
+public:
+    Hook(std::shared_ptr<Operator> op) : mOperator(op) {}
+    virtual ~Hook() = default;
+
+    virtual void call() = 0;
+
+};
+}
+
+#endif /* AIDGE_CORE_HOOK_HOOK_H_ */
\ No newline at end of file
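
For reference, a user-defined hook only needs to subclass Hook and register a factory under a name; a hypothetical sketch following the same pattern as ExecTime/OutputRange (CallCount and "call_count" are illustrative names, not part of this patch):

```cpp
#include <cstddef>
#include <memory>

#include "aidge/hook/hook.hpp"
#include "aidge/operator/Operator.hpp"

namespace Aidge {
// Counts how many times the owning operator's forward() has run.
class CallCount : public Hook {
    std::size_t mCount = 0;
public:
    CallCount(const std::shared_ptr<Operator> op) : Hook(op) {}
    void call() override final { ++mCount; }
    std::size_t count() const { return mCount; }
    static std::shared_ptr<CallCount> create(const std::shared_ptr<Operator> op) {
        return std::make_shared<CallCount>(op);
    }
};
namespace {
// Registration makes the hook reachable via Operator::addHook("call_count").
static Registrar<Hook> registrarHook_CallCount({"call_count"}, CallCount::create);
}
} // namespace Aidge
```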
diff --git a/include/aidge/hook/outputRange.hpp b/include/aidge/hook/outputRange.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a2da2a997d594c0ef78fb7c31f33b32c3495c4eb
--- /dev/null
+++ b/include/aidge/hook/outputRange.hpp
@@ -0,0 +1,74 @@
+/**
+ * \file outputRange.hpp
+ * \brief OutputRange structure
+ * \version file 1.0.0
+ * \date Creation 27 June 2023
+ * \date 27 June 2023
+ * \par ChangeLog
+ * \par
+ *  v1.0.0, 27 June 2023<br>
+ *  - Initial version.
+ * \author ik243221
+ * \copyright
+ *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
+ *  rights reserved.
+ */
+
+#ifndef AIDGE_CORE_HOOK_OUTPUTRANGE_H_
+#define AIDGE_CORE_HOOK_OUTPUTRANGE_H_
+
+#include "aidge/operator/Operator.hpp"
+#include "aidge/hook/hook.hpp"
+#include <memory>
+#include <chrono>
+#include <vector>
+#include <cmath>
+namespace Aidge {
+
+class OutputRange : public Hook {
+private:
+    std::vector<float> registeredOutputs;
+public:
+    OutputRange(const std::shared_ptr<Operator> op) : Hook(op) {}
+    ~OutputRange() = default;
+
+    void call() override final {
+        // This assumes the operator has a single output.
+        std::shared_ptr<Tensor> tensor = mOperator->getOutput(0);
+        float max_value = 0.0f;
+        float* casted_tensor = static_cast<float*>(tensor->getImpl()->rawPtr());
+        // Find the absolute max value in the tensor and record it.
+        for (std::size_t i = 0; i < tensor->size(); ++i) {
+            if (std::abs(casted_tensor[i]) > max_value) {
+                max_value = std::abs(casted_tensor[i]);
+            }
+        }
+        registeredOutputs.push_back(max_value);
+    }
+
+    static std::shared_ptr<OutputRange> create(const std::shared_ptr<Operator> op)
+    {
+        return std::make_shared<OutputRange>(op);
+    }
+
+    std::vector<float> getOutputs() {
+        return  registeredOutputs;
+    }
+
+    float getOutput(size_t idx) {
+        return registeredOutputs[idx];
+    }
+
+};
+
+namespace {
+    static Registrar<Hook> registrarHook_OutputRange({"output_range"}, Aidge::OutputRange::create);
+}
+}
+
+#endif /* AIDGE_CORE_HOOK_OUTPUTRANGE_H_ */
\ No newline at end of file
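
A hedged sketch of the intended use: after one inference pass, the recorded maxima can seed a calibration/quantization step (backend and input setup are assumed to happen elsewhere):

```cpp
#include <memory>

#include "aidge/hook/outputRange.hpp"

// Returns max(|output|) observed on the operator's single tracked output
// during the forward call.
float maxAbsOutput(std::shared_ptr<Aidge::Operator> op) {
    op->addHook("output_range"); // registered name above
    op->forward();               // call() records the maximum via runHooks()
    auto hook = std::static_pointer_cast<Aidge::OutputRange>(op->getHook("output_range"));
    return hook->getOutput(0);
}
```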
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index dab5df9a8f2d1e7d2cd680703d70e38d564c2564..12fb7e16741e9f7ad96d51b0b847b91265c3a7d2 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -62,7 +62,12 @@ class GenericOperator_Op
      * @return template<class T> The parameter.
      */
     template <class T>
-    T getParameter(std::string const &key) const {
+    const T& getParameter(std::string const &key) const {
+        return mParams.Get<const T>(key);
+    }
+
+    template <class T>
+    T& getParameter(std::string const &key) {
         return mParams.Get<T>(key);
     }
 
@@ -75,8 +80,8 @@ class GenericOperator_Op
     /// internal buffer in a new location (previous value is still in memory at
     /// its previous location)
     template <class T>
-    void addParameter(std::string const &key, T const &value) {
-        mParams.Add<T>(key, value);
+    void addParameter(std::string const &key, T&& value) {
+        mParams.Add<T>(key, std::forward<T>(value));
     }
 
 
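The practical effect of the new accessor pair, as a sketch (mirrors the updated unit test at the bottom of this patch; "nbChannels" is an illustrative key):

```cpp
#include <cassert>

#include "aidge/operator/GenericOperator.hpp"

void parameterDemo() {
    Aidge::GenericOperator_Op op("TestOp", 1, 1, 1);
    op.addParameter("nbChannels", int(32));   // value is forwarded into storage
    op.getParameter<int>("nbChannels") += 32; // non-const overload returns T&, so in-place mutation works
    assert(op.getParameter<int>("nbChannels") == 64);
}
```
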
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..073243e801c6e1297129424b0c93b1a7c4f112f3
--- /dev/null
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -0,0 +1,174 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_MAXPOOLING_H_
+#define AIDGE_CORE_OPERATOR_MAXPOOLING_H_
+
+#include <array>
+#include <numeric>
+#include <vector>
+#include <cmath>
+#include <cassert>  // assert
+#include <cstring>  // strcmp
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class MaxPoolingParam { StrideDims, KernelDims, PaddingDims };
+
+template <DimIdx_t DIM>
+class MaxPooling_Op : public Operator,
+                public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
+                public Parameterizable<MaxPoolingParam,
+                                       std::array<DimSize_t, DIM>,
+                                       std::array<DimSize_t, DIM>,
+                                       std::array<DimSize_t, (DIM<<1) >> {
+private:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char *Type = "MaxPooling";
+
+    MaxPooling_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<MaxPoolingParam,
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, (DIM<<1)> >;
+    template <MaxPoolingParam e>
+    using param = typename Parameterizable_::template param<e>;
+
+    constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
+        : Operator(Type),
+          Parameterizable_(param<MaxPoolingParam::StrideDims>(stride_dims),
+                           param<MaxPoolingParam::KernelDims>(kernel_dims),
+                           param<MaxPoolingParam::PaddingDims>(padding_dims)),
+          mOutput(std::make_shared<Tensor>()) {
+        setDatatype(DataType::Float32);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 1 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInput->empty()) {
+            std::array<DimSize_t, DIM + 2> outputDims = {};
+
+            for (std::size_t dim = 0; dim < this->template get<MaxPoolingParam::KernelDims>().size() ; ++dim) {
+                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                                            std::floor(static_cast<float>(mInput->dims()[dim+2] -
+                                                                    this->template get<MaxPoolingParam::KernelDims>()[dim] +
+                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim] +
+                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim+DIM]) /
+                                            static_cast<float>(this->template get<MaxPoolingParam::StrideDims>()[dim])));
+            }
+            outputDims[1] = mInput->dims()[1];
+            outputDims[0] = mInput->dims()[0];
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return *(mInput.get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "MaxPooling Operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "MaxPooling Operators has only 1 outputs");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string &name) {
+        mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+
+    void setDatatype(const DataType &datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+                                           const std::string& name = "",
+                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                           const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+    // FIXME: properly handle default w&b initialization in every cases
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
+    auto maxPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
+    return maxPool;
+}
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> MaxPooling(
+    DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name = "",
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
+    return MaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+}
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::MaxPoolingParam>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
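
Factory usage sketch (same call style as the Conv helpers used in the tests below): a 2-D max pooling node with a 2x2 kernel and stride 2; padding_dims defaults to DIM<<1 zeros.

```cpp
#include <memory>

#include "aidge/graph/Node.hpp"
#include "aidge/operator/MaxPooling.hpp"

std::shared_ptr<Aidge::Node> makePool() {
    // DIM = 2 is deduced from the braced kernel dimensions.
    return Aidge::MaxPooling({2, 2}, "pool1", {2, 2});
}
```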
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 36f846ddae329be28b8e51e2bff1580a509562e1..122a42a42f38309aa1cd1661324fcc6f5c2d3fcc 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -20,12 +20,14 @@
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/hook/hook.hpp"
 
 namespace Aidge {
 
 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
   std::unique_ptr<OperatorImpl> mImpl; // implementation of the operator
+  std::map<std::string, std::shared_ptr<Hook>> mHooks;
 
 private:
   std::string mType;
@@ -48,6 +50,15 @@ public:
     virtual std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const = 0;
     virtual Tensor& output(const IOIndex_t /*outputIdx*/) const = 0;
 
+    std::shared_ptr<Hook> getHook(std::string hookName) {
+        return mHooks[hookName];
+    }
+    void addHook(std::string hookName) {
+        mHooks.emplace(hookName, Registrar<Hook>::create({hookName})(shared_from_this()));
+    }
+
+    void runHooks() const;
+
 ///////////////////////////////////////////////////////
 //        IMPLEMENTATION
 ///////////////////////////////////////////////////////
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e158ecd7567eb683558d9e09a6cf03e5cc35ce42
--- /dev/null
+++ b/include/aidge/operator/Scaling.hpp
@@ -0,0 +1,140 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SCALING_H_
+#define AIDGE_CORE_OPERATOR_SCALING_H_
+
+#include <vector>
+#include <memory>
+#include <cassert>  // assert
+#include <cstring>  // strcmp
+
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class ScalingParam {
+    scalingFactor
+};
+
+class Scaling_Op : public Operator,
+    public Registrable<Scaling_Op, std::string, std::unique_ptr<OperatorImpl>(const Scaling_Op&)>,
+    public Parameterizable<ScalingParam, float> {
+public:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Scaling";
+
+    Scaling_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<ScalingParam, float>;
+    template <ScalingParam e> using param = typename Parameterizable_::template param<e>;
+
+    Scaling_Op(float scalingFactor)
+            : Operator(Type),
+            Parameterizable_(
+                param<ScalingParam::scalingFactor>(scalingFactor))
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        (void) inputIdx; //avoid unused warning
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInput->empty())
+            mOutput->resize(mInput->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx == 0) && "Scaling Operator has only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return *(mInput.get());
+    }
+    inline Tensor& output(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Scaling Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return *(mOutput.get());
+    }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final { 
+        assert((inputIdx == 0) && "Scaling Operator has only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Scaling Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning;
+        return mOutput;
+    }
+
+
+    void setBackend(const std::string& name) {
+        mImpl = Registrar<Scaling_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
+}
+}
+
+namespace {
+template <>
+const char* const EnumStrings<Aidge::ScalingParam>::data[]
+    = {"scalingFactor"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */
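
Usage sketch: the factory wraps a Scaling_Op parameterized by scalingFactor (the actual arithmetic lives in backend implementations, which are not part of this patch):

```cpp
#include <memory>

#include "aidge/graph/Node.hpp"
#include "aidge/operator/Scaling.hpp"

std::shared_ptr<Aidge::Node> makeRescale() {
    // A node that (per its backend kernel) rescales its single input by 0.5f.
    return Aidge::Scaling(0.5f, "rescale");
}
```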
diff --git a/include/aidge/utils/Any.hpp b/include/aidge/utils/Any.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0310c38ccd855f64c8485a114962738203f03ef5
--- /dev/null
+++ b/include/aidge/utils/Any.hpp
@@ -0,0 +1,154 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_ANY_H_
+#define AIDGE_ANY_H_
+
+
+#include <typeinfo>    // typeid
+#include <type_traits> // std::enable_if_t, std::decay_t, std::is_same, std::is_copy_constructible, std::remove_cv, std::remove_reference
+#include <assert.h>
+#include <new>
+
+class _any {
+private:
+    /// @brief Operation to perform on the object.
+    enum _Op { _Op_access, _Op_get_type_info, _Op_clone, _Op_destroy };
+
+    union _Arg {
+        const std::type_info* _M_typeinfo;
+        _any* _M_any;
+    };
+
+    /// @brief Stored data without type information.
+    void* _M_data;
+
+    /// @brief Member function to perform type-related computations on stored data.
+    void (*_M_manager)(_Op, const _any*, _Arg*);
+
+public:
+    /// @brief Class to centralize functions and type information in a memory efficient way.
+    /// @tparam Tp Decayed stored type.
+    template <typename Tp>
+    struct Manager {
+        static void manage(_Op which, const _any* __any, _Arg* __arg) {
+            auto ptr = static_cast<const Tp*>(__any->_M_data);
+            switch (which)
+            {
+            case _Op_get_type_info:
+                __arg->_M_typeinfo = &typeid(Tp);
+                break;
+            case _Op_clone:
+                __arg->_M_any->_M_data = new Tp(*ptr);
+                __arg->_M_any->_M_manager = __any->_M_manager;
+                break;
+            case _Op_destroy:
+                delete ptr;
+                break;
+            default: // _Op_access is served by the static access() helper below
+                break;
+            }
+        }
+        static Tp* access(const _any* __any) {
+            return static_cast<Tp*>(__any->_M_data);
+        }
+
+    };
+
+private:
+    template<typename _Tp, typename _VTp = std::decay_t<_Tp>>
+    using _Decay_if_not_any = std::enable_if_t<!std::is_same<_VTp, _any>::value, _VTp>;
+
+public:
+    /// @brief Default constructor
+    _any() noexcept : _M_manager(nullptr) { }
+
+    /// @brief Copy constructor
+    /// @param __other
+    _any(const _any& __other)
+    {
+        if (!__other._M_manager)
+            _M_manager = nullptr;
+        else
+        {
+            _Arg __arg;
+            __arg._M_any = this;
+            __other._M_manager(_Op_clone, &__other, &__arg);
+        }
+    }
+
+    /// @brief Move constructor
+    /// @param __other
+    _any(_any&& __other)
+    {
+        if (!__other._M_manager)
+            _M_manager = nullptr;
+        else
+        {
+            _M_data = __other._M_data;
+            _M_manager = __other._M_manager;
+            __other._M_manager = nullptr;
+        }
+    }
+
+    /// @brief By-value constructor.
+    /// @tparam T Data type.
+    /// @tparam VT Decayed data type.
+    /// @param value
+    template<typename T, typename VT = _Decay_if_not_any<T>, std::enable_if_t<std::is_copy_constructible<VT>::value, bool> = true>
+    explicit _any(T&& value)
+        : _M_data(new VT{std::forward<T>(value)}),
+          _M_manager(&Manager<VT>::manage)
+    {}
+
+    ~_any()
+    {
+        if(_M_manager) {
+            _M_manager(_Op_destroy, this, nullptr);
+            _M_manager = nullptr;
+        }
+    }
+
+    /// @brief Access type id of the value currently stored
+    /// @return
+    const std::type_info& type() const
+    {
+        if (!_M_manager)
+            return typeid(void);
+        _Arg __arg;
+        _M_manager(_Op_get_type_info, this, &__arg);
+        return *__arg._M_typeinfo;
+    }
+};
+
+/// @brief Access value stored in the object converted in the template type if possible.
+/// @tparam _ValueType
+/// @param __any
+/// @return Stored value.
+template<typename _ValueType>
+inline _ValueType any_cast(const _any& __any)
+{
+    using _Up = std::remove_cv_t<std::remove_reference_t<_ValueType>>;
+    static_assert(std::is_reference<_ValueType>::value || std::is_copy_constructible<_ValueType>::value,
+                  "Template argument must be a reference or CopyConstructible type");
+    static_assert(std::is_constructible<_ValueType, const _Up&>::value,
+                  "Template argument must be constructible from a const value.");
+    static_assert(std::is_object<_Up>::value, "Template argument must be an object type.");
+    assert(__any.type() == typeid(_Up));
+    auto __p = _any::Manager<_Up>::access(&__any); // use the public accessor; _M_data is private here
+    if (__p)
+        return static_cast<_ValueType>(*__p);
+    throw std::bad_cast();
+}
+
+#endif /* AIDGE_ANY_H_ */
\ No newline at end of file
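
A minimal sketch of the C++14 type-erased container defined above:

```cpp
#include <string>
#include <typeinfo>

#include "aidge/utils/Any.hpp"

void anyDemo() {
    _any boxed(std::string("hello"));                // by-value constructor stores a decayed copy
    bool ok = (boxed.type() == typeid(std::string)); // type introspection via the stored Manager
    std::string s = any_cast<std::string>(boxed);    // checked access (asserts on type mismatch)
    (void)ok; (void)s;
}
```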
diff --git a/include/aidge/utils/CParameter.hpp b/include/aidge/utils/CParameter.hpp
index 0f4c74ab8bccb7bc134e035a5f12d31d51663e5d..7d60ed239ae58666833c4ce227aaf16542679036 100644
--- a/include/aidge/utils/CParameter.hpp
+++ b/include/aidge/utils/CParameter.hpp
@@ -12,23 +12,34 @@
 #ifndef AIDGE_CPARAMETER_H_
 #define AIDGE_CPARAMETER_H_
 
-#include <assert.h>
 #include <map>
 #include <vector>
+#include <type_traits>
+#include <typeinfo>
+#include <assert.h>
+
+#include "aidge/utils/Any.hpp"
+
 
 namespace Aidge {
 
 ///\todo store also a fix-sized code that indicates the type
 ///\todo managing complex types or excluding non-trivial, non-aggregate types
-class CParameter
-{
+class CParameter {
 private:
-    template <typename T>
-    struct is_vector : std::false_type {};
-
-    template <typename T, typename Alloc>
-    struct is_vector<std::vector<T, Alloc>> : std::true_type {};
-
+    template<typename _ValueType>
+    static _ValueType& any_cast_ref(const _any& __any)
+    {
+        using _Up = std::remove_cv_t<std::remove_reference_t<_ValueType>>;
+        static_assert(std::is_reference<_ValueType>::value || std::is_copy_constructible<_ValueType>::value,
+                      "Template argument must be a reference or CopyConstructible type");
+        static_assert(std::is_constructible<_ValueType, const _Up&>::value,
+                      "Template argument must be constructible from a const value.");
+        static_assert(std::is_object<_Up>::value, "Template argument must be an object type.");
+        assert(__any.type() == typeid(_Up));
+        if (_any::Manager<_Up>::access(&__any)) { // check that the _any object is not empty
+            return *static_cast<_ValueType*>(_any::Manager<_Up>::access(&__any));
+        }
+        throw std::bad_cast();
+    }
 public:
     // not copyable, not movable
     CParameter(CParameter const &) = delete;
@@ -48,15 +59,16 @@ public:
      *  param buffer that will get invalid after the CParam death.
      * \note at() throws if the parameter does not exist, using find to test for parameter existance
      */
-    template<class T> T Get(std::string const i_ParamName) const
+    template<class T> T& Get(const std::string& i_ParamName)
     {
-        assert(m_Params.find(i_ParamName) != m_Params.end());
-        assert(m_Types.find(i_ParamName) != m_Types.end());
-        assert(m_Params.at(i_ParamName) <= m_OffSet);
-        assert(typeid(T).name() == m_Types.at(i_ParamName));
-        return *reinterpret_cast<T *>(m_BeginBuffer + m_Params.at(i_ParamName));
+        return any_cast_ref<T>(m_Buffer[m_Params.at(i_ParamName)]);
     }
 
+    // template<class T> const T& Get(const std::string i_ParamName) const
+    // {
+    //     return any_cast<T>(m_Buffer[m_Params.at(i_ParamName)]);
+    // }
+
     ///\brief Add a parameter value, identified by its name
     ///\tparam T expected parameter type
     ///\param i_ParamName Parameter name
@@ -64,21 +76,15 @@ public:
     ///\todo Pass i_Value by ref if large or not trivial
     ///\bug If parameter already exists, its value is changed but written in the
     /// internal buffer in a new location (previous value is still in memory at its previous location)
-    template<class T> void Add(std::string const &i_ParamName, T const &i_Value)
+    template<class T> void Add(const std::string &i_ParamName, T&& i_Value)
     {
-        m_Buffer.resize(m_Buffer.size() + (sizeof(T) / sizeof(uint8_t)));
-        m_BeginBuffer = m_Buffer.data(); // Update buffer ptr in case of memory reordering
-        *reinterpret_cast<T *>(m_BeginBuffer + m_OffSet)
-            = i_Value; // Black-magic used to add anytype into the vector
-        m_Params[i_ParamName] = m_OffSet; // Copy pointer offset
-        m_OffSet += sizeof(T); // Increment offset
-
-        m_Types[i_ParamName] = typeid(i_Value).name();
+        m_Params[i_ParamName] = m_Buffer.size(); // Copy pointer offset
+        m_Buffer.push_back(_any(std::forward<T>(i_Value)));
     }
 
 
     std::string getParamType(std::string const &i_ParamName){
-        return m_Types[i_ParamName];
+        return m_Buffer[m_Params.at(i_ParamName)].type().name();
     }
 
     std::vector<std::string> getParametersName(){
@@ -91,23 +97,8 @@ public:
 private:
     std::map<std::string, std::size_t> m_Params; // { Param name : offset }
 
-    ///\brief Map to check type error
-    /* Note : i tried this : `std::map<std::string, std::type_info const *> mTypes;`
-    but looks like the type_ingo object was destroyed.
-    I am not a hugde fan of storing a string and making string comparison.
-    Maybe we can use a custom enum type (or is there a standard solution ?)
-    */
-    std::map<std::string, std::string> m_Types;
-
-    ///\brief All parameters values concatenated in raw binary form.
-    std::vector<uint8_t> m_Buffer = {};
-
-    ///\brief Starting address of the buffer
-    uint8_t *m_BeginBuffer = m_Buffer.data();
-
-    ///\brief Offset, in number of uint8_t, of the next parameter to write
-    std::size_t m_OffSet = 0;
-
+    ///\brief All raw pointers to parameters values concatenated. Use custom any class compatible with C++14.
+    std::vector<_any> m_Buffer = {};
 };
 
 }
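
Behavioral sketch of the reworked storage: parameters now live in a vector of _any, so non-trivial types are safe to store and Get<T>() hands back a reference into the stored object instead of a reinterpret_cast into a raw byte buffer (assumes CParameter stays default-constructible, as its use inside GenericOperator_Op implies):

```cpp
#include <vector>

#include "aidge/utils/CParameter.hpp"

void cparameterDemo() {
    Aidge::CParameter params;
    params.Add("dims", std::vector<int>{1, 2, 3});     // non-trivial type, fine with _any storage
    params.Get<std::vector<int>>("dims").push_back(4); // mutate in place through the returned reference
}
```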
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index 62b86982053d82bef6e0fd80e490632b95b968e5..e3666d247324fc419570611f41bbe67c7c68cc4e 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -136,6 +136,16 @@ void init_Node(py::module& m) {
             :rtype: int
             )mydelimiter")
 
+            .def("get_parents", &Node::getParents,
+            R"mydelimiter(
+            Get parents.
+            )mydelimiter")
+
+            .def("get_children", (std::set<std::shared_ptr<Node>> (Node::*)() const) &Node::getChildren,
+            R"mydelimiter(
+            Get children.
+            )mydelimiter")
+
             .def("__call__", &Node::operator(), py::arg("connectors"));
 }
 }  // namespace Aidge
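
The function-pointer cast in the get_children binding above is only there to pick one overload; the same selection in plain C++, for clarity:

```cpp
#include <memory>
#include <set>

#include "aidge/graph/Node.hpp"

// Node::getChildren is overloaded, so the binding must name the
// zero-argument const overload explicitly via a member-function-pointer cast.
using GetAllChildren = std::set<std::shared_ptr<Aidge::Node>> (Aidge::Node::*)() const;
constexpr GetAllChildren kGetChildren = &Aidge::Node::getChildren;
```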
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9bd951c446e080ff27b099527ac9bbc350646140
--- /dev/null
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -0,0 +1,89 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include <string>
+#include <vector>
+#include <array>
+
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/data/Tensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, PyAbstractParametrizable>(
+    m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
+    py::multiple_inheritance())
+  .def(py::init<const std::array<DimSize_t, DIM> &,
+                const std::array<DimSize_t, DIM> &,
+                const std::array<DimSize_t, (DIM<<1)> &>(),
+        py::arg("kernel_dims"),
+        py::arg("stride_dims"),
+        py::arg("padding_dims"));
+  
+  m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
+                                                                  const std::string& name,
+                                                                  const std::vector<DimSize_t> &stride_dims,
+                                                                  const std::vector<DimSize_t> &padding_dims) {
+        // Lambda wrapper because pybind11 fails to convert a const array argument,
+        // so we take vectors and convert them here to const DimSize_t[DIM] arrays.
+        if (kernel_dims.size() != DIM) {
+            throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
+        }
+        if (stride_dims.size() != DIM) {
+            throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
+        }
+        if (padding_dims.size() != (DIM<<1)) {
+            throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) +"]");
+        }
+        DimSize_t tmp_kernel_dims_array[DIM];
+        for (size_t i = 0; i < DIM; ++i) {
+            tmp_kernel_dims_array[i] = kernel_dims[i];
+        }
+        DimSize_t tmp_stride_dims_array[DIM];
+        for (size_t i = 0; i < DIM; ++i) {
+            tmp_stride_dims_array[i] = stride_dims[i];
+        }
+        DimSize_t tmp_padding_dims_array[DIM<<1];
+        for (size_t i = 0; i < (DIM<<1); ++i) {
+            tmp_padding_dims_array[i] = padding_dims[i];
+        }
+        const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
+        const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
+        const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
+        return MaxPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
+    }, py::arg("kernel_dims"),
+       py::arg("name") = "",
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
+  
+}
+
+
+void init_MaxPooling(py::module &m) {
+  declare_MaxPoolingOp<1>(m);
+  declare_MaxPoolingOp<2>(m);
+  declare_MaxPoolingOp<3>(m);
+ 
+  // FIXME:
+  // m.def("MaxPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
+  // (&)[1])>(&MaxPooling));
+}
+} // namespace Aidge
+#endif
\ No newline at end of file
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 83619032c3ef8e5b4b279c1ffb550f1f4340f450..6627565898eae837a1d9fd8ce0d6cac9f50c25c2 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -29,6 +29,7 @@ void init_FC(py::module&);
 void init_GenericOperator(py::module&);
 void init_LeakyReLU(py::module&);
 void init_Matmul(py::module&);
+void init_MaxPooling(py::module&);
 void init_Producer(py::module&);
 void init_ReLU(py::module&);
 void init_Softmax(py::module&);
@@ -75,6 +76,7 @@ void init_Aidge(py::module& m){
     init_GenericOperator(m);
     init_LeakyReLU(m);
     init_Matmul(m);
+    init_MaxPooling(m);
     init_ReLU(m);
     init_Softmax(m);
 
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 9798dfe639475b78c761f0450c80635c5c80a63d..486a1ffe6cec4f37bb88cbfc5664ce843c4caa2b 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -33,13 +33,10 @@ Aidge::Connector Aidge::GraphView::operator()(
     (void)input; // avoid unused warning
   }
 
+  IOIndex_t inID = 0;
   for (const Connector &ctor : ctors) {
     assert((ctor.node() != nullptr) &&
            "Input Connector must be associated with a node");
-    (void)ctors; // avoid unused warning
-  }
-  IOIndex_t inID = 0;
-  for (const Connector &ctor : ctors) {
     ctor.node()->addChild(shared_from_this(), static_cast<std::size_t>(ctor.index()),
                           {inNode, inID++});
   }
@@ -326,7 +323,7 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
   // add learnable parameters to the graph
   if (includeLearnableParam) {
     for (IOIndex_t i = node->nbDataInputs(); i < node->nbInputs(); ++i) {
-      std::shared_ptr<Node> parentNode = node->getParents(static_cast<IOIndex_t>(i));
+      std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
       if (parentNode) {
           parentNode->addView(shared_from_this());
           mNodes.insert(parentNode);
@@ -522,12 +519,24 @@ void Aidge::GraphView::link(std::string /*name1_inID*/,
   printf("Not implemented yet.\n");
 }
 
-void Aidge::GraphView::insert(Node & /*newNode*/, Node & /*inNode*/,
-                             std::initializer_list<Node> /*outNodes*/,
-                             IOIndex_t /*tensorIdx*/) {
-  printf("Not implemented yet.\n");
+void Aidge::GraphView::insertParent(NodePtr childNode,
+                                    NodePtr newParentNode,
+                                    IOIndex_t childInputTensorIdx,
+                                    IOIndex_t newParentInputTensorIdx,
+                                    IOIndex_t newParentOutputTensorIdx) {
+  NodePtr currentParentNode = childNode->getParent(childInputTensorIdx);
+  const IOIndex_t currentParentOutputTensorIdx = childNode->input(childInputTensorIdx).second;
+
+  // Detach childNode from its current parent (and vice versa)
+  currentParentNode->removeChild(childNode, currentParentOutputTensorIdx);
+
+  // Reconnect: former parent -> newParentNode -> childNode
+  currentParentNode->addChild(newParentNode, currentParentOutputTensorIdx, newParentInputTensorIdx);
+  newParentNode->addChild(childNode, newParentOutputTensorIdx, childInputTensorIdx);
+
+  add(newParentNode);
+}
 
 bool Aidge::GraphView::replaceWith(std::set<std::shared_ptr<Node>> newNodes) {
   // TODO : only supports one input/output node for now
   assert(mNodes.size()>0 && "There must be at least one Node to replace");
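
Illustrative use of insertParent, tying it to the Scaling operator added in this patch (a hedged sketch; the graph view and child node are assumed to come from elsewhere):

```cpp
#include <memory>

#include "aidge/graph/GraphView.hpp"
#include "aidge/operator/Scaling.hpp"

// Rescale childNode's data input 0 by inserting a Scaling node between the
// child and its current parent; insertParent also adds the node to the view.
void insertRescale(std::shared_ptr<Aidge::GraphView> graph, Aidge::NodePtr childNode) {
    auto scale = Aidge::Scaling(0.5f, "rescale");
    graph->insertParent(childNode, scale, /*childInputTensorIdx=*/0,
                        /*newParentInputTensorIdx=*/0,
                        /*newParentOutputTensorIdx=*/0);
}
```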
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 5fcc0e1139d8ccd9368eaba90231fb12370e761e..abf572831d8f0b5c2c5eb836ea46e05b8114da55 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -226,7 +226,7 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t
 }
 
 void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOIndex_t inId) {
-    if (getParents(inId) != nullptr) {
+    if (getParent(inId) != nullptr) {
         printf("Warning, you're replacing a Parent.\n");
     }
     assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Input index out of bound.");
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index b3896b12143488275b2a064819595c380da62844..09a17a428e1de91c0318f710e6f097573cf529a6 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -42,6 +42,14 @@ void Aidge::Operator::updateConsummerProducer(){
     mImpl->updateConsummerProducer();
 }
 
-void Aidge::Operator::forward() { mImpl->forward(); }
+void Aidge::Operator::runHooks() const {
+    for (auto& hook : mHooks) {
+        hook.second->call();
+    }
+}
+void Aidge::Operator::forward() {
+    mImpl->forward();
+    runHooks();
+}
 
 void Aidge::Operator::backward() { mImpl->backward(); }
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index dc693193c6606c99b1628d23ad253015f8f8dbe6..319370ebad95869efd450eade58a2ecd36075090 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -330,4 +330,48 @@ TEST_CASE("[core/graph] GraphView(replaceWith)") {
         REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({r1, r4}));
         REQUIRE((r1->output(0))[0].first == r4);
     }
+}
+
+TEST_CASE("[core/graph] GraphView(insertParent)") {
+    auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
+    auto conv1 = Conv(3, 32, {3, 3}, "conv1");
+    auto conv2 = Conv(32, 64, {3, 3}, "conv2");
+    auto conv3 = Conv(32, 64, {1, 1}, "conv3");
+    auto g = std::make_shared<GraphView>("TestGraph");
+    dataProvider->addChild(conv1, 0);
+    g->add(conv1);
+    g->addChild(conv2, conv1, 0);
+    g->addChild(conv3, conv1, 0);
+    g->save("graphForwardDims");
+    g->forwardDims();
+
+    auto newConv = Conv(32, 32, {1, 1}, "newConv");
+
+    SECTION("Check insertParent conv2 then insertParent conv3") {
+        g->insertParent(conv2, newConv, 0, 0, 0);
+
+        std::set<NodePtr> expectedConv1Children = {conv3, newConv};
+        std::set<NodePtr> expectedNewConvChildren = {conv2};
+         
+        REQUIRE(conv1->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
+        REQUIRE(conv1->getOperator()->getOutput(0) == newConv->getOperator()->getInput(0));
+        REQUIRE(conv1->getOperator()->getOutput(0) != conv2->getOperator()->getInput(0));
+        REQUIRE(newConv->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
+        REQUIRE((newConv->getChildren()) == expectedNewConvChildren);
+        REQUIRE((conv1->getChildren()) == expectedConv1Children);
+
+        g->insertParent(conv3, newConv, 0, 0, 0);
+
+        std::set<NodePtr> expectedConv1Children2 = {newConv};
+        std::set<NodePtr> expectedNewConvChildren2 = {conv2, conv3};
+
+        REQUIRE(conv1->getOperator()->getOutput(0) != conv3->getOperator()->getInput(0));
+        REQUIRE(conv1->getOperator()->getOutput(0) == newConv->getOperator()->getInput(0));
+        REQUIRE(conv1->getOperator()->getOutput(0) != conv2->getOperator()->getInput(0));
+        REQUIRE(newConv->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
+        REQUIRE(newConv->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
+        REQUIRE((newConv->getChildren()) == expectedNewConvChildren2);
+        REQUIRE((conv1->getChildren()) == expectedConv1Children2);
+
+    }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_GenericOperator.cpp b/unit_tests/operator/Test_GenericOperator.cpp
index 886326214a4a285fb32e5909da5114d74782ee46..2208399897f586becca798eb469344af01dbab64 100644
--- a/unit_tests/operator/Test_GenericOperator.cpp
+++ b/unit_tests/operator/Test_GenericOperator.cpp
@@ -20,10 +20,10 @@ using namespace Aidge;
 TEST_CASE("[core/operators] GenericOp(add & get parameters)", "[Operator]") {
     SECTION("INT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        int value = 5;
         const char* key = "intParam";
-        Testop.addParameter(key, value);
-        REQUIRE(Testop.getParameter<int>(key) == value);
+        Testop.addParameter(key, int(5));
+        int registeredVal = Testop.getParameter<int>(key);
+        REQUIRE(registeredVal == 5);
     }
     SECTION("LONG") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);