diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 13c360796fb4912ffb6b5ad17d68c7b56b38b943..cfda3ac7fa024f8cf80b4589d978b9b5bff5b4f0 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -40,6 +40,7 @@
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Softmax.hpp"
+#include "aidge/operator/Scaling.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/utils/CParameter.hpp"
 #include "aidge/utils/Parameter.hpp"
diff --git a/include/aidge/hook/execTime.hpp b/include/aidge/hook/execTime.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..212fef58696be702e89c8ad973dcc0dd0fc389ae
--- /dev/null
+++ b/include/aidge/hook/execTime.hpp
@@ -0,0 +1,59 @@
+/**
+ * \file execTime.hpp
+ * \brief execTime structure
+ * \version file 1.0.0
+ * \date Creation 27 June 2023
+ * \date 27 June 2023
+ * \par ChangeLog
+ * \par
+ *  v1.0.0, 27 June 2023<br>
+ *  - Initial version.
+ * \author mn271187, ik243221
+ * \copyright
+ *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
+ *  rights reserved.
+ */
+
+#ifndef execTime_H_
+#define execTime_H_
+
+#include "aidge/operator/Operator.hpp"
+#include "aidge/hook/hook.hpp"
+#include <memory>
+#include <chrono>
+#include <vector>
+
+namespace Aidge {
+
+/// Hook recording one wall-clock time point per call; registered as "execution_time".
+class ExecTime : public Hook {
+private:
+    std::vector<std::chrono::high_resolution_clock::time_point> registeredTimes;
+public:
+    ExecTime(const std::shared_ptr<Operator> op) : Hook(op) {}
+    ~ExecTime() = default;
+
+    void call() override final {
+        registeredTimes.push_back(std::chrono::high_resolution_clock::now());
+    }
+
+    static std::shared_ptr<ExecTime> create(const std::shared_ptr<Operator> op) {
+        return std::make_shared<ExecTime>(op);
+    }
+
+    std::vector<std::chrono::high_resolution_clock::time_point> getTimes() const {
+        return registeredTimes;
+    }
+
+    /// Bounds-checked: throws std::out_of_range instead of UB on a bad index.
+    std::chrono::high_resolution_clock::time_point getTime(size_t idx) const {
+        return registeredTimes.at(idx);
+    }
+};
+
+namespace {
+    static Registrar<Hook> registrarHook_ExecTime({"execution_time"}, Aidge::ExecTime::create); // self-registers ExecTime under the "execution_time" key
+}
+}
+
+#endif /* execTime_H_ */
\ No newline at end of file
diff --git a/include/aidge/hook/hook.hpp b/include/aidge/hook/hook.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0448659b937c3498f57cae9935196ef2f38ecf6d
--- /dev/null
+++ b/include/aidge/hook/hook.hpp
@@ -0,0 +1,41 @@
+/**
+ * \file hook.hpp
+ * \brief Hook structure
+ * \version file 1.0.0
+ * \date Creation 27 June 2023
+ * \date 27 June 2023
+ * \par ChangeLog
+ * \par
+ *  v1.0.0, 27 June 2023<br>
+ *  - Initial version.
+ * \author mn271187, ik243221
+ * \copyright
+ *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
+ *  rights reserved.
+ */
+
+#ifndef Hook_H_
+#define Hook_H_
+
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include <memory>
+
+namespace Aidge {
+
+class Operator;
+/// Abstract base for operator hooks; concrete hooks self-register under a string key.
+class Hook : public Registrable<Hook, std::tuple<std::string>, std::shared_ptr<Hook>(const std::shared_ptr<Operator>)> {
+protected:
+    const std::shared_ptr<Operator> mOperator;
+
+public:
+    Hook(std::shared_ptr<Operator> op) : mOperator(op) {}
+    // '= default' (not a bare declaration): no .cpp file defines ~Hook(), so a
+    // plain declaration would fail at link time when a derived hook is destroyed.
+    virtual ~Hook() = default;
+    virtual void call() = 0;
+};
+}
+
+#endif /* Hook_H_ */
\ No newline at end of file
diff --git a/include/aidge/hook/outputRange.hpp b/include/aidge/hook/outputRange.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a2da2a997d594c0ef78fb7c31f33b32c3495c4eb
--- /dev/null
+++ b/include/aidge/hook/outputRange.hpp
@@ -0,0 +1,74 @@
+/**
+ * \file outputRange.hpp
+ * \brief outputRange structure
+ * \version file 1.0.0
+ * \date Creation 27 June 2023
+ * \date 27 June 2023
+ * \par ChangeLog
+ * \par
+ *  v1.0.0, 27 June 2023<br>
+ *  - Initial version.
+ * \author ik243221
+ * \copyright
+ *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
+ *  rights reserved.
+ */
+
+#ifndef AIDGE_CORE_HOOK_OUTPUTRANGE_H_
+#define AIDGE_CORE_HOOK_OUTPUTRANGE_H_
+
+#include "aidge/operator/Operator.hpp"
+#include "aidge/hook/hook.hpp"
+#include <memory>
+#include <chrono>
+#include <vector>
+#include <cmath>
+namespace Aidge {
+
+class OutputRange : public Hook { // records per-call max(|output|); registered as "output_range"
+private:
+    std::vector<float> registeredOutputs = std::vector<float>()
+public:
+    OutputRange(const std::shared_ptr<Operator> op) : Hook(op) {}
+    ~OutputRange() = default;
+
+    void call() override final {
+        // Record the absolute maximum of the operator's output tensor.
+        // NOTE(review): assumes the operator has exactly one output — confirm for multi-output ops.
+        std::shared_ptr<Tensor> tensor = mOperator->getOutput(0);
+        // NOTE(review): rawPtr() is read as float*, i.e. a Float32 backend tensor is assumed — verify.
+        // An empty tensor yields max_value == 0 (the loop body never runs).
+        float max_value = 0.;
+        float * casted_tensor = static_cast<float *>(tensor->getImpl()->rawPtr());
+        // Scan every element, keeping the largest absolute value.
+        for(std::size_t i = 0; i < tensor->size(); ++i) {
+            // NaN elements are skipped: any comparison against NaN is false.
+            if(std::abs(casted_tensor[i]) > max_value){
+                max_value = std::abs(casted_tensor[i]);
+            }
+        }
+        // One entry is appended per call(), building a history of output ranges.
+        registeredOutputs.push_back(max_value);
+    }
+
+    static std::shared_ptr<OutputRange> create(const std::shared_ptr<Operator> op)
+    {
+        return std::make_shared<OutputRange>(op);
+    }
+
+    std::vector<float> getOutputs() {
+        return  registeredOutputs;
+    }
+
+    float getOutput(size_t idx) { // NOTE(review): unchecked — idx must be < getOutputs().size()
+        return registeredOutputs[idx];
+    }
+
+};
+
+namespace {
+    static Registrar<Hook> registrarHook_OutputRange({"output_range"}, Aidge::OutputRange::create); // self-registers OutputRange under the "output_range" key
+}
+}
+
+#endif /* AIDGE_CORE_HOOK_OUTPUTRANGE_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 92524937473d0ee46f9bc8dcebc9b79b3b8df838..892e5bdb7d9cf86cc7c7c82f5c7d2361defb70a8 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -20,12 +20,14 @@
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/hook/hook.hpp"
 
 namespace Aidge {
 
 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
   std::unique_ptr<OperatorImpl> mImpl; // implementation of the operator
+  std::map<std::string, std::shared_ptr<Hook>> mHooks; // hooks keyed by registrar name; all are run after forward()
 
 private:
   std::string mType;
@@ -55,6 +57,15 @@ public:
     virtual std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const = 0;
     virtual Tensor& output(const IOIndex_t /*outputIdx*/) const = 0;
 
+    /// Hook registered under hookName, or nullptr if none was added.
+    std::shared_ptr<Hook> getHook(std::string hookName) {
+        // find() rather than operator[]: a lookup must not insert a null entry into mHooks.
+        const auto it = mHooks.find(hookName);
+        return (it == mHooks.end()) ? nullptr : it->second;
+    }
+    void addHook(std::string hookName) {
+        mHooks.insert(std::pair<std::string, std::shared_ptr<Hook>>(hookName, Registrar<Hook>::create({hookName})(shared_from_this())));
+    }
+    void runHooks() const;
 ///////////////////////////////////////////////////////
 //        IMPLEMENTATION
 ///////////////////////////////////////////////////////
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e158ecd7567eb683558d9e09a6cf03e5cc35ce42
--- /dev/null
+++ b/include/aidge/operator/Scaling.hpp
@@ -0,0 +1,140 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef __AIDGE_CORE_OPERATOR_Scaling_H__
+#define __AIDGE_CORE_OPERATOR_Scaling_H__
+
+#include <cassert>
+#include <cstring>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class ScalingParam { // keys of the Parameterizable<ScalingParam, float> parameter set
+    scalingFactor
+};
+/// Scaling_Op: operator holding a single float "scalingFactor" parameter (computation is provided by the registered backend).
+class Scaling_Op : public Operator,
+    public Registrable<Scaling_Op, std::string, std::unique_ptr<OperatorImpl>(const Scaling_Op&)>,
+    public Parameterizable<ScalingParam, float> {
+public:
+    // FIXME: change accessibility (tensors kept public for now so backends can reach them directly)
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Scaling";
+
+    Scaling_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<ScalingParam, float>;
+    template <ScalingParam e> using param = typename Parameterizable_::template param<e>;
+
+    /// Build a Scaling operator with the given factor; tensors default to Float32.
+    Scaling_Op(float scalingFactor)
+            : Operator(Type),
+            Parameterizable_(
+                param<ScalingParam::scalingFactor>(scalingFactor))
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    /// Attach data as the unique input; it must be a Tensor.
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        (void) inputIdx; //avoid unused warning
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    /// Forward the input dimensions to the output once they are known.
+    void computeOutputDims() override final {
+        if (!mInput->empty())
+            mOutput->resize(mInput->dims());
+    }
+
+    /// True once computeOutputDims() has sized the output.
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    // Reference accessors (only index 0 is valid).
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx == 0) && "Scaling Operator has only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return *(mInput.get());
+    }
+    inline Tensor& output(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Scaling Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return *(mOutput.get());
+    }
+
+    // Shared-pointer accessors.
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final { 
+        assert((inputIdx == 0) && "Scaling Operator has only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Scaling Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+    // Type-erased (Data) accessors used by generic graph code.
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+    /// Bind this operator (and its tensors) to the backend registered under name.
+    void setBackend(const std::string& name) {
+        mImpl = Registrar<Scaling_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+    // Arity: exactly one data input and one output.
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+/// Helper building a graph Node that wraps a Scaling_Op (default factor 1.0f).
+inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
+}
+}
+
+namespace {
+template <>
+const char* const EnumStrings<Aidge::ScalingParam>::data[] // display names, indexed by ScalingParam value
+    = {"scalingFactor"};
+}
+
+#endif /* __AIDGE_CORE_OPERATOR_Scaling_H__ */
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index b3896b12143488275b2a064819595c380da62844..09a17a428e1de91c0318f710e6f097573cf529a6 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -42,6 +42,14 @@ void Aidge::Operator::updateConsummerProducer(){
     mImpl->updateConsummerProducer();
 }
 
-void Aidge::Operator::forward() { mImpl->forward(); }
+void Aidge::Operator::runHooks() const {
+    for (auto& hook : mHooks) { // every attached hook, in map (hook-name) order
+        hook.second->call();
+    }
+}
+void Aidge::Operator::forward() {
+    mImpl->forward();
+    runHooks(); // notify hooks (e.g. "execution_time", "output_range") once the output is ready
+}
 
 void Aidge::Operator::backward() { mImpl->backward(); }