diff --git a/include/aidge/backend/cpu/operator/ClipImpl_kernels.hpp b/include/aidge/backend/cpu/operator/ClipImpl_kernels.hpp
index 6646aa1c3772962afd320452a51257c319367808..71c0f65f806e87c80bb25bc9dfaaf3ecb13d8e43 100644
--- a/include/aidge/backend/cpu/operator/ClipImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ClipImpl_kernels.hpp
@@ -29,7 +29,7 @@ void ClipImpl_cpu_forward_kernel(
     O* output = static_cast<O*>(output_);
 
     for (std::size_t i = 0; i < length; ++i) {
-        output[i] = std::max(min_, std::min(static_cast<float>(input[i]), max_));
+        output[i] = std::min(std::max(static_cast<float>(input[i]), min_), max_);
     }
 }
 
diff --git a/unit_tests/operator/Test_ClipImpl.cpp b/unit_tests/operator/Test_ClipImpl.cpp
index a2da6d71a989f026439e3e60f66d272cc6f7e864..45c8da5bf7ecc84fad6b3e694fe204540f579af3 100644
--- a/unit_tests/operator/Test_ClipImpl.cpp
+++ b/unit_tests/operator/Test_ClipImpl.cpp
@@ -61,7 +61,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
     std::chrono::time_point<std::chrono::system_clock> end;
     std::chrono::duration<double, std::micro> duration;
 
-    SECTION("Simple clamp test [Forward]") {
+    SECTION("Simple clip test [Forward]") {
         std::size_t totalComputation = 0;
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
             // generate Tensors dimensions
@@ -121,7 +121,67 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
         std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl;
         std::cout << "total time: " << duration.count() << std::endl;
     } 
-    SECTION("Clamp with Clip Attr [Forward]")
+    SECTION("Clip test with min >= max [Forward]") {
+        std::size_t totalComputation = 0;
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // generate Tensors dimensions
+            const std::size_t dim0 = distDims(gen);
+            const std::size_t dim1 = distDims(gen);
+            totalComputation += dim0*dim1;
+
+            // Create and populate the array with random float values
+            float* Array = new float[dim0*dim1]; // NOTE(review): never freed — confirm whether the tensor impl takes ownership
+            for (std::size_t i = 0; i < dim0*dim1; ++i) {
+                Array[i] = dis(gen); // Generate random float value
+            }
+
+            // Convert Input to Tensor
+            std::shared_ptr<Tensor> TInput = std::make_shared<Tensor>(DataType::Float32);
+            TInput -> resize({dim0,dim1});
+            TInput -> setBackend("cpu");
+            TInput -> getImpl() -> setRawPtr(Array, dim0*dim1);
+            
+            float min = dismax(gen);
+            std::shared_ptr<Tensor> Tmin = std::make_shared<Tensor>(DataType::Float32);
+            Tmin -> resize({});
+            Tmin -> setBackend("cpu");
+            Tmin -> getImpl() -> setRawPtr(&min,1);
+
+            float max = dismin(gen); // We generate max and min so that max is always <= min
+            std::shared_ptr<Tensor> Tmax = std::make_shared<Tensor>(DataType::Float32);
+            Tmax -> resize({});
+            Tmax -> setBackend("cpu");
+            Tmax -> getImpl() -> setRawPtr(&max,1);
+            // convert res to Tensor
+            std::vector<float> GT(Array, Array + (dim0*dim1));
+            for (float& val : GT)
+            {
+                val = max;
+            }
+            std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(DataType::Float32);
+            Tres -> resize({dim0,dim1});
+            Tres -> setBackend("cpu");
+            Tres -> getImpl() -> setRawPtr(GT.data(), dim0*dim1);
+
+            op->associateInput(0, TInput);
+            op->associateInput(1, Tmin);
+            op->associateInput(2, Tmax);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            op->forwardDims(true);
+            
+            start = std::chrono::system_clock::now();
+            myClip->forward();
+            end = std::chrono::system_clock::now();
+
+            duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+            REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+        }
+        std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl;
+        std::cout << "total time: " << duration.count() << std::endl;
+    }
+    SECTION("Clip with Clip Attr [Forward]")
     {
         std::size_t totalComputation = 0;
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) 
@@ -174,7 +234,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
         std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl;
         std::cout << "total time: " << duration.count() << std::endl;
     }
-    SECTION("Simple clamp test [Backward]") {
+    SECTION("Simple clip test [Backward]") {
         std::size_t totalComputation = 0;
         duration = std::chrono::duration<double, std::micro>::zero();
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_ScalingMeta.cpp b/unit_tests/operator/Test_ScalingMeta.cpp
deleted file mode 100644
index ae39ee0ac53c3c1b9c2300035f92444855bdcfb8..0000000000000000000000000000000000000000
--- a/unit_tests/operator/Test_ScalingMeta.cpp
+++ /dev/null
@@ -1,86 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <catch2/catch_test_macros.hpp>
-#include <cmath>
-#include <cstdlib>
-#include <memory>
-
-#include "aidge/utils/TensorUtils.hpp"
-#include "aidge/backend/cpu/operator/ConvImpl.hpp"
-#include "aidge/backend/cpu/operator/PadImpl.hpp"
-#include "aidge/data/Tensor.hpp"
-#include "aidge/operator/Conv.hpp"
-#include "aidge/operator/MetaOperator.hpp"
-#include "aidge/operator/MetaOperatorDefs.hpp"
-#include "aidge/operator/Pad.hpp"
-#include "aidge/operator/Pop.hpp"
-#include "aidge/scheduler/SequentialScheduler.hpp"
-#include "aidge/scheduler/ParallelScheduler.hpp"
-
-using namespace Aidge;
-
-TEST_CASE("ScalingNodeMeta", "[ScalingMeta][CPU]") {
-    /*SECTION("Scaling MetaOperator")
-    {
-        std::shared_ptr<Tensor> t0 = std::make_shared<Tensor>(
-        Array2D<float, 3, 3>{{{45, 72, 2},
-                                {84.15, 144.45, 0.01484},
-                                {0.62132, 17.67132, 212.132}}});
-        
-        auto scal = ScalingMeta(2,8,false);
-        auto scalop = std::static_pointer_cast<OperatorTensor>(scal->getOperator());
-        t0->setBackend("cpu");
-        scalop->associateInput(0,t0);
-        scalop->setBackend("cpu");
-        scalop->forwardDims();
-        scalop->forward();
-        //auto sf = scalop -> getInput(1);
-        auto out0 = scalop->getOutput(0);
-        auto in0 = scalop->getInput(0);
-        auto in1 = scalop->getInput(1);
-        std::cout << "in0 is: ";
-        in0->print();
-        std::cout << "in1 is: ";
-        in1->print();
-        std::cout << "output is: " ;
-        out0->print();
-    }*/
-    SECTION("MulPTQ")
-    {
-        std::shared_ptr<Tensor> t0 = std::make_shared<Tensor>(
-        Array2D<float, 3, 3>{{{45, 72, 2},
-                                {84.15, 144.45, 0.01484},
-                                {0.62132, 17.67132, 212.132}}});
-        
-        auto scal = MulPTQ(2.001);
-        auto scalop = std::static_pointer_cast<OperatorTensor>(scal->getOperator());
-        t0->setBackend("cpu");
-        scal->getOperator()->associateInput(0,t0);
-
-        auto g = getConnectedGraphView(scal);
-        g->setDataType(DataType::Float32);
-        g->setBackend("cpu");
-
-        auto scheduler = SequentialScheduler(g);
-        scheduler.forward();
-
-        auto out0 = scalop->getOutput(0);
-        auto in0 = scalop->getInput(0);
-        auto in1 = scalop->getInput(1);
-        std::cout << "in0 is: ";
-        in0->print();
-        std::cout << "in1 is: ";
-        in1->print();
-        std::cout << "output is: " ;
-        out0->print();
-    }
-}
\ No newline at end of file