diff --git a/src/operator/ClipImpl.cpp b/src/operator/ClipImpl.cpp
index 09014e9607686a0f9cd771e737674211d19077ea..3949ae9568c4a3a4d38bf573eecf420f95a8c5e7 100644
--- a/src/operator/ClipImpl.cpp
+++ b/src/operator/ClipImpl.cpp
@@ -30,16 +30,18 @@ void Aidge::ClipImpl_cpu::forward() {
     std::shared_ptr<Tensor> in2 = op_.getInput(2);
     std::shared_ptr<Tensor> out0 = op_.getOutput(0);
     AIDGE_ASSERT(in0, "missing input #0");
-    AIDGE_ASSERT(in1, "missing input #1 -> Min value empty shape Tensor");
-    AIDGE_ASSERT(in2, "missing input #2 -> Max value empty shape Tensor");
-    
+    // Inputs #1 (min) and #2 (max) are optional: when a Tensor is not
+    // connected, fall back to the corresponding Clip attribute.
+    void* min = in1 ? getCPUPtr(mOp.getRawInput(1)) : &op_.min();
+    void* max = in2 ? getCPUPtr(mOp.getRawInput(2)) : &op_.max();
+
     // Find the correct kernel type
     const auto impl = Registrar<ClipImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
     impl.forward(
-       getCPUPtr(mOp.getRawInput(1)),
-       getCPUPtr(mOp.getRawInput(2)),
+       min,
+       max,
        getCPUPtr(mOp.getRawInput(0)), 
        in0->size(), 
        getCPUPtr(mOp.getRawOutput(0))
@@ -60,11 +62,15 @@ void Aidge::ClipImpl_cpu::backward() {
     
     // Find the correct kernel type
     const auto impl = Registrar<ClipImpl_cpu>::create(getBestMatch(getRequiredSpec()));
-
+    // Min/max inputs are optional here as well: use the Tensors when they are
+    // connected, otherwise fall back to the Clip attributes.
+    void* min = in1min ? getCPUPtr(in1min) : &op_.min();
+    void* max = in2max ? getCPUPtr(in2max) : &op_.max();
+
     // Call kernel
     impl.backward(
-        getCPUPtr(in1min), 
-        getCPUPtr(in2max), 
+        min, 
+        max, 
         gra_in0->size(), 
         getCPUPtr(in0), 
         getCPUPtr(gra_out0), 
diff --git a/unit_tests/operator/Test_ClipImpl.cpp b/unit_tests/operator/Test_ClipImpl.cpp
index 9598f405966aa663e16204f2ea63515b27b1088d..b7f286f1c6e0703509c9117afa06705706f7c3c2 100644
--- a/unit_tests/operator/Test_ClipImpl.cpp
+++ b/unit_tests/operator/Test_ClipImpl.cpp
@@ -119,8 +119,63 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
         }
         std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl;
         std::cout << "total time: " << duration.count() << std::endl;
     }
+    SECTION("Clamp with Clip Attr [Forward]")
+    {
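+        // Inputs #1 (min) and #2 (max) are deliberately left unconnected:
+        // the kernel must fall back to the Clip node's attributes.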
+        std::size_t totalComputation = 0;
+        duration = std::chrono::duration<double, std::micro>::zero();
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) 
+        {
+            float min = dismin(gen);
+            float max = dismax(gen);
+            std::shared_ptr<Node> myCl = Aidge::Clip("", min, max);
+            auto op = std::static_pointer_cast<OperatorTensor>(myCl -> getOperator());
+
+            // generate Tensors dimensions
+            const std::size_t dim0 = 3;
+            const std::size_t dim1 = 3;
+            totalComputation += dim0*dim1;
+
+            // Create and populate the array with random float values
+            float* Array = new float[dim0*dim1];
+            for (std::size_t i = 0; i < dim0*dim1; ++i) {
+                Array[i] = dis(gen); // Generate random float value
+            }
+            // Convert Input to Tensor
+            std::shared_ptr<Tensor> TInput = std::make_shared<Tensor>(DataType::Float32);
+            TInput -> resize({dim0,dim1});
+            TInput -> setBackend("cpu");
+            TInput -> getImpl() -> setRawPtr(Array, dim0*dim1);
+
+            // Compute the expected (clamped) values and wrap them in a Tensor
+            std::vector<float> GT(Array, Array + (dim0*dim1));
+            for (float& val : GT)
+            {
+                val = std::max(min, std::min(val, max));
+            }
+            std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(DataType::Float32);
+            Tres -> resize({dim0,dim1});
+            Tres -> setBackend("cpu");
+            Tres -> getImpl() -> setRawPtr(GT.data(), dim0*dim1);
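+            // Only input #0 is associated: min/max must come from the attributes.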
+            op->associateInput(0, TInput);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            op->forwardDims();
+            start = std::chrono::system_clock::now();
+            myCl->forward();
+            end = std::chrono::system_clock::now();
+
+            duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
 
+            REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+            delete[] Array; // setRawPtr does not transfer ownership of the buffer
+        }
+        std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl;
+        std::cout << "total time: " << duration.count() << std::endl;
+    }
     SECTION("Simple clamp test [Backward]") {
         std::size_t totalComputation = 0;
         duration = std::chrono::duration<double, std::micro>::zero();
@@ -200,6 +255,6 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
         std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl;
         std::cout << "total time: " << duration.count() << std::endl;
     }
-  }
  }
-} // namespace Aidge 
\ No newline at end of file
+}
+} // namespace Aidge