diff --git a/.gitignore b/.gitignore
index ba5c59398b68083c6c1c5fe820fb9070d999c18e..643b0dcd4b000f71aa0e87d778731ba8dd5975c5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@
 build*/
 install*/
 include/aidge/backend/quantization_version.h
+include/aidge/quantization_version.h
 
 # VSCode
 .vscode
diff --git a/include/aidge/quantization/PTQ/PTQ.hpp b/include/aidge/quantization/PTQ/PTQ.hpp
index 7f11c012636b04d0434ff7a6a04bbf5131096171..d68b540db5afa09d64bc3a005f73e6bc076c6669 100644
--- a/include/aidge/quantization/PTQ/PTQ.hpp
+++ b/include/aidge/quantization/PTQ/PTQ.hpp
@@ -188,7 +188,6 @@ namespace Aidge {
     * @param singleShift Whether to convert the scaling factors into powers of two. If true, the approximations are compensated using the previous nodes' weights.
     * @param useCuda Whether to speed up the PTQ by computing the value ranges using CUDA kernels.
     * This flag does not set the backend of the graphview to "cuda" at the end of the PTQ pipeline.
-     * @param foldGraph Whether to apply the constant folding recipe which makes the end graphview much easier to read
      * @param bitshiftRounding Whether rounding should be applied after bit-shifting operations. If enabled, the result of bit-shifting is rounded to the nearest integer.
     * @param verbose Whether to print internal information about the quantization process.
      */
@@ -201,7 +200,6 @@ namespace Aidge {
         bool optimizeSigns,
         bool singleShift,
         bool useCuda,
-        bool foldGraph,
         bool bitshiftRounding,
         bool verbose);
     /**
diff --git a/python_binding/pybind_PTQ.cpp b/python_binding/pybind_PTQ.cpp
index d7bc00dcc095736419732b9ed56918ca37663b50..17204fb67fe88f0a14f624d68b40327942b3e1b7 100644
--- a/python_binding/pybind_PTQ.cpp
+++ b/python_binding/pybind_PTQ.cpp
@@ -104,7 +104,6 @@ void init_PTQ(py::module &m) {
         py::arg("optimize_signs") = false,
         py::arg("single_shift") = false, 
         py::arg("use_cuda") = false,
-        py::arg("fold_graph") = true,
         py::arg("bitshift_rounding") = false,
         py::arg("verbose") = false,
     R"mydelimiter(
diff --git a/src/PTQ/PTQ.cpp b/src/PTQ/PTQ.cpp
index df203f2547e720bcfbef109e05e7ccca5ed42b9e..9ec8b1b478b3028261f49f3f2569f77b08cb2ff6 100644
--- a/src/PTQ/PTQ.cpp
+++ b/src/PTQ/PTQ.cpp
@@ -1321,7 +1321,6 @@ void quantizeNetwork(std::shared_ptr<GraphView> graphView,
     bool optimizeSigns,
     bool singleShift,
     bool useCuda,
-    bool foldGraph,
     bool bitshiftRounding,
     bool verbose)
 {
@@ -1369,7 +1368,8 @@ void quantizeNetwork(std::shared_ptr<GraphView> graphView,
     }
     if( targetType != DataType::Float64 && targetType != DataType::Float32 && targetType != DataType::Float16) 
     {
-        AIDGE_ASSERT(!noQuant,"Cannot cast operators with the noQuant (Fake Quantization) flag set to true!")
+        AIDGE_ASSERT(!noQuant,"Cannot cast operators with the noQuant (Fake Quantization) flag set to true!");
+        AIDGE_ASSERT(!optimizeSigns,"True quantization is not yet supported with the optimize_signs option.");
         Log::notice("Starting to cast operators into the desired type ...");
         castQuantizedGraph(graphView,targetType,singleShift,bitshiftRounding);
         
@@ -1380,11 +1380,7 @@ void quantizeNetwork(std::shared_ptr<GraphView> graphView,
     {
         setupDataType(graphView, inputDataSet, targetType);
     }
-    if(foldGraph)
-    {
-        Log::notice("Applying constant folding recipe to the graph ...");
-        applyConstFold(graphView);
-    }
+
     //Mandatory to handle all of the newly added connections!
     graphView->updateInputsOutputs();
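
A minimal sketch of an updated Python call site after this change (not part of the patch). It assumes the binding is exposed as quantize_network in the aidge_quantization module; the leading positional parameters are not visible in this diff, so their names and order below are illustrative assumptions. The visible effect for callers is that fold_graph is no longer accepted, and optimize_signs must stay false when truly casting to an integer target type:

    import aidge_quantization

    def quantize(model, calib_set, nb_bits=8):
        # model: assumed to be an aidge_core.GraphView; calib_set: the
        # calibration input data set. Both names are illustrative.
        aidge_quantization.quantize_network(
            model,
            nb_bits,                # assumed leading positionals, not shown in this diff
            calib_set,
            optimize_signs=False,   # the new AIDGE_ASSERT forbids this with true quantization
            single_shift=False,
            use_cuda=False,
            # fold_graph=True,      # removed by this patch: pybind11 now rejects this kwarg
            bitshift_rounding=False,
            verbose=False,
        )

Callers that still want a folded graph must now apply a constant-folding recipe themselves after quantization; the applyConstFold step is no longer run inside quantizeNetwork.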