diff --git a/aidge_export_cpp/static/rescaling_utils.hpp b/aidge_export_cpp/static/rescaling_utils.hpp
index bfd229f9f409946c707590e517505fe405b76096..2cb6b8e4cfd157ee2b41c4be95f5fe9068e39f15 100644
--- a/aidge_export_cpp/static/rescaling_utils.hpp
+++ b/aidge_export_cpp/static/rescaling_utils.hpp
@@ -1,6 +1,9 @@
 #ifndef __AIDGE_EXPORT_CPP_RESCALING_UTILS_HPP__
 #define __AIDGE_EXPORT_CPP_RESCALING_UTILS_HPP__
 
+#include <cstdint>
+#include <cstddef>
+
 // ---------------------------------------------------
 // ----------------- Saturate Utils ------------------
 // ---------------------------------------------------
diff --git a/aidge_export_cpp/static/typedefs.hpp b/aidge_export_cpp/static/typedefs.hpp
index 1544a98f45af80055a84176bda1c14b6b0ae7c30..e8148098314152d8494371661d8552c1cecf44be 100644
--- a/aidge_export_cpp/static/typedefs.hpp
+++ b/aidge_export_cpp/static/typedefs.hpp
@@ -5,6 +5,7 @@
 #include <type_traits>
 #include <limits>
 #include <cstdint>
+#include <cstddef>
 
 typedef enum {
     Tanh,
@@ -125,6 +126,44 @@ constexpr void pack_rev_set(T& data, int i, decltype(data.fields.op0) val) {
     }
 }
 
+
+// ----------------------------------------------------------------------------
+// -------------- Custom bit-width types operator overloading -----------------
+// ----------------------------------------------------------------------------
+
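+// Note: these operators are provided only for the pack-of-one specialization
+// (packed_bitint<1, N_BITS, SIGNED>); they act directly on the underlying value member.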
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED>& operator+=(packed_bitint<1, N_BITS, SIGNED>& d, T rhs)
+    {d.value += static_cast<decltype(d.value)>(rhs); return d;}
+
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED> operator+(packed_bitint<1, N_BITS, SIGNED> d, T rhs)
+    {d += rhs; return d;}
+
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED>& operator-=(packed_bitint<1, N_BITS, SIGNED>& d, T rhs)
+    {d.value -= static_cast<decltype(d.value)>(rhs); return d;}
+
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED> operator-(packed_bitint<1, N_BITS, SIGNED> d, T rhs)
+    {d -= rhs; return d;}
+
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED>& operator*=(packed_bitint<1, N_BITS, SIGNED>& d, T rhs)
+    {d.value *= static_cast<decltype(d.value)>(rhs); return d;}
+
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED> operator*(packed_bitint<1, N_BITS, SIGNED> d, T rhs)
+    {d *= rhs; return d;}
+
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED>& operator/=(packed_bitint<1, N_BITS, SIGNED>& d, T rhs)
+    {d.value /= static_cast<decltype(d.value)>(rhs); return d;}
+
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED> operator/(packed_bitint<1, N_BITS, SIGNED> d, T rhs)
+    {d /= rhs; return d;}
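+
+// Usage sketch (illustrative only):
+//   packed_bitint<1, 4, true> acc{};
+//   acc += 3;            // adds to acc.value
+//   auto sum = acc + 1;  // copies acc, then applies +=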
+
+
 // ----------------------------------------------------------------------------
 // ---------------- Custom bit-width types specializations --------------------
 // ----------------------------------------------------------------------------
diff --git a/aidge_export_cpp/templates/configuration/activation_config.jinja b/aidge_export_cpp/templates/configuration/activation_config.jinja
index 45c2760706fd6e3a07b9cf5c9be8a1e585a2f0b1..fcd99d4c9e56663fc42f18e22e4ed884b9ebcc73 100644
--- a/aidge_export_cpp/templates/configuration/activation_config.jinja
+++ b/aidge_export_cpp/templates/configuration/activation_config.jinja
@@ -1,6 +1,7 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+
 #include "network/rescaling_utils.hpp"
 
 {# For layer configuration -#}
diff --git a/aidge_export_cpp/templates/configuration/batchnorm_config.jinja b/aidge_export_cpp/templates/configuration/batchnorm_config.jinja
index 34412212dbb1ff689d4c04d89f8206e22bad8d3b..d69c753e28752049fb43d8333a5992d000dfc7d8 100644
--- a/aidge_export_cpp/templates/configuration/batchnorm_config.jinja
+++ b/aidge_export_cpp/templates/configuration/batchnorm_config.jinja
@@ -1,6 +1,7 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+
 #include "network/rescaling_utils.hpp"
 
 {# For layer configuration -#}
diff --git a/aidge_export_cpp/templates/configuration/concat_config.jinja b/aidge_export_cpp/templates/configuration/concat_config.jinja
index 3ae4177b21960e14e5cc0002b00f2db8a17b5d31..7ccacadc4b50ee80f24b2c6025afbe474ea6fb19 100644
--- a/aidge_export_cpp/templates/configuration/concat_config.jinja
+++ b/aidge_export_cpp/templates/configuration/concat_config.jinja
@@ -2,6 +2,8 @@
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
 
+#include <cstddef>
+
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
 
diff --git a/aidge_export_cpp/templates/configuration/convolution_config.jinja b/aidge_export_cpp/templates/configuration/convolution_config.jinja
index 5774e031ba553414250886a0896de1f491425e80..63e3980f5ec0a0350209438f1b9eada6657cc427 100644
--- a/aidge_export_cpp/templates/configuration/convolution_config.jinja
+++ b/aidge_export_cpp/templates/configuration/convolution_config.jinja
@@ -1,7 +1,9 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+
 #include "network/rescaling_utils.hpp"
+
 {# For layer configuration -#}
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
diff --git a/aidge_export_cpp/templates/configuration/elemwise_config.jinja b/aidge_export_cpp/templates/configuration/elemwise_config.jinja
index f23e835d875f6377fc206d09bc36a455476fbbe9..ba417fa29ec9af794935e7b02b5954158f323734 100644
--- a/aidge_export_cpp/templates/configuration/elemwise_config.jinja
+++ b/aidge_export_cpp/templates/configuration/elemwise_config.jinja
@@ -1,6 +1,8 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+
+#include <cstddef>
 #include "network/rescaling_utils.hpp"
 
 {% include "./_def_io.jinja" %}
diff --git a/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja b/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja
index b50f64fb500b7ae036f91821b689175c2c9b3e92..2b4329a6633dd2b16c7dbac0d7ce72097149558d 100644
--- a/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja
+++ b/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja
@@ -1,7 +1,9 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+
 #include "network/rescaling_utils.hpp"
+
 {# For layer configuration -#}
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
diff --git a/aidge_export_cpp/templates/configuration/matmul_config.jinja b/aidge_export_cpp/templates/configuration/matmul_config.jinja
index e3d59c110e737dcadbb30b19019d54d4df259d2b..2b56da8cd42d9a1dc8a63bbe97f816a4924e245c 100644
--- a/aidge_export_cpp/templates/configuration/matmul_config.jinja
+++ b/aidge_export_cpp/templates/configuration/matmul_config.jinja
@@ -1,6 +1,8 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+
+#include <cstddef>
 #include "network/rescaling_utils.hpp"
 
 {# For layer configuration -#}
diff --git a/aidge_export_cpp/templates/configuration/reducemean_config.jinja b/aidge_export_cpp/templates/configuration/reducemean_config.jinja
index e65bc1f8870601cfbe03f96a74133721531c4de6..9d8c9696f0070855fbf9f43ef4bd531890e44530 100644
--- a/aidge_export_cpp/templates/configuration/reducemean_config.jinja
+++ b/aidge_export_cpp/templates/configuration/reducemean_config.jinja
@@ -1,6 +1,9 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+
+#include <cstddef>
+
 {# For layer configuration -#}
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
diff --git a/aidge_export_cpp/templates/configuration/slice_config.jinja b/aidge_export_cpp/templates/configuration/slice_config.jinja
index 1f3d8ed39fda10986ecd0779742ceb05d14b011e..11d08e66e7bc7fc9e4376e3864f643c1dc3b6515 100644
--- a/aidge_export_cpp/templates/configuration/slice_config.jinja
+++ b/aidge_export_cpp/templates/configuration/slice_config.jinja
@@ -2,6 +2,8 @@
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
 
+#include <cstddef>
+
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
 {# For layer configuration -#}
diff --git a/aidge_export_cpp/templates/configuration/transpose_ND_config.jinja b/aidge_export_cpp/templates/configuration/transpose_ND_config.jinja
index 145d7a51a99271eb72d007e4389da6b04547ca1c..4c3a987389a8a91657478cd12465fe083d3025e9 100644
--- a/aidge_export_cpp/templates/configuration/transpose_ND_config.jinja
+++ b/aidge_export_cpp/templates/configuration/transpose_ND_config.jinja
@@ -2,6 +2,8 @@
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
 
+#include <cstddef>
+
 {# For layer configuration -#}
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
diff --git a/aidge_export_cpp/templates/data/inputs.jinja b/aidge_export_cpp/templates/data/inputs.jinja
index b1bc38860b25d257488fca6d28daf9bdc7c07ffb..58a263f2f8ab7e5c5ce10c296b747529471ba738 100644
--- a/aidge_export_cpp/templates/data/inputs.jinja
+++ b/aidge_export_cpp/templates/data/inputs.jinja
@@ -1,5 +1,6 @@
 {#- For libraries #}
 #include <stdint.h>
+#include "network/typedefs.hpp"
 
 {#- Design dimensions of the array #}
 {%- set dims_str = "" %}
diff --git a/aidge_export_cpp/templates/data/parameters.jinja b/aidge_export_cpp/templates/data/parameters.jinja
index 70e2b7dcc5b5728bcc252c4974d4e09b79f975f0..92fb9eeb770fff42238af357cd40751c03e55bd2 100644
--- a/aidge_export_cpp/templates/data/parameters.jinja
+++ b/aidge_export_cpp/templates/data/parameters.jinja
@@ -1,19 +1,6 @@
 {#- For libraries -#}
 #include <stdint.h>
-
-{%- set format_map = {
-    "int8_t": "%4d",
-    "int16_t": "%6d",
-    "int32_t": "%6d",
-    "int64_t": "%8d",
-    "uint8_t": "%4d",
-    "uint16_t": "%6d",
-    "uint32_t": "%6d",
-    "uint64_t": "%8d",
-    "float": "%.9f",
-    "half_float::half": "%.9f",
-    "double": "%.17f"
-} %}
+#include "network/typedefs.hpp"
 
 {# Design header of the array -#}
 static const {{ dtype }} {{ name }}[{{ dims | join("*") }}] __attribute__((section(".nn_data"))) =
@@ -21,7 +8,7 @@ static const {{ dtype }} {{ name }}[{{ dims | join("*") }}] __attribute__((secti
 {# 1D #}
 {%- if dims | length == 1 -%}
 {%- for x in range(dims[0]) -%}
-{{ format_map[dtype] | format(values[x]) }}, 
+{{ "{:>20}".format(values[x]) }}, 
 {%- endfor -%}
 {%- endif -%}
 
@@ -30,7 +17,7 @@ static const {{ dtype }} {{ name }}[{{ dims | join("*") }}] __attribute__((secti
 {%- for y in range(dims[0]) %}
 {{ ' ' }}
     {%- for x in range(dims[1]) -%}
-        {{ format_map[dtype] | format(values[y][x]) }}, 
+        {{ "{:>20}".format(values[y][x]) }}, 
     {%- endfor %}
 {%- endfor -%}
 {%- endif -%}
@@ -42,7 +29,7 @@ static const {{ dtype }} {{ name }}[{{ dims | join("*") }}] __attribute__((secti
     {%- for y in range(dims[1]) %}
     {{ ' ' }}
         {%- for x in range(dims[2]) -%}
-            {{ format_map[dtype] | format(values[z][y][x]) }}, 
+            {{ "{:>20}".format(values[z][y][x]) }}, 
         {%- endfor -%}
     {%- endfor %}
 {%- endfor -%}
@@ -57,7 +44,7 @@ static const {{ dtype }} {{ name }}[{{ dims | join("*") }}] __attribute__((secti
         {%- for y in range(dims[2]) %}
         {{ ' ' }}
             {%- for x in range(dims[3]) -%}
-                {{ format_map[dtype] | format(values[n][z][y][x]) }}, 
+                {{ "{:>20}".format(values[n][z][y][x]) }}, 
             {%- endfor -%}
         {%- endfor %}
     {%- endfor %}
diff --git a/aidge_export_cpp/unit_tests/test_export.py b/aidge_export_cpp/unit_tests/test_export.py
index 55f558220753723749a601e40cd625173655a089..018589abefd7e21f067c56d6162443b73f7de10a 100644
--- a/aidge_export_cpp/unit_tests/test_export.py
+++ b/aidge_export_cpp/unit_tests/test_export.py
@@ -41,31 +41,22 @@ def initFiller(model):
             else:
                 pass
 
-def _np_init(shape, dtype=np.float32):
+def _np_init(shape, value_range=[-1, 1], dtype=aidge_core.dtype.float32):
     """
-    Generates a NumPy array with the given shape, filled with random values between -1 and 1
-    with a step of 0.1.
+    Generates a NumPy array with the given shape, filled with random values in
+    the given range. A step of 0.1 is used for floating-point types.
 
     :param shape: Tuple of dimensions for the array
-    :param dtype: Data type of the output array (default: np.float32)
+    :param value_range: Inclusive [min, max] range of the generated values (default: [-1, 1])
+    :param dtype: Data type of the output array (default: aidge_core.dtype.float32)
     :return: A NumPy array with the given shape and dtype
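+    :example: _np_init((2, 3), [-4, 3], aidge_core.dtype.int8) returns a
+        (2, 3) int32 array of values drawn uniformly from [-4, 3]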
     """
     total_elements = reduce(operator.mul, shape, 1)
-    data = (np.random.randint(0, 21, size=total_elements) - 10) / 10.0
-    return data.reshape(shape).astype(dtype)
-
-def _np_init_ones(shape, default_value=0.01, dtype=np.float32):
-    """
-    Generates a NumPy array with the given shape, filled with random values between -1 and 1
-    with a step of 0.1.
-
-    :param shape: Tuple of dimensions for the array
-    :param dtype: Data type of the output array (default: np.float32)
-    :return: A NumPy array with the given shape and dtype
-    """
-    total_elements = reduce(operator.mul, shape, 1)
-    data = np.ones(total_elements) * default_value
-    return data.reshape(shape).astype(dtype)
+    if aidge_core.is_floating_point(dtype):
+        data = np.random.randint(10 * value_range[0], 10 * value_range[1] + 1, size=total_elements) / 10.0
+        return data.reshape(shape).astype(np.float32)
+    else:
+        data = np.random.randint(value_range[0], value_range[1] + 1, size=total_elements)
+        return data.reshape(shape).astype(np.int32)
 
 
 # Global dictionary to store test reports
@@ -103,7 +94,7 @@ class test_operator_export(unittest.TestCase):
         #     shutil.rmtree(self.export_folder, ignore_errors=True)
 
 
-    def unit_test_export(self, graph_view, op_name, in_dims, random_inputs=True, random_weights=True, default_value=0.01):
+    def unit_test_export(self, graph_view, op_name, in_dims, dtypes=[aidge_core.dtype.float32], random_weights=True, default_value=0.01):
         """
         TODO:
         * Handle multiple dataformat
@@ -118,75 +109,88 @@ class test_operator_export(unittest.TestCase):
         graph_view.set_backend("cpu")
         graph_view.forward_dims(dims=in_dims)
 
-        for node in graph_view.get_nodes():
-            if node.type() == "Producer":
-                prod_op = node.get_operator()
-                value = prod_op.get_output(0)
-
-                if (random_weights):
-                    tensor = aidge_core.Tensor(_np_init(value.dims()))
-                    node.get_operator().set_output(0, tensor)
-                elif default_value != None:
-                    aidge_core.constant_filler(value, default_value)
-
-        # Fuse operators to match implemented cpp kernels
-        cpp_fuse_to_metaops(graph_view)
-
-        scheduler = aidge_core.SequentialScheduler(graph_view)
-
-        if (random_inputs):
-            in_tensor = [aidge_core.Tensor(_np_init(in_dim)) for in_dim in in_dims]
-        else:
-            in_tensor = [aidge_core.Tensor(_np_init_ones(in_dim, default_value)) for in_dim in in_dims]
-
-        scheduler.forward(data=in_tensor)
-
-        # Name the metaops
-        set_nodes_names(scheduler)
-
-        # Note the convention ``<op_name>_test`` is useful for gitignore to avoid pushing generated export by accident.
-        export_folder = op_name + "_test"
-        self.export_folder = export_folder
-        shutil.rmtree(export_folder, ignore_errors=True)
-
-        aidge_core.export_utils.generate_main_compare_cpp(export_folder, graph_view)
-
-        graph_view.set_backend(aidge_export_cpp.ExportLibCpp._name)
-        aidge_core.adapt_to_backend(graph_view)
-        graph_view.forward_dims(dims=in_dims)
-        graph_view.save(export_folder + "/graph")
-    
-        scheduler = aidge_core.SequentialScheduler(graph_view)
-        scheduler.generate_scheduling()
-
-        # Export the model in C++ standalone
-        aidge_core.export_utils.scheduler_export(
-                scheduler,
-                export_folder,
-                aidge_export_cpp.ExportLibCpp,
-                memory_manager=aidge_core.mem_info.generate_optimized_memory_info,
-                memory_manager_args={"stats_folder": f"{export_folder}/stats", "wrapping": False }
-        )
-
-        print("COMPILATION")
-
-        try:
-            for std_line in run_command(["make"], cwd=export_folder):
-                print(std_line, end="")
-        except subprocess.CalledProcessError as e:
-            self.assertTrue(0, f"An error occurred: {e}\nFailed to generate export.")
-
-        print("RUN EXPORT")
-        cmd_gen = run_command(["./bin/run_export"], cwd=export_folder)
-        try:
-            for std_line in cmd_gen:
-                print(std_line, end="")
-        except subprocess.CalledProcessError as e:
-            self.assertTrue(0, f"An error occurred: {e}\nFailed to run export for comparison.")
-
-        return_code = next(cmd_gen, 0)
-
-        self.assertFalse(return_code, f"Export result are different than backend ones: {return_code} errors found.")
+        for dtype in dtypes:
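+            # Each data type gets a fresh clone of the original graph and a full export/compile/run cycle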
+            graph = graph_view.clone()
+
+            value_range = [-1, 1]
+            if aidge_core.is_integer(dtype):
+                graph.set_datatype(aidge_core.dtype.int32)
+                # Make sure there won't be overflow, as we don't apply any rescaling!
+                nb_bits = aidge_core.dtype_bit_width(dtype) // 2 - 1
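+                # e.g. int8: nb_bits = 8 // 2 - 1 = 3, so the signed range below is [-4, 3]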
+                if aidge_core.is_unsigned(dtype):
+                    value_range = [0, 2**nb_bits - 1]
+                else:
+                    value_range = [-2**(nb_bits - 1), 2**(nb_bits - 1) - 1]
+
+                if default_value is not None:
+                    default_value = 1
+
+            for node in graph.get_nodes():
+                if node.type() == "Producer":
+                    prod_op = node.get_operator()
+                    value = prod_op.get_output(0)
+
+                    if random_weights:
+                        tensor = aidge_core.Tensor(_np_init(value.dims(), value_range))
+                        tensor.set_datatype(dtype)
+                        node.get_operator().set_output(0, tensor)
+                    elif default_value is not None:
+                        aidge_core.constant_filler(value, default_value)
+
+            # Fuse operators to match implemented cpp kernels
+            cpp_fuse_to_metaops(graph)
+
+            scheduler = aidge_core.SequentialScheduler(graph)
+            in_tensor = [aidge_core.Tensor(_np_init(in_dim, value_range)) for in_dim in in_dims]
+            for t in in_tensor:
+                t.set_datatype(dtype)
+            scheduler.forward(data=in_tensor)
+
+            # Name the metaops
+            set_nodes_names(scheduler)
+
+            # Note the convention ``<op_name>_test`` is useful for gitignore to avoid pushing generated export by accident.
+            export_folder = op_name + "_" + str(dtype) + "_test"
+            self.export_folder = export_folder
+            shutil.rmtree(export_folder, ignore_errors=True)
+
+            aidge_core.export_utils.generate_main_compare_cpp(export_folder, graph)
+
+            graph.set_backend(aidge_export_cpp.ExportLibCpp._name)
+            aidge_core.adapt_to_backend(graph)
+            graph.forward_dims(dims=in_dims)
+            graph.save(export_folder + "/graph")
+
+            scheduler = aidge_core.SequentialScheduler(graph)
+            scheduler.generate_scheduling()
+
+            # Export the model in C++ standalone
+            aidge_core.export_utils.scheduler_export(
+                    scheduler,
+                    export_folder,
+                    aidge_export_cpp.ExportLibCpp,
+                    memory_manager=aidge_core.mem_info.generate_optimized_memory_info,
+                    memory_manager_args={"stats_folder": f"{export_folder}/stats", "wrapping": False }
+            )
+
+            print("COMPILATION")
+
+            try:
+                for std_line in run_command(["make"], cwd=export_folder):
+                    print(std_line, end="")
+            except subprocess.CalledProcessError as e:
+                self.assertTrue(0, f"An error occurred: {e}\nFailed to generate export.")
+
+            print("RUN EXPORT")
+            cmd_gen = run_command(["./bin/run_export"], cwd=export_folder)
+            try:
+                for std_line in cmd_gen:
+                    print(std_line, end="")
+            except subprocess.CalledProcessError as e:
+                self.assertTrue(0, f"An error occurred: {e}\nFailed to run export for comparison.")
+
+            return_code = next(cmd_gen, 0)
+
+            self.assertFalse(return_code, f"Export results differ from backend ones: {return_code} errors found.")
 
 
     def test_FC_flatten_in(self):
@@ -618,7 +622,7 @@ class test_operator_export(unittest.TestCase):
             aidge_core.Conv2D(in_channels=3, out_channels=3, kernel_dims=(3, 3), name="conv")
         ])
 
-        self.unit_test_export(model, "Conv2D", [[1, 3, 12, 12]])
+        self.unit_test_export(model, "Conv2D", [[1, 3, 12, 12]], dtypes=[aidge_core.dtype.float32, aidge_core.dtype.int32, aidge_core.dtype.int8, aidge_core.dtype.int4])
 
     def test_conv2D_asym(self):
         print("Conv2D_asym")
@@ -788,7 +792,7 @@ class test_operator_export(unittest.TestCase):
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
 
-        self.unit_test_export(model, "BatchNorm2D", [[1, 1, 5, 5]], False, False)
+        self.unit_test_export(model, "BatchNorm2D", [[1, 1, 5, 5]], random_weights=False)
 
     def test_batchnorm2D_Larger(self):
         print("BatchNormalization2DLarger")
@@ -796,7 +800,7 @@ class test_operator_export(unittest.TestCase):
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
 
-        self.unit_test_export(model, "BatchNorm2DLarger", [[1, 1, 5, 7]], False, False)
+        self.unit_test_export(model, "BatchNorm2DLarger", [[1, 1, 5, 7]], random_weights=False)
 
     def test_batchnorm2D_Higher(self):
         print("BatchNormalization2DHigher")
@@ -804,7 +808,7 @@ class test_operator_export(unittest.TestCase):
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
 
-        self.unit_test_export(model, "BatchNorm2DHigher", [[1, 1, 7, 5]], False, False)
+        self.unit_test_export(model, "BatchNorm2DHigher", [[1, 1, 7, 5]], random_weights=False)
 
     def test_batchnorm2D_Denser(self):
         print("BatchNormalization2DDenser")
@@ -812,7 +816,7 @@ class test_operator_export(unittest.TestCase):
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
 
-        self.unit_test_export(model, "BatchNorm2DDenser", [[1, 3, 5, 7]], False, False)
+        self.unit_test_export(model, "BatchNorm2DDenser", [[1, 3, 5, 7]], random_weights=False)
 
     def test_batchnorm2D_with_bigger_batch_size(self):
         print("BatchNormalization2DBiggerBatchSize")
@@ -820,7 +824,7 @@ class test_operator_export(unittest.TestCase):
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
 
-        self.unit_test_export(model, "BatchNormalization2DBiggerBatchSize", [[4, 3, 5, 7]], False, False)
+        self.unit_test_export(model, "BatchNormalization2DBiggerBatchSize", [[4, 3, 5, 7]], random_weights=False)
 
 
     def test_batchnorm2D_Larger(self):
@@ -829,7 +833,7 @@ class test_operator_export(unittest.TestCase):
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
 
-        self.unit_test_export(model, "BatchNorm2DLarger", [[1, 1, 5, 7]], False, False)
+        self.unit_test_export(model, "BatchNorm2DLarger", [[1, 1, 5, 7]], random_weights=False)
 
     def test_batchnorm2D_Higher(self):
         print("BatchNormalization2DHigher")
@@ -837,7 +841,7 @@ class test_operator_export(unittest.TestCase):
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
 
-        self.unit_test_export(model, "BatchNorm2DHigher", [[1, 1, 7, 5]], False, False)
+        self.unit_test_export(model, "BatchNorm2DHigher", [[1, 1, 7, 5]], random_weights=False)
 
     def test_batchnorm2D_Denser(self):
         print("BatchNormalization2DDenser")
@@ -845,7 +849,7 @@ class test_operator_export(unittest.TestCase):
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
 
-        self.unit_test_export(model, "BatchNorm2DDenser", [[1, 3, 5, 7]], False, False)
+        self.unit_test_export(model, "BatchNorm2DDenser", [[1, 3, 5, 7]], random_weights=False)
 
     def test_Conv(self):
         print("Conv")
@@ -925,7 +929,7 @@ class test_operator_export(unittest.TestCase):
             aidge_core.Conv2D(2, 2, [3, 3], name="InputNode")
         ])
         initFiller(model)
-        self.unit_test_export(model, "Conv2", [[1, 2, 9, 9]], random_inputs=True, random_weights=False, default_value=None)
+        self.unit_test_export(model, "Conv2", [[1, 2, 9, 9]], random_weights=False, default_value=None)
 
     def test_erf(self):
         print("Erf")