diff --git a/aidge_backend_cpu/unit_tests/test_recipies.py b/aidge_backend_cpu/unit_tests/test_recipies.py
new file mode 100644
index 0000000000000000000000000000000000000000..60949adf245f4f4a7ed316879fb307131f70739a
--- /dev/null
+++ b/aidge_backend_cpu/unit_tests/test_recipies.py
@@ -0,0 +1,82 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+import aidge_backend_cpu
+
+from functools import reduce
+import numpy as np
+
+class TestRecipies(unittest.TestCase):
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_fuse_batchnorm(self):
+        dims = [1, 1, 10, 10]
+        size = reduce((lambda x, y: x*y), dims)
+
+        input_data = np.arange(size).reshape(dims).astype(np.float32)
+        input_tensor = aidge_core.Tensor(input_data)
+
+        input_node = aidge_core.Producer(input_tensor, "X")
+        conv = aidge_core.Conv2D(1, 1, [3, 3], name="Conv0")
+        bn = aidge_core.BatchNorm2D(name="Bn0")
+
+        graph_view = aidge_core.sequential([conv, bn])
+
+        # Connect the input producer and configure the whole graph for CPU execution
+        input_node.add_child(graph_view)
+        input_node.get_operator().set_datatype(aidge_core.DataType.Float32)
+        input_node.get_operator().set_backend("cpu")
+        graph_view.set_datatype(aidge_core.DataType.Float32)
+        graph_view.set_backend("cpu")
+
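+        # Fill the Conv and BatchNorm parameters with deterministic values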
+        np_weights = np.arange(9).reshape([1, 1, 3, 3]).astype(np.float32)
+        np_bias = np.arange(1).reshape([1, 1]).astype(np.float32)
+
+        np_scale = np.array([0.05]).astype(np.float32)
+        np_shift = np.array([0.05]).astype(np.float32)
+        np_mean = np.array([0.05]).astype(np.float32)
+        np_var = np.array([0.05]).astype(np.float32)
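+        # Producers feeding Conv inputs 1-2 (weights, bias) and BatchNorm inputs 1-4 (scale, shift, mean, variance)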
+        conv.input(1)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_weights))
+        conv.input(2)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_bias))
+        bn.input(1)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_scale))
+        bn.input(2)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_shift))
+        bn.input(3)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_mean))
+        bn.input(4)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_var))
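+
+        # Run the graph once to record the reference output before fusion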
+        scheduler0 = aidge_core.SequentialScheduler(graph_view)
+        scheduler0.forward()
+
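+        # There is a single output node; keep its output as the reference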
+        for outNode in graph_view.get_output_nodes():
+            output_aidge0 = outNode.get_operator().output(0)
+
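+        # Apply the fuse_batchnorm recipe: fold the BatchNorm into the preceding Conv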
+        aidge_core.fuse_batchnorm(graph_view)
+        scheduler1 = aidge_core.SequentialScheduler(graph_view)
+        scheduler1.forward()
+
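+        # Re-run the fused graph and collect its output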
+        for outNode in graph_view.get_output_nodes():
+            output_aidge1 = outNode.get_operator().output(0)
+
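+        # Outputs before and after fusion should be numerically equivalent (within tolerance)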
+        self.assertTrue(aidge_core.approx_eq(output_aidge0, output_aidge1, 0.000001, 0.0001))
+
+if __name__ == '__main__':
+    unittest.main()