diff --git a/aidge_core/unit_tests/test_operator_squeeze.py b/aidge_core/unit_tests/test_operator_squeeze.py
new file mode 100644
index 0000000000000000000000000000000000000000..b43605893f32f17e7b544b2fea09b16bdd982050
--- /dev/null
+++ b/aidge_core/unit_tests/test_operator_squeeze.py
@@ -0,0 +1,194 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+from aidge_core import Log
+import numpy as np
+from numpy import testing as npt
+
+
+class TestSqueeze(unittest.TestCase):
+    """
+    Test squeeze operator
+    """
+
+    def setUp(self):
+        ############DEFINING INPUT AND OUTPUTS FOR TESTS
+        axes_to_squeeze_0 = [0]
+        axes_to_squeeze_many = [0, 1, 4]
+        axes_to_squeeze_all = []
+        axes_to_squeeze_error = [1, 2, 4, 5, 10, 3, 42, 127, 12, 3, 4, 1, 4, 50]
+
+        squeeze_dim_0 = aidge_core.Squeeze(axes_to_squeeze_0, name="squeeze_dim_0")
+        squeeze_many = aidge_core.Squeeze(axes_to_squeeze_many, name="squeeze_many")
+        squeeze_all = aidge_core.Squeeze(axes_to_squeeze_all, name="squeeze_all")
+        squeeze_error = aidge_core.Squeeze(axes_to_squeeze_error, name="squeeze_error")
+
+        input_1_data_shape = np.array([1, 2, 3])
+        input_2_data_hape = np.array([1, 1, 3, 3, 1, 9])
+        input_3_data_shape = np.array([1])
+        input_4_data_shape = np.array([1, 1, 4])
+
+        input_axes_0 = axes_to_squeeze_0
+        input_axes_many = axes_to_squeeze_many
+        input_axes_all = axes_to_squeeze_all
+        # input_axes_error = aidge_core.Tensor(axes_to_squeeze_error)
+
+        ####################### DEFINING TEST RUNS
+        self.tests_axes_defined_by_attribute = [
+            (input_1_data_shape, squeeze_dim_0, np.array([2, 3])),
+            (input_1_data_shape, squeeze_all, np.array([2, 3])),
+            (input_2_data_hape, squeeze_dim_0, np.array([1, 3, 3, 1, 9])),
+            (input_2_data_hape, squeeze_many, np.array([3, 3, 9])),
+            (input_2_data_hape, squeeze_all, np.array([3, 3, 9])),
+            (input_3_data_shape, squeeze_dim_0, np.array([])),
+            (input_3_data_shape, squeeze_all, np.array([])),
+            (input_4_data_shape, squeeze_dim_0, np.array([1, 4])),
+            (input_4_data_shape, squeeze_all, np.array([4])),
+        ]
+
+        # operators are purposefully chosen with different predefined attributes than the input_axes tensor
+        self.tests_axes_defined_by_input = [
+            (input_1_data_shape, input_axes_0, squeeze_error, np.array([2, 3])),
+            (input_1_data_shape, input_axes_all, squeeze_error, np.array([2, 3])),
+            (input_2_data_hape, input_axes_0, squeeze_error, np.array([1, 3, 3, 1, 9])),
+            (input_2_data_hape, input_axes_many, squeeze_error, np.array([3, 3, 9])),
+            (input_2_data_hape, input_axes_all, squeeze_error, np.array([3, 3, 9])),
+            (input_3_data_shape, input_axes_0, squeeze_error, np.array([])),
+            (input_3_data_shape, input_axes_all, squeeze_error, np.array([])),
+            (input_4_data_shape, input_axes_0, squeeze_error, np.array([1, 4])),
+            (input_4_data_shape, input_axes_all, squeeze_error, np.array([4])),
+        ]
+        self.test_error = [
+            (input_1_data_shape, squeeze_error),
+            (input_1_data_shape, squeeze_many),
+            (input_3_data_shape, squeeze_many),
+            (input_4_data_shape, squeeze_many),
+        ]
+        return
+
+    def tearDown(self):
+        pass
+
+    def test_axes_defined_via_tensor_input(self):
+        Log.notice("\ntest_axes_defined_via_tensor_input")
+        for index, (
+            input_shape,
+            input_axes_to_squeeze,
+            squeeze_node_template,
+            output_shape,
+        ) in enumerate(self.tests_axes_defined_by_input):
+            test_squeeze_node = squeeze_node_template
+            test_squeeze_op = test_squeeze_node.get_operator()
+
+            print(f"\nTest {index}")
+            print(f"input shape : {input_shape}")
+            print(f"input axes: {np.array(input_axes_to_squeeze)}")
+            print(f"operator : {test_squeeze_node}")
+            print(f"expected output_shape : {output_shape}")
+
+            test_squeeze_op.set_backend("cpu")
+            test_squeeze_op.set_datatype(aidge_core.dtype.float32)
+
+            input_values = np.ones(shape=input_shape, dtype=np.float32)
+            output_values = np.ones(shape=output_shape, dtype=np.float32)
+
+            input_data = aidge_core.Tensor(input_values)
+            input_data.set_datatype(aidge_core.dtype.float32)
+            input_data.set_backend("cpu")
+
+            input_axes = aidge_core.Tensor(
+                np.array(input_axes_to_squeeze, dtype=np.float32)
+            )
+            input_axes.set_datatype(aidge_core.dtype.int8)
+            input_axes.set_backend("cpu")
+
+            test_squeeze_op.set_input(0, input_data)
+            test_squeeze_op.set_input(1, input_axes)
+
+            self.assertEqual(test_squeeze_op.forward_dims(True), True)
+            test_squeeze_op.forward()
+
+            squeeze_output = test_squeeze_op.get_output(0)
+
+            npt.assert_array_equal(
+                squeeze_output.dims(),
+                output_shape,
+                err_msg=f"SQUEEZE FAILURE : expected result differs from output size\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+            npt.assert_array_almost_equal(
+                np.array(squeeze_output, dtype=np.float32),
+                output_values,
+                7,
+                err_msg=f"SQUEEZE FAILURE : output tensor values differs from expected values\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+            # self.assertEqual(test_squeeze_op.dims_forwarded(), True, "SQUEEZE_FAILURE : dims_forwarded failed.")
+        return
+
+    def test_axes_defined_via_attribute(self):
+        Log.notice("\ntest_axes_defined_via_attribute")
+        for index, (input_shape, squeeze_node_template, output_shape) in enumerate(
+            self.tests_axes_defined_by_attribute
+        ):
+            test_squeeze_node = squeeze_node_template
+            test_squeeze_op = test_squeeze_node.get_operator()
+
+            print(f"\nTest {index}")
+            print(f"input size : {input_shape.shape}")
+            print(f"operator : {test_squeeze_node}")
+            print(f"expected output_shape : {output_shape}")
+
+            test_squeeze_node.get_operator().set_backend("cpu")
+
+            input_values = np.ones(shape=input_shape, dtype=np.float32)
+            output_values = np.ones(shape=output_shape, dtype=np.float32)
+            input_data = aidge_core.Tensor(input_values)
+            input_data.set_datatype(aidge_core.dtype.float32)
+            input_data.set_backend("cpu")
+            test_squeeze_op.set_input(0, input_data)
+
+            test_squeeze_op.forward_dims()
+            test_squeeze_op.forward()
+
+            squeeze_output = test_squeeze_op.get_output(0)
+
+            npt.assert_array_equal(
+                squeeze_output.dims(),
+                output_shape,
+                err_msg=f"SQUEEZE FAILURE : expected result differs from output size\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+            npt.assert_array_almost_equal(
+                np.array(squeeze_output, dtype=np.float32),
+                output_values,
+                7,
+                err_msg=f"SQUEEZE FAILURE : output tensor values differs from expected values\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+        return
+
+    def test_error(self):
+        for input_shape, squeeze_node_template in self.test_error:
+            test_squeeze_node = squeeze_node_template
+            test_squeeze_op = test_squeeze_node.get_operator()
+
+            input_values = np.ones(shape=input_shape)
+            input_data = aidge_core.Tensor(input_values)
+            input_data.set_datatype(aidge_core.dtype.float32)
+            input_data.set_backend("cpu")
+            test_squeeze_op.set_input(0, input_data)
+
+            with self.assertRaises((RuntimeError, AssertionError)):
+                test_squeeze_op.forward_dims()
+                test_squeeze_op.forward()
+        return
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/aidge_core/unit_tests/test_operator_unsqueeze.py b/aidge_core/unit_tests/test_operator_unsqueeze.py
new file mode 100644
index 0000000000000000000000000000000000000000..12f55fa30bc027fa5a3cea6ccb6a8d2970cad018
--- /dev/null
+++ b/aidge_core/unit_tests/test_operator_unsqueeze.py
@@ -0,0 +1,211 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+import numpy as np
+from numpy import testing as npt
+
+
+class TestUnsqueeze(unittest.TestCase):
+    """
+    Test unsqueeze operator
+    """
+
+    def setUp(self):
+        axis_to_unsqueeze_dim_0 = [0]
+        axis_to_unsqueeze_many = [1, 4, 5]
+        axis_to_unsqueeze_error_identical_index = [0, 0, 0]
+        axis_to_unsqueeze_error_too_high_index = [50]
+        axis_to_unsqueeze_onnx_test = [0, 4]
+        unsqueeze_dim_0 = aidge_core.Unsqueeze(
+            axis_to_unsqueeze_dim_0, name="unsqueeze_dim_0"
+        )
+        unsqueeze_many = aidge_core.Unsqueeze(
+            axis_to_unsqueeze_many, name="unsqueeze_many"
+        )
+        unsqueeze_error_identical_index = aidge_core.Unsqueeze(
+            axis_to_unsqueeze_error_identical_index,
+            name="unsqueeze_error_identical_index",
+        )
+        unsqueeze_error_node = aidge_core.Unsqueeze(
+            axis_to_unsqueeze_error_too_high_index,
+            name="unsqueeze_error_index_too_high",
+        )
+        unsqueeze_onnx_test = aidge_core.Unsqueeze(
+            axis_to_unsqueeze_onnx_test, name="unsqueeze taken from onnx documentation"
+        )
+
+        input_1_data_shape = np.array([1, 2, 3])
+        input_2_data_shape = np.array([2, 1, 3, 3])
+        input_3_data_shape = np.array([1, 1, 4])
+        input_onnx_data_shape = np.array([3, 4, 5])
+
+        input_axes_dim_0 = axis_to_unsqueeze_dim_0
+        input_axes_many = axis_to_unsqueeze_many
+        input_axes_onnx_test = axis_to_unsqueeze_onnx_test
+
+        self.tests_axes_defined_by_attribute = [
+            (input_1_data_shape, unsqueeze_dim_0, np.array([1, 1, 2, 3])),
+            (input_2_data_shape, unsqueeze_dim_0, np.array([1, 2, 1, 3, 3])),
+            (input_2_data_shape, unsqueeze_many, np.array([2, 1, 1, 3, 1, 1, 3])),
+            (input_3_data_shape, unsqueeze_dim_0, np.array([1, 1, 1, 4])),
+            (input_3_data_shape, unsqueeze_many, np.array([1, 1, 1, 4, 1, 1])),
+            (input_onnx_data_shape, unsqueeze_onnx_test, np.array([1, 3, 4, 5, 1])),
+        ]
+
+        self.tests_axes_defined_by_tensor = [
+            (
+                input_1_data_shape,
+                input_axes_dim_0,
+                unsqueeze_error_node,
+                np.array([1, 1, 2, 3]),
+            ),
+            (
+                input_2_data_shape,
+                input_axes_dim_0,
+                unsqueeze_error_node,
+                np.array([1, 2, 1, 3, 3]),
+            ),
+            (
+                input_2_data_shape,
+                input_axes_many,
+                unsqueeze_error_node,
+                np.array([2, 1, 1, 3, 1, 1, 3]),
+            ),
+            (
+                input_3_data_shape,
+                input_axes_dim_0,
+                unsqueeze_error_node,
+                np.array([1, 1, 1, 4]),
+            ),
+            (
+                input_3_data_shape,
+                input_axes_many,
+                unsqueeze_error_node,
+                np.array([1, 1, 1, 4, 1, 1]),
+            ),
+            (
+                input_onnx_data_shape,
+                input_axes_onnx_test,
+                unsqueeze_error_node,
+                np.array([1, 3, 4, 5, 1]),
+            ),
+        ]
+
+        self.test_error = [
+            (input_1_data_shape, unsqueeze_error_identical_index),
+            (input_1_data_shape, unsqueeze_error_node),
+            (input_1_data_shape, unsqueeze_many),  # dims too high
+        ]
+        return
+
+    def tearDown(self):
+        pass
+
+    def test_axes_defined_by_attribute(self):
+        for index, (
+            input_shape,
+            unsqueeze_template,
+            expected_output_shape,
+        ) in enumerate(self.tests_axes_defined_by_attribute):
+            test_unsqueeze = unsqueeze_template
+            test_unsqueeze_op = test_unsqueeze.get_operator()
+
+            print(f"\nTest {index}")
+            print(f"input size : {input_shape}")
+            print(f"operator : {test_unsqueeze}")
+            print(f"expected output_shape : {expected_output_shape}")
+
+            test_unsqueeze_op.set_backend("cpu")
+
+            input_values = np.ones(shape=input_shape, dtype=np.float32)
+            expected_output_values = np.ones(
+                shape=expected_output_shape, dtype=np.float32
+            )
+            input_tensor = aidge_core.Tensor(input_values)
+            test_unsqueeze_op.set_input(0, input_tensor)
+
+            test_unsqueeze_op.forward_dims()
+            test_unsqueeze_op.forward()
+
+            unsqueeze_output = test_unsqueeze_op.get_output(0)
+
+            npt.assert_array_equal(
+                unsqueeze_output.dims(),
+                expected_output_shape,
+                err_msg=f"UNSQUEEZE FAILURE : expected result dimensions differs from output's\n\toperator : {test_unsqueeze}\n\tinput.shape : {input_shape.shape}",
+            )
+            npt.assert_array_almost_equal(
+                np.array(unsqueeze_output),
+                expected_output_values,
+                7,
+                err_msg=f"UNSQUEEZE FAILURE : output tensor values differs from expected values\n\toperator : {test_unsqueeze}\n\tinput.shape : {input_shape.shape}",
+            )
+        return
+
+    def test_axes_defined_via_tensor_input(self):
+        for index, (
+            input_shape,
+            input_axes_to_squeeze,
+            squeeze_node_template,
+            output_shape,
+        ) in enumerate(self.tests_axes_defined_by_tensor):
+            test_squeeze_node = squeeze_node_template
+            test_squeeze_op = test_squeeze_node.get_operator()
+
+            print(f"\nTest {index}")
+            print(f"input shape : {input_shape}")
+            print(f"input axes: {np.array(input_axes_to_squeeze)}")
+            print(f"operator : {test_squeeze_node}")
+            print(f"expected output_shape : {output_shape}")
+
+            test_squeeze_op.set_backend("cpu")
+            test_squeeze_op.set_datatype(aidge_core.dtype.float32)
+
+            input_values = np.ones(shape=input_shape, dtype=np.float32)
+            output_values = np.ones(shape=output_shape, dtype=np.float32)
+
+            input_data = aidge_core.Tensor(input_values)
+            input_data.set_datatype(aidge_core.dtype.float32)
+            input_data.set_backend("cpu")
+
+            input_axes = aidge_core.Tensor(
+                np.array(input_axes_to_squeeze, dtype=np.float32)
+            )
+            input_axes.set_datatype(aidge_core.dtype.int8)
+            input_axes.set_backend("cpu")
+
+            test_squeeze_op.set_input(0, input_data)
+            test_squeeze_op.set_input(1, input_axes)
+
+            self.assertEqual(test_squeeze_op.forward_dims(True), True)
+            test_squeeze_op.forward()
+
+            squeeze_output = test_squeeze_op.get_output(0)
+
+            npt.assert_array_equal(
+                squeeze_output.dims(),
+                output_shape,
+                err_msg=f"SQUEEZE FAILURE : expected result differs from output size\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+            npt.assert_array_almost_equal(
+                np.array(squeeze_output, dtype=np.float32),
+                output_values,
+                7,
+                err_msg=f"SQUEEZE FAILURE : output tensor values differs from expected values\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+            # self.assertEqual(test_squeeze_op.dims_forwarded(), True, "SQUEEZE_FAILURE : dims_forwarded failed.")
+        return
+
+
+if __name__ == "__main__":
+    unittest.main()
+
diff --git a/include/aidge/graph/OpArgs.hpp b/include/aidge/graph/OpArgs.hpp
index bf14d39af34c2e14d98906a663edf335c30c6f12..70a431b5621270a6b6083a436aba145ce9dafbf3 100644
--- a/include/aidge/graph/OpArgs.hpp
+++ b/include/aidge/graph/OpArgs.hpp
@@ -12,8 +12,10 @@
 #ifndef AIDGE_CORE_GRAPH_OPARGS_H_
 #define AIDGE_CORE_GRAPH_OPARGS_H_
 
-#include <memory>
 #include <cassert>
+#include <memory>
+#include <string>
+#include <vector>
 
 namespace Aidge {
 class Node;
@@ -56,20 +58,22 @@ public:
  * one in a sequential way. Nodes linked with the Sequential graph
  * generation instructions must have a single output.
  * Sequential(A, B, C) returns A-->B-->C.
- * @param inputs List of Node and GraphView to link sequentially.
+ * @param[in] inputs List of Node and GraphView to link sequentially.
+ * @param[in] name : name of the graphview to return
  * @return std::shared_ptr<GraphView> Pointer to the generated view.
  */
-std::shared_ptr<GraphView> Sequential(std::vector<OpArgs> inputs);
+std::shared_ptr<GraphView> Sequential(std::vector<OpArgs> inputs, std::string name = "");
 
 /////////////////////////////
 // Parallel
 
 /**
  * @brief Creates a GraphView with provided Nodes without linking them.
- * @param inputs List of Node and GraphView to link sequentially.
+ * @param[in] inputs List of Node and GraphView to link sequentially.
+ * @param[in] name : name of the graphview to return
  * @return std::shared_ptr<GraphView> pointer to the generated view.
  */
-std::shared_ptr<GraphView> Parallel(std::vector<OpArgs> inputs);
+std::shared_ptr<GraphView> Parallel(std::vector<OpArgs> inputs, std::string name = "");
 
 /////////////////////////////
 // Residual
@@ -81,9 +85,10 @@ std::shared_ptr<GraphView> Parallel(std::vector<OpArgs> inputs);
  * generation instructions must have a single output.
  * Recursive(A, B, C) returns A-->B-->C , A-->C.
  * @param inputs List of Node and GraphView to link sequentially.
+ * @param[in] name : name of the graphview to return
  * @return std::shared_ptr<GraphView> pointer to the generated view.
  */
-std::shared_ptr<GraphView> Residual(std::vector<OpArgs> inputs);
+std::shared_ptr<GraphView> Residual(std::vector<OpArgs> inputs, std::string name = "");
 
 }
 
diff --git a/include/aidge/graphRegex/GraphRegex.hpp b/include/aidge/graphRegex/GraphRegex.hpp
index b62a42fcfeb258e5c659eaeb6681190482f37aa4..573447cf934b196e8b0c32d7a58e1977f5aa5f9a 100644
--- a/include/aidge/graphRegex/GraphRegex.hpp
+++ b/include/aidge/graphRegex/GraphRegex.hpp
@@ -12,13 +12,12 @@
 namespace Aidge{
 
 /**
- * type for recipes function use in query and resolve  
-*/
+ * @brief type of the recipe functions used in query and resolve
+ */
 using RecipesFunctionType = std::function<void(std::shared_ptr<MatchSolution>)>;
 
 /**
- * @brief class which is the hight level interface for graph matching, used to simplify match definition  
- * 
+ * @brief high-level interface for graph matching, used to simplify match definition
  */
 class GraphRegex{
 
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index 1097454fce62f645eb83c491498031738847e96c..c8cdd93810e18bd3cdd0a2d080e54aae2d787c66 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -40,6 +40,14 @@ protected:
 public:
     OperatorTensor() = delete;
 
+    /**
+     * @brief OperatorTensor constructor. This function is not meant to be called directly,
+     * but by the constructor of a derived class; every operator class derives from this class.
+     *
+     * @param[in] type : type of operator (i.e. "Add", "AveragePool",...)
+     * @param[in] inputsCategory : describes the category of each input.
+     * @param[in] nbOut : number of tensors this operator will output.
+     */
     OperatorTensor(const std::string& type, const std::vector<InputCategory>& inputsCategory,
                    const IOIndex_t nbOut);
 
@@ -79,6 +87,15 @@ public:
      * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
      */
     virtual std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
+
+	/**
+	 * @brief Compute the dimensions of the operator's output tensor(s) given the input sizes.
+	 *        If the output dimensions cannot be computed because they depend on undefined inputs,
+	 *        then forwardDims returns false and enters TOKEN mode for subsequent tensors.
+	 *        TOKEN mode only ensures that all inputs and outputs of the node's graph are connected.
+	 * @param[in] allowDataDependency if true, the output dimensions may depend on the values of optional input tensors.
+	 * @return true if dims have been properly forwarded; false otherwise (forwardDims then enters TOKEN mode).
+	 */
     virtual bool forwardDims(bool allowDataDependency = false);
     virtual bool dimsForwarded() const;
     ///////////////////////////////////////////////////
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 257a6965be4c08735f23ae575ffe104bb706593a..c52e779cbfec04f9ae6796c3bb6f21407c0cb0fb 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -103,7 +103,7 @@ public:
     void forward() override final;
 
     void backward() override final {
-        // fmt::print("Basic Producer backward() function.\n");
+        Log::debug("Basic Producer backward() function.");
     }
 
     void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const override;
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..73321b5689c0c10d9d06ea60c551cc6dfaced149
--- /dev/null
+++ b/include/aidge/operator/Squeeze.hpp
@@ -0,0 +1,159 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SQUEEZE_H_
+#define AIDGE_CORE_OPERATOR_SQUEEZE_H_
+
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <limits>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+/**
+ * @brief implementation of the operator squeeze.
+ * @note Since this operator implementation is agnostic to the backend it is
+ * located here instead of in aidge_backend_cpu/cuda.
+ */
+class Squeeze_OpImpl : public OperatorImpl {
+public:
+  Squeeze_OpImpl(const Operator &op, const std::string &backend = "")
+      : OperatorImpl(op, backend) {}
+  void forward() override;
+};
+
+enum class SqueezeAttr {
+  /**
+   * @brief axes to squeeze, if left empty all 1 sized
+   * dimensions will be removed.
+   */
+  Axes
+};
+
+/**
+ * @brief This operator has as purpose to remove dummy dimensions around given
+ * axes.
+ * input#0 : Tensor to squeeze
+ * input#1 Optional : 1D tensor that lists the axes to squeeze
+ * @note the axes to squeeze can either be given via attribute or via input #1,
+ * for the sake of simplicity in the examples below, the axes to squeeze are
+ * given via attribute
+ * @example Calling squeeze(1) on a tensor of dimensions (2,1,3,4) will result
+ * in a tensor of dim (2,3,4).
+ * @example Calling squeeze(1) on a tensor of dimensions (1,2,3,4) will result
+ * in a tensor of dim (1,2,3,4).
+ * @example Calling squeeze() with no argument will result in the removal of
+ * every 1-sized dimension in the tensor.
+ */
+class Squeeze_Op
+    : public OperatorTensor,
+      public Registrable<Squeeze_Op, std::string,
+                         std::shared_ptr<OperatorImpl>(const Squeeze_Op &)> {
+
+public:
+  static const std::string
+      Type; // name of the type of the operation (Here "Squeeze")
+
+private:
+  using Attributes_ = StaticAttributes<SqueezeAttr, std::vector<int8_t>>;
+  template <SqueezeAttr e> using attr = typename Attributes_::template attr<e>;
+  const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+  /**
+   * @brief constructor for Squeeze op
+   * @param[in] axes : axes to squeeze; if left empty, all 1-sized dimensions are removed
+   */
+  Squeeze_Op(const std::vector<int8_t> &axes = {})
+      : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData},
+                       1),
+        mAttributes(
+            std::make_shared<Attributes_>(attr<SqueezeAttr::Axes>(axes))) {
+    mImpl = std::make_shared<Squeeze_OpImpl>(*this);
+  }
+
+  /**
+   * @brief Copy-constructor. Copy the operator attributes and its output
+   * tensor(s), but not its input tensors (the new operator has no input
+   * associated).
+   * @param op Operator to copy.
+   */
+  Squeeze_Op(const Squeeze_Op &op)
+      : OperatorTensor(op), mAttributes(op.mAttributes) {
+    if (!op.backend().empty()) {
+      SET_IMPL_MACRO(Squeeze_Op, *this, op.backend());
+    } else {
+      mImpl = std::make_shared<Squeeze_OpImpl>(*this);
+    }
+  }
+
+  /**
+   * @brief Clone the operator using its copy-constructor.
+   * @see Operator::Squeeze_Op
+   */
+  std::shared_ptr<Operator> clone() const override final {
+    return std::make_shared<Squeeze_Op>(*this);
+  }
+
+  /**
+   * @brief Compute dimensions for the output Tensor
+   */
+  bool forwardDims(bool allowDataDependency = false) override final;
+  bool dimsForwarded() const override final;
+
+  void setBackend(const std::string &name,
+                  DeviceIdx_t device = 0) override final;
+
+  inline std::shared_ptr<Attributes> attributes() const override {
+    return mAttributes;
+  }
+
+  /**
+   * @brief axes to squeeze, if left empty all 1 sized
+   * dimensions will be removed.
+   */
+  inline std::vector<int8_t> &axes() const noexcept {
+    return mAttributes->template getAttr<SqueezeAttr::Axes>();
+  }
+
+  static const std::vector<std::string> getInputsName() {
+    return {"data_input", "axes_to_squeeze"};
+  }
+  static const std::vector<std::string> getOutputsName() {
+    return {"squeezed"};
+  }
+};
+
+// factory helper to create a Squeeze node; axes to squeeze may alternatively be
+// provided at runtime through the operator's optional second input tensor
+inline std::shared_ptr<Node> Squeeze(const std::vector<int8_t> axes = {},
+                                     const std::string &name = "") {
+  return std::make_shared<Node>(std::make_shared<Squeeze_Op>(axes), name);
+}
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::SqueezeAttr>::data[] = {"Axes"};
+}
+
+#endif // AIDGE_CORE_OPERATOR_SQUEEZE_H_
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..3443801bc4a4771109b54a709bd6a77a96b57274
--- /dev/null
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -0,0 +1,157 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
+#define AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+/**
+ * @brief implementation of the operator unsqueeze.
+ * @note Since this operator implementation is agnostic to the backend it is
+ * located here instead of in aidge_backend_cpu/cuda.
+ */
+class Unsqueeze_OpImpl : public OperatorImpl {
+public:
+  Unsqueeze_OpImpl(const Operator &op, const std::string &backend = "")
+      : OperatorImpl(op, backend) {}
+  void forward() override;
+};
+
+enum class UnsqueezeAttr {
+  /**
+   * @brief vector of axes to unsqueeze.
+   * values must be comprised within
+   * [ -a ; a-1 ]
+   * with a = input_tensor.nbDim() + dims_to_unsqueeze.size()
+   */
+  Axes
+};
+
+/**
+ * @brief This operator has as purpose to add a dummy dimension around given
+ * axis. Unsqueezing the 2nd dim of a tensor of dim (1,2,3,4) will result in a
+ * tensor of dim (1,2,1,3,4)
+ * You can also unsqueeze dimensions whose index is higher than the nb of input
+ * dimensions as long as :
+ * dims_to_unsqueeze[i] < tensor.nbDim() +
+ * dims_to_unsqueeze.size()
+ */
+class Unsqueeze_Op
+    : public OperatorTensor,
+      public Registrable<Unsqueeze_Op, std::string,
+                         std::shared_ptr<OperatorImpl>(const Unsqueeze_Op &)> {
+
+public:
+  static const std::string
+      Type; // name of the type of the operation (Here "Unsqueeze")
+
+private:
+  using Attributes_ = StaticAttributes<UnsqueezeAttr, std::vector<int8_t>>;
+  template <UnsqueezeAttr e>
+  using attr = typename Attributes_::template attr<e>;
+  const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+  Unsqueeze_Op() =
+      delete; // no default constructor since this class has attributes
+
+  /**
+   * @brief constructor for Unsqueeze op
+   * @param[in] axes : axes at which to insert the 1-sized dimensions
+   */
+  Unsqueeze_Op(const std::vector<int8_t> &axes)
+      : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData},
+                       1),
+        mAttributes(
+            std::make_shared<Attributes_>(attr<UnsqueezeAttr::Axes>(axes))) {
+    mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
+  }
+
+  /**
+   * @brief Copy-constructor. Copy the operator attributes and its output
+   * tensor(s), but not its input tensors (the new operator has no input
+   * associated).
+   * @param op Operator to copy.
+   */
+  Unsqueeze_Op(const Unsqueeze_Op &op)
+      : OperatorTensor(op), mAttributes(op.mAttributes) {
+    if (!op.backend().empty()) {
+      SET_IMPL_MACRO(Unsqueeze_Op, *this, op.backend());
+    } else {
+      mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
+    }
+  }
+
+  /**
+   * @brief Clone the operator using its copy-constructor.
+   * @see Operator::Unsqueeze_Op
+   */
+  std::shared_ptr<Operator> clone() const override final {
+    return std::make_shared<Unsqueeze_Op>(*this);
+  }
+
+  /**
+   * @brief Compute dimensions for the output Tensor
+   */
+  bool forwardDims(bool allowDataDependency = false) override final;
+  bool dimsForwarded() const override final;
+
+  void setBackend(const std::string &name,
+                  DeviceIdx_t device = 0) override final;
+
+  inline std::shared_ptr<Attributes> attributes() const override {
+    return mAttributes;
+  }
+  /**
+   * @brief vector of axes to unsqueeze.
+   * values must be comprised within
+   * [ -a ; a-1 ]
+   * with : a = input_tensor.nbDim() + dims_to_unsqueeze.size()
+   */
+  inline std::vector<int8_t> &axes() const noexcept {
+    return mAttributes->template getAttr<UnsqueezeAttr::Axes>();
+  }
+
+  static const std::vector<std::string> getInputsName() {
+    return {"data_input", "axes_to_unsqueeze"};
+  }
+  static const std::vector<std::string> getOutputsName() {
+    return {"unsqueezed"};
+  }
+};
+
+// factory helper to create an Unsqueeze node; axes to unsqueeze may alternatively
+// be provided at runtime through the operator's optional second input tensor
+inline std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes = {},
+                                       const std::string &name = "") {
+  return std::make_shared<Node>(std::make_shared<Unsqueeze_Op>(axes), name);
+}
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"Axes"};
+}
+
+#endif // AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
diff --git a/pyproject.toml b/pyproject.toml
index b820759982252b69790cde89c500e3b11f9a52da..cc0a43c83394a2dd61ae4f99572bd902eb724c9b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -36,8 +36,10 @@ include = [ # package names should match these glob patterns (["*"] by default)
     "aidge_core*"
 ]
 exclude = [ # exclude packages matching these glob patterns (empty by default)
-    "aidge_core.unit_tests",
-    "aidge_core.unit_tests.static"
+    ".unit_tests",
+    ".unit_tests.static",
+    ".aidge_export_aidge.__pycache__",
+    ".aidge_export_aidge.utils.__pycache__",
 ] 
 
 # SETUPTOOLS_SCM
diff --git a/python_binding/graph/pybind_OpArgs.cpp b/python_binding/graph/pybind_OpArgs.cpp
index 6ea89f91945ac44f2142c5b9e8440b11ec6a1663..a129ca51c27367ceb1f7518ca85afe134e98cc4a 100644
--- a/python_binding/graph/pybind_OpArgs.cpp
+++ b/python_binding/graph/pybind_OpArgs.cpp
@@ -31,9 +31,9 @@ void init_OpArgs(py::module& m){
     py::implicitly_convertible<Node, OpArgs>();
     py::implicitly_convertible<GraphView, OpArgs>();
 
-    m.def("sequential", &Sequential, py::arg("inputs"));
-    m.def("parallel", &Parallel, py::arg("inputs"));
-    m.def("residual", &Residual, py::arg("inputs"));
+    m.def("sequential", &Sequential, py::arg("inputs"), py::arg("name") =  "");
+    m.def("parallel", &Parallel, py::arg("inputs"), py::arg("name") =  "");
+    m.def("residual", &Residual, py::arg("inputs"), py::arg("name") =  "");
 
 }
 }
diff --git a/python_binding/operator/pybind_Squeeze.cpp b/python_binding/operator/pybind_Squeeze.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ca90fb46af40189dbe66c320ecdd237470ffa112
--- /dev/null
+++ b/python_binding/operator/pybind_Squeeze.cpp
@@ -0,0 +1,52 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+#include <pybind11/pybind11.h>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Squeeze.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Squeeze(py::module &m) {
+  py::class_<Squeeze_Op, std::shared_ptr<Squeeze_Op>, OperatorTensor>(
+      m, "SqueezeOp", py::multiple_inheritance(),
+		R"mydelimiter(
+		Initialize squeeze operator
+		:param axes :   axes to squeeze between [-r;r-1] 
+						with r = input_tensor.nbDims()
+						& r in [-128 , 127]
+		:type axes : :py:class: List[Int]
+		)mydelimiter")
+      .def("get_inputs_name", &Squeeze_Op::getInputsName)
+      .def("get_outputs_name", &Squeeze_Op::getOutputsName)
+      .def("axes", &Squeeze_Op::axes);
+  // Here we bind the constructor of the Squeeze Node. We add an argument
+  // for each attribute of the operator (in here we only have 'axes') and
+  // the last argument is the node's name.
+  m.def("Squeeze", &Squeeze, py::arg("axes") = std::vector<int8_t>({}),
+        py::arg("name") = "",
+        R"mydelimiter(
+    Initialize a node containing a squeeze operator.
+	:param axes :   axes to squeeze between [-r;r-1] 
+					with r = input_tensor.nbDims()
+					& r in [-128 , 127]
+	:type axes : :py:class: List[Int]
+    :param name : name of the node.
+)mydelimiter");
+}
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..40c179c4064f07896113732a7e3c32db5f19c060
--- /dev/null
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -0,0 +1,48 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Unsqueeze.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Unsqueeze(py::module &m) {
+  py::class_<Unsqueeze_Op, std::shared_ptr<Unsqueeze_Op>, OperatorTensor>(
+      m, "UnsqueezeOp", py::multiple_inheritance(),
+      R"mydelimiter(
+		Initialize an unsqueeze operator.
+		:param axes :   axes to unsqueeze between [-r;r-1] 
+						with r = input_tensor.nbDims() + len(axes)
+		:type axes : :py:class: List[Int]
+		)mydelimiter")
+      // Here we bind the methods of the Unsqueeze_Op that we will want to access
+      .def("get_inputs_name", &Unsqueeze_Op::getInputsName)
+      .def("get_outputs_name", &Unsqueeze_Op::getOutputsName)
+      .def("axes", &Unsqueeze_Op::axes);
+  // Here we bind the constructor of the Unsqueeze Node. We add an argument for
+  // each attribute of the operator (in here we only have 'axes') and the last
+  // argument is the node's name.
+  m.def("Unsqueeze", &Unsqueeze, py::arg("axes") = std::vector<int8_t>({}),
+        py::arg("name") = "",
+        R"mydelimiter(
+    Initialize a node containing an unsqueeze operator.
+	:param axes :   axes to unsqueeze between [-r;r-1] 
+					with r = input_tensor.nbDims() + len(axes)
+	:type axes : :py:class: List[Int]
+    :param name : name of the node.
+)mydelimiter");
+}
+} // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index c72a629b24254a27cf7418af4f22c3df89084ad3..7c9ac168f3f8cd0e6bc09c45aec6041a0bd9faa3 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -42,18 +42,19 @@ void init_Gather(py::module&);
 void init_GenericOperator(py::module&);
 void init_GlobalAveragePooling(py::module&);
 void init_GridSample(py::module&);
+void init_Identity(py::module&);
 void init_LeakyReLU(py::module&);
 void init_MatMul(py::module&);
 void init_MaxPooling(py::module&);
 void init_MetaOperatorDefs(py::module&);
 void init_Mul(py::module&);
-void init_Producer(py::module&);
 void init_Pad(py::module&);
 void init_Pop(py::module&);
 void init_Pow(py::module&);
+void init_Producer(py::module&);
+void init_ReLU(py::module&);
 void init_ReduceMean(py::module&);
 void init_ReduceSum(py::module&);
-void init_ReLU(py::module&);
 void init_Reshape(py::module&);
 void init_Resize(py::module&);
 void init_Scaling(py::module&);
@@ -63,10 +64,11 @@ void init_Slice(py::module&);
 void init_Softmax(py::module&);
 void init_Split(py::module&);
 void init_Sqrt(py::module&);
+void init_Squeeze(py::module&);
 void init_Sub(py::module&);
 void init_Tanh(py::module&);
 void init_Transpose(py::module&);
-void init_Identity(py::module&);
+void init_Unsqueeze(py::module&);
 
 void init_Node(py::module&);
 void init_GraphView(py::module&);
@@ -102,6 +104,7 @@ void init_Aidge(py::module& m) {
     init_Log(m);
     init_Operator(m);
     init_OperatorTensor(m);
+
     init_Add(m);
     init_And(m);
     init_ArgMax(m);
@@ -117,18 +120,18 @@ void init_Aidge(py::module& m) {
     init_GenericOperator(m);
     init_GlobalAveragePooling(m);
     init_GridSample(m);
+    init_Identity(m);
     init_LeakyReLU(m);
     init_MatMul(m);
     init_MaxPooling(m);
     init_MetaOperatorDefs(m);
     init_Mul(m);
     init_Pad(m);
-
     init_Pop(m);
     init_Pow(m);
+    init_ReLU(m);
     init_ReduceMean(m);
     init_ReduceSum(m);
-    init_ReLU(m);
     init_Reshape(m);
     init_Resize(m);
     init_Scaling(m);
@@ -138,10 +141,11 @@ void init_Aidge(py::module& m) {
     init_Softmax(m);
     init_Split(m);
     init_Sqrt(m);
+    init_Squeeze(m);
     init_Sub(m);
     init_Tanh(m);
     init_Transpose(m);
-    init_Identity(m);
+    init_Unsqueeze(m);
 
     init_Producer(m);
 
diff --git a/python_binding/utils/pybind_Log.cpp b/python_binding/utils/pybind_Log.cpp
index f70a4bfab54ee14194ea04f96efa33a6b8e04201..ca8d1f33086fb5093c76826e5a2f53df873badf5 100644
--- a/python_binding/utils/pybind_Log.cpp
+++ b/python_binding/utils/pybind_Log.cpp
@@ -78,6 +78,13 @@ void init_Log(py::module& m){
     .def_static("set_console_level", &Log::setConsoleLevel, py::arg("level"),
           R"mydelimiter(
           Set the minimum log level displayed in the console.
+          Available `Level`s in ascending order : 
+            - Level.Debug
+            - Level.Info
+            - Level.Notice
+            - Level.Warn
+            - Level.Error
+            - Level.Fatal          
 
           :param level: Log level.
           :type level: Level
@@ -93,6 +100,13 @@ void init_Log(py::module& m){
     .def_static("set_file_level", &Log::setFileLevel, py::arg("level"),
           R"mydelimiter(
           Set the minimum log level saved in the log file.
+          Available `Level`s in ascending order : 
+            - Level.Debug
+            - Level.Info
+            - Level.Notice
+            - Level.Warn
+            - Level.Error
+            - Level.Fatal          
 
           :param level: Log level.
           :type level: Level
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 1035deb366a9c5df6ff08cd87ebd65a11c2b6e78..382052535cc6b5cd8089f720b8fa9f8d3a0ebce1 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -211,7 +211,7 @@ void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeoutId)
         "Input index ({}) is out of bound ({}) for node {} (of type {})",
         inId, nbInputs(), name(), type());
     if (mIdOutParents[inId] != gk_IODefaultIndex) {
-        Log::notice("Notice: filling a Tensor already attributed");
+        Log::notice("Filling a Tensor already attributed.");
         auto originalParent = input(inId);
         // remove original parent reference to child
         // find the output ID for original Parent
@@ -279,7 +279,7 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t
 
 void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOIndex_t inId) {
     if (getParent(inId) != nullptr) {
-        Log::notice("Notice: you are replacing an existing parent for node {} (of type {})", name(), type());
+        Log::notice("You are replacing an existing parent for node {} (of type {}).", name(), type());
     }
     AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(),
         "Input index ({}) is out of bound ({}) for node {} (of type {})",
diff --git a/src/graph/OpArgs.cpp b/src/graph/OpArgs.cpp
index cffd14c35a0fe11055198236eba6e344c0ff782c..6fe2320ea0ed6a71b6c4fad6a3fab4e1b6472abf 100644
--- a/src/graph/OpArgs.cpp
+++ b/src/graph/OpArgs.cpp
@@ -9,16 +9,20 @@
  *
  ********************************************************************************/
 
-#include "aidge/graph/Node.hpp"
-#include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/OpArgs.hpp"
 
+#include <memory>
+#include <string>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+
 Aidge::OpArgs::OpArgs(const OpArgs&) = default;
 Aidge::OpArgs& Aidge::OpArgs::operator=(const OpArgs&) = default;
 Aidge::OpArgs::~OpArgs() noexcept = default;
 
-std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs) {
-    std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
+std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs, std::string name) {
+    std::shared_ptr<GraphView> gv = std::make_shared<GraphView>(name);
     for (const OpArgs& elt : inputs) {
         if(elt.node() != nullptr) {
             // Connect the first output (ordered) of each output node (ordered)
@@ -61,8 +65,8 @@ std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs)
 }
 
 
-std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::vector<OpArgs> inputs) {
-    std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
+std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::vector<OpArgs> inputs, std::string name) {
+    std::shared_ptr<GraphView> gv = std::make_shared<GraphView>(name);
     for(const OpArgs& elt : inputs) {
         if (elt.node()!=nullptr)
             gv->add(elt.node());
@@ -73,8 +77,8 @@ std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::vector<OpArgs> inputs) {
 }
 
 
-std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::vector<OpArgs> inputs) {
-    std::shared_ptr<GraphView> gv = Sequential(inputs);
+std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::vector<OpArgs> inputs, std::string name) {
+    std::shared_ptr<GraphView> gv = Sequential(inputs,name);
     AIDGE_ASSERT(gv->outputNodes().size() == 1U,
         "Residual(): Zero or more than one output Node for the GraphView, don't know which one to choose from for the residual connection");
     std::shared_ptr<Node> lastNode = *gv->outputNodes().begin();
diff --git a/src/operator/Squeeze.cpp b/src/operator/Squeeze.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..df81ef3ec980b5cf8bd9f8bd39d093cee529cf75
--- /dev/null
+++ b/src/operator/Squeeze.cpp
@@ -0,0 +1,164 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Squeeze.hpp"
+
+#include <algorithm>
+#include <bitset>
+#include <cstdint>
+#include <fmt/core.h>
+#include <functional>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Log.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+const std::string Squeeze_Op::Type = "Squeeze";
+
+bool Squeeze_Op::dimsForwarded() const {
+  if ((getInput(1) && !getInput(1)->undefined())) {
+    // output dims are data dependent
+    return false;
+  }
+
+  return OperatorTensor::dimsForwarded();
+}
+
+bool Squeeze_Op::forwardDims(bool allowDataDependency) {
+  // error checking
+  if (!inputsAssociated(false) || getInput(0)->undefined()) {
+    return false;
+  }
+
+  std::shared_ptr<Tensor> fallback;
+  // Input 1 is axes to squeeze (can also be given via attribute)
+  if (getInput(1)) {
+    if (!this->axes().empty()) {
+      Log::notice("{} : ignoring non-empty axes attribute because input#1 "
+                  "takes precedence",
+                  type());
+    }
+
+    if (!allowDataDependency) {
+      Log::warn("{} : unable to forwardDims() because output dims are data "
+                "dependent on input#1",
+                type());
+      return false;
+    }
+
+    this->axes().clear(); // If both are provided input would override attrs
+    this->axes().reserve(getInput(1)->size());
+    const auto &axes =
+        getInput(1)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+    if (axes.nbDims() == 0) {
+      this->axes().clear();
+    } else {
+      AIDGE_ASSERT(
+          axes.nbDims() == 1,
+          "Axes input tensor should be of size 1. Received {} dimensions : {}",
+          axes.nbDims(), axes.dims());
+      std::copy_n(static_cast<int8_t *>(axes.getImpl()->hostPtr()), axes.size(),
+                  std::back_inserter(this->axes()));
+    }
+  }
+
+  std::vector<DimSize_t> input_dims = getInput(0)->dims();
+  std::vector<DimSize_t> output_dims;
+  output_dims.reserve(input_dims.size());
+  std::vector<DimIdx_t> axes_rectified_idx;
+  axes_rectified_idx.reserve(input_dims.size());
+
+  if (this->axes().size() == 0) { // squeeze() => squeeze all 1 sized dimensions
+    Log::debug("this->axes() is empty, all 1 sized dim will be squeezed. If "
+               "this is an error ensure that the values are properly set via "
+               "attribute or data input#1.");
+    std::copy_if(input_dims.begin(), input_dims.end(),
+                 std::back_inserter(output_dims),
+                 [](DimSize_t dim) { return dim != 1; });
+  } else { // squeeze({N,.....}) => squeeze all specified dimensions that are of
+           // size 1.
+    /////// ensure indexes validity and set pythonic negative indexes to their
+    // positive value
+    for (const int8_t &axis : this->axes()) {
+      AIDGE_ASSERT(axis >= static_cast<int8_t>(-input_dims.size()) &&
+                       axis < static_cast<int8_t>(input_dims.size()),
+                   "{} : Axis index OutOfBounds error, expected value "
+                   "within size limits of input tensor : "
+                   "[-{},{}), got {}.",
+                   type(), input_dims.size(), input_dims.size() - 1, axis);
+      auto temp =
+          static_cast<DimIdx_t>(axis >= 0 ? axis : axis + input_dims.size());
+      if (axes_rectified_idx.end() == std::find(axes_rectified_idx.begin(),
+                                                axes_rectified_idx.end(),
+                                                temp)) {
+        axes_rectified_idx.push_back(temp);
+      }
+    }
+
+    // Create output_dims
+    // speeds up binary search
+    std::sort(axes_rectified_idx.begin(), axes_rectified_idx.end());
+    DimSize_t i = 0;
+    std::copy_if(
+        input_dims.begin(), input_dims.end(), std::back_inserter(output_dims),
+        [&axes_rectified_idx, &i, &input_dims](DimSize_t dim) {
+          // if current dim index is found in axes to squeeze
+          // we ensure that this axis is 1 sized, otherwise an error is thrown
+          bool ok = true;
+          if (std::binary_search(axes_rectified_idx.begin(),
+                                 axes_rectified_idx.end(), i)) {
+            AIDGE_ASSERT(dim == 1,
+                         "{} : Tried to squeeze axis nb {} of a tensor of dim "
+                         "{}. Dim to squeeze has to be 1-sized, got size {}. "
+                         "Axes to squeeze : {}",
+                         __func__, i, input_dims, input_dims[i],
+                         axes_rectified_idx);
+            ok = false;
+          }
+          i++; // Incrementing counter since there is no enumerate
+               // fctn (until C++23)
+          return ok;
+        });
+  }
+  mOutputs[0]->resize(output_dims);
+  return true;
+}
+
+void Squeeze_Op::setBackend(const std::string &name,
+                            Aidge::DeviceIdx_t device) {
+  if (Registrar<Squeeze_Op>::exists({name})) {
+    SET_IMPL_MACRO(Squeeze_Op, *this, name);
+  } else {
+    mImpl = std::make_shared<Squeeze_OpImpl>(*this);
+  }
+  mOutputs[0]->setBackend(name, device);
+}
+
+void Aidge::Squeeze_OpImpl::forward() {
+  const Squeeze_Op &op_ = static_cast<const Squeeze_Op &>(mOp);
+  // Check if input is provided
+  AIDGE_ASSERT(op_.getInput(0), "Squeeze : missing input 0");
+
+  op_.getOutput(0)->getImpl()->copy(op_.getInput(0)->getImpl()->rawPtr(),
+                                    op_.getInput(0)->size());
+}
+
+} // namespace Aidge
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e88e0f8ca861f4f7765ae3ca71bf864c20b54461
--- /dev/null
+++ b/src/operator/Unsqueeze.cpp
@@ -0,0 +1,127 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Unsqueeze.hpp"
+
+#include <cstdint>
+#include <fmt/core.h>
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Log.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+const std::string Unsqueeze_Op::Type = "Unsqueeze";
+
+bool Aidge::Unsqueeze_Op::dimsForwarded() const {
+  if ((getInput(1) && !getInput(1)->undefined())) {
+    // output dims are data dependent
+    return false;
+  }
+
+  return OperatorTensor::dimsForwarded();
+}
+
+bool Unsqueeze_Op::forwardDims(bool allowDataDependency) {
+  // error checking
+  if (!inputsAssociated(true)) {
+    return false;
+  }
+  std::shared_ptr<Tensor> fallback;
+  // Copy optional input #1, if present, to attribute Axes
+  if (getInput(1)) {
+    if (!this->axes().empty()) {
+      Log::notice("{} : ignoring non-empty \"axes\" attribute because input#1 "
+                  "takes precedence",
+                  type());
+    }
+
+    if (!allowDataDependency) {
+      Log::warn("{} : unable to forwardDims() because output dims are data "
+                "dependent on input#1",
+                type());
+      return false;
+    }
+
+    this->axes().clear(); // If both are provided input would override attrs
+    this->axes().reserve(getInput(1)->size());
+    const auto &axes =
+        getInput(1)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+    std::copy_n(static_cast<int8_t *>(axes.getImpl()->hostPtr()),
+                axes.size(), std::back_inserter(this->axes()));
+  }
+  AIDGE_ASSERT(!this->axes().empty(),
+               "{} : Axes to unsqueeze can be defined via input#1 or axes "
+               "attribute. None of them were provided.",
+               type());
+
+  std::vector<DimSize_t> input_dims = getInput(0)->dims();
+  std::vector<DimIdx_t> axes_rectified_idx;
+  axes_rectified_idx.reserve(this->axes().size());
+  DimIdx_t output_nb_dims = input_dims.size() + this->axes().size();
+
+  for (const int8_t &axis : this->axes()) {
+    AIDGE_ASSERT(axis >= static_cast<int8_t>(-output_nb_dims) &&
+                     axis < static_cast<int8_t>(output_nb_dims),
+                 "{} : Axis index OutOfBounds error, expected value "
+                 "within size limits of input tensor : "
+                 "[-{},{}), got {}.",
+                 type(), output_nb_dims, output_nb_dims - 1, axis);
+    axes_rectified_idx.push_back(
+        static_cast<DimIdx_t>(axis >= 0 ? axis : axis + output_nb_dims));
+  }
+  // sort in ascending order (required by the duplicate check and the insert loop below)
+  std::sort(axes_rectified_idx.begin(), axes_rectified_idx.end());
+  // Raise error if duplicate indexes are found
+  const auto &it = std::adjacent_find(axes_rectified_idx.begin(), axes_rectified_idx.end());
+  AIDGE_ASSERT(
+      it == axes_rectified_idx.end(),
+      "{} : The index {} appears multiple times in list of input dims. "
+      "Check positive and negative indexes.\nRaw indexes :\t{}\nRectified "
+      "indexes :\t{}",
+      type(), *it, this->axes(), axes_rectified_idx);
+
+  // computation
+  std::vector<DimSize_t> output_dims(input_dims);
+  output_dims.reserve(input_dims.size() + this->axes().size());
+  for (const DimIdx_t &axis : axes_rectified_idx) {
+    output_dims.insert(output_dims.begin() + axis, 1);
+  }
+  mOutputs[0]->resize(output_dims);
+  return true;
+}
+
+void Unsqueeze_Op::setBackend(const std::string &name,
+                              Aidge::DeviceIdx_t device) {
+  if (Registrar<Unsqueeze_Op>::exists({name})) {
+    SET_IMPL_MACRO(Unsqueeze_Op, *this, name);
+  } else {
+    mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
+  }
+  mOutputs[0]->setBackend(name, device);
+}
+
+void Aidge::Unsqueeze_OpImpl::forward() {
+  const Unsqueeze_Op &op_ = static_cast<const Unsqueeze_Op &>(mOp);
+  // Check if input is provided
+  AIDGE_ASSERT(op_.getInput(0), "Unsqueeze : missing input 0");
+  op_.getOutput(0)->getImpl()->copy(op_.getInput(0)->getImpl()->rawPtr(),
+                                    op_.getInput(0)->size());
+}
+
+} // namespace Aidge
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 9c2109bf6fdfb1a1fbc57afc3fd09e08a1dfc2db..851f1895c3862ed3deedc73f2ee70f6835b4a8a3 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -33,6 +33,7 @@
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Log.hpp"
 #include "aidge/utils/Types.h"
 
 
@@ -665,7 +666,7 @@ Aidge::Elts_t Aidge::Scheduler::getNbAvailableData(const std::shared_ptr<Node>&
         // => This means data was fed manually to the input, without a Producer
         // In this case, we assume a single-use data (unlike a Producer, which
         // keep producing the data each time it is needed).
-        fmt::print("No producer node attached to input#{} for node {} ({})\n", inputIdx, node->name(), node->type());
+        Log::warn("No producer node attached to input#{} for node {} ({})", inputIdx, node->name(), node->type());
         return Elts_t::DataElts(std::static_pointer_cast<Tensor>(node->getOperator()->getRawInput(inputIdx))->size());
     }
 
diff --git a/unit_tests/operator/Test_Squeeze_Op.cpp b/unit_tests/operator/Test_Squeeze_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..471a1dcd1e45384b2c65da75ddee9d3ec039dc34
--- /dev/null
+++ b/unit_tests/operator/Test_Squeeze_Op.cpp
@@ -0,0 +1,457 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Squeeze.hpp"
+
+#include <aidge/utils/Types.h>
+#include <algorithm>
+#include <array>
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+#include <chrono>
+#include <cmath>
+#include <cstddef> // std::size_t
+#include <cstdint> // std::uint16_t
+#include <fmt/core.h>
+#include <iostream>
+#include <iterator>
+#include <memory>
+#include <numeric> // std::accumulate
+#include <ostream>
+#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
+  Log::setConsoleLevel(Log::Notice);
+  constexpr std::uint16_t NB_TRIALS = 10;
+  // Create a random number generator
+  auto random_seed = Catch::Generators::Detail::getSeed;
+  std::mt19937 gen(random_seed());
+
+  // Random float distribution between 0 and 1
+  constexpr int8_t max_nb_dims = 7;
+  std::uniform_real_distribution<float> tensor_value_dist(0.1f, 1.1f);
+  std::uniform_int_distribution<std::size_t> tensor_nb_dims_dist(
+      std::size_t(1), std::size_t(max_nb_dims));
+  std::uniform_int_distribution<std::size_t> tensor_dims_size_dist(
+      std::size_t(1), std::size_t(5));
+  std::uniform_int_distribution<std::size_t> nb_dims_to_squeeze_dist(
+      std::size_t(1), std::size_t(2));
+  std::uniform_int_distribution<short> idx_dims_to_squeeze_dist(-9, 8);
+
+  std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+
+  SECTION("ERROR : Inputs not ready") {
+    SECTION("unconnected input") {
+      std::shared_ptr<Node> squeeze_node = Squeeze();
+      auto op =
+          std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+      REQUIRE_THROWS(op->forwardDims());
+    }
+
+    SECTION("empty tensor") {
+      // Create the Squeeze Operator
+      std::shared_ptr<Node> squeeze_node = Squeeze(std::vector<int8_t>({0}));
+      auto op =
+          std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+      op->associateInput(0, input_T);
+
+      CHECK(op->forwardDims() == false);
+    }
+  }
+  SECTION("ERROR : nb_dims_to_squeeze>input.size()") {
+    constexpr size_t nb_dims_to_squeeze = 100;
+
+    std::vector<int8_t> dims_to_squeeze(nb_dims_to_squeeze);
+    std::generate(dims_to_squeeze.begin(), dims_to_squeeze.end(),
+                  [&gen, &idx_dims_to_squeeze_dist]() {
+                    return idx_dims_to_squeeze_dist(gen);
+                  });
+    Log::error("dims_to_squeeze = {}", dims_to_squeeze);
+
+    std::shared_ptr<Node> squeeze_node = Squeeze(dims_to_squeeze);
+    auto op =
+        std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+
+    // input tensor
+    const std::size_t nb_dims = tensor_nb_dims_dist(gen);
+    std::vector<std::size_t> dims_in(nb_dims);
+    std::generate(dims_in.begin(), dims_in.end(),
+                  [&tensor_dims_size_dist, &gen]() {
+                    return tensor_dims_size_dist(gen);
+                  });
+
+    // Test
+    input_T->resize(dims_in);
+    op->setInput(0, input_T);
+    REQUIRE_THROWS(op->forwardDims());
+  }
+  SECTION("Compare with reference output") {
+    SECTION("axes is given via attribute") {
+      SECTION("Squeeze a 1-sized-axis") {
+        int8_t nb_dims = 4;
+        std::shared_ptr<Node> squeeze_node = Squeeze(std::vector<int8_t>({0}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            squeeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{1, 2, 3, 4};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims());
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({2, 3, 4}));
+        CHECK((op->getOutput(0)->dims().size()) == 3);
+      }
+      SECTION("Squeeze multiple 1-sized axes") {
+        // test should be successful
+        std::shared_ptr<Node> squeeze_node =
+            Squeeze(std::vector<int8_t>({1, -4}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            squeeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{1, 1, 13, 200};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims());
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>{13, 200});
+        CHECK((op->getOutput(0)->dims().size()) == 2);
+      }
+      SECTION("Squeeze a non-1-Sized axis") {
+        int8_t nb_dims = 4;
+        std::shared_ptr<Node> squeeze_node = Squeeze(std::vector<int8_t>({3}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            squeeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{1, 2, 3, 4};
+        input_T->resize(dims_in);
+
+        REQUIRE_THROWS(op->forwardDims());
+      }
+      SECTION("Squeeze multiple non-sized-axes") {
+        std::shared_ptr<Node> squeeze_node =
+            Squeeze(std::vector<int8_t>({1, -2}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            squeeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        std::array<DimSize_t, 3> dims_in{2, 3, 4};
+        input_T->resize(dims_in);
+
+        REQUIRE_THROWS((op->forwardDims()));
+      }
+    }
+    SECTION("axes is given via tensor") {
+      SECTION("tensor is empty") {
+        // arguments here should be overriden by axes_T values
+        std::shared_ptr<Node> myUnsqueeze =
+            Squeeze(std::vector<std::int8_t>({0, 4}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        auto axes_T =
+            std::make_shared<Aidge::Tensor>(std::vector<DimSize_t>({}));
+        axes_T->setDataType(Aidge::DataType::Int8);
+        axes_T->setBackend("cpu");
+
+        std::vector<DimSize_t> dims_in{3, 1, 4, 1, 1, 5};
+        input_T->resize(dims_in);
+        op->associateInput(0, input_T);
+        op->associateInput(1, axes_T);
+
+        CHECK(op->forwardDims(true));
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({3, 4, 5}));
+      }
+      SECTION("tensor not empty") {
+        // arguments here should be overriden by axes_T values
+        std::shared_ptr<Node> myUnsqueeze =
+            Squeeze(std::vector<std::int8_t>({3, 1}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        auto axes_T =
+            std::make_shared<Aidge::Tensor>(Aidge::Array1D<int8_t, 2>({0, 3}));
+        axes_T->setDataType(Aidge::DataType::Int8);
+        axes_T->setBackend("cpu");
+
+        std::vector<DimSize_t> dims_in{1, 3, 4, 1, 5};
+        input_T->resize(dims_in);
+        op->associateInput(0, input_T);
+        op->associateInput(1, axes_T);
+
+        CHECK(op->forwardDims(true) == true);
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({3, 4, 5}));
+      }
+    }
+  }
+  SECTION("Squeeze()") {
+    // Create the Operator
+    std::shared_ptr<Node> squeeze_node = Squeeze();
+    auto op =
+        std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+    op->associateInput(0, input_T);
+
+    for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
+      // input tensor
+      const std::size_t nb_dims = tensor_nb_dims_dist(gen);
+      std::vector<std::size_t> dims_in(nb_dims);
+
+      std::generate(dims_in.begin(), dims_in.end(),
+                    [&gen, &tensor_dims_size_dist]() {
+                      return tensor_dims_size_dist(gen);
+                    });
+
+      // output tensor
+      std::vector<DimSize_t> dims_out;
+      dims_out.reserve(dims_in.size());
+      std::copy_if(dims_in.begin(), dims_in.end(), std::back_inserter(dims_out),
+                   [](DimSize_t dim) { return dim != 1; });
+      // Test
+      input_T->resize(dims_in);
+      op->setInput(0, input_T);
+      CHECK(op->forwardDims() == true);
+      CHECK(op->getOutput(0)->dims() == dims_out);
+
+      int nb_ones = std::count_if(dims_in.begin(), dims_in.end(),
+                                  [](int8_t dim) { return dim == 1; });
+      CHECK((op->getInput(0)->dims().size() -
+             op->getOutput(0)->dims().size()) == nb_ones);
+    }
+  }
+  SECTION("Squeeze({N,...})") {
+    int number_of_operation{0};
+    for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
+      // Create the Operator
+      size_t nb_dims_to_squeeze = nb_dims_to_squeeze_dist(gen);
+      std::vector<int8_t> dims_to_squeeze(nb_dims_to_squeeze);
+      std::generate(dims_to_squeeze.begin(), dims_to_squeeze.end(),
+                    [&gen, &idx_dims_to_squeeze_dist]() {
+                      return idx_dims_to_squeeze_dist(gen);
+                    });
+      std::shared_ptr<Node> squeeze_node = Squeeze({dims_to_squeeze});
+      auto op =
+          std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+      op->associateInput(0, input_T);
+
+      // input tensor
+      const std::size_t nb_dims_tensor = tensor_nb_dims_dist(gen);
+      std::vector<std::size_t> dims_in(nb_dims_tensor);
+      std::generate(dims_in.begin(), dims_in.end(),
+                    [&gen, &tensor_dims_size_dist]() {
+                      return tensor_dims_size_dist(gen);
+                    });
+      input_T->resize(dims_in);
+      op->setInput(0, input_T);
+
+      // rectifying indexes
+      std::transform(dims_to_squeeze.begin(), dims_to_squeeze.end(),
+                     dims_to_squeeze.begin(),
+                     [&nb_dims_tensor](int8_t dim_to_squeeze) {
+                       return dim_to_squeeze < 0
+                                  ? dim_to_squeeze + nb_dims_tensor
+                                  : dim_to_squeeze;
+                     });
+      std::sort(dims_to_squeeze.begin(), dims_to_squeeze.end());
+      auto it = std::unique(dims_to_squeeze.begin(), dims_to_squeeze.end());
+      dims_to_squeeze.erase(it, dims_to_squeeze.end());
+
+      // ensuring arguments given to Squeeze are good
+      bool not_in_bounds = false;
+      bool dim_to_squeeze_not_1_sized = false;
+      for (const auto dim_to_squeeze : dims_to_squeeze) {
+        not_in_bounds = dim_to_squeeze >= nb_dims_tensor;
+        if (not_in_bounds) {
+          break;
+        }
+        dim_to_squeeze_not_1_sized = dims_in.at(dim_to_squeeze) != 1;
+        if (dim_to_squeeze_not_1_sized) {
+          break;
+        }
+      }
+
+      if (nb_dims_tensor > max_nb_dims || not_in_bounds ||
+          dim_to_squeeze_not_1_sized) {
+        REQUIRE_THROWS(op->forwardDims());
+      } else {
+        // output tensor
+        int i = 0;
+        std::vector<DimSize_t> dims_out;
+        dims_out.reserve(dims_in.size());
+        std::copy_if(dims_in.begin(), dims_in.end(),
+                     std::back_inserter(dims_out),
+                     [&dims_to_squeeze, &i](DimSize_t dim) {
+                       bool ok = dim != 1 ||
+                                 !std::binary_search(dims_to_squeeze.begin(),
+                                                     dims_to_squeeze.end(), i);
+                       i++; // incrementing counter since C++ has not enumerate
+                            // fctn (until C++23)
+                       return ok;
+                     });
+        CHECK(op->forwardDims() == true);
+        CHECK(op->getOutput(0)->dims() == dims_out);
+      }
+    }
+  }
+}
+
+TEST_CASE("[core/operator] Squeeze(forward)", "[Squeeze][forward]") {
+  // Randomized forward test : build a Squeeze node with random axes, feed it
+  // a random tensor, and check that it either throws on invalid axes or
+  // outputs the expected dims with the input data copied unchanged.
+  Log::setConsoleLevel(Log::Notice);
+  constexpr std::uint16_t NB_TRIALS = 10;
+  // Random number generator seeded by Catch2 (reproducible via --rng-seed)
+  auto random_seed = Catch::Generators::Detail::getSeed;
+  std::mt19937 gen(random_seed());
+
+  constexpr int8_t max_nb_dims = 7;
+  std::uniform_real_distribution<float> tensor_value_dist(0.1f, 1.1f);
+  std::uniform_int_distribution<std::size_t> tensor_nb_dims_dist(
+      std::size_t(1), std::size_t(max_nb_dims));
+  std::uniform_int_distribution<std::size_t> tensor_dims_size_dist(
+      std::size_t(1), std::size_t(5));
+  std::uniform_int_distribution<std::size_t> nb_dims_to_squeeze_dist(
+      std::size_t(1), std::size_t(2));
+  // axis candidates deliberately overshoot [-max_nb_dims, max_nb_dims - 1]
+  // so that out-of-bounds arguments are also exercised
+  std::uniform_int_distribution<short> idx_dims_to_squeeze_dist(-9, 8);
+
+  std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+
+  // BENCHMARKING
+  std::chrono::time_point<std::chrono::system_clock> start;
+  std::chrono::time_point<std::chrono::system_clock> end;
+  std::chrono::duration<double, std::micro> duration{};
+
+  int number_of_operation{0};
+  for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
+    // Create the Operator with random axes to squeeze
+    size_t nb_dims_to_squeeze = nb_dims_to_squeeze_dist(gen);
+    std::vector<int8_t> dims_to_squeeze(nb_dims_to_squeeze);
+    std::generate(dims_to_squeeze.begin(), dims_to_squeeze.end(),
+                  [&gen, &idx_dims_to_squeeze_dist]() {
+                    return idx_dims_to_squeeze_dist(gen);
+                  });
+    std::shared_ptr<Node> squeeze_node = Squeeze({dims_to_squeeze});
+    auto op =
+        std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+
+    // input tensor with random rank & dim sizes
+    const std::size_t nb_dims_tensor = tensor_nb_dims_dist(gen);
+    std::vector<std::size_t> dims_in(nb_dims_tensor);
+    std::generate(dims_in.begin(), dims_in.end(),
+                  [&gen, &tensor_dims_size_dist]() {
+                    return tensor_dims_size_dist(gen);
+                  });
+    input_T->resize(dims_in);
+    op->setInput(0, input_T);
+
+    // rectifying indexes : map negative axes to their positive equivalent
+    std::transform(dims_to_squeeze.begin(), dims_to_squeeze.end(),
+                   dims_to_squeeze.begin(),
+                   [&nb_dims_tensor](int8_t dim_to_squeeze) {
+                     return dim_to_squeeze < 0 ? dim_to_squeeze + nb_dims_tensor
+                                               : dim_to_squeeze;
+                   });
+
+    // ensuring arguments given to Squeeze are good : every axis must be in
+    // bounds and refer to a 1-sized dimension
+    bool not_in_bounds = false;
+    bool dim_to_squeeze_not_1_sized = false;
+    for (const auto dim_to_squeeze : dims_to_squeeze) {
+      not_in_bounds = dim_to_squeeze >= nb_dims_tensor;
+      if (not_in_bounds) {
+        break;
+      }
+      dim_to_squeeze_not_1_sized = dims_in.at(dim_to_squeeze) != 1;
+      if (dim_to_squeeze_not_1_sized) {
+        break;
+      }
+    }
+    if (nb_dims_tensor > max_nb_dims || not_in_bounds ||
+        dim_to_squeeze_not_1_sized) {
+      REQUIRE_THROWS(op->forwardDims());
+    } else {
+      // expected output dims : copy of dims_in minus the squeezed 1-sized
+      // axes
+      std::vector<DimSize_t> dims_out;
+      dims_out.reserve(dims_in.size());
+      for (DimIdx_t i = 0; i < dims_in.size(); ++i) {
+        if (dims_in[i] == 1 &&
+            std::find(dims_to_squeeze.begin(), dims_to_squeeze.end(), i) !=
+                dims_to_squeeze.end()) {
+          continue;
+        }
+        dims_out.push_back(dims_in[i]);
+      }
+      CHECK(op->forwardDims());
+      CHECK(op->getOutput(0)->dims() == dims_out);
+
+      SECTION("forward") {
+        // Create the input Tensor
+        std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+        input_T->setDataType(DataType::Float32);
+        input_T->setBackend("cpu");
+        op->associateInput(0, input_T);
+
+        // Create results Tensor
+        std::shared_ptr<Tensor> result_T = std::make_shared<Tensor>();
+        result_T->setDataType(DataType::Float32);
+        result_T->setBackend("cpu");
+
+        const std::size_t nb_elems =
+            std::accumulate(dims_in.cbegin(), dims_in.cend(), std::size_t(1),
+                            std::multiplies<std::size_t>());
+        float *array_in = new float[nb_elems];
+        for (std::size_t i = 0; i < nb_elems; ++i) {
+          float val = tensor_value_dist(gen);
+          array_in[i] = val;
+        }
+        number_of_operation += nb_elems; // Copying all values : 1
+                                         // assignation / item in the tensor
+        // input0
+        input_T->resize(dims_in);
+        input_T->getImpl()->setRawPtr(array_in, nb_elems);
+
+        // expected result reuses the same buffer : squeeze only reshapes,
+        // the data layout is untouched
+        result_T->resize(dims_out);
+        result_T->getImpl()->setRawPtr(array_in, nb_elems);
+
+        CHECK(op->forwardDims() == true);
+        start = std::chrono::system_clock::now();
+        REQUIRE_NOTHROW(squeeze_node->forward());
+        end = std::chrono::system_clock::now();
+        duration +=
+            std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+        CHECK(result_T->nbDims() == op->getOutput(0)->nbDims());
+        for (DimSize_t i = 0; i < op->getOutput(0)->nbDims(); ++i) {
+          CHECK(result_T->dims().at(i) == op->getOutput(0)->dims().at(i));
+        }
+        CHECK(approxEq<float>(*result_T, *(op->getOutput(0))));
+
+        delete[] array_in;
+      }
+      std::cout << "Squeeze total execution time : " << duration.count() << "µs"
+                << std::endl;
+      std::cout << "Number of operations : " << number_of_operation
+                << std::endl;
+      std::cout << "Operation / µs = " << number_of_operation / duration.count()
+                << std::endl;
+    }
+  }
+}
+
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Unsqueeze_Op.cpp b/unit_tests/operator/Test_Unsqueeze_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..79f5b89b1c08f409b214a9439431c2d2a51ddbd2
--- /dev/null
+++ b/unit_tests/operator/Test_Unsqueeze_Op.cpp
@@ -0,0 +1,382 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <algorithm>
+#include <chrono>
+#include <cmath>
+#include <cstddef> // std::size_t
+#include <cstdint> // std::uint16_t
+#include <fmt/core.h>
+#include <iostream>
+#include <memory>
+#include <numeric> // std::accumulate
+#include <ostream>
+#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
+#include <vector>
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Unsqueeze.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+// Check that a set of axes is a valid argument for Unsqueeze on a tensor of
+// rank `nb_dims_input_tensor` : every axis must fall inside the output rank
+// (input rank + number of axes inserted) and no axis may appear twice.
+// Axes are expected to have been rectified (made non-negative) beforehand.
+bool ensure_axes_validity(std::vector<int8_t> dims_to_unsqueeze,
+                          DimIdx_t nb_dims_input_tensor) {
+
+  // Check both bounds explicitly : the previous version relied on the
+  // signed-to-unsigned promotion wrapping negative axes to huge values to
+  // reject them, which worked but only by accident.
+  bool in_bounds =
+      std::all_of(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end(),
+                  [&nb_dims_input_tensor,
+                   &dims_to_unsqueeze](const int8_t &dim_to_unsqueeze) {
+                    return dim_to_unsqueeze >= 0 &&
+                           (dim_to_unsqueeze <
+                            nb_dims_input_tensor + dims_to_unsqueeze.size());
+                  });
+
+  // A repeated axis is invalid : sort then look for adjacent duplicates.
+  std::sort(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end());
+  bool index_appear_twice =
+      dims_to_unsqueeze.end() !=
+      std::adjacent_find(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end());
+
+  return in_bounds && !index_appear_twice;
+}
+
+// Compute the expected output dims of Unsqueeze : insert a 1-sized axis at
+// each (already rectified, non-negative) index of `dims_to_unsqueeze`.
+std::vector<DimSize_t>
+generate_unsqueeze_output_dims(std::vector<size_t> dims_in,
+                               std::vector<int8_t> dims_to_unsqueeze) {
+
+  // Insert lowest axes first so that later insertions are not shifted.
+  std::sort(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end());
+  std::vector<DimSize_t> dims_out(dims_in);
+  dims_out.reserve(dims_in.size() + dims_to_unsqueeze.size());
+  // Iterate with the container's own element type : binding a DimIdx_t
+  // (unsigned) reference to int8_t elements converts each element, and a
+  // negative axis would silently wrap into a huge insertion offset.
+  for (const int8_t dim : dims_to_unsqueeze) {
+    dims_out.insert(dims_out.begin() + dim, 1);
+  }
+  return dims_out;
+}
+
+// Map each negative axis index to its positive equivalent by adding
+// `offset`; non-negative indexes are passed through unchanged.
+std::vector<int8_t> rectify_indexes(const std::vector<int8_t> & dims_to_unsqueeze,
+                                    const int8_t offset) {
+  std::vector<int8_t> output(dims_to_unsqueeze.size());
+  std::transform(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end(),
+                 output.begin(), [offset](const int8_t dim) {
+                   return dim < 0 ? dim + offset : dim;
+                 });
+  return output;
+}
+
+TEST_CASE("[core/operator] Unsqueeze(forwardDims)",
+          "[Unsqueeze][forwardDims]") {
+  // Checks Unsqueeze's output-dimension computation : fixed reference cases
+  // (axes given as an attribute or as a second input tensor) plus a
+  // randomized sweep that also exercises invalid axes.
+  constexpr std::uint16_t NB_TRIALS = 10;
+  // Random number generator seeded by Catch2 (reproducible via --rng-seed)
+  auto random_seed = Catch::Generators::Detail::getSeed;
+  std::mt19937 gen(random_seed());
+
+  std::uniform_int_distribution<std::size_t> tensor_dims_size_dist(
+      std::size_t(1), std::size_t(10));
+  std::uniform_int_distribution<std::size_t> tensor_nb_dims_dist(
+      std::size_t(1), std::size_t(7));
+  std::uniform_int_distribution<std::size_t> nb_dims_to_unsqueeze_dist(
+      std::size_t(1), std::size_t(8));
+
+  std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+
+  SECTION("ERROR : Inputs not ready") {
+    SECTION("unconnected input") {
+      std::shared_ptr<Node> myUnsqueeze =
+          Unsqueeze(std::vector<std::int8_t>({0}));
+      auto op =
+          std::static_pointer_cast<OperatorTensor>(myUnsqueeze->getOperator());
+      REQUIRE_THROWS(op->forwardDims());
+    }
+
+    // local tensor deliberately shadows the outer one : it stays empty
+    std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+
+    SECTION("empty tensor") {
+      // Create the Unsqueeze Operator
+      std::shared_ptr<Node> myUnsqueeze =
+          Unsqueeze(std::vector<std::int8_t>({0}));
+      auto op =
+          std::static_pointer_cast<OperatorTensor>(myUnsqueeze->getOperator());
+      op->associateInput(0, input_T);
+
+      CHECK(op->forwardDims() == false);
+    }
+  }
+  SECTION("Compare with reference output") {
+    int8_t nb_dims = 3;
+    SECTION("axes is given via attribute") {
+      SECTION("unsqueez(0)") {
+        std::shared_ptr<Node> myUnsqueeze =
+            Unsqueeze(std::vector<std::int8_t>({0}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{2, 3, 4};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims() == true);
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({1, 2, 3, 4}));
+        CHECK((op->getOutput(0)->dims().size()) == nb_dims + 1);
+      }
+      SECTION("Unsqueeze(1)") {
+        std::shared_ptr<Node> myUnsqueeze =
+            Unsqueeze(std::vector<std::int8_t>({1}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        std::array<DimSize_t, 3> dims_in{2, 3, 4};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims() == true);
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({2, 1, 3, 4}));
+        CHECK((op->getOutput(0)->dims().size()) == nb_dims + 1);
+      }
+      SECTION("Unsqueeze(2)") {
+        std::shared_ptr<Node> myUnsqueeze =
+            Unsqueeze(std::vector<std::int8_t>({2}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{2, 3, 4};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims() == true);
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({2, 3, 1, 4}));
+        CHECK((op->getOutput(0)->dims().size()) == nb_dims + 1);
+      }
+      SECTION("Unsqueeze({0,4})") {
+        std::shared_ptr<Node> myUnsqueeze =
+            Unsqueeze(std::vector<std::int8_t>({0, 4}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{3, 4, 5};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims() == true);
+        CHECK(op->getOutput(0)->dims() ==
+              std::vector<DimSize_t>({1, 3, 4, 5, 1}));
+      }
+    }
+    SECTION("axes is given via tensor") {
+        // arguments here should be overridden by axes_T values
+        std::shared_ptr<Node> myUnsqueeze =
+            Unsqueeze(std::vector<std::int8_t>({0, 4}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        auto axes_T = std::make_shared<Aidge::Tensor>(
+            Aidge::Array1D<int8_t, 3>({1, 3, 4}));
+        axes_T->setDataType(Aidge::DataType::Int8);
+        axes_T->setBackend("cpu");
+
+        std::vector<DimSize_t> dims_in{3, 4, 5};
+        input_T->resize(dims_in);
+        op->associateInput(0, input_T);
+        op->associateInput(1, axes_T);
+
+        CHECK(op->forwardDims(true) == true);
+        CHECK(op->getOutput(0)->dims() ==
+              std::vector<DimSize_t>({3, 1, 4, 1, 1, 5}));
+    }
+  }
+  SECTION("Random testing") {
+    SECTION("Unsqueeze({N,...})") {
+      for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
+        const size_t nb_dims_to_unsqueeze = nb_dims_to_unsqueeze_dist(gen);
+        const size_t nb_dims_tensor = tensor_nb_dims_dist(gen);
+        const size_t idx_dims_to_unsqueeze_max =
+            nb_dims_to_unsqueeze + nb_dims_tensor;
+        const int variance_error = 2;
+        // Compute the distribution bounds in signed arithmetic : negating a
+        // size_t wraps around, and narrowing the wrapped value into `short`
+        // is implementation-defined.
+        std::uniform_int_distribution<short> idx_dims_to_unsqueeze_dist(
+            static_cast<short>(-static_cast<int>(idx_dims_to_unsqueeze_max) -
+                               variance_error),
+            static_cast<short>(static_cast<int>(idx_dims_to_unsqueeze_max) -
+                               1 + variance_error));
+        // Create the Operator with random axes (possibly invalid on purpose)
+        std::vector<int8_t> dims_to_unsqueeze(nb_dims_to_unsqueeze);
+        std::generate(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end(),
+                      [&gen, &idx_dims_to_unsqueeze_dist]() {
+                        return idx_dims_to_unsqueeze_dist(gen);
+                      });
+        std::shared_ptr<Node> unsqueeze_node = Unsqueeze(dims_to_unsqueeze);
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            unsqueeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        // input tensor with random rank & dim sizes
+        std::vector<std::size_t> dims_in(nb_dims_tensor);
+        std::generate(dims_in.begin(), dims_in.end(),
+                      [&gen, &tensor_dims_size_dist]() {
+                        return tensor_dims_size_dist(gen);
+                      });
+        input_T->resize(dims_in);
+        op->setInput(0, input_T);
+
+        // map negative axes to their positive equivalent in the output rank
+        dims_to_unsqueeze = rectify_indexes(
+            dims_to_unsqueeze, input_T->nbDims() + dims_to_unsqueeze.size());
+        bool dims_to_unsqueeze_valid =
+            ensure_axes_validity(dims_to_unsqueeze, input_T->nbDims());
+
+        if (!dims_to_unsqueeze_valid) {
+          REQUIRE_THROWS(op->forwardDims(true));
+        } else {
+          // expected output dims : dims_in with a 1 inserted at each axis
+          std::vector<DimSize_t> dims_out =
+              generate_unsqueeze_output_dims(dims_in, dims_to_unsqueeze);
+          CHECK(op->forwardDims(true) == true);
+          CHECK(op->getOutput(0)->dims() == dims_out);
+        }
+      }
+    }
+  }
+}
+
+TEST_CASE("[core/operator] Unsqueeze(forward)", "[Unsqueeze][forward]") {
+  // Randomized forward test : build an Unsqueeze node with random axes, feed
+  // it a random tensor, and check that it either throws on invalid axes or
+  // outputs the expected dims with the input data copied unchanged.
+  constexpr std::uint16_t NB_TRIALS = 10;
+  // Create a random number generator
+  std::random_device rd;
+  auto random_seed = rd();
+  std::cout << "True random seed : " << random_seed << std::endl;
+  std::mt19937 gen(random_seed);
+  // Random float distribution between 0 and 1
+  std::uniform_real_distribution<float> valueDist(0.1f, 1.1f);
+  std::uniform_int_distribution<std::size_t> tensor_dims_size_dist(
+      std::size_t(1), std::size_t(10));
+  std::size_t min_tensor_nb_dims{1};
+  std::size_t max_tensor_nb_dims{7};
+  std::uniform_int_distribution<std::size_t> tensor_nb_dims_dist(
+      min_tensor_nb_dims, max_tensor_nb_dims);
+  std::uniform_int_distribution<std::size_t> nb_dims_to_unsqueeze_dist(
+      std::size_t(1), std::size_t(8));
+  // axis candidates deliberately overshoot the valid range so that
+  // out-of-bounds arguments are also exercised
+  std::uniform_int_distribution<short> idx_dims_to_unsqueeze_dist(-9, 8);
+
+  std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+  input_T->setDataType(DataType::Float32);
+  input_T->setBackend("cpu");
+  std::shared_ptr<Tensor> result_T = std::make_shared<Tensor>();
+  result_T->setDataType(DataType::Float32);
+  result_T->setBackend("cpu");
+
+  // BENCHMARKING
+  std::chrono::time_point<std::chrono::system_clock> start;
+  std::chrono::time_point<std::chrono::system_clock> end;
+  std::chrono::duration<double, std::micro> duration{};
+
+  int number_of_operation{0};
+  for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
+    // Create the Operator with random axes to insert
+    size_t nb_dims_to_unsqueeze = nb_dims_to_unsqueeze_dist(gen);
+    std::vector<int8_t> dims_to_unsqueeze(nb_dims_to_unsqueeze);
+    std::generate(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end(),
+                  [&gen, &idx_dims_to_unsqueeze_dist]() {
+                    return idx_dims_to_unsqueeze_dist(gen);
+                  });
+    std::shared_ptr<Node> unsqueeze_node = Unsqueeze(dims_to_unsqueeze);
+    auto op =
+        std::static_pointer_cast<OperatorTensor>(unsqueeze_node->getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+    op->associateInput(0, input_T);
+
+    // input tensor with random rank & dim sizes
+    const std::size_t nb_dims_tensor = tensor_nb_dims_dist(gen);
+    std::vector<std::size_t> dims_in(nb_dims_tensor);
+    std::generate(dims_in.begin(), dims_in.end(),
+                  [&gen, &tensor_dims_size_dist]() {
+                    return tensor_dims_size_dist(gen);
+                  });
+    input_T->resize(dims_in);
+    op->setInput(0, input_T);
+
+    // rectifying indexes : map negative axes to their positive equivalent
+    // in the output rank (input rank + number of axes inserted)
+    std::transform(
+        dims_to_unsqueeze.begin(), dims_to_unsqueeze.end(),
+        dims_to_unsqueeze.begin(),
+        [&nb_dims_tensor, &nb_dims_to_unsqueeze](int8_t dim_to_unsqueeze) {
+          return dim_to_unsqueeze < 0
+                     ? dim_to_unsqueeze +
+                           (nb_dims_tensor + nb_dims_to_unsqueeze)
+                     : dim_to_unsqueeze;
+        });
+
+    // ensuring arguments given to Unsqueeze are good
+    bool axes_to_unsqueeze_valid =
+        ensure_axes_validity(dims_to_unsqueeze, input_T->nbDims());
+    if (!axes_to_unsqueeze_valid) {
+      REQUIRE_THROWS(op->forwardDims(true));
+    } else {
+      // expected output dims : dims_in with a 1 inserted at each axis
+      std::vector<DimSize_t> dims_out =
+          generate_unsqueeze_output_dims(dims_in, dims_to_unsqueeze);
+      CHECK(op->forwardDims(true) == true);
+      CHECK(op->getOutput(0)->dims() == dims_out);
+
+      SECTION("forward") {
+        const std::size_t nb_elems =
+            std::accumulate(dims_in.cbegin(), dims_in.cend(), std::size_t(1),
+                            std::multiplies<std::size_t>());
+        float *array_in = new float[nb_elems];
+        for (std::size_t i = 0; i < nb_elems; ++i) {
+          array_in[i] = valueDist(gen);
+        }
+        number_of_operation += nb_elems; // Copying all values : 1
+                                         // assignation / item in the tensor
+
+        // input0
+        input_T->resize(dims_in);
+        input_T->getImpl()->setRawPtr(array_in, nb_elems);
+
+        // results : reuses the same buffer since unsqueeze only reshapes.
+        // NOTE(review): input_T and result_T share array_in and it is
+        // deleted once below — assumes setRawPtr does not take ownership;
+        // confirm against the backend implementation.
+        result_T->resize(dims_out);
+        result_T->getImpl()->setRawPtr(array_in, nb_elems);
+
+        CHECK(op->forwardDims(true) == true);
+        start = std::chrono::system_clock::now();
+        REQUIRE_NOTHROW(unsqueeze_node->forward());
+        end = std::chrono::system_clock::now();
+        duration +=
+            std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+        CHECK(result_T->nbDims() == op->getOutput(0)->nbDims());
+        for (DimSize_t i = 0; i < op->getOutput(0)->nbDims(); ++i) {
+          CHECK(result_T->dims().at(i) == op->getOutput(0)->dims().at(i));
+        }
+        CHECK(approxEq<float>(*result_T, *(op->getOutput(0))));
+
+        delete[] array_in;
+      }
+    }
+    std::cout << "Unsqueeze total execution time : " << duration.count() << "µs"
+              << std::endl;
+    std::cout << "Number of operations : " << number_of_operation << std::endl;
+    std::cout << "Operation / µs = " << number_of_operation / duration.count()
+              << std::endl;
+  }
+}
+
+} // namespace Aidge