diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..6726be68ac92a388c4e658d7815de90287991b2f
--- /dev/null
+++ b/benchmark/benchmark.py
@@ -0,0 +1,326 @@
+"""
+Operator Kernel Performance Benchmarking Script
+
+This script benchmarks operator kernels using a specified inference module.
+It supports timing measurements and (optionally) output comparisons with ONNXRuntime.
+The configuration is provided via a JSON file.
+"""
+
+import argparse
+import copy
+import importlib
+import json
+import os
+import sys
+from typing import Any
+
+import numpy as np
+import onnx
+
+import aidge_core as ai
+import aidge_onnx
+from aidge_onnx.generate_singleop_onnx import create_onnx_model
+
+# Benchmark parameters
+NB_WARMUPS: int = 10
+NB_ITERATIONS: int = 50
+
+
+def load_inference_module(module_name: str):
+    """
+    Dynamically imports and returns the inference module.
+    Exits if the module is not installed.
+    """
+    try:
+        module = importlib.import_module(module_name)
+        print(f"'{module_name}' module successfully imported")
+        return module
+    except ImportError:
+        print(
+            f"Error: {module_name} is not installed. Install it using 'pip install {module_name}'."
+        )
+        sys.exit(1)
+
+
+def load_config(config_file: str) -> dict:
+    """
+    Loads and returns the JSON configuration from the given file.
+    """
+    with open(config_file, "r") as f:
+        return json.load(f)
+
+
+def update_test_config(
+    param: str,
+    value,
+    base_attributes: dict,
+    base_input_shapes: list,
+    other_parameters: dict,
+    operator_attributes: list,
+):
+    """
+    Updates the operator attributes and input shapes based on the test parameter.
+
+    Returns:
+        tuple: (updated_attributes, updated_input_shapes) or (None, None) if keys are missing.
+    """
+    attributes = copy.deepcopy(base_attributes)
+    input_shapes = copy.deepcopy(base_input_shapes)
+
+    # Update if the parameter is a valid operator attribute
+    if param in operator_attributes:
+        attributes[param] = value
+
+    try:
+        extra_attrs = other_parameters[param][str(value)]["attributes"]
+    except KeyError:
+        print(
+            f"'{param}': '{value}': 'attributes' - Key not found in other_parameters. Config file may be ill-formed."
+        )
+        return None, None
+    attributes.update(extra_attrs)
+
+    try:
+        extra_input_shapes = other_parameters[param][str(value)]["input_shapes"]
+    except KeyError:
+        print(
+            f"'{param}': '{value}': 'input_shapes' - Key not found in other_parameters. Config file may be ill-formed."
+        )
+        return None, None
+
+    for shape_update in extra_input_shapes:
+        name, new_shape = shape_update
+        for base_shape in input_shapes:
+            if base_shape[0] == name:
+                base_shape[1] = new_shape
+                break
+
+    return attributes, input_shapes
+
+
+# def get_results_file_path(module_name: str, operator_aidge: str, save_directory: str) -> str:
+#     """
+#     Constructs and returns the file path for saving the benchmark results.
+#     """
+#     if module_name == "onnxruntime":
+#         filename = f"{operator_aidge}_onnxruntime.json"
+#     elif module_name == "pytorch":
+#         filename = f"{operator_aidge}_pytorch.json"
+#     else:
+#         filename = f"{operator_aidge}.json"
+#     return os.path.join(save_directory, filename)
+
+
+def measure_inference_time(
+    module_name: str, model: onnx.ModelProto, input_data, inference_module=None
+) -> list[float]:
+    """
+    Measures inference time using the appropriate benchmark function.
+    """
+    if module_name == "onnxruntime":
+        import benchmark_onnxruntime
+
+        return benchmark_onnxruntime.measure_inference_time(
+            model, {v[0]: v[1] for v in input_data}, NB_WARMUPS, NB_ITERATIONS
+        )
+    elif module_name == "torch":
+        import benchmark_torch
+
+        return benchmark_torch.measure_inference_time(
+            model, {v[0]: v[1] for v in input_data}, NB_WARMUPS, NB_ITERATIONS
+        )
+    else:
+        model = aidge_onnx.load(model=model) if "aidge" in module_name else model
+        return inference_module.benchmark.measure_inference_time(
+            model, input_data, NB_WARMUPS, NB_ITERATIONS
+        )
+
+
+def compute_output(
+    module_name: str, model: onnx.ModelProto, input_data, inference_module
+) -> list[np.ndarray]:
+    """
+    Computes the model output using the appropriate backend's compute_output function.
+    """
+    if module_name == "onnxruntime":
+        import benchmark_onnxruntime
+
+        return benchmark_onnxruntime.compute_output(
+            model, {v[0]: v[1] for v in input_data}
+        )
+    elif module_name == "torch":
+        import benchmark_torch
+
+        return benchmark_torch.compute_output(model, {v[0]: v[1] for v in input_data})
+    else:
+        model = aidge_onnx.load(model=model) if "aidge" in module_name else model
+        return inference_module.benchmark.compute_output(model, input_data)
+
+
+def prepare_input_data(
+    input_shapes: list[tuple[str, list[int]]], initializer_rank: int
+) -> list[tuple[str, np.ndarray]]:
+    """
+    Generates random float32 input data for the first `initializer_rank` inputs.
+    """
+    data: list[tuple[str, np.ndarray]] = []
+    for i, conf in enumerate(input_shapes):
+        name, shape = conf
+        if i < initializer_rank:
+            random_array = np.random.rand(*shape).astype(np.float32)
+            data.append((name, random_array))
+    return data
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Operator Kernel Performance Benchmarking"
+    )
+    parser.add_argument(
+        "--config-file",
+        "-cf",
+        type=str,
+        required=True,
+        help="Path to configuration JSON with operator type, attributes, and input sizes.",
+    )
+    parser.add_argument(
+        "--module-to-bench",
+        "-mtb",
+        type=str,
+        required=True,
+        help="Name of the module containing the inference functions",
+    )
+    parser.add_argument(
+        "--compare-with-onnxruntime",
+        "-cwo",
+        action="store_true",
+        help="Compare output with ONNXRuntime",
+    )
+    parser.add_argument(
+        "--time", "-t", action="store_true", help="Compute inference time"
+    )
+    parser.add_argument(
+        "--save-directory",
+        type=str,
+        required=True,
+        help="Directory to save the results",
+    )
+    args = parser.parse_args()
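+    # Illustrative invocation (a sketch; the config file, module, and directory
+    # names are hypothetical):
+    #   python benchmark.py --config-file conv_config.json --module-to-bench torch \
+    #       --time --compare-with-onnxruntime --save-directory results/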
+
+    compare_mode = args.compare_with_onnxruntime
+    time_mode = args.time
+    module_name = args.module_to_bench
+    save_directory = args.save_directory
+
+    # Load the inference module
+    inference_module = load_inference_module(module_name)
+
+    # Configure aidge logging
+    ai.Log.set_console_level(ai.Level.Error)
+    ai.Log.set_precision(10)
+
+    # Load configuration
+    config = load_config(args.config_file)
+    operator_name: str = config["operator"]
+    opset_version: int = config["opset_version"]
+    initializer_rank: int = config.get("initializer_rank", 1)
+
+    base_input_shapes: list[tuple[str, list[int]]] = config["base_configuration"][
+        "input_shapes"
+    ]
+    base_attributes: dict = config["base_configuration"].get("attributes", {})
+
+    main_parameters: dict[str, Any] = config["test_configuration"].get(
+        "main_parameters", {}
+    )
+    other_parameters: dict[str, dict] = config["test_configuration"].get(
+        "other_parameters", {}
+    )
+
+    # Get operator attributes from the schema for filtering test parameters
+    operator_schema = onnx.defs.get_schema(operator_name, opset_version)
+    operator_attributes: list[str] = list(operator_schema.attributes)
+
+    # Create a base ONNX model to determine the operator type for naming the results file
+    base_model: onnx.ModelProto = create_onnx_model(
+        operator_name,
+        opset_version,
+        base_input_shapes,
+        initializer_rank,
+        **base_attributes,
+    )
+    operator_aidge: str = list(
+        aidge_onnx.load(model=base_model).get_ordered_outputs()
+    )[0][0].type()
+
+    # Initialize or load existing benchmark results
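+    # Results layout (illustrative):
+    #   {"library": "<module name>",
+    #    "time":    {param: {str(value): [per-run times in seconds]}},
+    #    "compare": {param: {str(value): true/false}}}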
+    results = {"library": "", "compare": {}, "time": {}}
+    filename: str = f"{operator_aidge.lower()}_{module_name}.json"
+    results_file_path = os.path.join(save_directory, filename)
+    # results_file_path = get_results_file_path(module_name, operator_aidge, save_directory)
+    if os.path.exists(results_file_path):
+        with open(results_file_path, "r") as f:
+            results = json.load(f)
+    results["library"] = module_name
+
+    # Loop over each test parameter and its values
+    print("Starting tests...")
+    for param, test_values in main_parameters.items():
+        if time_mode:
+            results["time"].setdefault(param, {})
+        if compare_mode:
+            results["compare"].setdefault(param, {})
+
+        for value in test_values:
+            print(f"▷ {param} -- {value}")
+            updated_attrs, updated_input_shapes = update_test_config(
+                param,
+                value,
+                base_attributes,
+                base_input_shapes,
+                other_parameters,
+                operator_attributes,
+            )
+            if updated_attrs is None or updated_input_shapes is None:
+                continue  # Skip this test case if configuration is missing
+
+            # Create the updated ONNX model
+            model = create_onnx_model(
+                operator_name,
+                opset_version,
+                updated_input_shapes,
+                initializer_rank,
+                **updated_attrs,
+            )
+
+            input_data = prepare_input_data(updated_input_shapes, initializer_rank)
+
+            if time_mode:
+                print(f" {'├' if compare_mode else '└'}┬─Measuring kernel inference time...")
+                timing = measure_inference_time(module_name, model, input_data, inference_module)
+                results["time"][param][str(value)] = timing
+                print(
+                    f" {'│' if compare_mode else ' '}└─[ time = {np.array(timing).mean():.2e} ± {np.array(timing).std():.2e} seconds ]"
+                )
+
+            if compare_mode:
+                print(f" └┬─Assessing results are the same as 'onnxruntime' module...")
+                ref = compute_output("onnxruntime", model, input_data, None)
+                tested = compute_output(module_name, model, input_data, inference_module)
+
+                if len(ref) > 1:
+                    print("Multi-output comparison not handled yet")
+                    sys.exit(1)
+                results["compare"][param][str(value)] = bool(
+                    np.all(np.isclose(ref, tested))
+                )
+                print(
+                    f"  └─[ {'o' if results['compare'][param][str(value)] else 'x'} ]"
+                )
+            print()
+
+    # Save results
+    print(f"Printing results to JSON '{results_file_path}'")
+    with open(results_file_path, "w") as outfile:
+        json.dump(results, outfile, indent=4)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/benchmark/benchmark_onnxruntime.py b/benchmark/benchmark_onnxruntime.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb7336bcfdaa604d36e61d9404907ae3ee48708c
--- /dev/null
+++ b/benchmark/benchmark_onnxruntime.py
@@ -0,0 +1,39 @@
+import numpy as np
+import onnxruntime as ort
+from onnx import ModelProto
+import time
+
+def measure_inference_time(model: ModelProto, input_data: dict[str, np.ndarray], nb_warmup: int = 10, nb_iterations: int = 50) -> list[float]:
+    """
+    Run the provided ONNX model using ONNXRuntime.
+    Performs `nb_warmup` warm-up runs followed by `nb_iterations` timed runs (using CPU process time).
+
+    Args:
+        model: The ONNX model (ModelProto).
+        input_data: Dictionary mapping all input names to NumPy arrays.
+        nb_warmup: Number of untimed warm-up runs.
+        nb_iterations: Number of timed runs.
+
+    Returns:
+        List of CPU times (in seconds) for the timed runs.
+    """
+    sess_opt = ort.SessionOptions()
+    sess_opt.intra_op_num_threads = 1
+    sess = ort.InferenceSession(model.SerializeToString(), sess_opt)
+
+    timings = []
+    # Warm-up runs followed by timed runs.
+    for i in range(nb_warmup + nb_iterations):
+        if i < nb_warmup:
+            sess.run(None, input_data)
+        else:
+            start = time.process_time()
+            sess.run(None, input_data)
+            end = time.process_time()
+            timings.append((end - start))
+    return timings
+
+def compute_output(model: ModelProto, input_data: dict[str, np.ndarray]) -> list[np.ndarray]:
+    sess = ort.InferenceSession(model.SerializeToString())
+    # Run the session with the provided input_data.
+    outputs = sess.run(None, input_data)
+    # Return all outputs as a list of NumPy arrays.
+    return outputs
\ No newline at end of file
diff --git a/benchmark/benchmark_torch.py b/benchmark/benchmark_torch.py
new file mode 100644
index 0000000000000000000000000000000000000000..0275daa0152e3125ff9846712f3fee6706f85811
--- /dev/null
+++ b/benchmark/benchmark_torch.py
@@ -0,0 +1,65 @@
+import torch
+import numpy as np
+from onnx import ModelProto
+from onnx2torch import convert
+import time
+
+def measure_inference_time(model_onnx: ModelProto, input_data: dict[str, np.ndarray], nb_warmup: int = 10, nb_iterations: int = 50) -> list[float]:
+    """
+    Convert the provided ONNX model to PyTorch and run it.
+    Performs `nb_warmup` warm-up runs followed by `nb_iterations` timed runs (using CPU process time).
+
+    Args:
+        model_onnx: The ONNX model (ModelProto).
+        input_data: Dictionary mapping all input names to NumPy arrays.
+        nb_warmup: Number of untimed warm-up runs.
+        nb_iterations: Number of timed runs.
+
+    Returns:
+        List of CPU times (in seconds) for the timed runs.
+    """
+    model = convert(model_onnx)
+
+    device = torch.device("cpu")
+    model.to(device)
+    model.eval()
+
+    torch.set_num_threads(1)
+
+    inputs = [torch.tensor(v, device=device) for _, v in input_data.items()]
+    timings = []
+
+    with torch.no_grad():
+        # Warm-up runs followed by timed runs
+        for i in range(nb_warmup + nb_iterations):
+            if i < nb_warmup:
+                model(*inputs)
+            else:
+                start = time.process_time()
+                model(*inputs)
+                end = time.process_time()
+                timings.append(end - start)
+    return timings
+
+def compute_output(model_onnx: ModelProto, input_data: dict[str, np.ndarray]) -> list[np.ndarray]:
+    """
+    Convert the provided ONNX model to PyTorch and run a single inference.
+
+    Args:
+        model_onnx: The ONNX model (ModelProto).
+        input_data: Dictionary mapping all input names to NumPy arrays.
+
+    Returns:
+        A list of output NumPy arrays.
+    """
+    model = convert(model_onnx)
+
+    device = torch.device("cpu")
+    model.to(device)
+    model.eval()
+
+    inputs = [torch.tensor(v, device=device) for _, v in input_data.items()]
+
+    with torch.no_grad():
+        outputs = model(*inputs)
+    # A single-output model returns a Tensor rather than a tuple; normalize to a list.
+    if isinstance(outputs, torch.Tensor):
+        outputs = [outputs]
+    outputs = [o.numpy() if isinstance(o, torch.Tensor) else np.array(o) for o in outputs]
+
+    return outputs
diff --git a/benchmark/generate_graph.py b/benchmark/generate_graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..720755225812348360ae35e6d7fdbd371b702762
--- /dev/null
+++ b/benchmark/generate_graph.py
@@ -0,0 +1,256 @@
+import argparse
+import json
+import sys
+import textwrap
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+import seaborn as sns
+
+# Set a default style
+sns.set_theme(style="ticks", palette="flare")
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description="Compare time performance of operator kernels by plotting relative differences."
+    )
+    parser.add_argument(
+        "--operator-config", "-oc", type=str, default="config.json",
+        help="Path to the configuration JSON file (default: config.json)"
+    )
+    parser.add_argument(
+        "--ref", "-r", type=str, required=True,
+        help="Path to the JSON file with reference results"
+    )
+    parser.add_argument(
+        "--libs", "-l", type=str, nargs='+', required=True,
+        help=("Paths to one or more JSON files with library results. For violin/box mode, "
+              "exactly one file must be provided. For bar mode, multiple files can be provided.")
+    )
+    # parser.add_argument(
+    #     "--plot-type", "-pt", type=str, choices=["violin", "box", "bar"], default="violin",
+    #     help="Type of plot to use: 'violin', 'box', or 'bar' (default: violin)"
+    # )
+    return parser.parse_args()
+
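+# Illustrative invocation (a sketch; file names are hypothetical and correspond to
+# the JSON results written by benchmark.py):
+#   python generate_graph.py -oc conv_config.json -r conv_onnxruntime.json \
+#       -l conv_torch.json conv_aidge_backend_cpu.json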
+
+def load_json(file_path: str):
+    with open(file_path, 'r') as f:
+        return json.load(f)
+
+
+def create_relative_difference_plots(test_parameters: dict, ref_times: dict, ref_library: str,
+                                       plot_type: str, new_times: dict = None, new_library: str = None,
+                                       libraries: list = None):
+    """
+    Creates subplots comparing relative differences.
+
+    For "violin" and "box" modes, `new_times` and `new_library` are used.
+    For "bar" mode, a list of library tuples (library_name, times) in `libraries` is used.
+    In bar mode the reference library (ref_library) is always added as the baseline (ratio = 1).
+    """
+    n_params = len(test_parameters)
+    n_cols = 1
+    if n_params > 1:
+        if n_params == 2 or n_params == 4:
+            n_cols = 2
+        else:
+            n_cols = 3
+
+    n_rows = (n_params + n_cols - 1) // n_cols
+
+    fig, axes = plt.subplots(n_rows, n_cols, figsize=(10 if n_params == 1 else 15, 5 * n_rows))
+    # Ensure axes is always iterable
+    if n_params == 1:
+        axes = [axes]
+    axes_flat = axes.flatten() if n_params > 1 else axes
+
+    for idx, (param_name, param_values) in enumerate(test_parameters.items()):
+        ax = axes_flat[idx]
+        # if plot_type in ["violin", "box"]:
+        #     # Compute relative differences (%) per run
+        #     plot_data = []
+        #     for val in param_values:
+        #         new_arr = np.array(new_times[param_name][str(val)])
+        #         ref_arr = np.array(ref_times[param_name][str(val)])
+        #         rel_diff = (new_arr - ref_arr) / ref_arr * 100
+        #         plot_data.extend([(str(val), diff) for diff in rel_diff])
+        #     df = pd.DataFrame(plot_data, columns=[f'{param_name} Value', 'Relative Difference (%)'])
+        #     # Optionally filter extreme outliers using IQR
+        #     for col in df.select_dtypes(include='number').columns:
+        #         Q1 = df[col].quantile(0.25)
+        #         Q3 = df[col].quantile(0.75)
+        #         IQR = Q3 - Q1
+        #         lower_bound = Q1 - IQR
+        #         upper_bound = Q3 + IQR
+        #         df = df[(df[col] >= lower_bound) & (df[col] <= upper_bound)]
+        #     if plot_type == "violin":
+        #         sns.violinplot(
+        #             data=df,
+        #             x=f'{param_name} Value',
+        #             y='Relative Difference (%)',
+        #             hue=f'{param_name} Value',
+        #             palette='flare',
+        #             inner='quartile',
+        #             ax=ax
+        #         )
+        #     else:  # box plot
+        #         sns.boxplot(
+        #             data=df,
+        #             x=f'{param_name} Value',
+        #             y='Relative Difference (%)',
+        #             hue=f'{param_name} Value',
+        #             palette='flare',
+        #             ax=ax
+        #         )
+        #     if ax.get_legend() is not None:
+        #         ax.legend_.remove()
+        #     ax.grid(True, axis='y', alpha=0.5, color='gray')
+        #     ax.axhline(y=0, color='red', linewidth=1)
+        #     ax.set_ylim(-30, 150)
+        #     stats_text = (f'Mean: {df["Relative Difference (%)"].mean():.2f}%\n'
+        #                   f'Std: {df["Relative Difference (%)"].std():.2f}%')
+        #     ax.text(0.02, 0.98, stats_text, transform=ax.transAxes,
+        #             verticalalignment='top',
+        #             bbox=dict(boxstyle='round', facecolor='white', alpha=0.8))
+        # elif plot_type == "bar":
+            # For bar plots: compute the median time ratio for each library compared to reference
+        data = []
+        for val in param_values:
+            data.append((str(val), ref_library, 1.0))  # Reference baseline (ratio = 1)
+            for lib_name, lib_times in libraries:
+                lib_arr = np.array(lib_times[param_name][str(val)])
+                ref_arr = np.array(ref_times[param_name][str(val)])
+                median_lib = np.median(lib_arr)
+                median_ref = np.median(ref_arr)
+                ratio = median_lib / median_ref if median_ref != 0 else np.nan
+                data.append((str(val), lib_name, ratio))
+        df_bar = pd.DataFrame(data, columns=[f'{param_name}', 'Library', 'Ratio'])
+        sns.barplot(
+            data=df_bar,
+            x=f'{param_name}',
+            y='Ratio',
+            hue='Library',
+            palette='viridis',
+            ax=ax,
+            errorbar=None
+        )
+        ax.grid(True, axis='y', alpha=0.5, color='gray')
+        # Annotate bars with their ratio values
+        for container in ax.containers:
+            labels = [f'{h:.2f}' if h > 1e-6 else '' for h in container.datavalues]
+            ax.bar_label(container, labels=labels, padding=3)
+        ax.set_ylim(0, max(df_bar['Ratio'].max() * 1.1, 1.1))
+        # else:
+        #     ax.text(0.5, 0.5, "Unknown plot type", horizontalalignment='center', verticalalignment='center')
+
+    # Remove any unused subplots
+    for idx in range(len(test_parameters), len(axes_flat)):
+        fig.delaxes(axes_flat[idx])
+    if n_params == 1:
+        plt.tight_layout(rect=[0, 0.05, 1, 0.88])
+    else:
+        plt.tight_layout(rect=[0, 0.05, 1, 0.93])
+
+    # Create a common legend (if any) at the top center
+    common_handles, common_labels = None, None
+    for ax in fig.axes:
+        leg = ax.get_legend()
+        if leg is not None:
+            common_handles, common_labels = ax.get_legend_handles_labels()
+            break
+    if common_handles is not None and common_labels is not None:
+        fig.legend(common_handles, common_labels, loc='upper center', ncol=len(common_labels),
+                   bbox_to_anchor=(0.5, 0.99), title="Library", fontsize=14)
+    # Remove legends from individual subplots
+    for ax in axes_flat:
+        if ax.get_legend() is not None:
+            ax.get_legend().remove()
+    return fig
+
+
+def main():
+    args = parse_args()
+    config = load_json(args.operator_config)
+    ref_results = load_json(args.ref)
+    library_files = args.libs
+
+    operator = config["operator"]
+    test_parameters = config["test_configuration"].get("main_parameters", {})
+
+    # Load reference times and library name from reference JSON
+    ref_times = ref_results.get("time")
+    ref_library = ref_results.get("library", "ref_lib")
+    if ref_times is None:
+        print("Reference JSON does not contain time results.")
+        sys.exit(1)
+
+    # if args.plot_type in ["violin", "box"]:
+    #     if len(library_files) != 1:
+    #         print("Error: For violin/box mode, exactly one library JSON file must be provided.")
+    #         sys.exit(1)
+    #     comp_results = load_json(library_files[0])
+    #     comp_times = comp_results.get("time")
+    #     comp_library = comp_results.get("library", "comp_lib")
+    #     if comp_times is None:
+    #         print("Library JSON does not contain time results.")
+    #         sys.exit(1)
+    #     fig = create_relative_difference_plots(
+    #         test_parameters, ref_times, ref_library,
+    #         plot_type=args.plot_type, new_times=comp_times, new_library=comp_library
+    #     )
+    #     filename = f"{operator}_comp_{comp_library}-vs-{ref_library}.svg"
+    # elif args.plot_type == "bar":
+    libraries = []
+    for lib_file in library_files:
+        lib_result = load_json(lib_file)
+        lib_times = lib_result.get("time")
+        lib_name = lib_result.get("library", "lib")
+        if lib_times is None:
+            print(f"Library JSON {lib_file} does not contain time results. Skipping.")
+            continue
+        libraries.append((lib_name, lib_times))
+    if not libraries:
+        print("No valid library results available for bar plot.")
+        sys.exit(1)
+    fig = create_relative_difference_plots(
+        test_parameters, ref_times, ref_library,
+        plot_type="bar", libraries=libraries
+    )
+    # lib_names = "-".join([name for name, _ in libraries])
+    filename = f"{operator}_inference_time_comparison.svg"
+    # else:
+    #     print("Unsupported plot type.")
+    #     sys.exit(1)
+
+    ##############################
+    # Prepare footer texts
+    footer_title = f'[{operator}] kernel relative inference time comparison'
+    default_config = config.get("base_configuration", {})
+
+    # Wrap the default configuration text to a given width.
+    wrapped_config = textwrap.wrap(f'Base configuration: {default_config}', width=160)
+    n_lines = len(wrapped_config)
+    config_text = "\n".join(wrapped_config)
+
+    # Adjust the figure layout to provide extra space at the bottom.
+    if len(test_parameters) == 1:
+        plt.subplots_adjust(bottom=0.2+0.02*n_lines)
+    else:
+        plt.subplots_adjust(bottom=0.14+0.02*n_lines)
+
+    # Add the footer title (bottom center).
+    fig.text(0.5, 0.035+n_lines*0.025, footer_title, ha='center', va='bottom', fontsize=18)
+
+    # Add the default configuration text just below the title.
+    fig.text(0.5, 0.02, config_text, ha='center', va='bottom', fontsize=12)
+
+    ############################
+    # Save the figure
+    plt.savefig(filename)
+    print(f"Plot saved as {filename}")
+
+
+if __name__ == "__main__":
+    main()