The exporter does not seem to broadcast values for the Add node

What commit version of aidge do you use?

  • aidge_core: 0.6.2
  • aidge_export_cpp: 0.3.1

Problem description

I have a test that adds two tensors: A of shape [1] and B of shape [2]. The CPU backend supports this operation, apparently broadcasting the value of A. The CPP export generates code, but it does not appear to perform any broadcasting.

As with the CPU backend, broadcasting should occur.
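For reference, the expected result follows standard NumPy-style broadcasting. A minimal sketch of the reference arithmetic (plain NumPy, for illustration only, not Aidge code):

import numpy as np

# A has shape [1], B has shape [2]; A is broadcast over B.
a = np.arange(1, dtype=np.float32)  # A = [0.]
b = np.arange(2, dtype=np.float32)  # B = [0., 1.]
print(a + b)  # [0. 1.] -- matches the "Expected" column in the log below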

These are the logs produced by a comparison generated with aidge_core.export_utils.generate_main_compare_cpp(...):

Add_output_0:
---------------------------------------------------------------------------------
|	Idx	|	Expected	|	Predicted	|	ERROR ?	|
|-------------------------------------------------------------------------------|
|	    0	|	0.000000	|	 0.000000	|		|
|	    1	|	1.000000	|	 0.000000	|	X	|


---------------------------------
|		|	TOTAL	|
|-------------------------------|
|	CORRECT	|	    1	|
|	ERROR	|	    1	|
---------------------------------

Reproducible example code

import pathlib
import shutil
import subprocess

import aidge_backend_cpu  # noqa: F401
import aidge_core as ai
import aidge_export_cpp
import numpy as np

if __name__ == "__main__":
    add_layer = ai.Add("Add")
    model = ai.sequential([add_layer])

    # Set backend and datatype
    model.set_backend("cpu")
    model.set_datatype(ai.dtype.float32)

    ### GENERATING SCHEDULING
    scheduler = ai.SequentialScheduler(model)

    ### REFERENCE INFERENCE
    lhs = ai.Tensor(np.arange(1, dtype=np.float32))  # tensor A, shape [1]
    lhs.set_backend("cpu")
    rhs = ai.Tensor(np.arange(2, dtype=np.float32))  # tensor B, shape [2]
    rhs.set_backend("cpu")
    scheduler.forward(
        data=[
            lhs, rhs,
        ],
    )

    ### LOG OUTPUTS AND SCHEDULING
    model.log_outputs("aidge_cpp_results")

    tmp = pathlib.Path().absolute() / "export_acetone"
    # run_exporter_comparison(scheduler, (lhs, rhs,), str(tmp), exporter=aidge_export_cpp.ExportLibCpp)
    exporter = aidge_export_cpp.ExportLibCpp
    tensors = [lhs, rhs]

    ai.export_utils.scheduler_export(
        scheduler,
        tmp,
        exporter,
        memory_manager=ai.mem_info.compute_default_mem_info,
    )
    for ii, tensor in enumerate(tensors):
        ai.export_utils.generate_input_file(
            export_folder=tmp,
            array_name=f"InputNode_input_{ii}",
            tensor=tensor,
        )
    ai.export_utils.generate_main_compare_cpp(
        export_folder=tmp,
        graph_view=scheduler.graph_view(),
    )
    # Compile and run comparison
    make_path = shutil.which("make")
    assert make_path is not None
    subprocess.run([make_path], cwd=tmp, check=True)
    r = subprocess.run(
        [tmp / "bin" / "run_export"],
        cwd=tmp,
        check=False,
        capture_output=True,
        text=True,
    )
    print(r.stdout)
    assert r.returncode == 0, f"Comparison failed between reference and exporter '{exporter}'"