SegFault on set_backend from scheduler_export.py
What commit version of aidge do you use
- aidge_core: cda46ffd
- aidge_quantization: from @noamzerah's fork, to have the pybind of Quantizer
Problem description
In the context of exporting a simple network made with aidge, containing only one Conv and the new Quantizer operator, I fuse them into a meta-operator and export the model with aidge_export_arm_cortexm.
I get a SIGSEGV on set_backend, at node.get_operator().set_backend(export_lib._name) in scheduler_export.py.
As you can see in the backtrace, the tensor does not exist (Tensor::setBackend is called with this=0x0). To avoid getting a SIGSEGV without any hint, I'm going to open an MR to add checks; a sketch of the kind of guard I have in mind is given after the backtrace.
Node(name='CSgo', optype='ConvScaling', parents: [0, 1, 1, 0, 0], children: [[]])
Thread 1 "python" received signal SIGSEGV, Segmentation fault.
0x00007ffff6846a2e in std::__shared_ptr<Aidge::TensorImpl, (__gnu_cxx::_Lock_policy)2>::operator bool (this=0x60) at /usr/include/c++/11/bits/shared_ptr_base.h:1300
1300 { return _M_ptr != nullptr; }
(gdb) bt
#0 0x00007ffff6846a2e in std::__shared_ptr<Aidge::TensorImpl, (__gnu_cxx::_Lock_policy)2>::operator bool (this=0x60) at /usr/include/c++/11/bits/shared_ptr_base.h:1300
#1 0x00007ffff6ca35b8 in Aidge::Tensor::setBackend (this=0x0, name="export_cmsisnn", device=0 '\000', copyFrom=true)
at /local2/is148265/wb274724/STM32_dev/dev/superpoint/aidge/aidge/aidge_core/src/data/Tensor.cpp:230
#2 0x00007ffff6e49367 in Aidge::MetaOperator_Op::setBackend (this=0x555555f92550, name="export_cmsisnn", device=0 '\000')
at /local2/is148265/wb274724/STM32_dev/dev/superpoint/aidge/aidge/aidge_core/src/operator/MetaOperator.cpp:100
#3 0x00007ffff6b04a11 in pybind11::cpp_function::cpp_function<void, Aidge::Operator, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, unsigned char, pybind11::name, pybind11::is_method, pybind11::sibling, pybind11::arg, pybind11::arg_v>(void (Aidge::Operator::*)(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, unsigned char), pybind11::name const&, pybind11::is_method const&, pybind11::sibling const&, pybind11::arg const&, pybind11::arg_v const&)::{lambda(Aidge::Operator*, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, unsigned char)#1}::operator()(Aidge::Operator*, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, unsigned char) const (__closure=0x555555c4d458, c=0x555555f92550, args#0="export_cmsisnn", args#1=0 '\000')
at /local2/is148265/wb274724/STM32_dev/dev/superpoint/aidge/aidge/aidge_core/build/_deps/pybind11-src/include/pybind11/pybind11.h:110
#4 0x00007ffff6b0f75d in pybind11::detail::argument_loader<Aidge::Operator*, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, unsigned char>::call_impl<void, pybind11::cpp_function::cpp_function<void, Aidge::Operator, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, unsigned char, pybind11::name, pybind11::is_method, pybind11::sibling, pybind11::arg, pybind11::arg_v>(void (Aidge::Operator::*)(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, unsigned char), pybind11::name const&, pybind11::is_method const&, pybind11::sibling const&, pybind11::arg const&, pybind11::arg_v const&)::{lambda(Aidge::Operator*, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator... [backtrace truncated]
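For reference, this is roughly the kind of guard I mean, sketched at the Python level around the failing call in scheduler_export.py (the actual MR will most likely add the assertion on the C++ side, in MetaOperator_Op::setBackend / Tensor::setBackend). It assumes that get_input() returns None for an unset input tensor and that nb_inputs() is exposed by the bindings; treat it as a sketch, not the final patch:

# Hypothetical guard before the failing call in scheduler_export.py:
#   node.get_operator().set_backend(export_lib._name)
# Assumption: an unconnected/unset input shows up as None through get_input().
op = node.get_operator()
for idx in range(op.nb_inputs()):
    if op.get_input(idx) is None:
        raise RuntimeError(
            f"{node.type()} node '{node.name()}': input #{idx} has no Tensor "
            "(no Producer connected and no value set), set_backend() would segfault."
        )
op.set_backend(export_lib._name)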
Reproducible example code
import aidge_core
import aidge_backend_cpu
import aidge_export_arm_cortexm
import aidge_export_cpp
import aidge_onnx
import aidge_quantization
import numpy as np
np.random.seed(123)
input_dims = [1,2,5,5]
model = aidge_core.sequential([
    aidge_core.Conv2D(in_channels=2, out_channels=2, kernel_dims=[1, 1], name='conv',
                      stride_dims=[1, 1], dilation_dims=[1, 1], no_bias=False),
    aidge_quantization.Quantizer(scalingFactor=0.005, clip_min=-128.0, clip_max=127.0, name="scale")
])
# Init Producers with random int8 values
for n in model.get_nodes():
    if n.type() == "Producer":
        dims = n.get_operator().get_output(0).dims()
        array = np.random.randint(-128, 127, size=dims, dtype=np.int8)
        n.get_operator().set_output(0, aidge_core.Tensor(array, backend="cpu"))

model.set_datatype(aidge_core.dtype.float32)
model.set_backend("cpu")
def propagate(model, scheduler, tensor):
    # Setup the input
    input_tensor = aidge_core.Tensor(tensor)
    # Tensor backend must be set again ...
    input_tensor.set_backend("cpu")
    input_tensor.set_datatype(aidge_core.dtype.float32)
    # Run the inference
    scheduler.forward(True, [input_tensor])
    # Gather the results
    output_node = model.get_output_nodes().pop()
    output_tensor = output_node.get_operator().get_output(0)
    return np.array(output_tensor)
scheduler = aidge_core.SequentialScheduler(model)
input_array = np.random.randint(-128, 127, size=input_dims, dtype=np.int8)
output_array = propagate(model, scheduler, input_array)
print(output_array)
model.save("PreFuse")
print("Fuse ConvScaling")
aidge_core.fuse_to_metaops(model, "Conv2D->Quantizer", "ConvScaling")
model.compile('cpu', aidge_core.dtype.float32, dims=[input_dims])
model.save("PostFuse")
scheduler = aidge_core.SequentialScheduler(model)
scheduler.generate_scheduling()
print("Generate Scheduling ok ")
for node in model.get_nodes():
    if node.type() != "Producer":
        if node.type() in ["ConvScaling", "FcScaling"]:
            node.set_name("CSgo")
            node.get_operator().get_input(0).set_datatype(aidge_core.dtype.int8)
            node.get_operator().get_input(1).set_datatype(aidge_core.dtype.int8)
            node.get_operator().get_input(2).set_datatype(aidge_core.dtype.int32)
            node.get_operator().get_output(0).set_datatype(aidge_core.dtype.int8)
        else:
            node.get_operator().set_datatype(aidge_core.dtype.int8)
aidge_export_arm_cortexm.export("ConvScalingExport", model, scheduler, lib="cmsis_nn")