Cannot generate scheduler after quantization
Required prerequisites
aidge_quantization

What commit version of aidge do you use?
- aidge_core: 1e21aa51
- aidge_quantization: b164c89a843fac5897f14a6e6bf59d15fe539a06 (no CUDA version for aidge_quantization)
Problem description
When I fuse Conv and Scaling, the scheduler cannot compute the dims of the new meta-operator. Before that, I also get an error telling me that the weight input isn't connected, even though when I save my model as .mmd I can see that an input is connected to my Conv.
aidge_core.fuse_to_metaops(model, "Conv2D->Scaling", "Cmsis_Nn_Conv")
model.save("model_af_fuse")
scheduler = aidge_core.SequentialScheduler(model)
scheduler.generate_scheduling()
Replaced 0 (out of 0) matching sub-graph with meta operators
Conv_Op::setBackend(): could not set backend for weight input, because input is not connected
Replaced 1 (out of 1) matching sub-graph with meta operators
Assertion failed: requiredSize.type == Elts_t::Data in /local2/is148265/wb274724/STM32_dev/dev/superpoint/aidge/aidge/aidge_core/src/scheduler/Scheduler.cpp:442
Cannot generate memory with token-based producer-consumer model for node (of type Cmsis_Nn_Conv).
Traceback (most recent call last):
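For reference, the sequence below is what I would have expected to need if the meta-operator simply lacks propagated dims: recompiling the graph right after the fusion (with the same model.compile call as in the reproducer below) before building the scheduler. This is only a guess on my part, not something I have confirmed avoids the assertion.

# Untested guess: recompile after the fusion so the new Cmsis_Nn_Conv
# meta-operator gets its backend and dims before scheduling.
aidge_core.fuse_to_metaops(model, "Conv2D->Scaling", "Cmsis_Nn_Conv")
model.compile("cpu", aidge_core.dtype.float32, dims=[[1, 64, 32, 32]])
scheduler = aidge_core.SequentialScheduler(model)
scheduler.generate_scheduling()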
Reproducible example code
import numpy as np

import aidge_core
import aidge_quantization
import aidge_export_cpp
import aidge_export_arm_cortexm

np.random.seed(123)
##########################################################
# Create the model, which contains only a single convolution
##########################################################
model = aidge_core.sequential([
    aidge_core.Conv2D(in_channels=64, out_channels=10, kernel_dims=[3, 3], name='Conv')
], name="aha")
model.set_datatype(aidge_core.dtype.float32)
# Seeded random parameters for the weights and bias
for node in model.get_nodes():
    if node.type() == "Producer":
        prod_op = node.get_operator()
        value = prod_op.get_output(0)
        tuple_out = node.output(0)[0]
        # No conv in current network
        if tuple_out[0].type() == "Conv" and tuple_out[1] == 1:
            # Conv weight
            aidge_core.xavier_uniform_filler(value)
        elif tuple_out[0].type() == "Conv" and tuple_out[1] == 2:
            # Conv bias
            aidge_core.constant_filler(value, 0.01)
scheduler = aidge_core.SequentialScheduler(model)
# Configuration for the model + forward dimensions
dims = [1,64,32,32]
input_tab = np.random.uniform(low=0.0, high=1.0, size=dims)
aidge_export_cpp.generate_input_file(array_name="simple_conv_export", array=input_tab.reshape(-1), export_folder="simple_conv_export")
aidge_core.remove_flatten(model)
aidge_core.fuse_batchnorm(model)
# input_node = aidge_core.Producer(dims, "input")
# input_node.add_child(model)
# model.add(input_node)
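# compile() sets the backend and datatype and forwards the dims through the graph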
model.compile("cpu", aidge_core.dtype.float32,dims=[dims])
scheduler = aidge_core.SequentialScheduler(model)
##########################################################
# Quantization with random samples
##########################################################
def propagate(model, scheduler, sample):
    # Setup the input
    sample = np.reshape(sample, dims)
    input_tensor = aidge_core.Tensor(sample)
    input_tensor.set_backend("cpu")
    input_tensor.set_datatype(aidge_core.dtype.float32)
    # input_tensor.get_operator().set_output(0, input_tensor)
    # Run the inference
    scheduler.forward(True, [input_tensor])
    # Gather the results
    output_node = model.get_output_nodes().pop()
    output_tensor = output_node.get_operator().get_output(0)
    return np.array(output_tensor)
# Create Samples
samples = []
for _ in range(10):
    samples.append(np.random.uniform(low=0.0, high=1.0, size=dims))
for i in range(10):
    output_array = propagate(model, scheduler, samples[i])
print('Propagate ok')
NB_SAMPLES = 10
NB_BITS = 8
tensors = []
for sample in samples[0:NB_SAMPLES]:
    tensor = aidge_core.Tensor(sample)
    tensors.append(tensor)
aidge_quantization.quantize_network(model, NB_BITS, tensors, use_cuda = False, apply_rounding = True)
scheduler = aidge_core.SequentialScheduler(model)
model.save("model_af_quant")
print("PTQ done")
##########################################################
# Change dtype
##########################################################
aidge_core.expand_metaops(model)
aidge_core.fuse_to_metaops(model, "FC->Scaling->ReLU?", "Cmsis_Nn_FC")
aidge_core.fuse_to_metaops(model, "Conv2D->Scaling", "Cmsis_Nn_Conv")
model.save("model_af_fuse")
scheduler = aidge_core.SequentialScheduler(model)
scheduler.generate_scheduling()
aidge_export_arm_cortexm.export("simple_conv_export", model, scheduler, board="stm32h7")