Commit 96195252 authored by Axel Farrugia

[Chore] Clean unused code

parent ea5961d0
@@ -149,7 +149,10 @@ FOLD_GRAPH = True
 DEV_MODE = args.dev
 AIDGE_CMP = args.aidge_cmp
-IMAGENET_PATH = "/database2/ILSVRC2012/val" # Search for ILSVRC2012
+### Add your paths here ###
+IMAGENET_PATH = "/database/ILSVRC2012/val" # Look for ILSVRC2012/val
+VAL_PATH = "/database/ILSVRC2012/val.txt"  # File containing labels of image of val folder (Look for val.txt)
+###########################
 def print_cfg():
     print('\n RNG_SEED = ', RNG_SEED)
@@ -164,7 +167,7 @@ def print_cfg():
     print(' TARGET_TYPE = ', TARGET_TYPE)
     print(' FOLD_GRAPH = ', FOLD_GRAPH)
     print(' USE_CUDA = ', USE_CUDA)
     print(' DEV_MODE = ', DEV_MODE)
     print(' ROUNDING = ', ROUNDING)
 print_cfg()
@@ -175,8 +178,6 @@ np.random.seed(RNG_SEED)
 backend = "cuda" if USE_CUDA else "cpu"
-VAL_PATH = "/database2/ILSVRC2012/val.txt" # File containing labels of image of val folder
 image_label_pairs = []
 with open(VAL_PATH, 'r') as f:
     for line in f:
@@ -185,14 +186,10 @@ with open(VAL_PATH, 'r') as f:
         image_name, label = parts
         image_label_pairs.append((image_name, int(label)))
-#random.shuffle(image_label_pairs)
 np.random.seed(RNG_SEED)
-#image_label_pairs = np.random.permutation(image_label_pairs).tolist()
-NB_SELECT = max(NB_TEST, NB_CALIB) # Vérifie que NB_TEST et NB_CALIB sont fixés
+NB_SELECT = max(NB_TEST, NB_CALIB) # Check that NB_TEST and NB_CALIB are fixed
 selected_pairs = image_label_pairs[:NB_SELECT]
-#selected_pairs = image_label_pairs[:max(NB_TEST, NB_CALIB)]
 # --------------------------------------------------------------
 # CREATE THE SAMPLES
 # --------------------------------------------------------------
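Note for anyone filling in the "Add your paths here" block introduced above: the surrounding hunks imply that val.txt maps each validation image to an integer class label, one pair per line, and that the script keeps only as many pairs as the larger of the test and calibration budgets. Below is a minimal standalone sketch of that selection step; the exact line-splitting code and the NB_TEST / NB_CALIB values are not shown in the diff, so they are assumptions here.

    NB_TEST, NB_CALIB = 100, 100                 # illustrative values; the real script sets these elsewhere
    VAL_PATH = "/database/ILSVRC2012/val.txt"    # one "image_name label" pair per line

    image_label_pairs = []
    with open(VAL_PATH, "r") as f:
        for line in f:
            parts = line.strip().split()         # assumed format, e.g. "ILSVRC2012_val_00000001.JPEG 65"
            if len(parts) == 2:
                image_name, label = parts
                image_label_pairs.append((image_name, int(label)))

    NB_SELECT = max(NB_TEST, NB_CALIB)           # enough images for both the test and calibration sets
    selected_pairs = image_label_pairs[:NB_SELECT]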
@@ -222,10 +219,6 @@ for image_name, label in selected_pairs:
     except Exception as e:
         print(f"Error with image {image_path}: {e}")
-#print(f"Number of loaded tensors: {len(tensors)}")
-#for lbl, img_path in zip(labels, paths):
-#    print(f"Label: {lbl} -> Image Path: {img_path}")
 backend = "cuda" if USE_CUDA else "cpu"
 aidge_tensors = []
 for tensor in tensors:
@@ -343,13 +336,6 @@ Each time the graph has been change, it has to be reset.
 Here some Quantizer and Cast nodes have been added.
 """
-""" [START Fix]
-We need first to manually add an input tensor with the correct datatype,
-as it is not automatically done in PTQ.
-"""
-# input_node = model.get_ordered_inputs()[0]
-# input_node[0].get_operator().set_input(0,aidge_tensors[0])
-""" [END Fix]"""
 if quantize_model:
     scheduler.reset_scheduling()
@@ -357,17 +343,6 @@ if quantize_model:
 # PERFORM THE EXAMPLE INFERENCES AGAIN
 # --------------------------------------------------------------
-#for node in model.get_input_nodes():
-#    if node.type() == "Pad2D":
-#        node.set_name("Pad2D_input")
-#
-#for node in model.get_nodes():
-#    if (node.type() == "Conv2D"):
-#        if node.get_parent(0).name() == "Pad2D_input":
-#            node.set_name("Conv2D_input")
 model.save("post_ptq")
 if (DO_EXAMPLES and quantize_model):
@@ -385,11 +360,6 @@ if (DO_EXAMPLES and quantize_model):
     print('\n MODEL ACCURACY = ', accuracy * 100, '%')
     print('\n QUANTIZED ACCURACY = ', quant_accuracy * 100, '%')
-print("post ptq")
-# output_array = propagate(model, scheduler, aidge_tensors[0])
-#model.log_outputs("log_outputs_post_ptq")
 if USE_CUDA:
     model.set_backend("cpu")
     for aidge_tensor in aidge_tensors:
@@ -531,19 +501,13 @@ for node in model.get_nodes():
 # EXPORT THE MODEL
 # --------------------------------------------------------------
 model.save("exported_model")
 inputs_tensor = aidge_core.Tensor(np.array(aidge_tensors[0]))
-# print(np.array(inputs_tensor)[0])
-inputs_tensor.set_data_format(aidge_core.dformat.nchw)
-inputs_tensor.set_data_format(aidge_core.dformat.nhwc)
+inputs_tensor.set_data_format(aidge_core.dformat.nchw) # Init the dataformat (default -> nchw)
+inputs_tensor.set_data_format(aidge_core.dformat.nhwc) # Transpose the data (nchw -> nhwc)
 if args.dtype == "int8":
     inputs_tensor.set_datatype(aidge_core.dtype.int8)
-#print(np.array(inputs_tensor)[0,:,:,:])
-#inputs_tensor.cpy_transpose(inputs_tensor, aidge_core.get_permutation_mapping(aidge_core.dformat.nchw, aidge_core.dformat.nhwc))
-# print(np.array(inputs_tensor)[0])
 aidge_export_cpp.export(EXPORT_FOLDER,
                         model,
                         scheduler,
@@ -551,37 +515,3 @@ aidge_export_cpp.export(EXPORT_FOLDER,
                         inputs_tensor=inputs_tensor,
                         dev_mode = DEV_MODE,
                         aidge_cmp = AIDGE_CMP)
-#
-## --------------------------------------------------------------
-## GENERATE LABELS AND INPUTS FOR EXAMPLE INFERENCE
-## --------------------------------------------------------------
-#
-#input_label = np.array(labels).astype(np.int32).reshape(len(labels), 1)
-#generate_input_file(export_folder=EXPORT_FOLDER + "/data",
-#                    array_name="labels",
-#                    tensor=aidge_core.Tensor(input_label))
-#
-#input_tensor = np.array(aidge_tensors[0:NB_TEST]).astype(np.int8).reshape(NB_TEST, 3, 224, 224)
-#generate_input_file(export_folder=EXPORT_FOLDER + "/data",
-#                    array_name="inputs",
-#                    tensor=aidge_core.Tensor(input_tensor))
-#
-#
-#if TEST_MODE:
-#    input_tensor = aidge_core.Tensor(input_tensor)
-#    input_tensor.set_data_format(aidge_core.dformat.nchw)
-#    input_tensor.cpy_transpose(input_tensor, aidge_core.get_permutation_mapping(aidge_core.dformat.nchw, aidge_core.dformat.nhwc))
-#    generate_input_file(export_folder=EXPORT_FOLDER + "/data",
-#                        array_name="inputs_ref",
-#                        tensor=input_tensor)
-#
-## --------------------------------------------------------------
-## GENERATE DOCUMENTATION
-## --------------------------------------------------------------
-#
-#"""
-#Copy the corresponding README file into the generated export.
-#"""
-#
-#generate_documentation(EXPORT_FOLDER, TEST_MODE)
-#
\ No newline at end of file
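A side note on the two set_data_format calls annotated in the export hunk: according to the new comments, the first call only declares the tensor's current layout (NCHW, the default), while the second call performs the actual NCHW -> NHWC transposition expected by the generated C++ export. In plain NumPy terms the layout change amounts to the following sketch; the shape and variable names are illustrative, not taken from the script.

    import numpy as np

    x_nchw = np.zeros((1, 3, 224, 224), dtype=np.int8)   # batch, channels, height, width
    x_nhwc = np.transpose(x_nchw, (0, 2, 3, 1))          # batch, height, width, channels
    assert x_nhwc.shape == (1, 224, 224, 3)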