Commit 1a4bf629 authored by Maxence Naud

Remove merge artifacts

parent c5e15c4f
3 merge requests: !27 v0.2.0, !22 v0.4.0, !15 Export refactor
Pipeline #57698 passed
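For context: an unresolved merge leaves three marker lines in a file. Everything between `<<<<<<< HEAD` and `=======` is the local side; everything between `=======` and `>>>>>>> origin/dev` is the incoming side. The resolution here keeps the HEAD side (the attribute-based ExportNodeCpp classes) and deletes the stale origin/dev side wholesale. Leftover markers are easy to catch mechanically (`git diff --check` flags them); below is a minimal standalone scanner as a sketch — the helper is hypothetical and not part of this repository:

```python
import re
import sys
from pathlib import Path

# The three line patterns Git writes into a conflicted file. Any of them
# surviving in a committed file is a merge artifact like the one removed here.
CONFLICT_MARKER = re.compile(r"^(<{7} |={7}$|>{7} )")

def find_conflict_markers(root: str) -> int:
    """Print every conflict-marker line under `root` and return the count."""
    hits = 0
    for path in Path(root).rglob("*.py"):
        for lineno, line in enumerate(path.read_text().splitlines(), start=1):
            if CONFLICT_MARKER.match(line):
                print(f"{path}:{lineno}: {line.rstrip()}")
                hits += 1
    return hits

if __name__ == "__main__":
    sys.exit(1 if find_conflict_markers(sys.argv[1] if len(sys.argv) > 1 else ".") else 0)
```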
@@ -192,7 +192,6 @@ class MaxPoolCPP(ExportNodeCpp):
         # No padding with MaxPooling
         # Use PaddedMaxPooling to add padding attribute
-<<<<<<< HEAD
         self.attributes["padding"] = [0, 0]
         self.attributes["pool_type"] = "Max"
         self.attributes["activation"] = "Linear"
@@ -281,175 +280,3 @@ class FcCPP(ExportNodeCpp):
             str(ROOT / "kernels" / "activation.hpp"),
             str(ROOT / "kernels" / "rescaling.hpp")
         ]
-=======
-        self.padding = [0, 0]
-
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        if len(self.outputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.outputs_dims[0] = self.outputs_dims[0][1:]
-
-    def export(self, export_folder:Path, list_configs:list):
-        copyfile(str(ROOT / "kernels" / "pooling.hpp"),
-                 str(export_folder / "include" / "kernels"))
-
-        list_configs.append("kernels/pooling.hpp")
-        list_configs.append(f"layers/{self.name}.h")
-
-        generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
-            str(ROOT / "templates" / "configuration" / "pooling_config.jinja"),
-            name=self.name,
-            input_dims=self.inputs_dims[0],
-            output_dims=self.outputs_dims[0],
-            kernel=self.kernel,
-            stride=self.stride,
-            padding=self.padding,
-            pool_type="Max",
-            activation="Linear")
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-        list_actions.append(generate_str(
-            str(ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja"),
-            name=self.name,
-            input_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input",
-            output_name=self.name
-        ))
-        return list_actions
@operator_register("FC")
class FcCPP(ExportNode):
def __init__(self, node):
super().__init__(node)
if len(self.inputs_dims[0]) == 4:
# if dims == [batch, nb_channels, height, width]
# transform to [nb_channels, height, width]
self.inputs_dims[0] = self.inputs_dims[0][1:]
elif len(self.inputs_dims[0]) == 2:
# if dims == [batch, nb_channels]
# transform to [nb_channels, 1, 1]
self.inputs_dims[0] = [self.inputs_dims[0][1], 1, 1]
if len(self.outputs_dims[0]) == 2:
# if dims == [batch, nb_outputs]
# transform to [nb_outputs, 1, 1]
self.outputs_dims[0] = [self.outputs_dims[0][1], 1, 1]
def export(self, export_folder:Path, list_configs:list):
copyfile(str(ROOT / "kernels" / "fullyconnected.hpp"),
str(export_folder / "include" / "kernels"))
copyfile(str(ROOT / "kernels" / "macs.hpp"),
str(export_folder / "include" / "kernels"))
copyfile(str(ROOT / "kernels" / "activation.hpp"),
str(export_folder / "include" / "kernels"))
# Add to config list the include of configurations
list_configs.append("kernels/fullyconnected.hpp")
list_configs.append(f"layers/{self.name}.h")
# Export configuration file
generate_file(
str(export_folder / "layers" / f"{self.name}.h"),
str(ROOT / "templates" / "configuration" / "fullyconnected_config.jinja"),
name=self.name,
input_dims=self.inputs_dims[0],
output_dims=self.outputs_dims[0],
activation="Linear",
rescaling="NoScaling")
return list_configs
def forward(self, list_actions:list):
if not self.is_last:
list_actions.append(set_up_output(self.name, "float"))
list_actions.append(generate_str(
str(ROOT / "templates" / "kernel_forward" / "fullyconnected_forward.jinja"),
name=self.name,
inputs_name= self.inputs[0].name() if (self.inputs[0] is not None) else self.name + '_input',
weights_name=self.inputs[1].name(),
biases_name=self.inputs[2].name(),
outputs_name=self.name
))
return list_actions
@operator_register("MatMul")
class MatMulCPP(ExportNode):
def __init__(self, node):
super().__init__(node)
dims0, dims1, outdims = [tuple(x) for x in [self.inputs_dims[0], self.inputs_dims[1], self.outputs_dims[0]]]
# TODO: MatMul aidge operator supports N-D multi broadcast dimensions where N > 2
assert len(dims0) <= 2 and len(dims1) <= 2, (
f"MatMul export do not support yet dimensions above 2D: inputs shapes are: {dims0}, {dims1}")
# Cast to at least 1D
# Note that from MatMul::forwardDims(), scalar inputs are supported
# which is actually more general than np.matmul
dims0 = dims0 if len(dims0) >= 1 else (1, 1)
dims1 = dims1 if len(dims1) >= 1 else (1, 1)
# Cast to at least 2D
dims0 = dims0 if len(dims0) >= 2 else (1, dims0[0])
dims1 = dims1 if len(dims1) >= 2 else (dims1[0], 1)
assert dims0[1] == dims1[0], (
f"MatMul input dimensions do no match, expected (m, k), (k, n): inputs shapes are: {dims0}, {dims1}")
outdims = outdims if len(outdims) > 0 else (1, 1)
assert outdims == (dims0[0], dims1[1]), (
f"MatMul output dimensions do no match, expected (m, n) for inputs (m, k) (k, n): output shape is: {outdims}, inputs shapes are: {dims0}, {dims1}")
self.matmul_inputs_dims = dims0, dims1
self.matmul_output_dims = outdims
def export(self, export_folder:Path, list_configs:list):
copyfile(str(ROOT / "kernels" / "matmul.hpp"),
str(export_folder / "include" / "kernels"))
copyfile(str(ROOT / "kernels" / "activation.hpp"),
str(export_folder / "include" / "kernels"))
# Add to config list the include of configurations
list_configs.append("kernels/matmul.hpp")
list_configs.append(f"layers/{self.name}.h")
# Export configuration file
generate_file(
str(export_folder / "layers" / f"{self.name}.h"),
str(ROOT / "templates" / "configuration" / "matmul_config.jinja"),
name=self.name,
inputs_dims=self.matmul_inputs_dims,
output_dims=self.matmul_output_dims,
activation="Linear",
rescaling="NoScaling",
)
return list_configs
def forward(self, list_actions:list):
if not self.is_last:
list_actions.append(set_up_output(self.name, "float"))
list_actions.append(generate_str(
str(ROOT / "templates" / "kernel_forward" / "matmul_forward.jinja"),
name=self.name,
inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
outputs_name=self.name
))
return list_actions
>>>>>>> origin/dev