
feat/release_pip

Closed · Grégoire Kubler requested to merge feat/release_pip into dev
1 unresolved thread
Files: 8
@@ -118,13 +118,13 @@ class AidgeModule(torch.nn.Module):
         # TODO: add a system to avoid creating a new node everytime
-        if self.input_nodes[0] == None:
+        if self.input_nodes[0] is None:
             self.input_nodes[0] = aidge_core.Producer(
                 aidge_tensor, "Input_0")
             # TODO: get datatype & backend from graph view
             self.input_nodes[0].get_operator().set_datatype(
-                aidge_core.DataType.Float32)
+                aidge_core.dtype.float32)
             self.input_nodes[0].get_operator().set_backend("cpu")
             self.input_nodes[0].add_child(self._graph_view)
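For context on the dtype change above: the enum was renamed from aidge_core.DataType.Float32 to aidge_core.dtype.float32. A minimal sketch of the updated producer setup, using only identifiers that appear in the diff (aidge_tensor is assumed to be an existing aidge_core.Tensor):

    import aidge_core

    # Wrap an existing tensor in a Producer node named "Input_0"
    # (aidge_tensor is an assumed, pre-built input tensor).
    input_node = aidge_core.Producer(aidge_tensor, "Input_0")
    # Configure the operator with the renamed lowercase dtype enum
    # and the CPU backend, as the patched code does.
    input_node.get_operator().set_datatype(aidge_core.dtype.float32)
    input_node.get_operator().set_backend("cpu")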
@@ -151,7 +151,7 @@
     @staticmethod
     def backward(ctx, grad_output):
         if not self.grad_compiled: aidge_core.compile_gradient(self._graph_view)
    • @cmoineau

      Running the tests locally to try to solve these issues returns an error on this line: aidge_core doesn't seem to have any compile_gradient function; I looked for it in the whole codebase but cannot find it anywhere.

      • Since aidge_core@ecc96977, grad is lazily initialized, thus compile_gradient and init_grad have been removed.

      • I removed the call to compile_gradient.
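        For reference, a minimal sketch of what the patched backward looks like once the removed call is dropped; the body is elided and this is illustrative rather than the exact MR code:

            @staticmethod
            def backward(ctx, grad_output):
                # The old guard
                #     if not self.grad_compiled: aidge_core.compile_gradient(self._graph_view)
                # is gone: as of aidge_core@ecc96977 gradients are lazily
                # initialized, so no explicit compilation step is needed here.
                ...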

      • Ok, now the real issue :/

        https://gitlab.eclipse.org/eclipse/aidge/aidge_interop_torch/-/jobs/406724#L968

        It seems that the imported model contains 3 input nodes instead of 1:

        $ p aidge_model.get_input_nodes()
        {Node(name='input_24', optype='Conv', parents: [1, 1, 0], children: 1), Node(name='input_12', optype='Conv', parents: [1, 1, 0], children: 1), Node(name='input', optype='Conv', parents: [0, 1, 0], children: 1)}
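        For anyone reproducing this check, a hedged sketch of the round-trip inspection (aidge_onnx.load_onnx and the file name are assumptions on my part):

            import aidge_onnx

            # Re-import the ONNX file produced by the export step (assumed path).
            aidge_model = aidge_onnx.load_onnx("model.onnx")

            # Lists three Conv nodes as graph inputs instead of the single expected one.
            for node in aidge_model.get_input_nodes():
                print(node.name(), node.type())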

        Here is the model after being exported to ONNX and re-imported into Aidge. We can clearly see that the export/import phase added 3 inputs & 2 outputs, which is not normal.

        %%{init: {'flowchart': { 'curve': 'monotoneY'}, 'fontFamily': 'Verdana' } }%%
        flowchart TB
        
        FC_1("data_37\n<sub><em>(FC#1)</em></sub>")
        Producer_10("layer_15_bias\n<sub><em>(Producer#10)</em></sub>"):::producerCls
        ReLU_0("Relu_1\n<sub><em>(ReLU#0)</em></sub>")
        Producer_8("layer_13_bias\n<sub><em>(Producer#8)</em></sub>"):::producerCls
        Producer_9("layer_15_weight\n<sub><em>(Producer#9)</em></sub>"):::producerCls
        Producer_6("layer_10_weight\n<sub><em>(Producer#6)</em></sub>"):::producerCls
        Conv_2("input_24\n<sub><em>(Conv#2)</em></sub>")
        Producer_2("layer_2_bias\n<sub><em>(Producer#2)</em></sub>"):::producerCls
        Producer_7("layer_13_weight\n<sub><em>(Producer#7)</em></sub>"):::producerCls
        Producer_3("layer_5_weight\n<sub><em>(Producer#3)</em></sub>"):::producerCls
        Producer_0("layer_0_weight\n<sub><em>(Producer#0)</em></sub>"):::producerCls_rootCls
        MaxPooling_0("input_8\n<sub><em>(MaxPooling#0)</em></sub>")
        Conv_1("input_12\n<sub><em>(Conv#1)</em></sub>")
        ReLU_2("Relu_6\n<sub><em>(ReLU#2)</em></sub>")
        BatchNormalization_1("BatchNormalization_7\n<sub><em>(BatchNormalization#1)</em></sub>"):::genericCls
        ReLU_3("Relu_8\n<sub><em>(ReLU#3)</em></sub>")
        MaxPooling_1("input_20\n<sub><em>(MaxPooling#1)</em></sub>")
        ReLU_4("Relu_11\n<sub><em>(ReLU#4)</em></sub>")
        FC_0("input_28\n<sub><em>(FC#0)</em></sub>")
        ReLU_5("Relu_14\n<sub><em>(ReLU#5)</em></sub>")
        Producer_5("layer_7_bias\n<sub><em>(Producer#5)</em></sub>"):::producerCls
        Producer_4("layer_7_weight\n<sub><em>(Producer#4)</em></sub>"):::producerCls
        Conv_0("input\n<sub><em>(Conv#0)</em></sub>")
        BatchNormalization_0("BatchNormalization_2\n<sub><em>(BatchNormalization#0)</em></sub>"):::genericCls
        ReLU_1("Relu_3\n<sub><em>(ReLU#1)</em></sub>")
        Producer_1("layer_2_weight\n<sub><em>(Producer#1)</em></sub>"):::producerCls
        Producer_10-->|"0 [10]&rarr;2"|FC_1
        ReLU_0-->|"0&rarr;0"|BatchNormalization_0
        Producer_8-->|"0 [84]&rarr;2"|FC_0
        Producer_9-->|"0 [10, 84]&rarr;1"|FC_1
        Producer_6-->|"0 [120, 16, 5, 5]&rarr;1"|Conv_2
        Conv_2-->|"0&rarr;0"|ReLU_4
        Producer_2-->|"0 [6]&rarr;2"|BatchNormalization_0
        Producer_2-->|"0 [6]&rarr;2"|BatchNormalization_0
        Producer_7-->|"0 [84, 120]&rarr;1"|FC_0
        Producer_3-->|"0 [16, 6, 5, 5]&rarr;1"|Conv_1
        Producer_0-->|"0 [6, 1, 5, 5]&rarr;1"|Conv_0
        MaxPooling_0-->|"0&rarr;0"|Conv_1
        Conv_1-->|"0&rarr;0"|ReLU_2
        ReLU_2-->|"0&rarr;0"|BatchNormalization_1
        BatchNormalization_1-->|"0&rarr;0"|ReLU_3
        ReLU_3-->|"0&rarr;0"|MaxPooling_1
        MaxPooling_1-->|"0&rarr;0"|Conv_2
        ReLU_4-->|"0&rarr;0"|FC_0
        FC_0-->|"0&rarr;0"|ReLU_5
        ReLU_5-->|"0&rarr;0"|FC_1
        Producer_5-->|"0 [16]&rarr;2"|BatchNormalization_1
        Producer_5-->|"0 [16]&rarr;2"|BatchNormalization_1
        Producer_4-->|"0 [16]&rarr;1"|BatchNormalization_1
        Producer_4-->|"0 [16]&rarr;1"|BatchNormalization_1
        Conv_0-->|"0&rarr;0"|ReLU_0
        BatchNormalization_0-->|"0&rarr;0"|ReLU_1
        ReLU_1-->|"0&rarr;0"|MaxPooling_0
        Producer_1-->|"0 [6]&rarr;1"|BatchNormalization_0
        Producer_1-->|"0 [6]&rarr;1"|BatchNormalization_0
        input0((in#0)):::inputCls--->|"&rarr;0"|Conv_0
        input1((in#1)):::inputCls--->|"&rarr;2"|Conv_0
        input2((in#2)):::inputCls--->|"&rarr;2"|Conv_1
        input3((in#3)):::inputCls--->|"&rarr;2"|Conv_2
        FC_1--->|"0&rarr;"|output0((out#0)):::outputCls
        BatchNormalization_1--->|"1&rarr;"|output1((out#1)):::outputCls
        BatchNormalization_1--->|"2&rarr;"|output2((out#2)):::outputCls
        BatchNormalization_0--->|"1&rarr;"|output3((out#3)):::outputCls
        BatchNormalization_0--->|"2&rarr;"|output4((out#4)):::outputCls
        classDef inputCls fill:#afa
        classDef outputCls fill:#ffa
        classDef externalCls fill:#ccc
        classDef producerCls fill:#ccf
        classDef genericCls fill:#f9f9ff,stroke-width:1px,stroke-dasharray: 5 5
        classDef metaCls stroke-width:5px
        classDef rootCls stroke:#f00
        classDef producerCls_rootCls stroke:#f00,fill:#ccf
        classDef genericCls_rootCls stroke:#f00,fill:#f9f9ff,stroke-width:1px,stroke-dasharray: 5 5
        classDef metaCls_rootCls stroke:#f00,stroke-width:5px

        I don't know enough about PyTorch & aidge_interop_torch as of now to understand where the issue is; I'll sort this out tomorrow.

      • Clearly this module is not a priority. I would be in favor of merging this into dev with the broken CI and marking this module as broken. The reasoning behind this is that this MR doesn't break the CI and just highlights existing issues. These issues are not on the roadmap :smile:

        @pierregaillard do you agree with this prioritization ?

      • (On my side, I agree with not having to fix a bug I have no idea how to fix.)

if self.multi_outputs_flag:
    raise RuntimeError(