From 838aa7044d625d7a4d876fc8580a423fbf324f33 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Thu, 10 Oct 2024 06:37:54 +0000
Subject: [PATCH] Update export cpp with new registry methods.

---
 aidge_export_cpp/export_registry.py           |   7 +-
 aidge_export_cpp/kernels/activation.hpp       |  30 ++---
 aidge_export_cpp/kernels/batchnorm.hpp        |   8 +-
 aidge_export_cpp/kernels/convolution.hpp      |  26 ++---
 aidge_export_cpp/kernels/fullyconnected.hpp   |  16 +--
 aidge_export_cpp/operators.py                 | 103 +++++-------------
 .../configuration/activation_config.jinja     |   1 +
 .../configuration/convolution_config.jinja    |   2 +-
 .../configuration/elemwise_config.jinja       |   1 +
 .../configuration/fullyconnected_config.jinja |   2 +-
 .../templates/network/environment.jinja       |  25 -----
 .../templates/network/network_forward.jinja   |  36 ------
 12 files changed, 78 insertions(+), 179 deletions(-)
 delete mode 100644 aidge_export_cpp/templates/network/environment.jinja
 delete mode 100644 aidge_export_cpp/templates/network/network_forward.jinja

diff --git a/aidge_export_cpp/export_registry.py b/aidge_export_cpp/export_registry.py
index fe7e80c..7f10eea 100644
--- a/aidge_export_cpp/export_registry.py
+++ b/aidge_export_cpp/export_registry.py
@@ -2,9 +2,14 @@ from aidge_core.export_utils import ExportLib
 from aidge_export_cpp.utils import ROOT
 
 class ExportLibCpp(ExportLib):
-    name="export_cpp"
+    _name="export_cpp"
     static_files={
         str(ROOT / "static" / "Makefile"): "",
         str(ROOT / "static" / "include" / "network" / "typedefs.hpp"): "dnn/include/network",
         str(ROOT / "static" / "include" / "network" / "utils.hpp"): "dnn/include/network",
     }
+
+# TODO ugly fix for Tensor registration issue...
+import aidge_core
+aidge_core.register_Tensor(["export_cpp", aidge_core.dtype.float32],
+                           aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.float32]))
diff --git a/aidge_export_cpp/kernels/activation.hpp b/aidge_export_cpp/kernels/activation.hpp
index b5fbd61..d669515 100644
--- a/aidge_export_cpp/kernels/activation.hpp
+++ b/aidge_export_cpp/kernels/activation.hpp
@@ -4,36 +4,36 @@
 #include <type_traits>
 #include "network/typedefs.hpp"
 #include "network/utils.hpp"
-#include "network/rescaling.hpp"
+#include "kernels/rescaling.hpp"
 
-template<typename Output_T, typename T, 
+template<typename Output_T, typename T,
          typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
 __attribute__((always_inline)) inline
-Output_T saturate (T value, int32_t /*sat*/) 
+Output_T saturate (T value, int32_t /*sat*/)
 {
     return value;
 }
 
-template<typename Output_T, typename T, 
+template<typename Output_T, typename T,
          typename std::enable_if<!std::is_floating_point<T>::value>::type* = nullptr>
 __attribute__((always_inline)) inline
-Output_T saturate (T value, uint32_t sat) 
+Output_T saturate (T value, uint32_t sat)
 {
     if (std::is_unsigned<Output_T>::value) {
         return clamp(value, T(0), (T(1) << sat) - 1);
     } else {
         return clamp(value, -(T(1) << (sat - 1)), (T(1) << (sat - 1)) - 1);
-    } 
+    }
 }
 
-template<typename Output_T, 
-         typename Sum_T, 
+template<typename Output_T,
+         typename Sum_T,
          typename Rescaling_T>
-__attribute__((always_inline)) inline 
-Output_T activation_forward_value (Sum_T weightedSum, 
-                                   int output, 
-                                   ActivationFunction_T func, 
-                                   const Rescaling_T& __restrict rescaling) 
+__attribute__((always_inline)) inline
+Output_T activation_forward_value (Sum_T weightedSum,
+                                   int output,
+                                   ActivationFunction_T func,
+                                   const Rescaling_T& __restrict rescaling)
 {
     switch(func) {
         case Linear:
@@ -49,7 +49,7 @@ Output_T activation_forward_value (Sum_T weightedSum,
             break;
     }
 
-    // Value fixed here for now but it should be generated by 
+    // Value fixed here for now but it should be generated by
     // the export module or determined by the type of Output_T
     // For now only works for int8_t and uint8_t
     const uint32_t NB_BITS = 8;
@@ -60,7 +60,7 @@ Output_T activation_forward_value (Sum_T weightedSum,
 
 template<int NB_DATA, ActivationFunction_T ACTIVATION,
          typename Input_T, typename Output_T, typename Rescaling_T>
-__attribute__((always_inline)) inline 
+__attribute__((always_inline)) inline
 void activation_forward (
     const Input_T* __restrict inputs,
     Output_T* __restrict outputs,
diff --git a/aidge_export_cpp/kernels/batchnorm.hpp b/aidge_export_cpp/kernels/batchnorm.hpp
index d63c961..740ea21 100644
--- a/aidge_export_cpp/kernels/batchnorm.hpp
+++ b/aidge_export_cpp/kernels/batchnorm.hpp
@@ -2,17 +2,17 @@
 #define __AIDGE_EXPORT_CPP_KERNELS_BATCHNORM__
 
 #include "network/typedefs.hpp"
-#include "network/rescaling.hpp"
+#include "kernels/rescaling.hpp"
 #include <math.h>
 
 // WARNING: this kernel only works for 32-bits floating point values
-template<int NB_OUTPUTS, 
+template<int NB_OUTPUTS,
          int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
          ActivationFunction_T ACTIVATION,
-         typename Input_T, typename Output_T, 
+         typename Input_T, typename Output_T,
          typename Param_T>
-__attribute__((always_inline)) inline 
+__attribute__((always_inline)) inline
 void batchnorm_forward (
     const Input_T* __restrict inputs,
     Output_T* __restrict outputs,
diff --git a/aidge_export_cpp/kernels/convolution.hpp b/aidge_export_cpp/kernels/convolution.hpp
index 39f931a..efc7ee7 100644
--- a/aidge_export_cpp/kernels/convolution.hpp
+++ b/aidge_export_cpp/kernels/convolution.hpp
@@ -2,13 +2,13 @@
 #define __AIDGE_EXPORT_CPP_KERNELS_CONVOLUTION__
 
 #include "network/typedefs.hpp"
-#include "network/rescaling.hpp"
+#include "kernels/rescaling.hpp"
 #include "network/utils.hpp"
 #include "kernels/macs.hpp"
 #include "kernels/activation.hpp"
 
-template<int NB_CHANNELS, 
+template<int NB_CHANNELS,
          int CHANNELS_HEIGHT, int CHANNELS_WIDTH,
          int NB_OUTPUTS,
          int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
@@ -17,10 +17,10 @@ template<int NB_CHANNELS,
          int DILATION_Y, int DILATION_X,
          int KERNEL_HEIGHT, int KERNEL_WIDTH,
          ActivationFunction_T ACTIVATION,
-         typename Input_T, typename Output_T, 
+         typename Input_T, typename Output_T,
          typename Weight_T, typename Bias_T,
          typename Rescaling_T>
-__attribute__((always_inline)) inline 
+__attribute__((always_inline)) inline
 void convolution_forward(
     const Input_T* __restrict inputs,
     Output_T* __restrict outputs,
@@ -28,10 +28,10 @@ void convolution_forward(
     const Bias_T* __restrict biases,
     const Rescaling_T& __restrict rescaling)
 {
-    constexpr int DILATED_KERNEL_HEIGHT 
+    constexpr int DILATED_KERNEL_HEIGHT
             = KERNEL_HEIGHT + (DILATION_Y - 1) * (KERNEL_HEIGHT - 1);
 
-    constexpr int DILATED_KERNEL_WIDTH 
+    constexpr int DILATED_KERNEL_WIDTH
             = KERNEL_WIDTH + (DILATION_X - 1) * (KERNEL_WIDTH - 1);
 
     constexpr int OUTPUTS_HEIGHT_NOPAD
@@ -44,7 +44,7 @@ void convolution_forward(
                             : max(PADDING_Y - (oy * STRIDE_Y), 0);
             const int syMax = (PADDING_Y == 0
                     && OUTPUTS_HEIGHT == OUTPUTS_HEIGHT_NOPAD) ? DILATED_KERNEL_HEIGHT
-                            : clamp(CHANNELS_HEIGHT + PADDING_Y - (oy * STRIDE_Y), 
+                            : clamp(CHANNELS_HEIGHT + PADDING_Y - (oy * STRIDE_Y),
                                     0, DILATED_KERNEL_HEIGHT);
             const int iy = (oy * STRIDE_Y) - PADDING_Y;
 
@@ -57,7 +57,7 @@ void convolution_forward(
             const int sxMax = (PADDING_X == 0
                     && OUTPUTS_WIDTH == OUTPUTS_WIDTH_NOPAD)
                            ? DILATED_KERNEL_WIDTH
-                            : clamp(CHANNELS_WIDTH + PADDING_X - (ox * STRIDE_X), 
+                            : clamp(CHANNELS_WIDTH + PADDING_X - (ox * STRIDE_X),
                                     0, DILATED_KERNEL_WIDTH);
             const int ix = (ox * STRIDE_X) - PADDING_X;
 
@@ -85,8 +85,8 @@ void convolution_forward(
                         || sxMax - sxMin == KERNEL_WIDTH))
                 {
                     macsOnRange<KERNEL_WIDTH * NB_CHANNELS>(
-                        inputs + iOffset, 
-                        weights + wOffset, 
+                        inputs + iOffset,
+                        weights + wOffset,
                         weightedSum);
                 }
                 else {
@@ -100,11 +100,11 @@ void convolution_forward(
 
                         int iOffsetInRange = iOffset
                             + sx * DILATION_X * NB_CHANNELS;
-                        
+
                         macsOnRange<NB_CHANNELS>(
                             // same input line so no wrapping can occur
-                            inputs + iOffsetInRange, 
-                            weights + wOffset + sx * NB_CHANNELS, 
+                            inputs + iOffsetInRange,
+                            weights + wOffset + sx * NB_CHANNELS,
                             weightedSum);
                     }
                 }
diff --git a/aidge_export_cpp/kernels/fullyconnected.hpp b/aidge_export_cpp/kernels/fullyconnected.hpp
index 92aef15..895ed1c 100644
--- a/aidge_export_cpp/kernels/fullyconnected.hpp
+++ b/aidge_export_cpp/kernels/fullyconnected.hpp
@@ -2,20 +2,20 @@
 #define __AIDGE_EXPORT_CPP_KERNELS_FULLYCONNECTED__
 
 #include "network/typedefs.hpp"
-#include "network/rescaling.hpp"
+#include "kernels/rescaling.hpp"
 #include "network/utils.hpp"
 #include "kernels/macs.hpp"
 #include "kernels/activation.hpp"
 
-template<int NB_CHANNELS, 
+template<int NB_CHANNELS,
          int CHANNELS_HEIGHT, int CHANNELS_WIDTH,
          int NB_OUTPUTS,
          int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
          ActivationFunction_T ACTIVATION,
-         typename Input_T, typename Output_T, 
+         typename Input_T, typename Output_T,
          typename Weight_T, typename Bias_T,
          typename Rescaling_T>
-__attribute__((always_inline)) inline 
+__attribute__((always_inline)) inline
 void fullyconnected_forward (
     const Input_T* __restrict inputs,
     Output_T* __restrict outputs,
@@ -35,7 +35,7 @@ void fullyconnected_forward (
         for (int iy = 0; iy < CHANNELS_HEIGHT; ++iy) {
             for (int ix = 0; ix < CHANNELS_WIDTH; ++ix) {
                 for (int ch = 0; ch < NB_CHANNELS; ++ch) {
-                    weightedSum += inputs[CHANNELS_WIDTH*NB_CHANNELS*iy + NB_CHANNELS*ix + ch] 
+                    weightedSum += inputs[CHANNELS_WIDTH*NB_CHANNELS*iy + NB_CHANNELS*ix + ch]
                         * weights[CHANNELS_HEIGHT*CHANNELS_WIDTH*NB_CHANNELS*och + CHANNELS_HEIGHT*CHANNELS_WIDTH*ch + CHANNELS_HEIGHT*iy + ix];
                 }
             }
@@ -58,8 +58,8 @@ Here the kernel to use with inputs in NHWC and weights in NHWC
                 * (iy + CHANNELS_HEIGHT * och);
 
             macsOnRange<NB_CHANNELS * CHANNELS_WIDTH>(
-                inputs + iOffset, 
-                weights + wOffset, 
+                inputs + iOffset,
+                weights + wOffset,
                 weightedSum);
         }
 
@@ -69,4 +69,4 @@ Here the kernel to use with inputs in NHWC and weights in NHWC
 }
 
 
-#endif // __AIDGE_EXPORT_CPP_KERNELS_FULLYCONNECTED__
\ No newline at end of file
+#endif // __AIDGE_EXPORT_CPP_KERNELS_FULLYCONNECTED__
diff --git a/aidge_export_cpp/operators.py b/aidge_export_cpp/operators.py
index b31fe85..8bdc421 100644
--- a/aidge_export_cpp/operators.py
+++ b/aidge_export_cpp/operators.py
@@ -1,31 +1,11 @@
 import os
 import numpy as np
 from pathlib import Path
-from aidge_core.export_utils import ExportNode, ExportNodeCpp, operator_register, generate_file
+import aidge_core
+from aidge_core.export_utils import ExportNode, ExportNodeCpp, generate_file
 from aidge_export_cpp.utils import ROOT
 from aidge_export_cpp.utils.converter import numpy_dtype2ctype
-from aidge_export_cpp.utils.generation import *
 from aidge_export_cpp import ExportLibCpp
-##############################################
-################### Utils ####################
-##############################################
-
-
-# def get_node_parents(node):
-#     parents = []
-#     for parent in node.get_parents():
-#         if parent.type() != "Producer":
-#             parents.append(parent)
-#     return parents
-
-
-# def get_producer_parents(node):
-#     parents = []
-#     for parent in node.get_parents():
-#         if parent.type() == "Producer":
-#             parents.append(parent)
-#     return parents
-
 
 ##############################################
 ############## Export functions ##############
 ##############################################
@@ -55,10 +35,8 @@ def export_params(name: str,
 ############## Operators helper ##############
 ##############################################
 
-
-@operator_register(ExportLibCpp, "Producer")
+@ExportLibCpp.register("Producer", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
 class ProducerCPP(ExportNode):
-
     def __init__(self, node, mem_info, is_input, is_output):
         super().__init__(node, mem_info, is_input, is_output)
         self.values = np.array(self.operator.get_output(0))
@@ -77,11 +55,15 @@ class ProducerCPP(ExportNode):
     def forward(self):
         # A Producer does nothing during forward
         return []
-    @classmethod
-    def exportable(cls, node):
-        return True # TODO add check i/o NCHW
 
-@operator_register(ExportLibCpp, "ReLU")
+# TODO : find a way to remove this dummy exportnode
+@ExportLibCpp.register("Pad2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class Pad_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, is_input, is_output):
+        raise NotImplementedError("Pad2D nodes is not implemented")
+
+
+@ExportLibCpp.register("ReLU", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class ReLUCPP(ExportNodeCpp):
     def __init__(self, node, mem_info, is_input, is_output):
         super().__init__(node, mem_info, is_input, is_output)
@@ -97,11 +79,7 @@ class ReLUCPP(ExportNodeCpp):
             str(ROOT / "kernels" / "rescaling.hpp")
         ]
 
-    @classmethod
-    def exportable(cls, node):
-        return True # TODO add check i/o NCHW
-
-@operator_register(ExportLibCpp, "Conv")
+@ExportLibCpp.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class ConvCPP(ExportNodeCpp):
     def __init__(self, node, mem_info, is_input, is_output):
         super().__init__(node, mem_info, is_input, is_output)
@@ -121,22 +99,17 @@ class ConvCPP(ExportNodeCpp):
             str(ROOT / "kernels" / "activation.hpp"),
             str(ROOT / "kernels" / "rescaling.hpp")
         ]
-    @classmethod
-    def exportable(cls, node):
-        return True # TODO add check i/o NCHW
-
-
-@operator_register(ExportLibCpp, "PaddedConv")
+@ExportLibCpp.register_metaop("PaddedConv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class PaddedConvCPP(ExportNodeCpp):
     def __init__(self, node, mem_info, is_input, is_output):
         super().__init__(node, mem_info, is_input, is_output)
         # TODO find a way to retrive attr for meta op
         for n in self.operator.get_micro_graph().get_nodes():
-            if n.type() == "Pad":
+            if n.type() == "Pad2D":
                 self.attributes["padding"] = n.get_operator(
                 ).attr.begin_end_borders
-            if n.type() == "Conv":
+            if n.type() == "Conv2D":
                 self.attributes["kernel_dims"] = n.get_operator(
                 ).attr.kernel_dims
                 self.attributes["stride_dims"] = n.get_operator(
@@ -156,11 +129,8 @@ class PaddedConvCPP(ExportNodeCpp):
             str(ROOT / "kernels" / "activation.hpp"),
             str(ROOT / "kernels" / "rescaling.hpp")
         ]
-    @classmethod
-    def exportable(cls, node):
-        return True # TODO add check i/o NCHW
 
-@operator_register(ExportLibCpp, "Add")
+@ExportLibCpp.register("Add", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class AddCPP(ExportNodeCpp):
     def __init__(self, node, mem_info, is_input, is_output):
         super().__init__(node, mem_info, is_input, is_output)
@@ -177,11 +147,8 @@ class AddCPP(ExportNodeCpp):
             str(ROOT / "kernels" / "activation.hpp"),
             str(ROOT / "kernels" / "rescaling.hpp")
         ]
-    @classmethod
-    def exportable(cls, node):
-        return True # TODO add check i/o NCHW
 
-@operator_register(ExportLibCpp, "Sub")
+@ExportLibCpp.register("Sub", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class SubCPP(ExportNodeCpp):
     def __init__(self, node, mem_info, is_input, is_output):
         super().__init__(node, mem_info, is_input, is_output)
@@ -198,12 +165,9 @@ class SubCPP(ExportNodeCpp):
             str(ROOT / "kernels" / "activation.hpp"),
             str(ROOT / "kernels" / "rescaling.hpp")
         ]
 
-    @classmethod
-    def exportable(cls, node):
-        return True # TODO add check i/o NCHW
-
-@operator_register(ExportLibCpp, "Mul")
+@ExportLibCpp.register("Mul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class MulCPP(ExportNodeCpp):
     def __init__(self, node, mem_info, is_input, is_output):
         super().__init__(node, mem_info, is_input, is_output)
@@ -220,11 +184,8 @@ class MulCPP(ExportNodeCpp):
             str(ROOT / "kernels" / "activation.hpp"),
             str(ROOT / "kernels" / "rescaling.hpp")
         ]
-    @classmethod
-    def exportable(cls, node):
-        return True # TODO add check i/o NCHW
 
-@operator_register(ExportLibCpp, "MaxPooling")
+@ExportLibCpp.register("MaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class MaxPoolCPP(ExportNodeCpp):
     def __init__(self, node, mem_info, is_input, is_output):
         super().__init__(node, mem_info, is_input, is_output)
@@ -243,21 +204,19 @@ class MaxPoolCPP(ExportNodeCpp):
         self.kernels_to_copy = [
             str(ROOT / "kernels" / "pooling.hpp"),
             str(ROOT / "kernels" / "activation.hpp"),
+            str(ROOT / "kernels" / "rescaling.hpp")
         ]
-    @classmethod
-    def exportable(cls, node):
-        return True # TODO add check i/o NCHW
 
-@operator_register(ExportLibCpp, "PaddedMaxPooling")
+@ExportLibCpp.register_metaop("PaddedMaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class PaddedMaxPoolCPP(ExportNodeCpp):
     def __init__(self, node, mem_info, is_input, is_output):
         super().__init__(node, mem_info, is_input, is_output)
         for n in self.operator.get_micro_graph().get_nodes():
-            if n.type() == "Pad":
+            if n.type() == "Pad2D":
                 self.attributes["padding"] = n.get_operator(
                 ).attr.begin_end_borders
-            if n.type() == "MaxPooling":
+            if n.type() == "MaxPooling2D":
                 self.attributes["kernel_dims"] = n.get_operator(
                 ).attr.kernel_dims
                 self.attributes["stride_dims"] = n.get_operator(
@@ -273,12 +232,11 @@ class PaddedMaxPoolCPP(ExportNodeCpp):
         self.kernels_to_copy = [
             str(ROOT / "kernels" / "pooling.hpp"),
             str(ROOT / "kernels" / "activation.hpp"),
+            str(ROOT / "kernels" / "rescaling.hpp")
         ]
-    @classmethod
-    def exportable(cls, node):
-        return True # TODO add check i/o NCHW
-
-@operator_register(ExportLibCpp, "GlobalAveragePooling")
+
+@ExportLibCpp.register("GlobalAveragePooling", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class GlobalAveragePoolCPP(ExportNodeCpp):
     def __init__(self, node, mem_info, is_input, is_output):
         super().__init__(node, mem_info, is_input, is_output)
@@ -302,12 +260,10 @@ class GlobalAveragePoolCPP(ExportNodeCpp):
         self.kernels_to_copy = [
             str(ROOT / "kernels" / "pooling.hpp"),
             str(ROOT / "kernels" / "activation.hpp"),
+            str(ROOT / "kernels" / "rescaling.hpp")
         ]
-    @classmethod
-    def exportable(cls, node):
-        return True # TODO add check i/o NCHW
 
-@operator_register(ExportLibCpp, "FC")
+@ExportLibCpp.register("FC", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class FcCPP(ExportNodeCpp):
     def __init__(self, node, mem_info, is_input, is_output):
         super().__init__(node, mem_info, is_input, is_output)
@@ -324,6 +280,3 @@ class FcCPP(ExportNodeCpp):
             str(ROOT / "kernels" / "activation.hpp"),
             str(ROOT / "kernels" / "rescaling.hpp")
         ]
-    @classmethod
-    def exportable(cls, node):
-        return True # TODO add check i/o NCHW
diff --git a/aidge_export_cpp/templates/configuration/activation_config.jinja b/aidge_export_cpp/templates/configuration/activation_config.jinja
index f9535e7..1ab5b21 100644
--- a/aidge_export_cpp/templates/configuration/activation_config.jinja
+++ b/aidge_export_cpp/templates/configuration/activation_config.jinja
@@ -1,6 +1,7 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+#include "kernels/rescaling.hpp"
 
 {# For layer configuration -#}
 {%- set nb_data = in_chan[0] * in_height[0] * in_width[0] %}
diff --git a/aidge_export_cpp/templates/configuration/convolution_config.jinja b/aidge_export_cpp/templates/configuration/convolution_config.jinja
index c536a48..a4a2462 100644
--- a/aidge_export_cpp/templates/configuration/convolution_config.jinja
+++ b/aidge_export_cpp/templates/configuration/convolution_config.jinja
@@ -1,7 +1,7 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
-
+#include "kernels/rescaling.hpp"
 {# For layer configuration -#}
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
diff --git a/aidge_export_cpp/templates/configuration/elemwise_config.jinja b/aidge_export_cpp/templates/configuration/elemwise_config.jinja
index fc801f1..91a0be4 100644
--- a/aidge_export_cpp/templates/configuration/elemwise_config.jinja
+++ b/aidge_export_cpp/templates/configuration/elemwise_config.jinja
@@ -1,6 +1,7 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+#include "kernels/rescaling.hpp"
 
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
diff --git a/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja b/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja
index 0546304..3c80338 100644
--- a/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja
+++ b/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja
@@ -1,7 +1,7 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
-
+#include "kernels/rescaling.hpp"
 {# For layer configuration -#}
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
diff --git a/aidge_export_cpp/templates/network/environment.jinja b/aidge_export_cpp/templates/network/environment.jinja
deleted file mode 100644
index ced5991..0000000
--- a/aidge_export_cpp/templates/network/environment.jinja
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef ENV_LAYER_H
-#define ENV_LAYER_H
-
-#include <stdint.h>
-
-#define ENV_SIZE_X {{ size_x }}
-#define ENV_SIZE_Y {{ size_y }}
-#define ENV_NB_OUTPUTS {{ nb_outputs }}
-
-#define ENV_DATA_UNSIGNED {{ is_unsigned }}
-
-#define ENV_OUTPUTS_SIZE (ENV_NB_OUTPUTS*ENV_SIZE_X*ENV_SIZE_Y)
-
-#define NETWORK_TARGETS 1
-//Output targets network dimension definition:
-static unsigned int OUTPUTS_HEIGHT[NETWORK_TARGETS] = {1};
-static unsigned int OUTPUTS_WIDTH[NETWORK_TARGETS] = {1};
-static unsigned int NB_OUTPUTS[NETWORK_TARGETS] = {1000};
-static unsigned int NB_TARGET[NETWORK_TARGETS] = {1000};
-static unsigned int OUTPUTS_SIZE[NETWORK_TARGETS] = {(OUTPUTS_WIDTH[0]*OUTPUTS_HEIGHT[0])};
-typedef int32_t Target_0_T;
-typedef Target_0_T Target_T;
-
-
-#endif // ENV_LAYER_H
diff --git a/aidge_export_cpp/templates/network/network_forward.jinja b/aidge_export_cpp/templates/network/network_forward.jinja
deleted file mode 100644
index 592e8c9..0000000
--- a/aidge_export_cpp/templates/network/network_forward.jinja
+++ /dev/null
@@ -1,36 +0,0 @@
-{#- For libraries #}
-
-#include <stdint.h>
-
-#ifdef SAVE_OUTPUTS
-#include <sys/types.h>
-#include <sys/stat.h>
-#endif
-
-#include "network/rescaling.hpp"
-
-// Layer & memory configurations
-{%- for header in headers %}
-#include "{{ header }}"
-{%- endfor %}
-
-{# mem has the datatype of the firt output #}
-{#- Change here to improve it -#}
-static {{outputs[0][0]}} mem[{{peak_mem}}];
-
-{# Forward function #}
-{#- Support multiple inputs with different datatypes and multiple outputs with different datatypes -#}
-void model_forward({% for inp in inputs %}const {{inp[0]}}* {{inp[1]}}, {% endfor %}{% for out in outputs %}{{out[0]}}* {{out[1]}}{{ ", " if not loop.last else "" }}{% endfor %})
-{
-
-    #ifdef SAVE_OUTPUTS
-    // Creation of the outputs directory
-    struct stat st {};
-    if (stat("outputs", &st) == -1) {
-        mkdir("outputs", 0700);
-    }
-    #endif
-    {%- for action in actions %}
-    {{ action }}
-    {%- endfor %}
-}
-- 
GitLab