From aab3e328ee79645e5225756d28b4b870614d6e3a Mon Sep 17 00:00:00 2001
From: Axel Farrugia <axel.farrugia@cea.fr>
Date: Wed, 23 Apr 2025 18:34:00 +0200
Subject: [PATCH] [Refactor] Split rescaling.hpp and activation.hpp into two
 types of files:

- kernels/: the forward calls of the corresponding kernels (note: activation
  and rescaling currently have exactly the same implementation)
- network/: the utility functions that may be used by other kernels

Also move `macs.hpp` into the network folder, for consistency.
---
 aidge_export_cpp/export_registry.py           |  2 +
 aidge_export_cpp/kernels/activation.hpp       | 57 +--------------
 aidge_export_cpp/kernels/batchnorm.hpp        |  2 +-
 aidge_export_cpp/kernels/convolution.hpp      |  6 +-
 aidge_export_cpp/kernels/elemwise.hpp         |  2 +-
 aidge_export_cpp/kernels/fullyconnected.hpp   |  6 +-
 aidge_export_cpp/kernels/matmul.hpp           |  2 +-
 aidge_export_cpp/kernels/rescaling.hpp        | 83 +-------------------
 aidge_export_cpp/kernels/softmax.hpp          |  1 -
 aidge_export_cpp/operators/CppActivation.py   |  1 -
 aidge_export_cpp/operators/CppBatchNorm.py    |  4 +-
 aidge_export_cpp/operators/CppConv.py         |  4 +-
 aidge_export_cpp/operators/CppElemWise.py     |  2 -
 aidge_export_cpp/operators/CppFc.py           |  4 +-
 aidge_export_cpp/operators/CppPool.py         |  1 -
 aidge_export_cpp/operators/CppRescaling.py    |  3 +-
 aidge_export_cpp/operators/CppSoftmax.py      |  1 -
 aidge_export_cpp/static/activation_utils.hpp  | 56 +++++++++++++
 aidge_export_cpp/{kernels => static}/macs.hpp |  0
 aidge_export_cpp/static/rescaling_utils.hpp   | 78 +++++++++++++++++
 .../configuration/activation_config.jinja     |  2 +-
 .../configuration/batchnorm_config.jinja      |  2 +-
 .../configuration/convolution_config.jinja    |  2 +-
 .../configuration/elemwise_config.jinja       |  2 +-
 .../configuration/fullyconnected_config.jinja |  2 +-
 25 files changed, 158 insertions(+), 167 deletions(-)
 create mode 100644 aidge_export_cpp/static/activation_utils.hpp
 rename aidge_export_cpp/{kernels => static}/macs.hpp (100%)
 create mode 100644 aidge_export_cpp/static/rescaling_utils.hpp

diff --git a/aidge_export_cpp/export_registry.py b/aidge_export_cpp/export_registry.py
index d3fcd9a..ee54890 100644
--- a/aidge_export_cpp/export_registry.py
+++ b/aidge_export_cpp/export_registry.py
@@ -7,4 +7,6 @@ class ExportLibCpp(ExportLib):
         str(ROOT / "static" / "Makefile"): "",
         str(ROOT / "static" / "typedefs.hpp"): "dnn/include/network",
         str(ROOT / "static" / "utils.hpp"): "dnn/include/network",
+        str(ROOT / "static" / "rescaling_utils.hpp"): "dnn/include/network",
+        str(ROOT / "static" / "activation_utils.hpp"): "dnn/include/network",
     }
diff --git a/aidge_export_cpp/kernels/activation.hpp b/aidge_export_cpp/kernels/activation.hpp
index d669515..ee80ed2 100644
--- a/aidge_export_cpp/kernels/activation.hpp
+++ b/aidge_export_cpp/kernels/activation.hpp
@@ -1,61 +1,8 @@
 #ifndef __AIDGE_EXPORT_CPP_KERNELS_ACTIVATION__
 #define __AIDGE_EXPORT_CPP_KERNELS_ACTIVATION__
 
-#include <type_traits>
-#include "network/typedefs.hpp"
-#include "network/utils.hpp"
-#include "kernels/rescaling.hpp"
-
-template<typename Output_T, typename T,
-         typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
-__attribute__((always_inline)) inline
-Output_T saturate (T value, int32_t /*sat*/)
-{
-    return value;
-}
-
-template<typename Output_T, typename T,
-         typename std::enable_if<!std::is_floating_point<T>::value>::type* = nullptr>
-__attribute__((always_inline)) inline
-Output_T saturate (T value, uint32_t sat)
-{
-    if (std::is_unsigned<Output_T>::value) {
-        return clamp(value, T(0), (T(1) << sat) - 1);
-    } else {
-        return clamp(value, -(T(1) << (sat - 1)), (T(1) << (sat - 1)) - 1);
-    }
-}
-
-template<typename Output_T,
-         typename Sum_T,
-         typename Rescaling_T>
-__attribute__((always_inline)) inline
-Output_T activation_forward_value (Sum_T weightedSum,
-                                   int output,
-                                   ActivationFunction_T func,
-                                   const Rescaling_T& __restrict rescaling)
-{
-    switch(func) {
-        case Linear:
-        case Saturation: {
-            break;
-        }
-        case Rectifier: {
-            if(weightedSum <= 0) weightedSum = 0;
-            break;
-        }
-        default:
-            // Unsupported activation function
-            break;
-    }
-
-    // Value fixed here for now but it should be generated by
-    // the export module or determined by the type of Output_T
-    // For now only works for int8_t and uint8_t
-    const uint32_t NB_BITS = 8;
-    return saturate<Output_T>(rescaling(weightedSum, output), NB_BITS);
-}
-
+#include "network/activation_utils.hpp"
+#include "network/rescaling_utils.hpp"
 
 template<int NB_DATA,
          ActivationFunction_T ACTIVATION,
diff --git a/aidge_export_cpp/kernels/batchnorm.hpp b/aidge_export_cpp/kernels/batchnorm.hpp
index f05a047..27866ab 100644
--- a/aidge_export_cpp/kernels/batchnorm.hpp
+++ b/aidge_export_cpp/kernels/batchnorm.hpp
@@ -2,7 +2,7 @@
 #define __AIDGE_EXPORT_CPP_KERNELS_BATCHNORM__
 
 #include "network/typedefs.hpp"
-#include "kernels/activation.hpp"
+#include "network/activation_utils.hpp"
 
 #include <math.h>
 
diff --git a/aidge_export_cpp/kernels/convolution.hpp b/aidge_export_cpp/kernels/convolution.hpp
index 5855654..0648d80 100644
--- a/aidge_export_cpp/kernels/convolution.hpp
+++ b/aidge_export_cpp/kernels/convolution.hpp
@@ -2,10 +2,10 @@
 #define __AIDGE_EXPORT_CPP_KERNELS_CONVOLUTION__
 
 #include "network/typedefs.hpp"
-#include "kernels/rescaling.hpp"
+#include "network/rescaling_utils.hpp"
 #include "network/utils.hpp"
-#include "kernels/macs.hpp"
-#include "kernels/activation.hpp"
+#include "network/macs.hpp"
+#include "network/activation_utils.hpp"
 
 
 template<int NB_CHANNELS,
diff --git a/aidge_export_cpp/kernels/elemwise.hpp b/aidge_export_cpp/kernels/elemwise.hpp
index 67ee574..9468b33 100644
--- a/aidge_export_cpp/kernels/elemwise.hpp
+++ b/aidge_export_cpp/kernels/elemwise.hpp
@@ -2,7 +2,7 @@
 #define __AIDGE_EXPORT_CPP_KERNELS_ELEMWISE__
 
 #include "network/typedefs.hpp"
-#include "kernels/activation.hpp"
+#include "network/activation_utils.hpp"
 
 
 // Generic function for two inputs
diff --git a/aidge_export_cpp/kernels/fullyconnected.hpp b/aidge_export_cpp/kernels/fullyconnected.hpp
index 60805e7..abaab59 100644
--- a/aidge_export_cpp/kernels/fullyconnected.hpp
+++ b/aidge_export_cpp/kernels/fullyconnected.hpp
@@ -2,10 +2,10 @@
 #define __AIDGE_EXPORT_CPP_KERNELS_FULLYCONNECTED__
 
 #include "network/typedefs.hpp"
-#include "kernels/rescaling.hpp"
+#include "network/rescaling_utils.hpp"
 #include "network/utils.hpp"
-#include "kernels/macs.hpp"
-#include "kernels/activation.hpp"
+#include "network/macs.hpp"
+#include "network/activation_utils.hpp"
 
 template<int NB_CHANNELS,
          int CHANNELS_HEIGHT, int CHANNELS_WIDTH,
diff --git a/aidge_export_cpp/kernels/matmul.hpp b/aidge_export_cpp/kernels/matmul.hpp
index 4500993..b507c4f 100644
--- a/aidge_export_cpp/kernels/matmul.hpp
+++ b/aidge_export_cpp/kernels/matmul.hpp
@@ -2,7 +2,7 @@
 #define __AIDGE_EXPORT_CPP_KERNELS_MATMUL__
 
 #include "network/typedefs.hpp"
-#include "kernels/activation.hpp"
+#include "network/activation_utils.hpp"
 
 
 // Generic function for matmul and activation
diff --git a/aidge_export_cpp/kernels/rescaling.hpp b/aidge_export_cpp/kernels/rescaling.hpp
index 117a0cd..a831fa8 100644
--- a/aidge_export_cpp/kernels/rescaling.hpp
+++ b/aidge_export_cpp/kernels/rescaling.hpp
@@ -1,8 +1,8 @@
 #ifndef __AIDGE_EXPORT_CPP_NETWORK_RESCALING__
 #define __AIDGE_EXPORT_CPP_NETWORK_RESCALING__
 
-#include "kernels/activation.hpp"
-
+#include "network/rescaling_utils.hpp"
+#include "network/activation_utils.hpp"
 
 template<int NB_DATA,
          ActivationFunction_T ACTIVATION,
@@ -23,83 +23,4 @@ void rescaling_forward (
     }
 }
 
-
-// ---------------------------------------------------
-// ----------------- Saturate Utils ------------------
-// ---------------------------------------------------
-
-static int64_t toInt64(uint32_t lo, uint32_t hi) {
-    return (int64_t) (((uint64_t) hi) << 32ull) | ((uint64_t) lo);
-}
-
-static int64_t smlal(int32_t lhs, int32_t rhs,
-                     uint32_t accumLo, uint32_t accumHi)
-{
-    return ((int64_t) lhs) * ((int64_t) rhs) + toInt64(accumLo, accumHi);
-}
-
-// ---------------------------------------------------
-// --------------- Scaling by Shifting ---------------
-// ---------------------------------------------------
-
-template<int SHIFT>
-struct SingleShiftScaling {
-
-    template<typename Sum_T>
-    Sum_T operator()(Sum_T weightedSum, size_t /*output*/) const
-    {
-        return (SHIFT != 0) ? ((weightedSum >> (SHIFT - 1)) + 1) >> 1   // Rounding
-                            : weightedSum;
-    }
-
-    // // Shift attribute
-    // static const int mShift = SHIFT;
-    // static const Scaling_T mScalingType = SingleShift;
-
-    // // FP Attribute
-    // static const int32_t mScaling = 0;
-    // static const int64_t mFractionalBits = 0;
-
-};
-
-// ---------------------------------------------------
-// --------------- Fixed Point Scaling ---------------
-// ---------------------------------------------------
-
-template<int64_t SHIFT, int32_t COEF>
-struct FixedPointScaling {
-
-    template<typename Sum_T>
-    Sum_T operator()(Sum_T weightedSum, size_t /*output*/) const
-    {
-        return smlal(weightedSum, COEF, HALF_LO, HALF_HI) >> SHIFT;
-    }
-
-    // Attributes
-    static const uint32_t HALF_LO = (SHIFT > 0)
-        ? (1ull << (SHIFT - 1)) & 0xFFFFFFFF : 0;
-    static const uint32_t HALF_HI = (SHIFT > 0)
-        ? (1ull << (SHIFT - 1)) >> 32u : 0;
-
-    // static const int32_t mScaling = SCALING;
-    // static const int64_t mFractionalBits = FRACTIONAL_BITS;
-    // static const Scaling_T mScalingType = FixedPoint;
-    // static const int mShift = 0;
-};
-
-// ---------------------------------------------------
-// ------------------- No Scaling --------------------
-// ---------------------------------------------------
-
-struct NoScaling {
-
-    template<typename Sum_T>
-    Sum_T operator()(Sum_T weightedSum, unsigned int /*output*/) const
-    {
-        return weightedSum;
-    }
-
-};
-
-
 #endif  // __AIDGE_EXPORT_CPP_NETWORK_RESCALING__
diff --git a/aidge_export_cpp/kernels/softmax.hpp b/aidge_export_cpp/kernels/softmax.hpp
index f5472cf..d29e9b4 100644
--- a/aidge_export_cpp/kernels/softmax.hpp
+++ b/aidge_export_cpp/kernels/softmax.hpp
@@ -3,7 +3,6 @@
 
 #include "network/typedefs.hpp"
 #include "network/utils.hpp"
-#include "kernels/macs.hpp"
 
 #include <type_traits>
 #include <cmath>
diff --git a/aidge_export_cpp/operators/CppActivation.py b/aidge_export_cpp/operators/CppActivation.py
index f3ffc8e..b8c9367 100644
--- a/aidge_export_cpp/operators/CppActivation.py
+++ b/aidge_export_cpp/operators/CppActivation.py
@@ -50,7 +50,6 @@ class CppActivation(ExportNodeCpp):
 
         # Path to the kernel(s) files to copy
         self.add_kernel_to_copy(ROOT / "kernels" / "activation.hpp")
-        self.add_kernel_to_copy(ROOT / "kernels" / "rescaling.hpp", fwd_include=False)
 
         # Include aidge outputs within the fwd file
         if self.attributes["aidge_cmp"]:
diff --git a/aidge_export_cpp/operators/CppBatchNorm.py b/aidge_export_cpp/operators/CppBatchNorm.py
index 285a64c..091dc76 100644
--- a/aidge_export_cpp/operators/CppBatchNorm.py
+++ b/aidge_export_cpp/operators/CppBatchNorm.py
@@ -25,9 +25,7 @@ class CppBatchNorm(ExportNodeCpp):
 
         # Path to the kernel(s) files to copy
         self.add_kernel_to_copy(ROOT / "kernels" / "batchnorm.hpp")
-        self.add_kernel_to_copy(ROOT / "kernels" / "macs.hpp", fwd_include=False)
-        self.add_kernel_to_copy(ROOT / "kernels" / "activation.hpp", fwd_include=False)
-        self.add_kernel_to_copy(ROOT / "kernels" / "rescaling.hpp", fwd_include=False)
+        self.add_kernel_to_copy(ROOT / "static" / "macs.hpp", "include/network", fwd_include=False)
 
         # Include aidge outputs within the fwd file
         if self.attributes["aidge_cmp"]:
diff --git a/aidge_export_cpp/operators/CppConv.py b/aidge_export_cpp/operators/CppConv.py
index 5c39fe6..40b7577 100644
--- a/aidge_export_cpp/operators/CppConv.py
+++ b/aidge_export_cpp/operators/CppConv.py
@@ -50,9 +50,7 @@ class CppConv(ExportNodeCpp):
 
         # Path to the kernel(s) files to copy
         self.add_kernel_to_copy(ROOT / "kernels" / "convolution.hpp")
-        self.add_kernel_to_copy(ROOT / "kernels" / "macs.hpp", fwd_include=False)
-        self.add_kernel_to_copy(ROOT / "kernels" / "activation.hpp", fwd_include=False)
-        self.add_kernel_to_copy(ROOT / "kernels" / "rescaling.hpp", fwd_include=False)
+        self.add_kernel_to_copy(ROOT / "static" / "macs.hpp", "include/network", fwd_include=False)
 
         # Include aidge outputs within the fwd file
         if self.attributes["aidge_cmp"]:
diff --git a/aidge_export_cpp/operators/CppElemWise.py b/aidge_export_cpp/operators/CppElemWise.py
index 041bce9..e39fe15 100644
--- a/aidge_export_cpp/operators/CppElemWise.py
+++ b/aidge_export_cpp/operators/CppElemWise.py
@@ -72,8 +72,6 @@ class CppElemWise(ExportNodeCpp):
 
         # Path to the kernel(s) files to copy
         self.add_kernel_to_copy(ROOT / "kernels" / "elemwise.hpp")
-        self.add_kernel_to_copy(ROOT / "kernels" / "rescaling.hpp", fwd_include=False)
-        self.add_kernel_to_copy(ROOT / "kernels" / "activation.hpp", fwd_include=False)
"kernels" / "activation.hpp", fwd_include=False) # Include aidge outputs within the fwd file if self.attributes["aidge_cmp"]: diff --git a/aidge_export_cpp/operators/CppFc.py b/aidge_export_cpp/operators/CppFc.py index 793d74f..9758b1a 100644 --- a/aidge_export_cpp/operators/CppFc.py +++ b/aidge_export_cpp/operators/CppFc.py @@ -48,9 +48,7 @@ class CppFc(ExportNodeCpp): # Path to the kernel(s) files to copy self.add_kernel_to_copy(ROOT / "kernels" / "fullyconnected.hpp") - self.add_kernel_to_copy(ROOT / "kernels" / "macs.hpp", fwd_include=False) - self.add_kernel_to_copy(ROOT / "kernels" / "activation.hpp", fwd_include=False) - self.add_kernel_to_copy(ROOT / "kernels" / "rescaling.hpp", fwd_include=False) + self.add_kernel_to_copy(ROOT / "static" / "macs.hpp", "include/network", fwd_include=False) # Include aidge outputs within the fwd file if self.attributes["aidge_cmp"]: diff --git a/aidge_export_cpp/operators/CppPool.py b/aidge_export_cpp/operators/CppPool.py index fa9aaec..54a4cbb 100644 --- a/aidge_export_cpp/operators/CppPool.py +++ b/aidge_export_cpp/operators/CppPool.py @@ -44,7 +44,6 @@ class CppPool(ExportNodeCpp): # Path to the kernel(s) files to copy self.add_kernel_to_copy(ROOT / "kernels" / "pooling.hpp") - self.add_kernel_to_copy(ROOT / "kernels" / "activation.hpp", fwd_include=False) # Include aidge outputs within the fwd file if self.attributes["aidge_cmp"]: diff --git a/aidge_export_cpp/operators/CppRescaling.py b/aidge_export_cpp/operators/CppRescaling.py index 815e5c2..96e395a 100644 --- a/aidge_export_cpp/operators/CppRescaling.py +++ b/aidge_export_cpp/operators/CppRescaling.py @@ -48,8 +48,7 @@ class CppRescaling(ExportNodeCpp): self.include_list = [] # Path to the kernel(s) files to copy - self.add_kernel_to_copy(ROOT / "kernels" / "rescaling.hpp", fwd_include=False) - self.add_kernel_to_copy(ROOT / "kernels" / "activation.hpp", fwd_include=False) + self.add_kernel_to_copy(ROOT / "kernels" / "rescaling.hpp") # # Include aidge outputs within the fwd file # if self.attributes["aidge_cmp"]: diff --git a/aidge_export_cpp/operators/CppSoftmax.py b/aidge_export_cpp/operators/CppSoftmax.py index 90bcacf..14c6728 100644 --- a/aidge_export_cpp/operators/CppSoftmax.py +++ b/aidge_export_cpp/operators/CppSoftmax.py @@ -46,7 +46,6 @@ class CppSoftmax(ExportNodeCpp): # Path to the kernel(s) files to copy self.add_kernel_to_copy(ROOT / "kernels" / "softmax.hpp") - self.add_kernel_to_copy(ROOT / "kernels" / "macs.hpp", fwd_include=False) # Include aidge outputs within the fwd file if self.attributes["aidge_cmp"]: diff --git a/aidge_export_cpp/static/activation_utils.hpp b/aidge_export_cpp/static/activation_utils.hpp new file mode 100644 index 0000000..c6a1bcd --- /dev/null +++ b/aidge_export_cpp/static/activation_utils.hpp @@ -0,0 +1,56 @@ +#pragma once + +#include <type_traits> +#include "network/typedefs.hpp" +#include "network/utils.hpp" +#include "network/rescaling_utils.hpp" + +template<typename Output_T, typename T, + typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr> +__attribute__((always_inline)) inline +Output_T saturate (T value, int32_t /*sat*/) +{ + return value; +} + +template<typename Output_T, typename T, + typename std::enable_if<!std::is_floating_point<T>::value>::type* = nullptr> +__attribute__((always_inline)) inline +Output_T saturate (T value, uint32_t sat) +{ + if (std::is_unsigned<Output_T>::value) { + return clamp(value, T(0), (T(1) << sat) - 1); + } else { + return clamp(value, -(T(1) << (sat - 1)), (T(1) << (sat - 1)) - 1); + } 
+}
+
+template<typename Output_T,
+         typename Sum_T,
+         typename Rescaling_T>
+__attribute__((always_inline)) inline
+Output_T activation_forward_value (Sum_T weightedSum,
+                                   int output,
+                                   ActivationFunction_T func,
+                                   const Rescaling_T& __restrict rescaling)
+{
+    switch(func) {
+        case Linear:
+        case Saturation: {
+            break;
+        }
+        case Rectifier: {
+            if(weightedSum <= 0) weightedSum = 0;
+            break;
+        }
+        default:
+            // Unsupported activation function
+            break;
+    }
+
+    // Value fixed here for now but it should be generated by
+    // the export module or determined by the type of Output_T
+    // For now only works for int8_t and uint8_t
+    const uint32_t NB_BITS = 8;
+    return saturate<Output_T>(rescaling(weightedSum, output), NB_BITS);
+}
diff --git a/aidge_export_cpp/kernels/macs.hpp b/aidge_export_cpp/static/macs.hpp
similarity index 100%
rename from aidge_export_cpp/kernels/macs.hpp
rename to aidge_export_cpp/static/macs.hpp
diff --git a/aidge_export_cpp/static/rescaling_utils.hpp b/aidge_export_cpp/static/rescaling_utils.hpp
new file mode 100644
index 0000000..4fdb321
--- /dev/null
+++ b/aidge_export_cpp/static/rescaling_utils.hpp
@@ -0,0 +1,78 @@
+#pragma once
+
+// ---------------------------------------------------
+// ----------------- Saturate Utils ------------------
+// ---------------------------------------------------
+
+static int64_t toInt64(uint32_t lo, uint32_t hi) {
+    return (int64_t) (((uint64_t) hi) << 32ull) | ((uint64_t) lo);
+}
+
+static int64_t smlal(int32_t lhs, int32_t rhs,
+                     uint32_t accumLo, uint32_t accumHi)
+{
+    return ((int64_t) lhs) * ((int64_t) rhs) + toInt64(accumLo, accumHi);
+}
+
+// ---------------------------------------------------
+// --------------- Scaling by Shifting ---------------
+// ---------------------------------------------------
+
+template<int SHIFT>
+struct SingleShiftScaling {
+
+    template<typename Sum_T>
+    Sum_T operator()(Sum_T weightedSum, size_t /*output*/) const
+    {
+        return (SHIFT != 0) ? ((weightedSum >> (SHIFT - 1)) + 1) >> 1   // Rounding
+                            : weightedSum;
+    }
+
+    // // Shift attribute
+    // static const int mShift = SHIFT;
+    // static const Scaling_T mScalingType = SingleShift;
+
+    // // FP Attribute
+    // static const int32_t mScaling = 0;
+    // static const int64_t mFractionalBits = 0;
+
+};
+
+// ---------------------------------------------------
+// --------------- Fixed Point Scaling ---------------
+// ---------------------------------------------------
+
+template<int64_t SHIFT, int32_t COEF>
+struct FixedPointScaling {
+
+    template<typename Sum_T>
+    Sum_T operator()(Sum_T weightedSum, size_t /*output*/) const
+    {
+        return smlal(weightedSum, COEF, HALF_LO, HALF_HI) >> SHIFT;
+    }
+
+    // Attributes
+    static const uint32_t HALF_LO = (SHIFT > 0)
+        ? (1ull << (SHIFT - 1)) & 0xFFFFFFFF : 0;
+    static const uint32_t HALF_HI = (SHIFT > 0)
+        ? (1ull << (SHIFT - 1)) >> 32u : 0;
+
+    // static const int32_t mScaling = SCALING;
+    // static const int64_t mFractionalBits = FRACTIONAL_BITS;
+    // static const Scaling_T mScalingType = FixedPoint;
+    // static const int mShift = 0;
+};
+
+// ---------------------------------------------------
+// ------------------- No Scaling --------------------
+// ---------------------------------------------------
+
+struct NoScaling {
+
+    template<typename Sum_T>
+    Sum_T operator()(Sum_T weightedSum, unsigned int /*output*/) const
+    {
+        return weightedSum;
+    }
+
+};
diff --git a/aidge_export_cpp/templates/configuration/activation_config.jinja b/aidge_export_cpp/templates/configuration/activation_config.jinja
index df55575..84b122b 100644
--- a/aidge_export_cpp/templates/configuration/activation_config.jinja
+++ b/aidge_export_cpp/templates/configuration/activation_config.jinja
@@ -1,7 +1,7 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
-#include "kernels/rescaling.hpp"
+#include "network/rescaling_utils.hpp"
 
 {# For layer configuration -#}
 {%- set nb_data = in_chan[0] * in_height[0] * in_width[0] %}
diff --git a/aidge_export_cpp/templates/configuration/batchnorm_config.jinja b/aidge_export_cpp/templates/configuration/batchnorm_config.jinja
index 751c55f..0c0bc49 100644
--- a/aidge_export_cpp/templates/configuration/batchnorm_config.jinja
+++ b/aidge_export_cpp/templates/configuration/batchnorm_config.jinja
@@ -1,7 +1,7 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
-#include "kernels/rescaling.hpp"
+#include "network/rescaling_utils.hpp"
 
 {# For layer configuration -#}
 {% include "./_def_io.jinja" %}
diff --git a/aidge_export_cpp/templates/configuration/convolution_config.jinja b/aidge_export_cpp/templates/configuration/convolution_config.jinja
index d29f9e3..f1a57db 100644
--- a/aidge_export_cpp/templates/configuration/convolution_config.jinja
+++ b/aidge_export_cpp/templates/configuration/convolution_config.jinja
@@ -1,7 +1,7 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
-#include "kernels/rescaling.hpp"
+#include "network/rescaling_utils.hpp"
 {# For layer configuration -#}
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
diff --git a/aidge_export_cpp/templates/configuration/elemwise_config.jinja b/aidge_export_cpp/templates/configuration/elemwise_config.jinja
index 41c9c3f..f839602 100644
--- a/aidge_export_cpp/templates/configuration/elemwise_config.jinja
+++ b/aidge_export_cpp/templates/configuration/elemwise_config.jinja
@@ -1,7 +1,7 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
-#include "kernels/rescaling.hpp"
+#include "network/rescaling_utils.hpp"
 
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
diff --git a/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja b/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja
index c14a6d3..856d727 100644
--- a/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja
+++ b/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja
@@ -1,7 +1,7 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
-#include "kernels/rescaling.hpp"
+#include "network/rescaling_utils.hpp"
 {# For layer configuration -#}
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
-- 
GitLab
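
Reviewer aside (not part of the patch): below is a minimal standalone sketch
of how the FixedPointScaling functor moved into static/rescaling_utils.hpp is
used. It copies toInt64, smlal, and FixedPointScaling from the new header,
with one deliberate tweak: HALF_LO/HALF_HI are declared constexpr so this
single file compiles and links on its own. The SHIFT/COEF values are made up
for the example, not defaults produced by the export.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Copied from rescaling_utils.hpp: rebuild a 64-bit accumulator from two
// 32-bit halves, then perform a 32x32->64 multiply-accumulate.
static int64_t toInt64(uint32_t lo, uint32_t hi) {
    return (int64_t) (((uint64_t) hi) << 32ull) | ((uint64_t) lo);
}

static int64_t smlal(int32_t lhs, int32_t rhs,
                     uint32_t accumLo, uint32_t accumHi)
{
    return ((int64_t) lhs) * ((int64_t) rhs) + toInt64(accumLo, accumHi);
}

// Same shape as the patched FixedPointScaling; the constexpr members are
// the only deviation (avoids out-of-class definitions in this sketch).
template<int64_t SHIFT, int32_t COEF>
struct FixedPointScaling {
    template<typename Sum_T>
    Sum_T operator()(Sum_T weightedSum, std::size_t /*output*/) const
    {
        // Multiply by the fixed-point coefficient, pre-add 0.5 ulp
        // (HALF_LO/HALF_HI) for rounding, then shift back down.
        return smlal(weightedSum, COEF, HALF_LO, HALF_HI) >> SHIFT;
    }

    static constexpr uint32_t HALF_LO = (SHIFT > 0)
        ? (1ull << (SHIFT - 1)) & 0xFFFFFFFF : 0;
    static constexpr uint32_t HALF_HI = (SHIFT > 0)
        ? (1ull << (SHIFT - 1)) >> 32u : 0;
};

int main() {
    // Rescale an int32 accumulator by ~0.5, encoded as COEF / 2^SHIFT
    // = 16384 / 2^15 (an illustrative quantization scale).
    FixedPointScaling<15, 16384> rescale;
    int32_t acc = 1000;
    std::printf("%d\n", (int) rescale(acc, 0));  // prints 500
}

SingleShiftScaling and NoScaling expose the same operator() shape, which is
what lets activation_forward_value stay agnostic of the quantization scheme:
it simply calls rescaling(weightedSum, output) on whatever Rescaling_T the
generated layer configuration selects.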