diff --git a/aidge_export_cpp/export_registry.py b/aidge_export_cpp/export_registry.py index d3fcd9a734acaeae8b6d6d7171c3ce1b3e41cc54..ee54890a758cdd9a7de8bc25f3f403a75fe72c12 100644 --- a/aidge_export_cpp/export_registry.py +++ b/aidge_export_cpp/export_registry.py @@ -7,4 +7,6 @@ class ExportLibCpp(ExportLib): str(ROOT / "static" / "Makefile"): "", str(ROOT / "static" / "typedefs.hpp"): "dnn/include/network", str(ROOT / "static" / "utils.hpp"): "dnn/include/network", + str(ROOT / "static" / "rescaling_utils.hpp"): "dnn/include/network", + str(ROOT / "static" / "activation_utils.hpp"): "dnn/include/network", } diff --git a/aidge_export_cpp/kernels/activation.hpp b/aidge_export_cpp/kernels/activation.hpp index d6695159255e4c2c12ced879a90cbe6b01dae0eb..ee80ed275ab9edf574dee6e7d32276f00ba92412 100644 --- a/aidge_export_cpp/kernels/activation.hpp +++ b/aidge_export_cpp/kernels/activation.hpp @@ -1,61 +1,8 @@ #ifndef __AIDGE_EXPORT_CPP_KERNELS_ACTIVATION__ #define __AIDGE_EXPORT_CPP_KERNELS_ACTIVATION__ -#include <type_traits> -#include "network/typedefs.hpp" -#include "network/utils.hpp" -#include "kernels/rescaling.hpp" - -template<typename Output_T, typename T, - typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr> -__attribute__((always_inline)) inline -Output_T saturate (T value, int32_t /*sat*/) -{ - return value; -} - -template<typename Output_T, typename T, - typename std::enable_if<!std::is_floating_point<T>::value>::type* = nullptr> -__attribute__((always_inline)) inline -Output_T saturate (T value, uint32_t sat) -{ - if (std::is_unsigned<Output_T>::value) { - return clamp(value, T(0), (T(1) << sat) - 1); - } else { - return clamp(value, -(T(1) << (sat - 1)), (T(1) << (sat - 1)) - 1); - } -} - -template<typename Output_T, - typename Sum_T, - typename Rescaling_T> -__attribute__((always_inline)) inline -Output_T activation_forward_value (Sum_T weightedSum, - int output, - ActivationFunction_T func, - const Rescaling_T& __restrict 
rescaling) -{ - switch(func) { - case Linear: - case Saturation: { - break; - } - case Rectifier: { - if(weightedSum <= 0) weightedSum = 0; - break; - } - default: - // Unsupported activation function - break; - } - - // Value fixed here for now but it should be generated by - // the export module or determined by the type of Output_T - // For now only works for int8_t and uint8_t - const uint32_t NB_BITS = 8; - return saturate<Output_T>(rescaling(weightedSum, output), NB_BITS); -} - +#include "network/activation_utils.hpp" +#include "network/rescaling_utils.hpp" template<int NB_DATA, ActivationFunction_T ACTIVATION, diff --git a/aidge_export_cpp/kernels/batchnorm.hpp b/aidge_export_cpp/kernels/batchnorm.hpp index f05a047511e12f895ef88be0e402b89e5197432b..27866ab923eb8a519e684030cfb63f894c15ec98 100644 --- a/aidge_export_cpp/kernels/batchnorm.hpp +++ b/aidge_export_cpp/kernels/batchnorm.hpp @@ -2,7 +2,7 @@ #define __AIDGE_EXPORT_CPP_KERNELS_BATCHNORM__ #include "network/typedefs.hpp" -#include "kernels/activation.hpp" +#include "network/activation_utils.hpp" #include <math.h> diff --git a/aidge_export_cpp/kernels/convolution.hpp b/aidge_export_cpp/kernels/convolution.hpp index 5855654b39d5d7faf09e81735fbe80fa248ace94..0648d80f2b891c9b10cc6653649221974379db55 100644 --- a/aidge_export_cpp/kernels/convolution.hpp +++ b/aidge_export_cpp/kernels/convolution.hpp @@ -2,10 +2,10 @@ #define __AIDGE_EXPORT_CPP_KERNELS_CONVOLUTION__ #include "network/typedefs.hpp" -#include "kernels/rescaling.hpp" +#include "network/rescaling_utils.hpp" #include "network/utils.hpp" -#include "kernels/macs.hpp" -#include "kernels/activation.hpp" +#include "network/macs.hpp" +#include "network/activation_utils.hpp" template<int NB_CHANNELS, diff --git a/aidge_export_cpp/kernels/elemwise.hpp b/aidge_export_cpp/kernels/elemwise.hpp index 67ee574c1cb7d197f3c976ce80a2a63d36aec873..9468b33f6b9785f36f511b14daffe9cc4a0ed420 100644 --- a/aidge_export_cpp/kernels/elemwise.hpp +++ 
b/aidge_export_cpp/kernels/elemwise.hpp @@ -2,7 +2,7 @@ #define __AIDGE_EXPORT_CPP_KERNELS_ELEMWISE__ #include "network/typedefs.hpp" -#include "kernels/activation.hpp" +#include "network/activation_utils.hpp" // Generic function for two inputs diff --git a/aidge_export_cpp/kernels/fullyconnected.hpp b/aidge_export_cpp/kernels/fullyconnected.hpp index 60805e7b90fa29ba00c6736bb8771985aeca19b4..abaab59c355263a79c905ffeb8a2a72b6e976445 100644 --- a/aidge_export_cpp/kernels/fullyconnected.hpp +++ b/aidge_export_cpp/kernels/fullyconnected.hpp @@ -2,10 +2,10 @@ #define __AIDGE_EXPORT_CPP_KERNELS_FULLYCONNECTED__ #include "network/typedefs.hpp" -#include "kernels/rescaling.hpp" +#include "network/rescaling_utils.hpp" #include "network/utils.hpp" -#include "kernels/macs.hpp" -#include "kernels/activation.hpp" +#include "network/macs.hpp" +#include "network/activation_utils.hpp" template<int NB_CHANNELS, int CHANNELS_HEIGHT, int CHANNELS_WIDTH, diff --git a/aidge_export_cpp/kernels/matmul.hpp b/aidge_export_cpp/kernels/matmul.hpp index 4500993e02cf42fb698bc9004462800bdd3f7dc4..b507c4f1e37065a620a0ac37ed370cfa6847487d 100644 --- a/aidge_export_cpp/kernels/matmul.hpp +++ b/aidge_export_cpp/kernels/matmul.hpp @@ -2,7 +2,7 @@ #define __AIDGE_EXPORT_CPP_KERNELS_MATMUL__ #include "network/typedefs.hpp" -#include "kernels/activation.hpp" +#include "network/activation_utils.hpp" // Generic function for matmul and activation diff --git a/aidge_export_cpp/kernels/rescaling.hpp b/aidge_export_cpp/kernels/rescaling.hpp index 117a0cd611456eb9009ea37a135b45e5095e09df..a831fa8730dfa45384c6f251d7fe079caa015ce6 100644 --- a/aidge_export_cpp/kernels/rescaling.hpp +++ b/aidge_export_cpp/kernels/rescaling.hpp @@ -1,8 +1,8 @@ #ifndef __AIDGE_EXPORT_CPP_NETWORK_RESCALING__ #define __AIDGE_EXPORT_CPP_NETWORK_RESCALING__ -#include "kernels/activation.hpp" - +#include "network/rescaling_utils.hpp" +#include "network/activation_utils.hpp" template<int NB_DATA, ActivationFunction_T ACTIVATION, @@ 
-23,83 +23,4 @@ void rescaling_forward ( } } - -// --------------------------------------------------- -// ----------------- Saturate Utils ------------------ -// --------------------------------------------------- - -static int64_t toInt64(uint32_t lo, uint32_t hi) { - return (int64_t) (((uint64_t) hi) << 32ull) | ((uint64_t) lo); -} - -static int64_t smlal(int32_t lhs, int32_t rhs, - uint32_t accumLo, uint32_t accumHi) -{ - return ((int64_t) lhs) * ((int64_t) rhs) + toInt64(accumLo, accumHi); -} - -// --------------------------------------------------- -// --------------- Scaling by Shifting --------------- -// --------------------------------------------------- - -template<int SHIFT> -struct SingleShiftScaling { - - template<typename Sum_T> - Sum_T operator()(Sum_T weightedSum, size_t /*output*/) const - { - return (SHIFT != 0) ? ((weightedSum >> (SHIFT - 1)) + 1) >> 1 // Rounding - : weightedSum; - } - - // // Shift attribute - // static const int mShift = SHIFT; - // static const Scaling_T mScalingType = SingleShift; - - // // FP Attribute - // static const int32_t mScaling = 0; - // static const int64_t mFractionalBits = 0; - -}; - -// --------------------------------------------------- -// --------------- Fixed Point Scaling --------------- -// --------------------------------------------------- - -template<int64_t SHIFT, int32_t COEF> -struct FixedPointScaling { - - template<typename Sum_T> - Sum_T operator()(Sum_T weightedSum, size_t /*output*/) const - { - return smlal(weightedSum, COEF, HALF_LO, HALF_HI) >> SHIFT; - } - - // Attributes - static const uint32_t HALF_LO = (SHIFT > 0) - ? (1ull << (SHIFT - 1)) & 0xFFFFFFFF : 0; - static const uint32_t HALF_HI = (SHIFT > 0) - ? 
(1ull << (SHIFT - 1)) >> 32u : 0; - - // static const int32_t mScaling = SCALING; - // static const int64_t mFractionalBits = FRACTIONAL_BITS; - // static const Scaling_T mScalingType = FixedPoint; - // static const int mShift = 0; -}; - -// --------------------------------------------------- -// ------------------- No Scaling -------------------- -// --------------------------------------------------- - -struct NoScaling { - - template<typename Sum_T> - Sum_T operator()(Sum_T weightedSum, unsigned int /*output*/) const - { - return weightedSum; - } - -}; - - #endif // __AIDGE_EXPORT_CPP_NETWORK_RESCALING__ diff --git a/aidge_export_cpp/kernels/softmax.hpp b/aidge_export_cpp/kernels/softmax.hpp index f5472cf6d807bc2f547e58616943f6e72dccd80e..d29e9b42cba35287c71d32f211550a51b784aa12 100644 --- a/aidge_export_cpp/kernels/softmax.hpp +++ b/aidge_export_cpp/kernels/softmax.hpp @@ -3,7 +3,6 @@ #include "network/typedefs.hpp" #include "network/utils.hpp" -#include "kernels/macs.hpp" #include <type_traits> #include <cmath> diff --git a/aidge_export_cpp/operators/CppActivation.py b/aidge_export_cpp/operators/CppActivation.py index f3ffc8ed8b803113481a873976c706c89fd2d377..b8c936731651dcc239acc968288833d90395485e 100644 --- a/aidge_export_cpp/operators/CppActivation.py +++ b/aidge_export_cpp/operators/CppActivation.py @@ -50,7 +50,6 @@ class CppActivation(ExportNodeCpp): # Path to the kernel(s) files to copy self.add_kernel_to_copy(ROOT / "kernels" / "activation.hpp") - self.add_kernel_to_copy(ROOT / "kernels" / "rescaling.hpp", fwd_include=False) # Include aidge outputs within the fwd file if self.attributes["aidge_cmp"]: diff --git a/aidge_export_cpp/operators/CppBatchNorm.py b/aidge_export_cpp/operators/CppBatchNorm.py index 285a64c811f5bf940c27bef4898cd97f3e8e9fc6..091dc76c248fd733bdaf8f51754e375182927bf6 100644 --- a/aidge_export_cpp/operators/CppBatchNorm.py +++ b/aidge_export_cpp/operators/CppBatchNorm.py @@ -25,9 +25,7 @@ class CppBatchNorm(ExportNodeCpp): # Path to 
the kernel(s) files to copy self.add_kernel_to_copy(ROOT / "kernels" / "batchnorm.hpp") - self.add_kernel_to_copy(ROOT / "kernels" / "macs.hpp", fwd_include=False) - self.add_kernel_to_copy(ROOT / "kernels" / "activation.hpp", fwd_include=False) - self.add_kernel_to_copy(ROOT / "kernels" / "rescaling.hpp", fwd_include=False) + self.add_kernel_to_copy(ROOT / "static" / "macs.hpp", "include/network", fwd_include=False) # Include aidge outputs within the fwd file if self.attributes["aidge_cmp"]: diff --git a/aidge_export_cpp/operators/CppConv.py b/aidge_export_cpp/operators/CppConv.py index 5c39fe6016f0101b417d79df167afbcfba3ff785..40b75777c3d49e9051a5a80498aa04171f2560a1 100644 --- a/aidge_export_cpp/operators/CppConv.py +++ b/aidge_export_cpp/operators/CppConv.py @@ -50,9 +50,7 @@ class CppConv(ExportNodeCpp): # Path to the kernel(s) files to copy self.add_kernel_to_copy(ROOT / "kernels" / "convolution.hpp") - self.add_kernel_to_copy(ROOT / "kernels" / "macs.hpp", fwd_include=False) - self.add_kernel_to_copy(ROOT / "kernels" / "activation.hpp", fwd_include=False) - self.add_kernel_to_copy(ROOT / "kernels" / "rescaling.hpp", fwd_include=False) + self.add_kernel_to_copy(ROOT / "static" / "macs.hpp", "include/network", fwd_include=False) # Include aidge outputs within the fwd file if self.attributes["aidge_cmp"]: diff --git a/aidge_export_cpp/operators/CppElemWise.py b/aidge_export_cpp/operators/CppElemWise.py index 041bce9aa194c4843e53d38645aae4878ef638e5..e39fe1524c67b6dc814d93288fd0b92cdca0893f 100644 --- a/aidge_export_cpp/operators/CppElemWise.py +++ b/aidge_export_cpp/operators/CppElemWise.py @@ -72,8 +72,6 @@ class CppElemWise(ExportNodeCpp): # Path to the kernel(s) files to copy self.add_kernel_to_copy(ROOT / "kernels" / "elemwise.hpp") - self.add_kernel_to_copy(ROOT / "kernels" / "rescaling.hpp", fwd_include=False) - self.add_kernel_to_copy(ROOT / "kernels" / "activation.hpp", fwd_include=False) # Include aidge outputs within the fwd file if 
self.attributes["aidge_cmp"]: diff --git a/aidge_export_cpp/operators/CppFc.py b/aidge_export_cpp/operators/CppFc.py index 793d74f983ffe09dc77d841f72aae294e95bf07a..9758b1aa7d94ef5b0e07cf6bbd8007909a0b6507 100644 --- a/aidge_export_cpp/operators/CppFc.py +++ b/aidge_export_cpp/operators/CppFc.py @@ -48,9 +48,7 @@ class CppFc(ExportNodeCpp): # Path to the kernel(s) files to copy self.add_kernel_to_copy(ROOT / "kernels" / "fullyconnected.hpp") - self.add_kernel_to_copy(ROOT / "kernels" / "macs.hpp", fwd_include=False) - self.add_kernel_to_copy(ROOT / "kernels" / "activation.hpp", fwd_include=False) - self.add_kernel_to_copy(ROOT / "kernels" / "rescaling.hpp", fwd_include=False) + self.add_kernel_to_copy(ROOT / "static" / "macs.hpp", "include/network", fwd_include=False) # Include aidge outputs within the fwd file if self.attributes["aidge_cmp"]: diff --git a/aidge_export_cpp/operators/CppPool.py b/aidge_export_cpp/operators/CppPool.py index fa9aaeca8eac86ca1f313bfe286e5d205a57a439..54a4cbbe7c7d03a2abd4cd3a71073710b2495915 100644 --- a/aidge_export_cpp/operators/CppPool.py +++ b/aidge_export_cpp/operators/CppPool.py @@ -44,7 +44,6 @@ class CppPool(ExportNodeCpp): # Path to the kernel(s) files to copy self.add_kernel_to_copy(ROOT / "kernels" / "pooling.hpp") - self.add_kernel_to_copy(ROOT / "kernels" / "activation.hpp", fwd_include=False) # Include aidge outputs within the fwd file if self.attributes["aidge_cmp"]: diff --git a/aidge_export_cpp/operators/CppRescaling.py b/aidge_export_cpp/operators/CppRescaling.py index 815e5c2f862e18d5ef8a5cc221fc1c99b98721b2..96e395ac504387258a1436c7fb749408766ae173 100644 --- a/aidge_export_cpp/operators/CppRescaling.py +++ b/aidge_export_cpp/operators/CppRescaling.py @@ -48,8 +48,7 @@ class CppRescaling(ExportNodeCpp): self.include_list = [] # Path to the kernel(s) files to copy - self.add_kernel_to_copy(ROOT / "kernels" / "rescaling.hpp", fwd_include=False) - self.add_kernel_to_copy(ROOT / "kernels" / "activation.hpp", 
fwd_include=False) + self.add_kernel_to_copy(ROOT / "kernels" / "rescaling.hpp") # # Include aidge outputs within the fwd file # if self.attributes["aidge_cmp"]: diff --git a/aidge_export_cpp/operators/CppSoftmax.py b/aidge_export_cpp/operators/CppSoftmax.py index 90bcacf18e5332a8f67d7f9742e89444170fb852..14c6728e2b1e8fa5bbfadb8da9ba2955c7de8784 100644 --- a/aidge_export_cpp/operators/CppSoftmax.py +++ b/aidge_export_cpp/operators/CppSoftmax.py @@ -46,7 +46,6 @@ class CppSoftmax(ExportNodeCpp): # Path to the kernel(s) files to copy self.add_kernel_to_copy(ROOT / "kernels" / "softmax.hpp") - self.add_kernel_to_copy(ROOT / "kernels" / "macs.hpp", fwd_include=False) # Include aidge outputs within the fwd file if self.attributes["aidge_cmp"]: diff --git a/aidge_export_cpp/static/activation_utils.hpp b/aidge_export_cpp/static/activation_utils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..c6a1bcdc0ce289a384519673ca04a001a5ca9692 --- /dev/null +++ b/aidge_export_cpp/static/activation_utils.hpp @@ -0,0 +1,56 @@ +#pragma once + +#include <type_traits> +#include "network/typedefs.hpp" +#include "network/utils.hpp" +#include "network/rescaling_utils.hpp" + +template<typename Output_T, typename T, + typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr> +__attribute__((always_inline)) inline +Output_T saturate (T value, int32_t /*sat*/) +{ + return value; +} + +template<typename Output_T, typename T, + typename std::enable_if<!std::is_floating_point<T>::value>::type* = nullptr> +__attribute__((always_inline)) inline +Output_T saturate (T value, uint32_t sat) +{ + if (std::is_unsigned<Output_T>::value) { + return clamp(value, T(0), (T(1) << sat) - 1); + } else { + return clamp(value, -(T(1) << (sat - 1)), (T(1) << (sat - 1)) - 1); + } +} + +template<typename Output_T, + typename Sum_T, + typename Rescaling_T> +__attribute__((always_inline)) inline +Output_T activation_forward_value (Sum_T weightedSum, + int output, + 
ActivationFunction_T func, + const Rescaling_T& __restrict rescaling) +{ + switch(func) { + case Linear: + case Saturation: { + break; + } + case Rectifier: { + if(weightedSum <= 0) weightedSum = 0; + break; + } + default: + // Unsupported activation function + break; + } + + // Value fixed here for now but it should be generated by + // the export module or determined by the type of Output_T + // For now only works for int8_t and uint8_t + const uint32_t NB_BITS = 8; + return saturate<Output_T>(rescaling(weightedSum, output), NB_BITS); +} diff --git a/aidge_export_cpp/kernels/macs.hpp b/aidge_export_cpp/static/macs.hpp similarity index 100% rename from aidge_export_cpp/kernels/macs.hpp rename to aidge_export_cpp/static/macs.hpp diff --git a/aidge_export_cpp/static/rescaling_utils.hpp b/aidge_export_cpp/static/rescaling_utils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..4fdb321820f92f8d33e474aabc4665a99cb0d4b0 --- /dev/null +++ b/aidge_export_cpp/static/rescaling_utils.hpp @@ -0,0 +1,81 @@ +#pragma once + +#include <cstdint> +#include <cstddef> + +// --------------------------------------------------- +// ----------------- Saturate Utils ------------------ +// --------------------------------------------------- + +static int64_t toInt64(uint32_t lo, uint32_t hi) { + return (int64_t) (((uint64_t) hi) << 32ull) | ((uint64_t) lo); +} + +static int64_t smlal(int32_t lhs, int32_t rhs, + uint32_t accumLo, uint32_t accumHi) +{ + return ((int64_t) lhs) * ((int64_t) rhs) + toInt64(accumLo, accumHi); +} + +// --------------------------------------------------- +// --------------- Scaling by Shifting --------------- +// --------------------------------------------------- + +template<int SHIFT> +struct SingleShiftScaling { + + template<typename Sum_T> + Sum_T operator()(Sum_T weightedSum, size_t /*output*/) const + { + return (SHIFT != 0) ?
((weightedSum >> (SHIFT - 1)) + 1) >> 1 // Rounding + : weightedSum; + } + + // // Shift attribute + // static const int mShift = SHIFT; + // static const Scaling_T mScalingType = SingleShift; + + // // FP Attribute + // static const int32_t mScaling = 0; + // static const int64_t mFractionalBits = 0; + +}; + +// --------------------------------------------------- +// --------------- Fixed Point Scaling --------------- +// --------------------------------------------------- + +template<int64_t SHIFT, int32_t COEF> +struct FixedPointScaling { + + template<typename Sum_T> + Sum_T operator()(Sum_T weightedSum, size_t /*output*/) const + { + return smlal(weightedSum, COEF, HALF_LO, HALF_HI) >> SHIFT; + } + + // Attributes + static const uint32_t HALF_LO = (SHIFT > 0) + ? (1ull << (SHIFT - 1)) & 0xFFFFFFFF : 0; + static const uint32_t HALF_HI = (SHIFT > 0) + ? (1ull << (SHIFT - 1)) >> 32u : 0; + + // static const int32_t mScaling = SCALING; + // static const int64_t mFractionalBits = FRACTIONAL_BITS; + // static const Scaling_T mScalingType = FixedPoint; + // static const int mShift = 0; +}; + +// --------------------------------------------------- +// ------------------- No Scaling -------------------- +// --------------------------------------------------- + +struct NoScaling { + + template<typename Sum_T> + Sum_T operator()(Sum_T weightedSum, unsigned int /*output*/) const + { + return weightedSum; + } + +}; diff --git a/aidge_export_cpp/templates/configuration/activation_config.jinja b/aidge_export_cpp/templates/configuration/activation_config.jinja index df55575f2c7be140b416f01208763e9cfa7988fb..84b122ba5207f0022d72f35bb4f8e7064bf7fe32 100644 --- a/aidge_export_cpp/templates/configuration/activation_config.jinja +++ b/aidge_export_cpp/templates/configuration/activation_config.jinja @@ -1,7 +1,7 @@ {#- For name header -#} #ifndef {{ name|upper }}_LAYER_H #define {{ name|upper }}_LAYER_H -#include "kernels/rescaling.hpp" +#include "network/rescaling_utils.hpp" {# For 
layer configuration -#} {%- set nb_data = in_chan[0] * in_height[0] * in_width[0] %} diff --git a/aidge_export_cpp/templates/configuration/batchnorm_config.jinja b/aidge_export_cpp/templates/configuration/batchnorm_config.jinja index 751c55fcc8e390806ca5151caf7a3c6af5fe9da8..0c0bc49b521556eee1a4e455486caae44a2b86cb 100644 --- a/aidge_export_cpp/templates/configuration/batchnorm_config.jinja +++ b/aidge_export_cpp/templates/configuration/batchnorm_config.jinja @@ -1,7 +1,7 @@ {#- For name header -#} #ifndef {{ name|upper }}_LAYER_H #define {{ name|upper }}_LAYER_H -#include "kernels/rescaling.hpp" +#include "network/rescaling_utils.hpp" {# For layer configuration -#} {% include "./_def_io.jinja" %} diff --git a/aidge_export_cpp/templates/configuration/convolution_config.jinja b/aidge_export_cpp/templates/configuration/convolution_config.jinja index d29f9e32ec7e47101085adfc0b42bbcb0537f03f..f1a57db1b7511d270c3ab7d62a87008735a12df3 100644 --- a/aidge_export_cpp/templates/configuration/convolution_config.jinja +++ b/aidge_export_cpp/templates/configuration/convolution_config.jinja @@ -1,7 +1,7 @@ {#- For name header -#} #ifndef {{ name|upper }}_LAYER_H #define {{ name|upper }}_LAYER_H -#include "kernels/rescaling.hpp" +#include "network/rescaling_utils.hpp" {# For layer configuration -#} {% include "./_def_io.jinja" %} {% include "./_meminfo.jinja" %} diff --git a/aidge_export_cpp/templates/configuration/elemwise_config.jinja b/aidge_export_cpp/templates/configuration/elemwise_config.jinja index 41c9c3fd981a9bbcbd3b4809472d08a9c406f1d6..f839602fff707bc4dc30b11835846c977130cab4 100644 --- a/aidge_export_cpp/templates/configuration/elemwise_config.jinja +++ b/aidge_export_cpp/templates/configuration/elemwise_config.jinja @@ -1,7 +1,7 @@ {#- For name header -#} #ifndef {{ name|upper }}_LAYER_H #define {{ name|upper }}_LAYER_H -#include "kernels/rescaling.hpp" +#include "network/rescaling_utils.hpp" {% include "./_def_io.jinja" %} {% include "./_meminfo.jinja" %} diff 
--git a/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja b/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja index c14a6d3f5f6b15b9b23cb99a12039c2026b10b26..856d727abc11ceb6f914e9d71d286ef5882322d6 100644 --- a/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja +++ b/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja @@ -1,7 +1,7 @@ {#- For name header -#} #ifndef {{ name|upper }}_LAYER_H #define {{ name|upper }}_LAYER_H -#include "kernels/rescaling.hpp" +#include "network/rescaling_utils.hpp" {# For layer configuration -#} {% include "./_def_io.jinja" %} {% include "./_meminfo.jinja" %}