Commit ab8cc839 authored by Olivier BICHLER

Added Conv 4-bits testing

parent 96880761
2 merge requests: !94 Draft: [Feat] Support dequantization of an exported model, !69 Add support for lower than 8-bits precision
Pipeline #81103 failed
Showing 169 additions and 116 deletions
 #ifndef __AIDGE_EXPORT_CPP_RESCALING_UTILS_HPP__
 #define __AIDGE_EXPORT_CPP_RESCALING_UTILS_HPP__
+#include <cstdint>
+#include <cstddef>
 // ---------------------------------------------------
 // ----------------- Saturate Utils ------------------
 // ---------------------------------------------------
...
@@ -5,6 +5,7 @@
 #include <type_traits>
 #include <limits>
 #include <cstdint>
+#include <cstddef>
 typedef enum {
     Tanh,
@@ -125,6 +126,44 @@ constexpr void pack_rev_set(T& data, int i, decltype(data.fields.op0) val) {
     }
 }
+// ----------------------------------------------------------------------------
+// -------------- Custom bit-width types operator overloading -----------------
+// ----------------------------------------------------------------------------
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED>& operator+=(packed_bitint<1, N_BITS, SIGNED>& d, T rhs)
+    {d.value += static_cast<decltype(d.value)>(rhs); return d;}
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED> operator+(packed_bitint<1, N_BITS, SIGNED> d, T rhs)
+    {d += rhs; return d;}
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED>& operator-=(packed_bitint<1, N_BITS, SIGNED>& d, T rhs)
+    {d.value -= static_cast<decltype(d.value)>(rhs); return d;}
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED> operator-(packed_bitint<1, N_BITS, SIGNED> d, T rhs)
+    {d -= rhs; return d;}
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED>& operator*=(packed_bitint<1, N_BITS, SIGNED>& d, T rhs)
+    {d.value *= static_cast<decltype(d.value)>(rhs); return d;}
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED> operator*(packed_bitint<1, N_BITS, SIGNED> d, T rhs)
+    {d *= rhs; return d;}
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED>& operator/=(packed_bitint<1, N_BITS, SIGNED>& d, T rhs)
+    {d.value /= static_cast<decltype(d.value)>(rhs); return d;}
+template <std::size_t N_BITS, bool SIGNED, typename T>
+constexpr packed_bitint<1, N_BITS, SIGNED> operator/(packed_bitint<1, N_BITS, SIGNED> d, T rhs)
+    {d /= rhs; return d;}
 // ----------------------------------------------------------------------------
 // ---------------- Custom bit-width types specializations --------------------
 // ----------------------------------------------------------------------------
...
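These overloads let generated kernels apply ordinary `+=`, `-=`, `*=` and `/=` to a single-element `packed_bitint`, forwarding the arithmetic to its underlying storage. A minimal sketch of the idea follows, using a simplified stand-in struct: the bit-field layout here is an assumption made only to keep the example self-contained, and the real `packed_bitint` in the export headers differs.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified stand-in for the single-element packed_bitint<1, N_BITS, SIGNED>
// (illustrative assumption only): one N_BITS-wide bit-field stored in an int8_t.
template <std::size_t N_BITS, bool SIGNED>
struct toy_bitint {
    int8_t value : N_BITS;
};

// Same pattern as the overloads added above: cast the right-hand side to the
// storage type and let the assignment truncate the result back to N_BITS.
template <std::size_t N_BITS, bool SIGNED, typename T>
constexpr toy_bitint<N_BITS, SIGNED>& operator+=(toy_bitint<N_BITS, SIGNED>& d, T rhs)
    { d.value += static_cast<decltype(d.value)>(rhs); return d; }

int main() {
    toy_bitint<4, true> acc{0};  // 4-bit signed element, representable range [-8, 7]
    acc += 5;                    // plain integer right-hand side, as a kernel would use
    acc += 5;                    // 10 does not fit in 4 bits and wraps (typically to -6)
    std::printf("%d\n", static_cast<int>(acc.value));
    return 0;
}
```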
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
 #include "network/rescaling_utils.hpp"
 {# For layer configuration -#}
...
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
 #include "network/rescaling_utils.hpp"
 {# For layer configuration -#}
...
@@ -2,6 +2,8 @@
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+#include <cstddef>
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
...
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
 #include "network/rescaling_utils.hpp"
 {# For layer configuration -#}
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
...
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+#include <cstddef>
 #include "network/rescaling_utils.hpp"
 {% include "./_def_io.jinja" %}
...
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
 #include "network/rescaling_utils.hpp"
 {# For layer configuration -#}
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
...
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+#include <cstddef>
 #include "network/rescaling_utils.hpp"
 {# For layer configuration -#}
...
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+#include <cstddef>
 {# For layer configuration -#}
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
...
@@ -2,6 +2,8 @@
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+#include <cstddef>
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
 {# For layer configuration -#}
...
@@ -2,6 +2,8 @@
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+#include <cstddef>
 {# For layer configuration -#}
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
...
 {#- For libraries #}
 #include <stdint.h>
+#include "network/typedefs.hpp"
 {#- Design dimensions of the array #}
 {%- set dims_str = "" %}
...
 {#- For libraries -#}
 #include <stdint.h>
+#include "network/typedefs.hpp"
-{%- set format_map = {
-    "int8_t": "%4d",
-    "int16_t": "%6d",
-    "int32_t": "%6d",
-    "int64_t": "%8d",
-    "uint8_t": "%4d",
-    "uint16_t": "%6d",
-    "uint32_t": "%6d",
-    "uint64_t": "%8d",
-    "float": "%.9f",
-    "half_float::half": "%.9f",
-    "double": "%.17f"
-} %}
 {# Design header of the array -#}
 static const {{ dtype }} {{ name }}[{{ dims | join("*") }}] __attribute__((section(".nn_data"))) =
@@ -21,7 +8,7 @@ static const {{ dtype }} {{ name }}[{{ dims | join("*") }}] __attribute__((secti
 {# 1D #}
 {%- if dims | length == 1 -%}
 {%- for x in range(dims[0]) -%}
-{{ format_map[dtype] | format(values[x]) }},
+{{ "{:>20}".format(values[x]) }},
 {%- endfor -%}
 {%- endif -%}
@@ -30,7 +17,7 @@ static const {{ dtype }} {{ name }}[{{ dims | join("*") }}] __attribute__((secti
 {%- for y in range(dims[0]) %}
 {{ ' ' }}
 {%- for x in range(dims[1]) -%}
-{{ format_map[dtype] | format(values[y][x]) }},
+{{ "{:>20}".format(values[y][x]) }},
 {%- endfor %}
 {%- endfor -%}
 {%- endif -%}
@@ -42,7 +29,7 @@ static const {{ dtype }} {{ name }}[{{ dims | join("*") }}] __attribute__((secti
 {%- for y in range(dims[1]) %}
 {{ ' ' }}
 {%- for x in range(dims[2]) -%}
-{{ format_map[dtype] | format(values[z][y][x]) }},
+{{ "{:>20}".format(values[z][y][x]) }},
 {%- endfor -%}
 {%- endfor %}
 {%- endfor -%}
@@ -57,7 +44,7 @@ static const {{ dtype }} {{ name }}[{{ dims | join("*") }}] __attribute__((secti
 {%- for y in range(dims[2]) %}
 {{ ' ' }}
 {%- for x in range(dims[3]) -%}
-{{ format_map[dtype] | format(values[n][z][y][x]) }},
+{{ "{:>20}".format(values[n][z][y][x]) }},
 {%- endfor -%}
 {%- endfor %}
 {%- endfor %}
...
@@ -41,31 +41,22 @@ def initFiller(model):
     else:
         pass
-def _np_init(shape, dtype=np.float32):
+def _np_init(shape, range=[-1, 1], dtype=aidge_core.dtype.float32):
     """
-    Generates a NumPy array with the given shape, filled with random values between -1 and 1
-    with a step of 0.1.
+    Generates a NumPy array with the given shape, filled with random values in
+    given range. A step of 0.1 is used for floating point type.
     :param shape: Tuple of dimensions for the array
     :param dtype: Data type of the output array (default: np.float32)
     :return: A NumPy array with the given shape and dtype
     """
     total_elements = reduce(operator.mul, shape, 1)
-    data = (np.random.randint(0, 21, size=total_elements) - 10) / 10.0
-    return data.reshape(shape).astype(dtype)
-
-def _np_init_ones(shape, default_value=0.01, dtype=np.float32):
-    """
-    Generates a NumPy array with the given shape, filled with random values between -1 and 1
-    with a step of 0.1.
-    :param shape: Tuple of dimensions for the array
-    :param dtype: Data type of the output array (default: np.float32)
-    :return: A NumPy array with the given shape and dtype
-    """
-    total_elements = reduce(operator.mul, shape, 1)
-    data = np.ones(total_elements) * default_value
-    return data.reshape(shape).astype(dtype)
+    data = np.random.randint(range[0], range[1], size=total_elements)
+    if aidge_core.is_floating_point(dtype):
+        data = (np.random.randint(10 * range[0], 10 * range[1], size=total_elements)) / 10.0
+        return data.reshape(shape).astype(np.float32)
+    else:
+        return data.reshape(shape).astype(np.int32)
 # Global dictionary to store test reports
@@ -103,7 +94,7 @@ class test_operator_export(unittest.TestCase):
         # shutil.rmtree(self.export_folder, ignore_errors=True)
-    def unit_test_export(self, graph_view, op_name, in_dims, random_inputs=True, random_weights=True, default_value=0.01):
+    def unit_test_export(self, graph_view, op_name, in_dims, dtypes=[aidge_core.dtype.float32], random_weights=True, default_value=0.01):
         """
         TODO:
         * Handle multiple dataformat
@@ -118,75 +109,88 @@ class test_operator_export(unittest.TestCase):
         graph_view.set_backend("cpu")
         graph_view.forward_dims(dims=in_dims)
-        for node in graph_view.get_nodes():
-            if node.type() == "Producer":
-                prod_op = node.get_operator()
-                value = prod_op.get_output(0)
-                if (random_weights):
-                    tensor = aidge_core.Tensor(_np_init(value.dims()))
-                    node.get_operator().set_output(0, tensor)
-                elif default_value != None:
-                    aidge_core.constant_filler(value, default_value)
-        # Fuse operators to match implemented cpp kernels
-        cpp_fuse_to_metaops(graph_view)
-        scheduler = aidge_core.SequentialScheduler(graph_view)
-        if (random_inputs):
-            in_tensor = [aidge_core.Tensor(_np_init(in_dim)) for in_dim in in_dims]
-        else:
-            in_tensor = [aidge_core.Tensor(_np_init_ones(in_dim, default_value)) for in_dim in in_dims]
-        scheduler.forward(data=in_tensor)
-        # Name the metaops
-        set_nodes_names(scheduler)
-        # Note the convention ``<op_name>_test`` is useful for gitignore to avoid pushing generated export by accident.
-        export_folder = op_name + "_test"
-        self.export_folder = export_folder
-        shutil.rmtree(export_folder, ignore_errors=True)
-        aidge_core.export_utils.generate_main_compare_cpp(export_folder, graph_view)
-        graph_view.set_backend(aidge_export_cpp.ExportLibCpp._name)
-        aidge_core.adapt_to_backend(graph_view)
-        graph_view.forward_dims(dims=in_dims)
-        graph_view.save(export_folder + "/graph")
-        scheduler = aidge_core.SequentialScheduler(graph_view)
-        scheduler.generate_scheduling()
-        # Export the model in C++ standalone
-        aidge_core.export_utils.scheduler_export(
-            scheduler,
-            export_folder,
-            aidge_export_cpp.ExportLibCpp,
-            memory_manager=aidge_core.mem_info.generate_optimized_memory_info,
-            memory_manager_args={"stats_folder": f"{export_folder}/stats", "wrapping": False }
-        )
-        print("COMPILATION")
-        try:
-            for std_line in run_command(["make"], cwd=export_folder):
-                print(std_line, end="")
-        except subprocess.CalledProcessError as e:
-            self.assertTrue(0, f"An error occurred: {e}\nFailed to generate export.")
-        print("RUN EXPORT")
-        cmd_gen = run_command(["./bin/run_export"], cwd=export_folder)
-        try:
-            for std_line in cmd_gen:
-                print(std_line, end="")
-        except subprocess.CalledProcessError as e:
-            self.assertTrue(0, f"An error occurred: {e}\nFailed to run export for comparison.")
-        return_code = next(cmd_gen, 0)
-        self.assertFalse(return_code, f"Export result are different than backend ones: {return_code} errors found.")
+        for dtype in dtypes:
+            graph = graph_view.clone()
+            range = [-1, 1]
+            if aidge_core.is_integer(dtype):
+                graph.set_datatype(aidge_core.dtype.int32)
+                # Make sure there won't be overflow, as we don't apply any rescaling!
+                nb_bits = aidge_core.dtype_bit_width(dtype) // 2 - 1
+                if aidge_core.is_unsigned(dtype):
+                    range = [0, 2**nb_bits - 1]
+                else:
+                    range = [-2**(nb_bits - 1), 2**(nb_bits - 1) - 1]
+                if default_value != None:
+                    default_value = 1
+            for node in graph.get_nodes():
+                if node.type() == "Producer":
+                    prod_op = node.get_operator()
+                    value = prod_op.get_output(0)
+                    if (random_weights):
+                        tensor = aidge_core.Tensor(_np_init(value.dims(), range))
+                        tensor.set_datatype(dtype)
+                        node.get_operator().set_output(0, tensor)
+                    elif default_value != None:
+                        aidge_core.constant_filler(value, default_value)
+            # Fuse operators to match implemented cpp kernels
+            cpp_fuse_to_metaops(graph)
+            scheduler = aidge_core.SequentialScheduler(graph)
+            in_tensor = [aidge_core.Tensor(_np_init(in_dim, range)) for in_dim in in_dims]
+            [t.set_datatype(dtype) for t in in_tensor]
+            scheduler.forward(data=in_tensor)
+            # Name the metaops
+            set_nodes_names(scheduler)
+            # Note the convention ``<op_name>_test`` is useful for gitignore to avoid pushing generated export by accident.
+            export_folder = op_name + "_" + str(dtype) + "_test"
+            self.export_folder = export_folder
+            shutil.rmtree(export_folder, ignore_errors=True)
+            aidge_core.export_utils.generate_main_compare_cpp(export_folder, graph)
+            graph.set_backend(aidge_export_cpp.ExportLibCpp._name)
+            aidge_core.adapt_to_backend(graph)
+            graph.forward_dims(dims=in_dims)
+            graph.save(export_folder + "/graph")
+            scheduler = aidge_core.SequentialScheduler(graph)
+            scheduler.generate_scheduling()
+            # Export the model in C++ standalone
+            aidge_core.export_utils.scheduler_export(
+                scheduler,
+                export_folder,
+                aidge_export_cpp.ExportLibCpp,
+                memory_manager=aidge_core.mem_info.generate_optimized_memory_info,
+                memory_manager_args={"stats_folder": f"{export_folder}/stats", "wrapping": False }
+            )
+            print("COMPILATION")
+            try:
+                for std_line in run_command(["make"], cwd=export_folder):
+                    print(std_line, end="")
+            except subprocess.CalledProcessError as e:
+                self.assertTrue(0, f"An error occurred: {e}\nFailed to generate export.")
+            print("RUN EXPORT")
+            cmd_gen = run_command(["./bin/run_export"], cwd=export_folder)
+            try:
+                for std_line in cmd_gen:
+                    print(std_line, end="")
+            except subprocess.CalledProcessError as e:
+                self.assertTrue(0, f"An error occurred: {e}\nFailed to run export for comparison.")
+            return_code = next(cmd_gen, 0)
+            self.assertFalse(return_code, f"Export result are different than backend ones: {return_code} errors found.")
     def test_FC_flatten_in(self):
@@ -618,7 +622,7 @@
             aidge_core.Conv2D(in_channels=3, out_channels=3, kernel_dims=(3, 3), name="conv")
         ])
-        self.unit_test_export(model, "Conv2D", [[1, 3, 12, 12]])
+        self.unit_test_export(model, "Conv2D", [[1, 3, 12, 12]], dtypes=[aidge_core.dtype.float32, aidge_core.dtype.int32, aidge_core.dtype.int8, aidge_core.dtype.int4])
     def test_conv2D_asym(self):
         print("Conv2D_asym")
@@ -788,7 +792,7 @@
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
-        self.unit_test_export(model, "BatchNorm2D", [[1, 1, 5, 5]], False, False)
+        self.unit_test_export(model, "BatchNorm2D", [[1, 1, 5, 5]], random_weights=False)
     def test_batchnorm2D_Larger(self):
         print("BatchNormalization2DLarger")
@@ -796,7 +800,7 @@
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
-        self.unit_test_export(model, "BatchNorm2DLarger", [[1, 1, 5, 7]], False, False)
+        self.unit_test_export(model, "BatchNorm2DLarger", [[1, 1, 5, 7]], random_weights=False)
     def test_batchnorm2D_Higher(self):
         print("BatchNormalization2DHigher")
@@ -804,7 +808,7 @@
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
-        self.unit_test_export(model, "BatchNorm2DHigher", [[1, 1, 7, 5]], False, False)
+        self.unit_test_export(model, "BatchNorm2DHigher", [[1, 1, 7, 5]], random_weights=False)
     def test_batchnorm2D_Denser(self):
         print("BatchNormalization2DDenser")
@@ -812,7 +816,7 @@
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
-        self.unit_test_export(model, "BatchNorm2DDenser", [[1, 3, 5, 7]], False, False)
+        self.unit_test_export(model, "BatchNorm2DDenser", [[1, 3, 5, 7]], random_weights=False)
     def test_batchnorm2D_with_bigger_batch_size(self):
         print("BatchNormalization2DBiggerBatchSize")
@@ -820,7 +824,7 @@
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
-        self.unit_test_export(model, "BatchNormalization2DBiggerBatchSize", [[4, 3, 5, 7]], False, False)
+        self.unit_test_export(model, "BatchNormalization2DBiggerBatchSize", [[4, 3, 5, 7]], random_weights=False)
     def test_batchnorm2D_Larger(self):
@@ -829,7 +833,7 @@
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
-        self.unit_test_export(model, "BatchNorm2DLarger", [[1, 1, 5, 7]], False, False)
+        self.unit_test_export(model, "BatchNorm2DLarger", [[1, 1, 5, 7]], random_weights=False)
     def test_batchnorm2D_Higher(self):
         print("BatchNormalization2DHigher")
@@ -837,7 +841,7 @@
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
-        self.unit_test_export(model, "BatchNorm2DHigher", [[1, 1, 7, 5]], False, False)
+        self.unit_test_export(model, "BatchNorm2DHigher", [[1, 1, 7, 5]], random_weights=False)
     def test_batchnorm2D_Denser(self):
         print("BatchNormalization2DDenser")
@@ -845,7 +849,7 @@
             aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
         ])
-        self.unit_test_export(model, "BatchNorm2DDenser", [[1, 3, 5, 7]], False, False)
+        self.unit_test_export(model, "BatchNorm2DDenser", [[1, 3, 5, 7]], random_weights=False)
     def test_Conv(self):
         print("Conv")
@@ -925,7 +929,7 @@
             aidge_core.Conv2D(2, 2, [3, 3], name="InputNode")
         ])
         initFiller(model)
-        self.unit_test_export(model, "Conv2", [[1, 2, 9, 9]], random_inputs=True, random_weights=False, default_value=None)
+        self.unit_test_export(model, "Conv2", [[1, 2, 9, 9]], random_weights=False, default_value=None)
     def test_erf(self):
         print("Erf")
...
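The loop added to `unit_test_export` derives the admissible weight and input range from the target precision so that the exported kernels, which apply no rescaling, cannot overflow their accumulator. A minimal standalone sketch of that heuristic is given below; `operand_range` is an illustrative name rather than part of the aidge_core API, and the arithmetic simply mirrors the `nb_bits` computation in the diff above.

```cpp
#include <cstdio>
#include <utility>

// Illustrative re-implementation (an assumption, not the aidge_core API) of the
// range heuristic used by the new unit_test_export loop: keep every operand to
// roughly half the accumulator width, minus one bit, so that unrescaled
// multiply-accumulates stay representable.
static std::pair<long, long> operand_range(int bit_width, bool is_unsigned) {
    const int nb_bits = bit_width / 2 - 1;
    if (is_unsigned)
        return {0L, (1L << nb_bits) - 1};
    return {-(1L << (nb_bits - 1)), (1L << (nb_bits - 1)) - 1};
}

int main() {
    // int4 -> nb_bits = 1 -> [-1, 0]; int8 -> nb_bits = 3 -> [-4, 3]; int32 -> [-16384, 16383]
    for (int bits : {4, 8, 32}) {
        const auto r = operand_range(bits, /*is_unsigned=*/false);
        std::printf("int%d operands drawn from [%ld, %ld]\n", bits, r.first, r.second);
    }
    return 0;
}
```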