diff --git a/CMakeLists.txt b/CMakeLists.txt index 66ef8ff28503a70de816d546b72e21d8528f0e33..729853eec605b9ad7baee163557699368f1c9103 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -64,6 +64,14 @@ if(NOT $ENV{AIDGE_INSTALL} STREQUAL "") endif() find_package(aidge_core REQUIRED) +find_package(OpenSSL QUIET) +if(OpenSSL_FOUND) + message(STATUS "OpenSSL found: ${OPENSSL_VERSION}") + add_definitions(-DWITH_OPENSSL) +else() + message(WARNING "OpenSSL not found, SHA256 will not be available.") +endif() + ############################################## # Create target and set properties file(GLOB_RECURSE src_files "src/*.cpp") @@ -112,6 +120,12 @@ target_include_directories(${module_name} ${CMAKE_CURRENT_SOURCE_DIR}/src ) +set(AIDGE_REQUIRES_OPENSSL FALSE) +if(OpenSSL_FOUND) + target_link_libraries(${module_name} PRIVATE OpenSSL::SSL OpenSSL::Crypto) + set(AIDGE_REQUIRES_OPENSSL TRUE) +endif() + target_compile_features(${module_name} PRIVATE cxx_std_14) target_compile_options(${module_name} PRIVATE diff --git a/aidge_backend_cpu-config.cmake.in b/aidge_backend_cpu-config.cmake.in index d8e1372bc8a7b79bd09c79b654af4291c995ac58..7582102c24a551db7f346e1b614d7dcaa4940b1d 100644 --- a/aidge_backend_cpu-config.cmake.in +++ b/aidge_backend_cpu-config.cmake.in @@ -2,6 +2,10 @@ include(CMakeFindDependencyMacro) find_dependency(aidge_core) +set(AIDGE_REQUIRES_OPENSSL @AIDGE_REQUIRES_OPENSSL@) +if (AIDGE_REQUIRES_OPENSSL) + find_dependency(OpenSSL) +endif() include(CMakeFindDependencyMacro) diff --git a/aidge_backend_cpu/unit_tests/test_scheduler.py b/aidge_backend_cpu/unit_tests/test_scheduler.py index 494f34565ffd644971c97e9adfa06709dee9e36d..b60ff3f01307f22bd8bf635df2d776cb1267d0f5 100644 --- a/aidge_backend_cpu/unit_tests/test_scheduler.py +++ b/aidge_backend_cpu/unit_tests/test_scheduler.py @@ -57,9 +57,9 @@ class test_scheduler(unittest.TestCase): scheduler = aidge_core.SequentialScheduler(graph_view) scheduler.generate_scheduling() - self.assertEqual(len(scheduler.get_static_scheduling()), 10) + self.assertEqual(len(scheduler.get_sequential_static_scheduling()), 10) # Do not care about the order of execution of the producers - self.assertListEqual([i.name() for i in scheduler.get_static_scheduling()[-3:]], EXPECTED_SCHEDULE) + self.assertListEqual([i.name() for i in scheduler.get_sequential_static_scheduling()[-3:]], EXPECTED_SCHEDULE) def test_parallel_scheduling(self): @@ -83,9 +83,9 @@ class test_scheduler(unittest.TestCase): scheduler = aidge_core.SequentialScheduler(graph_view) scheduler.generate_scheduling() - self.assertEqual(len(scheduler.get_static_scheduling()), 11) + self.assertEqual(len(scheduler.get_sequential_static_scheduling()), 11) # Do not care about the order of execution of the producers - self.assertTrue([i.name() for i in scheduler.get_static_scheduling()[-4:]] in EXPECTED_SCHEDULE) + self.assertTrue([i.name() for i in scheduler.get_sequential_static_scheduling()[-4:]] in EXPECTED_SCHEDULE) if __name__ == '__main__': unittest.main() diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp index ffc03ae5d6cb1d44637bc223ce4099af88f08070..80574b4a46fef0c843c9511836f162e02de5aab3 100644 --- a/include/aidge/backend/cpu.hpp +++ b/include/aidge/backend/cpu.hpp @@ -28,6 +28,7 @@ #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp" #include "aidge/backend/cpu/operator/ConvImpl.hpp" #include "aidge/backend/cpu/operator/ConstantOfShapeImpl.hpp" +#include "aidge/backend/cpu/operator/CryptoHashImpl.hpp" #include "aidge/backend/cpu/operator/DivImpl.hpp" #include 
"aidge/backend/cpu/operator/EqualImpl.hpp" #include "aidge/backend/cpu/operator/ErfImpl.hpp" @@ -40,6 +41,7 @@ #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp" #include "aidge/backend/cpu/operator/LnImpl.hpp" #include "aidge/backend/cpu/operator/MatMulImpl.hpp" +#include "aidge/backend/cpu/operator/ModImpl.hpp" #include "aidge/backend/cpu/operator/MulImpl.hpp" #include "aidge/backend/cpu/operator/PadImpl.hpp" #include "aidge/backend/cpu/operator/PaddedConvImpl.hpp" diff --git a/include/aidge/backend/cpu/operator/CryptoHashImpl.hpp b/include/aidge/backend/cpu/operator/CryptoHashImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..d7f07f999d47a3a6c88ba921bc91766a447de48a --- /dev/null +++ b/include/aidge/backend/cpu/operator/CryptoHashImpl.hpp @@ -0,0 +1,36 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_CRYPTOHASHIMPL_H_ +#define AIDGE_CPU_OPERATOR_CRYPTOHASHIMPL_H_ + +#include "aidge/backend/cpu/operator/OperatorImpl.hpp" +#include "aidge/operator/CryptoHash.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" +#include "aidge/backend/cpu/data/GetCPUPtr.h" +#include <memory> +#include <vector> + +#ifdef WITH_OPENSSL +#include <openssl/sha.h> + +namespace Aidge { +// Operator implementation entry point for the backend +using CryptoHashImpl_cpu = OperatorImpl_cpu<CryptoHash_Op, + void(const std::size_t, const void*, void*)>; + +// Implementation entry point registration to Operator +REGISTRAR(CryptoHash_Op, "cpu", Aidge::CryptoHashImpl_cpu::create); +} // namespace Aidge +#endif + +#endif /* AIDGE_CPU_OPERATOR_CRYPTOHASHIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/CryptoHashImpl_kernels.hpp b/include/aidge/backend/cpu/operator/CryptoHashImpl_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..cd596b6905988050666c7c2dff15a4cf8078e52a --- /dev/null +++ b/include/aidge/backend/cpu/operator/CryptoHashImpl_kernels.hpp @@ -0,0 +1,52 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0.
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_CRYPTOHASHIMPL_KERNELS_H_ +#define AIDGE_CPU_OPERATOR_CRYPTOHASHIMPL_KERNELS_H_ + +#include "aidge/utils/Registrar.hpp" + +#include "aidge/backend/cpu/operator/CryptoHashImpl.hpp" + +#ifdef WITH_OPENSSL +namespace Aidge { +template <class I, class O> +void CryptoHashImpl_cpu_forward_kernel(std::size_t inputLength, + const void* input_, + void* output_) { + + const I* input = static_cast<const I*>(input_); + O* output = static_cast<O*>(output_); + + // output must be at least SHA256_DIGEST_LENGTH bytes long + SHA256(reinterpret_cast<const uint8_t*>(input), inputLength * sizeof(I), reinterpret_cast<uint8_t*>(output)); +} + +// Kernels registration to implementation entry point +REGISTRAR(CryptoHashImpl_cpu, + {{DataType::UInt8, DataFormat::Any}, {DataType::UInt8}}, + {ProdConso::inPlaceModel, Aidge::CryptoHashImpl_cpu_forward_kernel<uint8_t, uint8_t>, nullptr}); +REGISTRAR(CryptoHashImpl_cpu, + {{DataType::UInt8, DataFormat::Any}, {DataType::UInt64}}, + {ProdConso::inPlaceModel, Aidge::CryptoHashImpl_cpu_forward_kernel<uint8_t, uint64_t>, nullptr}); +REGISTRAR(CryptoHashImpl_cpu, + {{DataType::Float32, DataFormat::Any}, {DataType::UInt8}}, + {ProdConso::inPlaceModel, Aidge::CryptoHashImpl_cpu_forward_kernel<float, uint8_t>, nullptr}); +REGISTRAR(CryptoHashImpl_cpu, + {{DataType::Float32, DataFormat::Any}, {DataType::UInt64}}, + {ProdConso::inPlaceModel, Aidge::CryptoHashImpl_cpu_forward_kernel<float, uint64_t>, nullptr}); +REGISTRAR(CryptoHashImpl_cpu, + {{DataType::Float64, DataFormat::Any}, {DataType::UInt8}}, + {ProdConso::inPlaceModel, Aidge::CryptoHashImpl_cpu_forward_kernel<double, uint8_t>, nullptr}); +} // namespace Aidge +#endif + +#endif /* AIDGE_CPU_OPERATOR_CRYPTOHASHIMPL_KERNELS_H_ */ diff --git a/include/aidge/backend/cpu/operator/ModImpl.hpp b/include/aidge/backend/cpu/operator/ModImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..96ff599b6633c66aad411b484e292b6a076e3090 --- /dev/null +++ b/include/aidge/backend/cpu/operator/ModImpl.hpp @@ -0,0 +1,33 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0.
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_MODIMPL_H_ +#define AIDGE_CPU_OPERATOR_MODIMPL_H_ + +#include <memory> +#include <tuple> +#include <vector> + +#include "aidge/backend/cpu/operator/OperatorImpl.hpp" +#include "aidge/operator/Mod.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +namespace Aidge { +// Operator implementation entry point for the backend +using ModImpl_cpu = OperatorImpl_cpu<Mod_Op, + void(bool, const std::size_t, const std::size_t, const std::size_t, const void*, const void*,void*)>; + +// Implementation entry point registration to Operator +REGISTRAR(Mod_Op, "cpu", Aidge::ModImpl_cpu::create); +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_MODIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/ModImpl_kernels.hpp b/include/aidge/backend/cpu/operator/ModImpl_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..15d18bf4de5cee7e7d75817a2ccf425f5ff41971 --- /dev/null +++ b/include/aidge/backend/cpu/operator/ModImpl_kernels.hpp @@ -0,0 +1,83 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_MODIMPL_KERNELS_H_ +#define AIDGE_CPU_OPERATOR_MODIMPL_KERNELS_H_ + +#include <numeric> // std::accumulate +#include <cstddef> // std::size_t +#include <cstdint> // std::int32_t, std::uint64_t +#include <functional> // std::multiplies +#include <cmath> // std::fmod +#include <type_traits> // std::enable_if, std::is_integral + +#include "aidge/utils/ErrorHandling.hpp" // AIDGE_THROW_OR_ABORT +#include "aidge/utils/Registrar.hpp" + +#include "aidge/backend/cpu/data/Broadcasting.hpp" +#include "aidge/backend/cpu/operator/ModImpl.hpp" + +namespace Aidge { + +template <typename T, + typename std::enable_if<std::is_integral<T>::value>::type* = nullptr> +static inline T modulus(T a, T b) { + return a % b; +} + +template <typename T, + typename std::enable_if<!std::is_integral<T>::value>::type* = nullptr> +static inline T modulus(T /*a*/, T /*b*/) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Mod Operator with fmod attribute set to false only supports integer types."); +} + +template <class I1, class I2, class O> +constexpr void ModImpl_cpu_forward_kernel(bool fmod, + const std::size_t input1size_, + const std::size_t input2size_, + const std::size_t output1size_, + const void* input1_, + const void* input2_, + void* output_) { + + const I1* input_1 = static_cast<const I1*>(input1_); + const I2* input_2 = static_cast<const I2*>(input2_); + O* output = static_cast<O*>(output_); + +// suppose values are contiguous in memory + for (std::size_t i = 0; i < output1size_; ++i) { + const std::size_t in1_id = (input1size_ != 1) ? i : 0; + const std::size_t in2_id = (input2size_ != 1) ?
i : 0; + if (fmod) { + output[i] = static_cast<O>(std::fmod(input_1[in1_id], input_2[in2_id])); + } + else { + output[i] = static_cast<O>(modulus(input_1[in1_id], input_2[in2_id])); + } + } +} + +// Kernels registration to implementation entry point +REGISTRAR(ModImpl_cpu, + {DataType::Float32}, + {ProdConso::inPlaceModel, Aidge::ModImpl_cpu_forward_kernel<float, float, float>, nullptr}); +REGISTRAR(ModImpl_cpu, + {DataType::Float64}, + {ProdConso::inPlaceModel, Aidge::ModImpl_cpu_forward_kernel<double, double, double>, nullptr}); +REGISTRAR(ModImpl_cpu, + {DataType::Int32}, + {ProdConso::inPlaceModel, Aidge::ModImpl_cpu_forward_kernel<std::int32_t, std::int32_t, std::int32_t>, nullptr}); +REGISTRAR(ModImpl_cpu, + {DataType::UInt64}, + {ProdConso::inPlaceModel, Aidge::ModImpl_cpu_forward_kernel<std::uint64_t, std::uint64_t, std::uint64_t>, nullptr}); +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_MODIMPL_KERNELS_H_ */ diff --git a/src/operator/CryptoHashImpl.cpp b/src/operator/CryptoHashImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..10d82dd05408733b898da0c8d3edb38df76dbe1a --- /dev/null +++ b/src/operator/CryptoHashImpl.cpp @@ -0,0 +1,46 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <cassert> +#include <chrono> // std::chrono::milliseconds +#include <numeric> // std::accumulate +#include <thread> // std::this_thread::sleep_for +#include <vector> + +#include "aidge/operator/CryptoHash.hpp" +#include "aidge/utils/Types.h" +#include "aidge/backend/cpu/data/GetCPUPtr.h" + +#include "aidge/backend/cpu/operator/CryptoHashImpl.hpp" +#include "aidge/backend/cpu/operator/CryptoHashImpl_kernels.hpp" + +#ifdef WITH_OPENSSL +template <> +void Aidge::CryptoHashImpl_cpu::forward() { + const CryptoHash_Op& op_ = dynamic_cast<const CryptoHash_Op&>(mOp); + std::shared_ptr<Tensor> in0 = op_.getInput(0); + std::shared_ptr<Tensor> out0 = op_.getOutput(0); + AIDGE_ASSERT(in0, "missing input #0"); + + // Find the correct kernel type + const auto impl = Registrar<CryptoHashImpl_cpu>::create(getBestMatch(getRequiredSpec())); + + // Call kernel + impl.forward(in0->size(), + getCPUPtr(mOp.getRawInput(0)), + getCPUPtr(mOp.getRawOutput(0))); +} + +template <> +void Aidge::CryptoHashImpl_cpu::backward() { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not available for CryptoHash_Op"); +} +#endif diff --git a/src/operator/ModImpl.cpp b/src/operator/ModImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..161f7bc114d6e8bf566b1e2739c1d057ecfdf3f7 --- /dev/null +++ b/src/operator/ModImpl.cpp @@ -0,0 +1,131 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <memory> +#include <vector> + +#include "aidge/backend/cpu/data/Broadcasting.hpp" +#include "aidge/backend/cpu/data/GetCPUPtr.h" +#include "aidge/backend/cpu/operator/ModImpl.hpp" +#include "aidge/backend/cpu/operator/ModImpl_kernels.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Types.h" + +template <> +void Aidge::ModImpl_cpu::forward() { + // 1. Same number of dimensions -> [5,2,1,7] & [1,2,6,7] + // 2. Find the highest equal dimension -> 3 + // Exception: if the first diverging dimension is the last one, then -> 4 (dims.size()) + // 3. Compute the highest number of contiguous data -> 7 + // 4. Compute stride and offset step for the broadcast mechanism + // 5. Call a simple kernel + const auto& opTensor = static_cast<const Mod_Op&>(mOp); + + // Find the correct kernel type + const auto impl = Registrar<ModImpl_cpu>::create(getBestMatch(getRequiredSpec())); + + // Compute compatible input dimensions + std::vector<std::size_t> dims0 = opTensor.getInput(0)->dims(); + std::vector<std::size_t> dims1 = opTensor.getInput(1)->dims(); + const std::vector<std::size_t>& outDims = opTensor.getOutput(0)->dims(); + + // special case for equal dimensions, the kernel is called with the entire arrays at once + if (dims0 == dims1) { + const std::size_t input0_contiguous_size = std::accumulate(dims0.cbegin(), dims0.cend(), std::size_t(1), std::multiplies<std::size_t>()); + impl.forward(opTensor.fmod(), + input0_contiguous_size, input0_contiguous_size, input0_contiguous_size, + getCPUPtr(mOp.getRawInput(0)), + getCPUPtr(mOp.getRawInput(1)), + getCPUPtr(mOp.getRawOutput(0))); + return; + } + + // set dimensions to be of equal size by filling the smallest one with ones. + if (dims0.size() > dims1.size()) { + dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1)); + } + else if (dims1.size() > dims0.size()) { + dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1)); + } + + const std::size_t nbDims = dims0.size(); + + // Find the highest equal dimension + // std::size_t contiguousIdx = nbDims - 1; + std::size_t contiguousIdx = nbDims; + while (contiguousIdx-- > 0) { + // for (; contiguousIdx+1 > 0; --contiguousIdx) { + if (dims0[contiguousIdx] != dims1[contiguousIdx]) { + if (contiguousIdx == (nbDims -1)) { // last dimensions of one of the input Tensor are of size 1 + const std::vector<std::size_t>& dims = (dims0[contiguousIdx] == 1) ? 
dims0 : dims1; + while ((contiguousIdx+1 > 0) && (dims[contiguousIdx] == 1)) { + --contiguousIdx; + } + } + break; + } + } + ++contiguousIdx; + + // Compute the highest number of contiguous data for each Tensor + const std::size_t input0_contiguous_size = std::accumulate(dims0.cbegin()+contiguousIdx, dims0.cend(), std::size_t(1), std::multiplies<std::size_t>()); + const std::size_t input1_contiguous_size = std::accumulate(dims1.cbegin()+contiguousIdx, dims1.cend(), std::size_t(1), std::multiplies<std::size_t>()); + const std::size_t output_contiguous_size = std::accumulate(outDims.cbegin()+contiguousIdx, outDims.cend(), std::size_t(1), std::multiplies<std::size_t>()); + + // initialize strides to iterate through data because of broadcasting + std::unique_ptr<std::int32_t[]> stride_post0 = std::make_unique<std::int32_t[]>(contiguousIdx); + std::unique_ptr<std::int32_t[]> stride_post1 = std::make_unique<std::int32_t[]>(contiguousIdx); + std::unique_ptr<std::int32_t[]> stride_step0 = std::make_unique<std::int32_t[]>(contiguousIdx); + std::unique_ptr<std::int32_t[]> stride_step1 = std::make_unique<std::int32_t[]>(contiguousIdx); + if (contiguousIdx > 0) { + stride_post0[contiguousIdx - 1] = 1; + stride_post1[contiguousIdx - 1] = 1; + for (std::size_t i = contiguousIdx - 2; i != static_cast<std::size_t>(-1); --i) { + stride_post0[i] = stride_post0[i+1]*static_cast<std::int32_t>(dims0[i+1]); + stride_post1[i] = stride_post1[i+1]*static_cast<std::int32_t>(dims1[i+1]); + } + for (std::size_t i = 0; i != contiguousIdx; ++i) { + stride_step0[i] = (dims0[i] == 1) ? 1 - stride_post0[i] : 1; + stride_step1[i] = (dims1[i] == 1) ? 1 - stride_post1[i] : 1; + } + } + + // variables for arrays offsets + std::size_t offsetIn0 = 0; + std::size_t offsetIn1 = 0; + std::size_t offsetOut = 0; + + + std::size_t dim = contiguousIdx - 1; + const std::size_t nbStacks = std::accumulate(outDims.cbegin(), outDims.cbegin() + contiguousIdx, std::size_t(1), std::multiplies<std::size_t>()); + for (std::size_t stack = 0; stack < nbStacks;) { + impl.forward(opTensor.fmod(), input0_contiguous_size, input1_contiguous_size, output_contiguous_size, + getCPUPtr(mOp.getRawInput(0), offsetIn0*input0_contiguous_size), + getCPUPtr(mOp.getRawInput(1), offsetIn1*input1_contiguous_size), + getCPUPtr(mOp.getRawOutput(0), offsetOut*output_contiguous_size)); + if (++stack < nbStacks) { + std::size_t tmp_stack = stack; + while(tmp_stack % outDims[dim] == 0) { + tmp_stack /= outDims[dim]; + dim--; + } + offsetIn0 += stride_step0[dim]; + offsetIn1 += stride_step1[dim]; + ++offsetOut; + dim = contiguousIdx - 1; + } + } +} + +template <> +void Aidge::ModImpl_cpu::backward() { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for Mod_Op on backend cpu"); +} diff --git a/unit_tests/operator/Test_CryptoHash.cpp b/unit_tests/operator/Test_CryptoHash.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7453ea19c765d6a2bf79a66972d120b7a0ca6de5 --- /dev/null +++ b/unit_tests/operator/Test_CryptoHash.cpp @@ -0,0 +1,56 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <cmath> // std::abs +#include <cstddef> // std::size_t +#include <memory> + +#include <catch2/catch_test_macros.hpp> + +#include "aidge/backend/cpu/operator/CryptoHashImpl.hpp" +#include "aidge/data/Data.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/graph/Node.hpp" +#include "aidge/operator/CryptoHash.hpp" +#include "aidge/utils/ArrayHelpers.hpp" + +using namespace Aidge; + +#ifdef WITH_OPENSSL +TEST_CASE("[cpu/operator] CryptoHash(forward)") { + SECTION("1D Tensor") { + std::shared_ptr<Tensor> input0 = + std::make_shared<Tensor>(Array1D<uint8_t, 5>{ + {'a', 'b', 'c', 'd', 'e'}}); + std::shared_ptr<Tensor> expectedOutput = + std::make_shared<Tensor>(Array1D<uint8_t, 32>{ + {0x36, 0xbb, 0xe5, 0x0e, 0xd9, 0x68, 0x41, 0xd1, + 0x04, 0x43, 0xbc, 0xb6, 0x70, 0xd6, 0x55, 0x4f, + 0x0a, 0x34, 0xb7, 0x61, 0xbe, 0x67, 0xec, 0x9c, + 0x4a, 0x8a, 0xd2, 0xc0, 0xc4, 0x4c, 0xa4, 0x2c}}); + + std::shared_ptr<Node> myCryptoHash = CryptoHash(); + auto op = std::static_pointer_cast<CryptoHash_Op>(myCryptoHash->getOperator()); + op->associateInput(0, input0); + op->setDataType(DataType::UInt8); + op->setBackend("cpu"); + myCryptoHash->forward(); + + REQUIRE(op->getOutput(0)->size() == 32); + + uint8_t* resPtr = static_cast<uint8_t*>(op->getOutput(0)->getImpl()->rawPtr()); + uint8_t* expectedPtr = static_cast<uint8_t*>(expectedOutput->getImpl()->rawPtr()); + for (std::size_t i = 0; i < expectedOutput->size(); ++i) { + REQUIRE(resPtr[i] == expectedPtr[i]); + } + } +} +#endif diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp index 0c4a64bb062bb8e9219f26c27910b72439ed8c5c..de720f5bc7e3c18bcd725c676d458f30344d3b1a 100644 --- a/unit_tests/operator/Test_MetaOperator.cpp +++ b/unit_tests/operator/Test_MetaOperator.cpp @@ -18,6 +18,7 @@ #include "aidge/backend/cpu/operator/ConvImpl.hpp" #include "aidge/backend/cpu/operator/PadImpl.hpp" +#include "aidge/backend/cpu/operator/TanhImpl.hpp" #include "aidge/data/Tensor.hpp" #include "aidge/filler/Filler.hpp" #include "aidge/operator/Conv.hpp" @@ -278,9 +279,9 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") { REQUIRE(op->getNbConsumedData(1).data == 32768); REQUIRE(op->getNbProducedData(0).data == 34816); REQUIRE(op->getNbProducedData(1).data == 34816); - REQUIRE(microGraphScheduler->getStaticScheduling(0).size() == 26); - REQUIRE(microGraphScheduler->getStaticScheduling(1).size() == 24); - REQUIRE(microGraphScheduler->getStaticScheduling(15).size() == 24); + REQUIRE(microGraphScheduler->getSequentialStaticScheduling(0).size() == 26); + REQUIRE(microGraphScheduler->getSequentialStaticScheduling(1).size() == 24); + REQUIRE(microGraphScheduler->getSequentialStaticScheduling(15).size() == 24); } SECTION("LSTM(forward_values)") { diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp index 956169c387c4a34f500f66b214dcf95a145feafd..54e57ec44a9b803cdba0812ceebbac35c2445adf 100644 --- a/unit_tests/scheduler/Test_Scheduler.cpp +++ b/unit_tests/scheduler/Test_Scheduler.cpp @@ -21,6 +21,10 @@ #include "aidge/operator/Pop.hpp" #include "aidge/operator/Stack.hpp" #include "aidge/operator/Identity.hpp" +#include "aidge/operator/CryptoHash.hpp" +#include "aidge/operator/Mod.hpp" +#include "aidge/operator/Tanh.hpp" +#include "aidge/operator/Select.hpp" #include "aidge/operator/MetaOperator.hpp" #include 
"aidge/scheduler/SequentialScheduler.hpp" #include "aidge/scheduler/ParallelScheduler.hpp" @@ -30,6 +34,9 @@ #include "aidge/backend/cpu/operator/ReLUImpl.hpp" #include "aidge/backend/cpu/operator/SqrtImpl.hpp" #include "aidge/backend/cpu/operator/AddImpl.hpp" +#include "aidge/backend/cpu/operator/CryptoHashImpl.hpp" +#include "aidge/backend/cpu/operator/ModImpl.hpp" +#include "aidge/backend/cpu/operator/TanhImpl.hpp" #include "aidge/recipes/GraphViewHelper.hpp" @@ -512,4 +519,74 @@ TEST_CASE("[cpu/scheduler] Accumulate", "[scheduler]") { std::shared_ptr<Tensor> output = std::static_pointer_cast<OperatorTensor>(pop_o->getOperator())->getOutput(0); REQUIRE(*output == *expectedOutput); } + +#ifdef WITH_OPENSSL +TEST_CASE("[cpu/scheduler] Select", "[scheduler]") { + std::shared_ptr<Tensor> in = std::make_shared<Tensor>( + Array2D<float, 2, 3>{{{1, 2, 3}, {4, 5, 6}}}); + + std::shared_ptr<GraphView> g = Sequential({ + Producer(in, "input"), + Parallel({ + Sequential({ + CryptoHash("hash"), + Mod("mod") + }), + ReLU("relu"), + Tanh("tanh"), + Sqrt("sqrt") + }), + Select(3, "select") + }); + + auto modProd = Producer(std::make_shared<Tensor>(Array1D<uint64_t, 1>{{3}})); + modProd->addChild(g->getNode("mod"), 0, 1); + g->add(modProd); + + g->getNode("hash")->getOperator()->setDataType(DataType::UInt64); + g->getNode("mod")->getOperator()->setDataType(DataType::UInt64); + g->setBackend("cpu"); + g->save("select"); + + auto scheduler = SequentialScheduler(g); + scheduler.generateScheduling(); + scheduler.saveStaticSchedulingDiagram("select_scheduling"); + REQUIRE_NOTHROW(scheduler.forward(true)); + + g->save("select_forwarded"); + + auto expectedOutputHash = std::make_shared<Tensor>( + Array1D<uint64_t, 4>{{0x1b7cf58dfe2dae24, 0x3bac903def4ce580, 0x5f5a347389d97f41, 0x2c2dc759abc6b61}}); + auto outputHash = std::static_pointer_cast<OperatorTensor>(g->getNode("hash")->getOperator())->getOutput(0); + REQUIRE(*outputHash == *expectedOutputHash); + + auto expectedOutputMod = std::make_shared<Tensor>( + Array1D<uint64_t, 4>{{2, 1, 1, 2}}); + auto outputMod = std::static_pointer_cast<OperatorTensor>(g->getNode("mod")->getOperator())->getOutput(0); + REQUIRE(*outputMod == *expectedOutputMod); + + auto expectedOutput = std::make_shared<Tensor>( + Array2D<float, 2, 3>{{{std::sqrt(1), std::sqrt(2), std::sqrt(3)}, {std::sqrt(4), std::sqrt(5), std::sqrt(6)}}}); + auto output = std::static_pointer_cast<OperatorTensor>(g->getNode("select")->getOperator())->getOutput(0); + REQUIRE(*output == *expectedOutput); + + scheduler.resetScheduling(); + scheduler.tagConditionalNodes(); + + REQUIRE(g->getNode("relu")->attributes()->hasAttr("schedule.cond")); + REQUIRE(g->getNode("relu")->attributes()->getAttr<std::set<std::pair<NodePtr, size_t>>>("schedule.cond") + == std::set<std::pair<NodePtr, size_t>>{{g->getNode("select"), 0}}); + REQUIRE(g->getNode("tanh")->attributes()->hasAttr("schedule.cond")); + REQUIRE(g->getNode("tanh")->attributes()->getAttr<std::set<std::pair<NodePtr, size_t>>>("schedule.cond") + == std::set<std::pair<NodePtr, size_t>>{{g->getNode("select"), 1}}); + REQUIRE(g->getNode("sqrt")->attributes()->hasAttr("schedule.cond")); + REQUIRE(g->getNode("sqrt")->attributes()->getAttr<std::set<std::pair<NodePtr, size_t>>>("schedule.cond") + == std::set<std::pair<NodePtr, size_t>>{{g->getNode("select"), 2}}); + REQUIRE(!g->getNode("input")->attributes()->hasAttr("schedule.cond")); + + scheduler.generateScheduling(); + scheduler.saveStaticSchedulingDiagram("select_scheduling_tag"); + 
REQUIRE_NOTHROW(scheduler.forward(true)); +} +#endif } // namespace Aidge
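
Usage sketch (illustrative, not part of the patch): the snippet below shows how the CryptoHash and Mod CPU implementations added above could be driven at the operator level, reproducing the hash-then-modulo pattern that the Select scheduler test uses to derive a branch index. Every operator call it makes (CryptoHash(), Mod(), associateInput(), setDataType(), setBackend(), forward(), getOutput(), getImpl()->rawPtr()) appears in this diff; the main() wrapper, the includes and the printing are additions that have not been compiled against the build. It assumes a backend built with -DWITH_OPENSSL and linked against aidge_core and aidge_backend_cpu, and, like the scheduler test, that setDataType() on an operator does not retype its already-associated inputs.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <memory>

#include "aidge/backend/cpu.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/CryptoHash.hpp"
#include "aidge/operator/Mod.hpp"
#include "aidge/utils/ArrayHelpers.hpp"

int main() {
    using namespace Aidge;

    // Same input data as the Select scheduler test above: hash the raw float
    // buffer with SHA256 and read the 256-bit digest back as four uint64 words.
    auto input = std::make_shared<Tensor>(
        Array2D<float, 2, 3>{{{1, 2, 3}, {4, 5, 6}}});
    // Scalar divisor, broadcast over the 4-element digest by the Mod kernel.
    auto divisor = std::make_shared<Tensor>(Array1D<std::uint64_t, 1>{{3}});

    auto hashNode = CryptoHash("hash");
    auto hashOp = std::static_pointer_cast<CryptoHash_Op>(hashNode->getOperator());
    hashOp->associateInput(0, input);
    hashOp->setDataType(DataType::UInt64);   // digest exposed as 4 x uint64
    hashOp->setBackend("cpu");
    hashNode->forward();

    // Reduce each digest word modulo 3 to obtain a value in [0, 2].
    auto modNode = Mod("mod");
    auto modOp = std::static_pointer_cast<Mod_Op>(modNode->getOperator());
    modOp->associateInput(0, hashOp->getOutput(0));
    modOp->associateInput(1, divisor);
    modOp->setDataType(DataType::UInt64);
    modOp->setBackend("cpu");
    modNode->forward();

    // The scheduler test above expects the residues {2, 1, 1, 2} for this input.
    const auto* res = static_cast<const std::uint64_t*>(
        modOp->getOutput(0)->getImpl()->rawPtr());
    for (std::size_t i = 0; i < modOp->getOutput(0)->size(); ++i) {
        std::cout << res[i] << ' ';
    }
    std::cout << '\n';
    return 0;
}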