diff --git a/include/aidge/backend/cuda.hpp b/include/aidge/backend/cuda.hpp
index ec2c94090c8e8944008b94a49605e309d14d39ab..47beaec8dee2dc4308aa7fd03adb7b2cc70a079c 100644
--- a/include/aidge/backend/cuda.hpp
+++ b/include/aidge/backend/cuda.hpp
@@ -19,13 +19,13 @@
 #include "aidge/backend/cuda/operator/AbsImpl.hpp"
 #include "aidge/backend/cuda/operator/AddImpl.hpp"
-#include "aidge/backend/cuda/operator/AndImpl.hpp"
 #include "aidge/backend/cuda/operator/ArgMaxImpl.hpp"
 #include "aidge/backend/cuda/operator/AvgPoolingImpl.hpp"
 #include "aidge/backend/cuda/operator/BatchNormImpl.hpp"
 #include "aidge/backend/cuda/operator/ConvImpl.hpp"
 #include "aidge/backend/cuda/operator/ClipImpl.hpp"
 #include "aidge/backend/cuda/operator/DivImpl.hpp"
+#include "aidge/backend/cuda/operator/EqualImpl.hpp"
 #include "aidge/backend/cuda/operator/ErfImpl.hpp"
 #include "aidge/backend/cuda/operator/FCImpl.hpp"
 #include "aidge/backend/cuda/operator/GlobalAveragePoolingImpl.hpp"
diff --git a/include/aidge/backend/cuda/operator/AndImpl.hpp b/include/aidge/backend/cuda/operator/EqualImpl.hpp
similarity index 72%
rename from include/aidge/backend/cuda/operator/AndImpl.hpp
rename to include/aidge/backend/cuda/operator/EqualImpl.hpp
index e90a4c5fe3d7b4cd529dcb4cb5400a6447f53e3c..ca989560aa87c33e7a5150a4b08aec151c359dd8 100644
--- a/include/aidge/backend/cuda/operator/AndImpl.hpp
+++ b/include/aidge/backend/cuda/operator/EqualImpl.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_BACKEND_CUDA_OPERATOR_ANDIMPL_H_
-#define AIDGE_BACKEND_CUDA_OPERATOR_ANDIMPL_H_
+#ifndef AIDGE_BACKEND_CUDA_OPERATOR_EQUALIMPL_H_
+#define AIDGE_BACKEND_CUDA_OPERATOR_EQUALIMPL_H_
 
 #include <array>
 #include <memory>
@@ -20,7 +20,7 @@
 #include <cudnn.h>
 
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/And.hpp"
+#include "aidge/operator/Equal.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
@@ -28,12 +28,12 @@
 namespace Aidge {
 
 // Operator implementation entry point for the backend
-class AndImpl_cuda : public OperatorImpl {
+class EqualImpl_cuda : public OperatorImpl {
 public:
-    AndImpl_cuda(const And_Op& op) : OperatorImpl(op, "cuda") {}
+    EqualImpl_cuda(const Equal_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<AndImpl_cuda> create(const And_Op& op) {
-        return std::make_unique<AndImpl_cuda>(op);
+    static std::unique_ptr<EqualImpl_cuda> create(const Equal_Op& op) {
+        return std::make_unique<EqualImpl_cuda>(op);
     }
 
     virtual std::vector<ImplSpec> getAvailableImplSpecs() const override {
@@ -51,7 +51,7 @@
 };
 
 // Implementation entry point registration to Operator
-REGISTRAR(And_Op, "cuda", Aidge::AndImpl_cuda::create);
+REGISTRAR(Equal_Op, "cuda", Aidge::EqualImpl_cuda::create);
 }  // namespace Aidge
 
-#endif /* AIDGE_BACKEND_CUDA_OPERATOR_ANDIMPL_H_ */
+#endif /* AIDGE_BACKEND_CUDA_OPERATOR_EQUALIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/AndImpl_CUDA_kernels.hpp b/include/aidge/backend/cuda/operator/EqualImpl_CUDA_kernels.hpp
similarity index 80%
rename from include/aidge/backend/cuda/operator/AndImpl_CUDA_kernels.hpp
rename to include/aidge/backend/cuda/operator/EqualImpl_CUDA_kernels.hpp
index bae79a03d03cd5fb7d5fdc4fbebf1dd7562370ae..307a00bf40f1a774f1ecb839a2360cd4f7bc2ea5 100644
--- a/include/aidge/backend/cuda/operator/AndImpl_CUDA_kernels.hpp
+++ b/include/aidge/backend/cuda/operator/EqualImpl_CUDA_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CUDA_OPERATOR_ANDIMPL_KERNELS_H_
-#define AIDGE_CUDA_OPERATOR_ANDIMPL_KERNELS_H_
+#ifndef AIDGE_CUDA_OPERATOR_EQUALIMPL_KERNELS_H_
+#define AIDGE_CUDA_OPERATOR_EQUALIMPL_KERNELS_H_
 
 #include <stdexcept>
 #include <cfloat>
@@ -24,12 +24,12 @@
 namespace Aidge {
 
 template <class T>
-void AndForward(const T* input1, const T* input2, T* output,
+void EqualForward(const T* input1, const T* input2, T* output,
                 const std::vector<int>& input1Dims,const std::vector<int>& input2Dims,
                 const std::vector<int>& inputStrides, const std::vector<int>& input2Strides,const std::vector<int>& outputStrides,
                 int outSize);
 }
 
-#endif /* AIDGE_CUDA_OPERATOR_ANDIMPL_KERNELS_H_ */
+#endif /* AIDGE_CUDA_OPERATOR_EQUALIMPL_KERNELS_H_ */
diff --git a/src/operator/AndImpl.cpp b/src/operator/EqualImpl.cpp
similarity index 77%
rename from src/operator/AndImpl.cpp
rename to src/operator/EqualImpl.cpp
index e1ee9ebcb9437b89666da21a915907b5434ece26..d43cf6b85a587094e8f9267f45854ab6f3215824 100644
--- a/src/operator/AndImpl.cpp
+++ b/src/operator/EqualImpl.cpp
@@ -15,23 +15,23 @@
 #include <vector>
 
 #include "aidge/backend/cuda/data/TensorImpl.hpp"
-#include "aidge/backend/cuda/operator/AndImpl.hpp"
-#include "aidge/backend/cuda/operator/AndImpl_CUDA_kernels.hpp"
+#include "aidge/backend/cuda/operator/EqualImpl.hpp"
+#include "aidge/backend/cuda/operator/EqualImpl_CUDA_kernels.hpp"
 #include "aidge/backend/cuda/utils/CudaContext.hpp"
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
-#include "aidge/operator/And.hpp"
+#include "aidge/operator/Equal.hpp"
 #include "aidge/utils/Types.h"
 
-void Aidge::AndImpl_cuda::forward() {
-    const And_Op& op = static_cast<const And_Op&>(mOp);
+void Aidge::EqualImpl_cuda::forward() {
+    const Equal_Op& op = static_cast<const Equal_Op&>(mOp);
     // Check inputs
-    AIDGE_ASSERT(op.getInput(0), "missing input in And operator");
-    AIDGE_ASSERT(op.getInput(0)->hasImpl(), "cannot run And forward because the 0-th input has no implementation.");
+    AIDGE_ASSERT(op.getInput(0), "missing input in Equal operator");
+    AIDGE_ASSERT(op.getInput(0)->hasImpl(), "cannot run Equal forward because the 0-th input has no implementation.");
     DataType datatypeFirstInput = op.getInput(0)->dataType();
     for (IOIndex_t i = 1; i < op.nbInputs(); ++i) {
-        AIDGE_ASSERT(op.getInput(i), "missing input in And operator");
-        AIDGE_ASSERT(op.getInput(i)->hasImpl(), "cannot run And forward because the {}-th input has no implementation.", i);
-        AIDGE_ASSERT(op.getInput(i)->dataType() == datatypeFirstInput, "Cannot And inputs with two differents data type.");
+        AIDGE_ASSERT(op.getInput(i), "missing input in Equal operator");
+        AIDGE_ASSERT(op.getInput(i)->hasImpl(), "cannot run Equal forward because the {}-th input has no implementation.", i);
+        AIDGE_ASSERT(op.getInput(i)->dataType() == datatypeFirstInput, "Cannot Equal inputs with two differents data type.");
     }
 
     std::vector<std::shared_ptr<Tensor>> inputFallbacks(op.nbInputs());
@@ -75,7 +75,7 @@
 }
 
 template <class T>
-void Aidge::AndImpl_cuda::forward_(const std::vector<Tensor>& inputs, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides) {
+void Aidge::EqualImpl_cuda::forward_(const std::vector<Tensor>& inputs, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides) {
     const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
     const T * input1Ptr = static_cast<const T*>(inputs[0].getImpl()->rawPtr());
    const T * input2Ptr = static_cast<const T*>(inputs[1].getImpl()->rawPtr());
@@ -88,7 +88,7 @@ void Aidge::AndImpl_cuda::forward_(const std::vector<Tensor>& inputs, const std:
         }
     }
 
-    Aidge::AndForward<T>(input1Ptr, input2Ptr, outputPtr,
+    Aidge::EqualForward<T>(input1Ptr, input2Ptr, outputPtr,
                          inputsDims[0], inputsDims[1],
                          inputsStrides[0], inputsStrides[1], outputStrides,
                          static_cast<int>(op.getOutput(0)->size()));
diff --git a/src/operator/AndImpl_CUDA_kernels.cu b/src/operator/EqualImpl_CUDA_kernels.cu
similarity index 88%
rename from src/operator/AndImpl_CUDA_kernels.cu
rename to src/operator/EqualImpl_CUDA_kernels.cu
index 34bfccf98c013d8bfc934325f4e327cbae9e7b4a..ab6104a80d9d4b9f426067aaf6fea2a62db3b2d1 100644
--- a/src/operator/AndImpl_CUDA_kernels.cu
+++ b/src/operator/EqualImpl_CUDA_kernels.cu
@@ -11,7 +11,7 @@
 
 #include <cuda_fp16.h>
 
-#include "aidge/backend/cuda/operator/AndImpl_CUDA_kernels.hpp"
+#include "aidge/backend/cuda/operator/EqualImpl_CUDA_kernels.hpp"
 
 // Helper function for comparison
 template <typename T>
@@ -24,7 +24,7 @@ __device__ bool compareE<half>(half a, half b) {
 }
 
 template <typename T>
-__global__ void and_cuda_Kernel(const T* input1, const T* input2, T* output,
+__global__ void equal_cuda_Kernel(const T* input1, const T* input2, T* output,
                                 int* input1_shape, int* input2_shape,
                                 int* input1_strides, int* input2_strides, int* output_strides,
                                 int num_dims, int size) {
@@ -44,7 +44,7 @@ __global__ void and_cuda_Kernel(const T* input1, const T* input2, T* output,
 }
 
 template <typename T>
-void Aidge::AndForward(const T* input1, const T* input2, T* output,
+void Aidge::EqualForward(const T* input1, const T* input2, T* output,
                        const std::vector<int>& input1Dims,const std::vector<int>& input2Dims,
                        const std::vector<int>& input1Strides, const std::vector<int>& input2Strides,const std::vector<int>& outputStrides,
                        int outSize)
@@ -68,7 +68,7 @@ void Aidge::AndForward(const T* input1, const T* input2, T* output,
     int num_dims = input1Dims.size();
 
     // Launch the kernel
-    and_cuda_Kernel<<<numBlocks, blockSize>>>(input1, input2, output,
+    equal_cuda_Kernel<<<numBlocks, blockSize>>>(input1, input2, output,
                                               d_input1_shape, d_input2_shape,
                                               d_input1_strides, d_input2_strides, d_output_strides,
                                               num_dims, outSize);
@@ -79,17 +79,17 @@ void Aidge::AndForward(const T* input1, const T* input2, T* output,
     CHECK_CUDA_STATUS(cudaFree(d_output_strides));
 };
 
-template void Aidge::AndForward(const double* input1, const double* input2, double* output,
+template void Aidge::EqualForward(const double* input1, const double* input2, double* output,
                                 const std::vector<int>& input1Dims,const std::vector<int>& input2Dims,
                                 const std::vector<int>& inputStrides, const std::vector<int>& input2Strides,const std::vector<int>& outputStrides,
                                 int outSize);
 
-template void Aidge::AndForward(const float* input1, const float* input2, float* output,
+template void Aidge::EqualForward(const float* input1, const float* input2, float* output,
                                 const std::vector<int>& input1Dims,const std::vector<int>& input2Dims,
                                 const std::vector<int>& inputStrides, const std::vector<int>& input2Strides,const std::vector<int>& outputStrides,
                                 int outSize);
 
-template void Aidge::AndForward(const half* input1, const half* input2, half* output,
+template void Aidge::EqualForward(const half* input1, const half* input2, half* output,
                                 const std::vector<int>& input1Dims,const std::vector<int>& input2Dims,
                                 const std::vector<int>& inputStrides, const std::vector<int>& input2Strides,const std::vector<int>& outputStrides,
                                 int outSize);
\ No newline at end of file
diff --git a/unit_tests/Test_AndImpl.cpp b/unit_tests/Test_EqualImpl.cpp
similarity index 95%
rename from unit_tests/Test_AndImpl.cpp
rename to unit_tests/Test_EqualImpl.cpp
index 951868a2f652a00bcea793f8b5abb00012aabca8..67f189f2f2e5a63a494f6fa947a8cf483ed75a11 100644
--- a/unit_tests/Test_AndImpl.cpp
+++ b/unit_tests/Test_EqualImpl.cpp
@@ -14,14 +14,14 @@
 #include <catch2/catch_test_macros.hpp>
 
 #include "aidge/backend/cuda/data/TensorImpl.hpp"
-#include "aidge/backend/cuda/operator/AndImpl.hpp"
+#include "aidge/backend/cuda/operator/EqualImpl.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/TensorUtils.hpp"
 
 using namespace Aidge;
 
-TEST_CASE("[gpu/operator] And(forward)", "[And][GPU]") {
+TEST_CASE("[gpu/operator] Equal(forward)", "[Equal][GPU]") {
     SECTION("Same size inputs") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,3,3,3,2> {
         {                                       //
@@ -83,7 +83,7 @@ TEST_CASE("[gpu/operator] And(forward)", "[And][GPU]") {
             }
         });
 
-        std::shared_ptr<And_Op> op = std::make_shared<And_Op>();
+        std::shared_ptr<Equal_Op> op = std::make_shared<Equal_Op>();
         op->associateInput(0, input_1);
         op->associateInput(1, input_2);
         op->setBackend("cuda");
@@ -118,7 +118,7 @@ TEST_CASE("[gpu/operator] And(forward)", "[And][GPU]") {
            }                                       //
        });
        // input_2->setBackend("cuda");
-        std::shared_ptr<And_Op> op = std::make_shared<And_Op>();
+        std::shared_ptr<Equal_Op> op = std::make_shared<Equal_Op>();
         op->associateInput(0, input_1);
         op->associateInput(1, input_2);
         op->setDataType(DataType::Float32);
diff --git a/unit_tests/Test_ReshapeImpl.cpp b/unit_tests/Test_ReshapeImpl.cpp
index d62fc4625c51ff4affb207d8dd7c30d0661ad294..76e91f74ea1b2e703530783a2021f075c3a5713f 100644
--- a/unit_tests/Test_ReshapeImpl.cpp
+++ b/unit_tests/Test_ReshapeImpl.cpp
@@ -81,7 +81,7 @@ TEST_CASE("[gpu/operator] Reshape(forward)") {
            }
        };
 
-        std::shared_ptr<Reshape_Op> op = std::make_shared<Reshape_Op>(std::vector<std::int64_t>({2, 3}));
+        std::shared_ptr<Reshape_Op> op = std::make_shared<Reshape_Op>(std::vector<std::int64_t>({3, 2}));
         op->setDataType(DataType::Float32);
         op->setBackend("cuda");
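For reference, a minimal usage sketch of the renamed operator on the CUDA backend. It only mirrors the call pattern already visible in Test_EqualImpl.cpp above (Equal_Op, associateInput, setDataType, setBackend("cuda"), forward); the small Array1D inputs, the expected 1/0 output convention, and the main() wrapper are illustrative assumptions and are not part of this patch.

#include <memory>

#include "aidge/backend/cuda/data/TensorImpl.hpp"     // registers the CUDA tensor implementation
#include "aidge/backend/cuda/operator/EqualImpl.hpp"  // registers EqualImpl_cuda for Equal_Op
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Equal.hpp"

using namespace Aidge;

int main() {
    // Illustrative 1-D inputs; the unit test above uses larger Array4D tensors.
    std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array1D<float, 3>{{1.0f, 2.0f, 3.0f}});
    std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<float, 3>{{1.0f, 0.0f, 3.0f}});

    // Same wiring as the unit test: bind inputs, pick type and backend, then run.
    std::shared_ptr<Equal_Op> op = std::make_shared<Equal_Op>();
    op->associateInput(0, input_1);
    op->associateInput(1, input_2);
    op->setDataType(DataType::Float32);
    op->setBackend("cuda");   // dispatches to the EqualImpl_cuda registered by this patch
    op->forward();

    // op->getOutput(0) is expected to hold 1 where the inputs are equal and 0 elsewhere.
    return 0;
}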