diff --git a/include/aidge/backend/cpu/operator/AddImpl.hpp b/include/aidge/backend/cpu/operator/AddImpl.hpp
index 57669c628b4fa650f137c2b28c8c0a4584bf6c35..1d3b29d43678e8d97e05b9b169a98f7e757838d8 100644
--- a/include/aidge/backend/cpu/operator/AddImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl.hpp
@@ -12,16 +12,17 @@
 #ifndef AIDGE_CPU_OPERATOR_ADDIMPL_H_
 #define AIDGE_CPU_OPERATOR_ADDIMPL_H_
 
+#include <cstddef>  // std::size_t
+#include <memory>   // std::unique_ptr, std::make_unique
+#include <string>
+#include <vector>
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Add.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include <memory>
-#include <vector>
 
 namespace Aidge {
-// class Add_Op<2>;
 
 // compute kernel registry for forward and backward
 class AddImplForward_cpu
@@ -33,7 +34,7 @@ class AddImplBackward_cpu
 
 class AddImpl_cpu : public OperatorImpl {
 public:
-    AddImpl_cpu(const Add_Op& op) : OperatorImpl(op) {}
+    AddImpl_cpu(const Add_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<AddImpl_cpu> create(const Add_Op& op) {
        return std::make_unique<AddImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
index bfb2b1947281fc30e38fd1fe1663bd5de415d3ee..1b62de7e145dfab02e78319600c1b30b29fd715b 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
@@ -38,7 +38,7 @@ class AvgPoolingImpl2DBackward_cpu
 
 class AvgPoolingImpl2D_cpu : public OperatorImpl {
 public:
-    AvgPoolingImpl2D_cpu(const AvgPooling_Op<2> &op) : OperatorImpl(op) {}
+    AvgPoolingImpl2D_cpu(const AvgPooling_Op<2> &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<AvgPoolingImpl2D_cpu> create(const AvgPooling_Op<2> &op) {
         return std::make_unique<AvgPoolingImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
index a599aeb7b427161eb7541829242820c0306d0d31..3743c40a706156c45e6b1e7bf5dfdd50f40ed195 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
@@ -53,7 +53,7 @@ class BatchNormImpl2DBackward_cpu
 
 class BatchNormImpl2D_cpu : public OperatorImpl {
 public:
-    BatchNormImpl2D_cpu(const BatchNorm_Op<2> &op) : OperatorImpl(op) {}
+    BatchNormImpl2D_cpu(const BatchNorm_Op<2> &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<BatchNormImpl2D_cpu> create(const BatchNorm_Op<2> &op) {
         return std::make_unique<BatchNormImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/ConcatImpl.hpp b/include/aidge/backend/cpu/operator/ConcatImpl.hpp
index d0d3e06365c524da1af485583dda6d6208ef3fb9..559d5026d3b7430489ffb1cf08ef143df013c4c4 100644
--- a/include/aidge/backend/cpu/operator/ConcatImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConcatImpl.hpp
@@ -41,7 +41,7 @@ class ConcatImplBackward_cpu
 
 class ConcatImpl_cpu : public OperatorImpl {
 public:
-    ConcatImpl_cpu(const Concat_Op& op) : OperatorImpl(op) {}
+    ConcatImpl_cpu(const Concat_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ConcatImpl_cpu> create(const Concat_Op& op) {
         return std::make_unique<ConcatImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
index f72890d8903ca4a9876809759587ed4b1ac22e67..470e189d3a9a8ce52dd067794cfd1bf6a7404696 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
@@ -40,7 +40,7 @@ class ConvDepthWiseImpl2DBackward_cpu
 
 class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
 public:
-    ConvDepthWiseImpl2D_cpu(const ConvDepthWise_Op<2> &op) : OperatorImpl(op) {}
+    ConvDepthWiseImpl2D_cpu(const ConvDepthWise_Op<2> &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ConvDepthWiseImpl2D_cpu> create(const ConvDepthWise_Op<2> &op) {
         return std::make_unique<ConvDepthWiseImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp
index 9bc2f27412f388a7fd03db06ac97c612044fab5f..5e739b06118e788f716f6e5d6a41a58cab9b5203 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp
@@ -40,7 +40,7 @@ class ConvImpl2DBackward_cpu
 
 class ConvImpl2D_cpu : public OperatorImpl {
 public:
-    ConvImpl2D_cpu(const Conv_Op<2>& op) : OperatorImpl(op) {}
+    ConvImpl2D_cpu(const Conv_Op<2>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ConvImpl2D_cpu> create(const Conv_Op<2> &op) {
         return std::make_unique<ConvImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/DivImpl.hpp b/include/aidge/backend/cpu/operator/DivImpl.hpp
index 710e288d8e0f95b69a2f4973679f1195e6d9cb6a..06a1ae49ffacf3fbf0ae923081d8d9cf1a5a40d6 100644
--- a/include/aidge/backend/cpu/operator/DivImpl.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl.hpp
@@ -34,7 +34,7 @@ class DivImplBackward_cpu
 
 class DivImpl_cpu : public OperatorImpl {
 public:
-    DivImpl_cpu(const Div_Op& op) : OperatorImpl(op) {}
+    DivImpl_cpu(const Div_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<DivImpl_cpu> create(const Div_Op& op) {
         return std::make_unique<DivImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/ErfImpl.hpp b/include/aidge/backend/cpu/operator/ErfImpl.hpp
index 5c0a6fd49f4e2d435eed8e8baa979f59dbd84e68..1402868ea5b8cb441c12dbefaad17304fdfdc749 100644
--- a/include/aidge/backend/cpu/operator/ErfImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ErfImpl.hpp
@@ -32,7 +32,7 @@ class ErfImplBackward_cpu
 
 class ErfImpl_cpu : public OperatorImpl {
 public:
-    ErfImpl_cpu(const Erf_Op& op) : OperatorImpl(op) {}
+    ErfImpl_cpu(const Erf_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ErfImpl_cpu> create(const Erf_Op& op) {
         return std::make_unique<ErfImpl_cpu>(op);
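The FC changes that follow widen the backward kernel signature with three extra output buffers (input gradient, weight gradient, bias gradient). As a reference for the math implemented by FCImpl_cpu_backward_kernel further down (for Y = X*W^T + B: dB is the batch sum of dY, dW = dY^T * X, dX = dY * W), here is a standalone sketch that mirrors its three loop nests on a tiny example; all sizes and values are illustrative, not taken from the patch:

    #include <cstddef>
    #include <iostream>

    int main() {
        constexpr std::size_t batch = 2, in = 3, out = 2;
        const float x[batch * in]   = {1, 2, 3, 4, 5, 6};  // forward input X
        const float w[out * in]     = {1, 0, 1, 0, 1, 0};  // weights W
        const float dy[batch * out] = {1, 1, 1, 1};        // output gradient dY

        float db[out] = {}, dw[out * in] = {}, dx[batch * in] = {};
        for (std::size_t o = 0; o < out; ++o)              // bias grad: sum over batch
            for (std::size_t b = 0; b < batch; ++b)
                db[o] += dy[b * out + o];
        for (std::size_t o = 0; o < out; ++o)              // weight grad: dY^T * X
            for (std::size_t c = 0; c < in; ++c)
                for (std::size_t b = 0; b < batch; ++b)
                    dw[o * in + c] += x[b * in + c] * dy[b * out + o];
        for (std::size_t b = 0; b < batch; ++b)            // input grad: dY * W
            for (std::size_t c = 0; c < in; ++c)
                for (std::size_t o = 0; o < out; ++o)
                    dx[b * in + c] += w[o * in + c] * dy[b * out + o];
        std::cout << db[0] << ' ' << dw[0] << ' ' << dx[0] << '\n';  // prints: 2 5 1
    }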
diff --git a/include/aidge/backend/cpu/operator/FCImpl.hpp b/include/aidge/backend/cpu/operator/FCImpl.hpp
index 86bb7fd1271e5857b595dda8efc0354851c94b7e..fedd8b38b2dbee9e5fd288a07d5cd722470723e5 100644
--- a/include/aidge/backend/cpu/operator/FCImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl.hpp
@@ -26,23 +26,42 @@ namespace Aidge {
 // compute kernel registry for forward and backward
 class FCImplForward_cpu
     : public Registrable<FCImplForward_cpu,
-                         std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const FC_Op::Attrs &, const DimSize_t, const DimSize_t,
-                              const void *, const void *, const void *, void *)> {};
+                         std::tuple<DataType,
+                                    DataType,
+                                    DataType,
+                                    DataType>,
+                         void(const FC_Op::Attrs&,
+                              const DimSize_t,
+                              const DimSize_t,
+                              const void *,
+                              const void *,
+                              const void *,
+                              void *)> {};
 class FCImplBackward_cpu
     : public Registrable<FCImplBackward_cpu,
-                         std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const FC_Op::Attrs &, const DimSize_t, const DimSize_t,
-                              const void *, const void *, const void *, void *)> {};
+                         std::tuple<DataType,
+                                    DataType,
+                                    DataType,
+                                    DataType>,
+                         void(const FC_Op::Attrs&,
+                              const DimSize_t,
+                              const DimSize_t,
+                              const void *,
+                              const void *,
+                              const void *,
+                              void *,
+                              void *,
+                              void *)> {};
 
 class FCImpl_cpu : public OperatorImpl {
 public:
-    FCImpl_cpu(const FC_Op &op) : OperatorImpl(op) {}
+    FCImpl_cpu(const FC_Op &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<FCImpl_cpu> create(const FC_Op &op) {
         return std::make_unique<FCImpl_cpu>(op);
     }
 
-    void forward() override;
+    void forward() override final;
+    void backward() override final;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..50fb5f49033cccd3c554d692bc336c7d5d677384
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp
@@ -0,0 +1,84 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_FCIMPL_BACKWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_FCIMPL_BACKWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+#include <algorithm>
+
+#include "aidge/backend/cpu/operator/FCImpl.hpp"
+
+namespace Aidge {
+template <class I, class O, class W, class B>
+void FCImpl_cpu_backward_kernel(const FC_Op::Attrs& attrs, const DimSize_t batchSize, const DimSize_t oneInputSize,
+                                const void* input_, const void* originalInput_, const void* weight_,
+                                void* output_, void* weightGrad_, void* biasesGrad_) {
+    // FIXME: missing FC attributes as arguments
+    const I* input = static_cast<const I*>(input_);
+    const I* originalInput = static_cast<const I*>(originalInput_);
+    const W* weight = static_cast<const W*>(weight_);
+    O* output = static_cast<O*>(output_);
+    W* weightGrad = static_cast<W*>(weightGrad_);
+    B* biasesGrad = static_cast<B*>(biasesGrad_);
+
+
+    // bias grad
+    if (std::get<1>(attrs)) { // no bias
+        std::fill(biasesGrad, biasesGrad + std::get<0>(attrs), B(0));
+    } else {
+        for (std::size_t o = 0; o < std::get<0>(attrs); ++o) { // nb outputs
+            B sum{0};
+            for (std::size_t b = 0; b < batchSize; ++b) {
+                sum += input[b*std::get<0>(attrs) + o];
+            }
+            biasesGrad[o] = sum;
+        }
+    }
+
+    // weight grad
+    for (std::size_t o = 0; o < std::get<0>(attrs); ++o) {
+        for (std::size_t c = 0; c < oneInputSize; ++c) {
+            W sum{0};
+            for (std::size_t b = 0; b < batchSize; ++b) {
+                sum += originalInput[b*oneInputSize + c]*input[b*std::get<0>(attrs) + o];
+            }
+            weightGrad[o*oneInputSize + c] = sum;
+        }
+    }
+
+    // input grad
+    for (std::size_t b = 0; b < batchSize; ++b) {
+        for (std::size_t c = 0; c < oneInputSize; ++c) {
+            O sum{0};
+            for (std::size_t o = 0; o < std::get<0>(attrs); ++o) {
+                sum += weight[o*oneInputSize + c] * input[b*std::get<0>(attrs) + o];
+            }
+            output[b*oneInputSize + c] = sum;
+        }
+    }
+}
+
+
+namespace {
+static Registrar<FCImplBackward_cpu> registrarFCImpl2DBackward_cpu_Float32(
+    {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
+    Aidge::FCImpl_cpu_backward_kernel<float, float, float, float>);
+static Registrar<FCImplBackward_cpu> registrarFCImpl2DBackward_cpu_Int32(
+    {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32},
+    Aidge::FCImpl_cpu_backward_kernel<int, int, int, int>);
+static Registrar<FCImplBackward_cpu> registrarFCImpl2DBackward_cpu_Float64(
+    {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
+    Aidge::FCImpl_cpu_backward_kernel<double, double, double, double>);
+}  // namespace
+
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_FCIMPL_BACKWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/GatherImpl.hpp b/include/aidge/backend/cpu/operator/GatherImpl.hpp
index 1d235ff14ca01955c268a7b061e6ecb7b2bbbb2a..fce777d0ac4d53134aa65689b6ac2ec02b805d98 100644
--- a/include/aidge/backend/cpu/operator/GatherImpl.hpp
+++ b/include/aidge/backend/cpu/operator/GatherImpl.hpp
@@ -32,7 +32,7 @@ class GatherImplBackward_cpu
 
 class GatherImpl_cpu : public OperatorImpl {
 public:
-    GatherImpl_cpu(const Gather_Op& op) : OperatorImpl(op) {}
+    GatherImpl_cpu(const Gather_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<GatherImpl_cpu> create(const Gather_Op& op) {
         return std::make_unique<GatherImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
index 4a1da034935e6b1f6c2069b4f91153b77a9f0636..42116c52d829a8b4ba27311b3ab2d35fcea37e8b 100644
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
@@ -12,17 +12,17 @@
 #ifndef AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_H_
 #define AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_H_
 
+#include <memory>
+#include <tuple>
+#include <vector>
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/LeakyReLU.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include <memory>
-#include <vector>
 
 namespace Aidge {
-// class LeakyReLU_Op;
-
 // compute kernel registry for forward and backward
 class LeakyReLUImplForward_cpu
     : public Registrable<LeakyReLUImplForward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Attrs&, std::size_t, const void*, void*)> {
@@ -33,14 +33,17 @@ class LeakyReLUImplBackward_cpu
 
 class LeakyReLUImpl_cpu : public OperatorImpl {
 public:
-    LeakyReLUImpl_cpu(const LeakyReLU_Op& op) : OperatorImpl(op) {}
+    LeakyReLUImpl_cpu(const LeakyReLU_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<LeakyReLUImpl_cpu> create(const LeakyReLU_Op& op) {
         return std::make_unique<LeakyReLUImpl_cpu>(op);
     }
 
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
+
+    void forward() override final;
+
+    void backward() override final;
 };
 
 namespace {
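Note that the LeakyReLU backward kernel added below applies the same piecewise-linear map as the forward pass to whatever buffer it is handed; a textbook LeakyReLU backward would instead scale the incoming gradient by the derivative (1 for positive inputs, the negative slope otherwise). A small standalone comparison of the two, with illustrative values only:

    #include <cstddef>
    #include <iostream>

    int main() {
        const float slope = 0.1f;                        // negative-slope attribute
        const float x[3]  = {-2.f, 0.f, 3.f};            // forward input
        const float dy[3] = {1.f, 1.f, 1.f};             // upstream gradient
        float asKernel[3], asDerivative[3];
        for (std::size_t i = 0; i < 3; ++i) {
            asKernel[i]     = x[i] > 0 ? x[i] : slope * x[i];    // what the kernel below computes
            asDerivative[i] = (x[i] > 0 ? 1.f : slope) * dy[i];  // derivative times upstream gradient
        }
        std::cout << asKernel[0] << ' ' << asDerivative[0] << '\n';  // prints: -0.2 0.1
    }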
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..949e6af66a476693b347f38a45edea10e21bc933
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp
@@ -0,0 +1,45 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_BACKWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_BACKWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void LeakyReLUImpl_cpu_backward_kernel(const LeakyReLU_Op::Attrs& attrs,
+                                       std::size_t inputLenght,
+                                       const void* input_,
+                                       void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+    I negativeSlope = static_cast<I>(std::get<0>(attrs));
+
+    for (std::size_t i = 0; i < inputLenght; ++i) {
+        output[i] = input[i] > 0 ? input[i] : negativeSlope*input[i];
+    }
+}
+
+namespace {
+static Registrar<LeakyReLUImplBackward_cpu> registrarLeakyReLUImplBackward_cpu_Float32(
+    {DataType::Float32, DataType::Float32}, Aidge::LeakyReLUImpl_cpu_backward_kernel<float, float>);
+static Registrar<LeakyReLUImplBackward_cpu> registrarLeakyReLUImplBackward_cpu_Int32(
+    {DataType::Int32, DataType::Int32}, Aidge::LeakyReLUImpl_cpu_backward_kernel<int, int>);
+static Registrar<LeakyReLUImplBackward_cpu> registrarLeakyReLUImplBackward_cpu_Float64(
+    {DataType::Float64, DataType::Float64}, Aidge::LeakyReLUImpl_cpu_backward_kernel<double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_BACKWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/MatMulImpl.hpp b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
index 437ba404b1cc39973448f3c5567aec2fe35994e3..e4b76d64baadbcb1baa7d24180c4bb13ed47215b 100644
--- a/include/aidge/backend/cpu/operator/MatMulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
@@ -35,7 +35,7 @@ class MatMulImplBackward_cpu
 
 class MatMulImpl_cpu : public OperatorImpl {
 public:
-    MatMulImpl_cpu(const MatMul_Op &op): OperatorImpl(op) {}
+    MatMulImpl_cpu(const MatMul_Op &op): OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<MatMulImpl_cpu> create(const MatMul_Op &op) {
         return std::make_unique<MatMulImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
index 6cde34d9b123b4f83cbfce412ffa62e0144af8d4..15629b59b31f6f2228802861f6ae0d7d70b2bff9 100644
--- a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
@@ -38,7 +38,7 @@ class MaxPoolingImpl2DBackward_cpu
 
 class MaxPoolingImpl2D_cpu : public OperatorImpl {
 public:
-    MaxPoolingImpl2D_cpu(const MaxPooling_Op<2> &op) : OperatorImpl(op) {}
+    MaxPoolingImpl2D_cpu(const MaxPooling_Op<2> &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<MaxPoolingImpl2D_cpu> create(const MaxPooling_Op<2> &op) {
         return std::make_unique<MaxPoolingImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/MemorizeImpl.hpp b/include/aidge/backend/cpu/operator/MemorizeImpl.hpp
index 6569478001189b60795f21cf618c77c65aeefbfb..10d18d780e1e450d1a2c58faa932e9d851a41f19 100644
--- a/include/aidge/backend/cpu/operator/MemorizeImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MemorizeImpl.hpp
@@ -23,7 +23,7 @@ namespace Aidge {
 class MemorizeImpl_cpu : public OperatorImpl {
 public:
-    MemorizeImpl_cpu(const Memorize_Op& op) : OperatorImpl(op) {}
+    MemorizeImpl_cpu(const Memorize_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<MemorizeImpl_cpu> create(const Memorize_Op& op) {
         return std::make_unique<MemorizeImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/MulImpl.hpp b/include/aidge/backend/cpu/operator/MulImpl.hpp
index a6f63ba284baf4cc12190d6b96a89f0baa821c95..230094475088c6f7802f8a8af75986ded55e9137 100644
--- a/include/aidge/backend/cpu/operator/MulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MulImpl.hpp
@@ -33,7 +33,7 @@ class MulImplBackward_cpu
 
 class MulImpl_cpu : public OperatorImpl {
 public:
-    MulImpl_cpu(const Mul_Op& op) : OperatorImpl(op) {}
+    MulImpl_cpu(const Mul_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<MulImpl_cpu> create(const Mul_Op& op) {
         return std::make_unique<MulImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/PadImpl.hpp b/include/aidge/backend/cpu/operator/PadImpl.hpp
index 2320662710f9802878811e51ec4439bd812aea67..a1efb0f699beb7a45cc104e7c6ab723c1952a5b1 100644
--- a/include/aidge/backend/cpu/operator/PadImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PadImpl.hpp
@@ -40,7 +40,7 @@ class PadImpl2DBackward_cpu
 
 class PadImpl2D_cpu : public OperatorImpl {
 public:
-    PadImpl2D_cpu(const Pad_Op<2> &op) : OperatorImpl(op) {}
+    PadImpl2D_cpu(const Pad_Op<2> &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<PadImpl2D_cpu> create(const Pad_Op<2> &op) {
         return std::make_unique<PadImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/PopImpl.hpp b/include/aidge/backend/cpu/operator/PopImpl.hpp
index 86c20349d5554e400c15a6e3488cb547f86abee2..29272f5d759b5b39c6bfd704ab1e84b0777e33c5 100644
--- a/include/aidge/backend/cpu/operator/PopImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PopImpl.hpp
@@ -33,7 +33,7 @@ class PopImplBackward_cpu
 
 class PopImpl_cpu : public OperatorImpl {
 public:
-    PopImpl_cpu(const Pop_Op& op) : OperatorImpl(op) {}
+    PopImpl_cpu(const Pop_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<PopImpl_cpu> create(const Pop_Op& op) {
         return std::make_unique<PopImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/PowImpl.hpp b/include/aidge/backend/cpu/operator/PowImpl.hpp
index c6e4cd36746141d7f1d1092c9bd45af41d8a9173..f82b3dfd91ad6e1ea6f732105963c1ee07b08367 100644
--- a/include/aidge/backend/cpu/operator/PowImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PowImpl.hpp
@@ -33,7 +33,7 @@ class PowImplBackward_cpu
 
 class PowImpl_cpu : public OperatorImpl {
 public:
-    PowImpl_cpu(const Pow_Op& op) : OperatorImpl(op) {}
+    PowImpl_cpu(const Pow_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<PowImpl_cpu> create(const Pow_Op& op) {
         return std::make_unique<PowImpl_cpu>(op);
@@ -41,6 +41,7 @@ public:
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
 
     void forward() override;
+    void backward() override;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl.hpp b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
index 3338d0c40c057995fe37b1652966241bf4a96b59..1c87fe6d80b3d571c55e4355d8b5ef703a2133e4 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
@@ -12,13 +12,15 @@
 #ifndef AIDGE_CPU_OPERATOR_RELUIMPL_H_
 #define AIDGE_CPU_OPERATOR_RELUIMPL_H_
 
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <tuple>    // std::tuple
+#include <vector>
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include <memory>
-#include <vector>
 
 namespace Aidge {
 // class ReLU_Op;
@@ -33,14 +35,17 @@ class ReLUImplBackward_cpu
 
 class ReLUImpl_cpu : public OperatorImpl {
 public:
-    ReLUImpl_cpu(const ReLU_Op& op) : OperatorImpl(op) {}
+    ReLUImpl_cpu(const ReLU_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ReLUImpl_cpu> create(const ReLU_Op& op) {
         return std::make_unique<ReLUImpl_cpu>(op);
     }
 
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
+
+    void forward() override final;
+
+    void backward() override final;
 };
 
 namespace {
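All of these headers rely on the same Registrable/Registrar pattern: file-scope Registrar objects run their constructors during static initialization and enter a kernel into a registry keyed by data types, which forward()/backward() then query at run time. A minimal standalone model of the idea, an illustration rather than Aidge's actual implementation:

    #include <cstddef>
    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    using Kernel = std::function<void(const float*, float*, std::size_t)>;

    std::map<std::string, Kernel>& registry() {
        static std::map<std::string, Kernel> r;  // built on first use, before any lookup
        return r;
    }

    struct Registrar {
        Registrar(const std::string& key, Kernel k) { registry()[key] = std::move(k); }
    };

    // Runs before main(), mimicking the static Registrar objects in these headers.
    static Registrar reluFloat32("Float32", [](const float* in, float* out, std::size_t n) {
        for (std::size_t i = 0; i < n; ++i) out[i] = in[i] > 0 ? in[i] : 0;
    });

    int main() {
        const float in[3] = {-1.f, 0.f, 2.f};
        float out[3];
        registry().at("Float32")(in, out, 3);  // dispatch by data-type key
        std::cout << out[0] << ' ' << out[2] << '\n';  // prints: 0 2
    }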
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..b68ea076cb94eb9550b4a7af89ef58162ee15aea
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp
@@ -0,0 +1,45 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_RELUIMPL_BACKWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_RELUIMPL_BACKWARD_KERNEL_H_
+
+#include <cstddef>  // std::size_t
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/ReLUImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void ReLUImpl_cpu_backward_kernel(const std::size_t inputLenght,
+                                  const void* input_,
+                                  void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
+    for (std::size_t i = 0; i < inputLenght; ++i) {
+        output[i] = (input[i] > I(0)) ? static_cast<O>(input[i]) : O(0);
+    }
+}
+
+namespace {
+static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Float32(
+    {DataType::Float32, DataType::Float32}, Aidge::ReLUImpl_cpu_backward_kernel<float, float>);
+static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Int32(
+    {DataType::Int32, DataType::Int32}, Aidge::ReLUImpl_cpu_backward_kernel<int, int>);
+static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Float64(
+    {DataType::Float64, DataType::Float64}, Aidge::ReLUImpl_cpu_backward_kernel<double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_RELUIMPL_BACKWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
index 9b85eb812caffca3820a711d46775e1134db863f..e2b7288320e3e57495044381c34c5b1be1d3c243 100644
--- a/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
@@ -25,55 +25,22 @@
 namespace Aidge {
 // class ReduceMean_Op;
 
-// compute kernel registry for forward and backward
-// DIM 1
-class ReduceMeanImpl1DForward_cpu
-    : public Registrable<ReduceMeanImpl1DForward_cpu,
+// Every DIM
+class ReduceMeanImplForward_cpu
+    : public Registrable<ReduceMeanImplForward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+                         void(const ReduceMean_Op::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
 class ReduceMeanImpl1DBackward_cpu
     : public Registrable<ReduceMeanImpl1DBackward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+                         void(const ReduceMean_Op::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
 
-// DIM 2
-class ReduceMeanImpl2DForward_cpu
-    : public Registrable<ReduceMeanImpl2DForward_cpu,
-                         std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
-class ReduceMeanImpl2DBackward_cpu
-    : public Registrable<ReduceMeanImpl2DBackward_cpu,
-                         std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
-// DIM 3
-class ReduceMeanImpl3DForward_cpu
-    : public Registrable<ReduceMeanImpl3DForward_cpu,
-                         std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
-class ReduceMeanImpl3DBackward_cpu
-    : public Registrable<ReduceMeanImpl3DBackward_cpu,
-                         std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
-
-class ReduceMeanImpl1D_cpu : public OperatorImpl {
-   public:
-    ReduceMeanImpl1D_cpu(const ReduceMean_Op<1>& op) : OperatorImpl(op) {}
-
-    static std::unique_ptr<ReduceMeanImpl1D_cpu> create(const ReduceMean_Op<1> &op) {
-        return std::make_unique<ReduceMeanImpl1D_cpu>(op);
-    }
-
-   public:
-    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
-};
-
-class ReduceMeanImpl2D_cpu : public OperatorImpl {
+class ReduceMeanImpl_cpu : public OperatorImpl {
    public:
-    ReduceMeanImpl2D_cpu(const ReduceMean_Op<2>& op) : OperatorImpl(op) {}
+    ReduceMeanImpl_cpu(const ReduceMean_Op& op) : OperatorImpl(op, "cpu") {}
 
-    static std::unique_ptr<ReduceMeanImpl2D_cpu> create(const ReduceMean_Op<2> &op) {
-        return std::make_unique<ReduceMeanImpl2D_cpu>(op);
+    static std::unique_ptr<ReduceMeanImpl_cpu> create(const ReduceMean_Op &op) {
+        return std::make_unique<ReduceMeanImpl_cpu>(op);
     }
 
    public:
@@ -81,23 +48,80 @@ class ReduceMeanImpl2D_cpu : public OperatorImpl {
     void forward() override;
 };
 
-class ReduceMeanImpl3D_cpu : public OperatorImpl {
-   public:
-    ReduceMeanImpl3D_cpu(const ReduceMean_Op<3>& op) : OperatorImpl(op) {}
-
-    static std::unique_ptr<ReduceMeanImpl3D_cpu> create(const ReduceMean_Op<3> &op) {
-        return std::make_unique<ReduceMeanImpl3D_cpu>(op);
-    }
-
-   public:
-    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
-};
+// // compute kernel registry for forward and backward
+// // DIM 1
+// class ReduceMeanImpl1DForward_cpu
+//     : public Registrable<ReduceMeanImpl1DForward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+// class ReduceMeanImpl1DBackward_cpu
+//     : public Registrable<ReduceMeanImpl1DBackward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+
+// // DIM 2
+// class ReduceMeanImpl2DForward_cpu
+//     : public Registrable<ReduceMeanImpl2DForward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+// class ReduceMeanImpl2DBackward_cpu
+//     : public Registrable<ReduceMeanImpl2DBackward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+// // DIM 3
+// class ReduceMeanImpl3DForward_cpu
+//     : public Registrable<ReduceMeanImpl3DForward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+// class ReduceMeanImpl3DBackward_cpu
+//     : public Registrable<ReduceMeanImpl3DBackward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+
+// class ReduceMeanImpl1D_cpu : public OperatorImpl {
+//    public:
+//     ReduceMeanImpl1D_cpu(const ReduceMean_Op<1>& op) : OperatorImpl(op, "cpu") {}
+
+//     static std::unique_ptr<ReduceMeanImpl1D_cpu> create(const ReduceMean_Op<1> &op) {
+//         return std::make_unique<ReduceMeanImpl1D_cpu>(op);
+//     }
+
+//    public:
+//     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+//     void forward() override;
+// };
+
+// class ReduceMeanImpl2D_cpu : public OperatorImpl {
+//    public:
+//     ReduceMeanImpl2D_cpu(const ReduceMean_Op<2>& op) : OperatorImpl(op, "cpu") {}
+
+//     static std::unique_ptr<ReduceMeanImpl2D_cpu> create(const ReduceMean_Op<2> &op) {
+//         return std::make_unique<ReduceMeanImpl2D_cpu>(op);
+//     }
+
+//    public:
+//     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+//     void forward() override;
+// };
+
+// class ReduceMeanImpl3D_cpu : public OperatorImpl {
+//    public:
+//     ReduceMeanImpl3D_cpu(const ReduceMean_Op<3>& op) : OperatorImpl(op, "cpu") {}
+
+//     static std::unique_ptr<ReduceMeanImpl3D_cpu> create(const ReduceMean_Op<3> &op) {
+//         return std::make_unique<ReduceMeanImpl3D_cpu>(op);
+//     }
+
+//    public:
+//     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+//     void forward() override;
+// };
 
 namespace {
 // add cpu backend to ReduceMean_Op<2> implementation registry
-static Registrar<ReduceMean_Op<1>> registrarReduceMeanImpl1D_cpu("cpu", Aidge::ReduceMeanImpl1D_cpu::create);
-static Registrar<ReduceMean_Op<2>> registrarReduceMeanImpl2D_cpu("cpu", Aidge::ReduceMeanImpl2D_cpu::create);
-static Registrar<ReduceMean_Op<3>> registrarReduceMeanImpl3D_cpu("cpu", Aidge::ReduceMeanImpl3D_cpu::create);
+static Registrar<ReduceMean_Op> registrarReduceMeanImpl_cpu("cpu", Aidge::ReduceMeanImpl_cpu::create);
+// static Registrar<ReduceMean_Op<1>> registrarReduceMeanImpl1D_cpu("cpu", Aidge::ReduceMeanImpl1D_cpu::create);
+// static Registrar<ReduceMean_Op<2>> registrarReduceMeanImpl2D_cpu("cpu", Aidge::ReduceMeanImpl2D_cpu::create);
+// static Registrar<ReduceMean_Op<3>> registrarReduceMeanImpl3D_cpu("cpu", Aidge::ReduceMeanImpl3D_cpu::create);
 }  // namespace
 }  // namespace Aidge
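The reworked forward kernel below keeps a fast path when exactly one axis is reduced, indexing with a pre-stride (product of the dimensions before the axis) and a post-stride (product of the dimensions after it). A standalone sketch of that indexing on a [2, 3, 2] input reduced over axis 1; sizes and values are illustrative:

    #include <cstddef>
    #include <iostream>

    int main() {
        const std::size_t d0 = 2, d1 = 3, d2 = 2;         // shape [2,3,2], reduce axis 1
        const float in[12] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
        float out[4];                                      // output shape [2,2]
        const std::size_t stride_pre  = d0;                // product of dims before the axis
        const std::size_t stride_post = d2;                // product of dims after the axis
        for (std::size_t pre = 0; pre < stride_pre; ++pre)
            for (std::size_t post = 0; post < stride_post; ++post) {
                float sum = 0;
                for (std::size_t i = 0; i < d1; ++i)       // walk the reduced axis
                    sum += in[pre * d1 * stride_post + i * stride_post + post];
                out[pre * stride_post + post] = sum / d1;  // mean over the axis
            }
        std::cout << out[0] << ' ' << out[3] << '\n';      // prints: 3 10
    }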
diff --git a/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp
index 46eb61f2f03acd47d74725ade1425a92f028690c..d7a967e84f53924a4b050ed79d1220f9bc79232e 100644
--- a/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp
@@ -12,10 +12,12 @@
 #ifndef AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_FORWARD_KERNEL_H_
 #define AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_FORWARD_KERNEL_H_
 
-#include <cstddef>
-#include <algorithm>   // std::copy, std::for_each
-#include <numeric>     //std::accumulate
+#include <algorithm>   // std::for_each
+#include <cstddef>     // std::size_t
+#include <cstdint>     // std::int32_t
 #include <functional>  //std::multiplies
+#include <numeric>     //std::accumulate
+#include <vector>
 
 #include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
 #include "aidge/data/Data.hpp"
@@ -23,8 +25,8 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-template <class I, class O, DimSize_t DIM>
-void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs& attrs,
+template <class I, class O>
+void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op::Attrs& attrs,
                                        const std::vector<DimSize_t>& inputDims,
                                        const void* input_,
                                        void* output_) {
@@ -32,14 +34,15 @@ void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs&
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
+    const std::vector<std::int32_t>& axes = std::get<0>(attrs);
     const std::size_t nb_dims = inputDims.size();
     const std::size_t totalElements = std::accumulate(inputDims.cbegin(), inputDims.cend(), 1, std::multiplies<std::size_t>());
 
-    if (DIM == 1) {
-        const std::size_t stride_pre = std::accumulate(inputDims.cbegin(), inputDims.cbegin() + std::get<0>(attrs)[0], 1, std::multiplies<std::size_t>());
-        const std::size_t stride_post = std::accumulate(inputDims.crbegin(), inputDims.crbegin() + nb_dims -1 - std::get<0>(attrs)[0], 1, std::multiplies<std::size_t>());
+    if (axes.size() == 1) {
+        const std::size_t stride_pre = std::accumulate(inputDims.cbegin(), inputDims.cbegin() + axes[0], 1, std::multiplies<std::size_t>());
+        const std::size_t stride_post = std::accumulate(inputDims.crbegin(), inputDims.crbegin() + nb_dims -1 - axes[0], 1, std::multiplies<std::size_t>());
 
-        const std::size_t dim_i = inputDims[std::get<0>(attrs)[0]];
+        const std::size_t dim_i = inputDims[axes[0]];
         for (std::size_t pre = 0; pre < stride_pre; ++pre) {
             for (std::size_t post = 0; post < stride_post; ++post) {
                 const std::size_t idx_i = pre * dim_i * stride_post + post;
@@ -68,7 +71,7 @@ void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs&
         const I* inputAccumulation = input;
         I* outputAccumulation = nullptr;
 
-        for (const auto& axisInt : std::get<0>(attrs)) {
+        for (const auto& axisInt : axes) {
             const std::size_t a = static_cast<std::size_t>(axisInt);
             outputElements /= inputDims[a];
             outputAccumulation = new I[outputElements];
@@ -93,7 +96,7 @@ void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs&
         // Copy elements from inputAccumulation to output while dividing by divisor
         I divisor = totalElements / outputElements;
         std::transform(inputAccumulation, inputAccumulation + outputElements, output,
-                       [divisor](int element) { return element / divisor; });
+                       [divisor](I element) { return element / divisor; });
         if (outputAccumulation) {
             delete[] outputAccumulation;
         }
@@ -103,29 +106,36 @@ void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs&
 }
 
 namespace {
-// DIM = 1
-static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float32(
-    {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,1>);
-static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Int32(
-    {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,1>);
-static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float64(
-    {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,1>);
-
-// DIM = 2
-static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float32(
-    {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,2>);
-static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Int32(
-    {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,2>);
-static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float64(
-    {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,2>);
-
-// DIM = 3
-static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float32(
-    {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,3>);
-static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Int32(
-    {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,3>);
-static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float64(
-    {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,3>);
+static Registrar<ReduceMeanImplForward_cpu> registrarReduceMeanImplForward_cpu_Float32(
+    {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float>);
+static Registrar<ReduceMeanImplForward_cpu> registrarReduceMeanImplForward_cpu_Int32(
+    {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int>);
+static Registrar<ReduceMeanImplForward_cpu> registrarReduceMeanImplForward_cpu_Float64(
+    {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double>);
+
+// // DIM = 1
+// static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float32(
+//     {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,1>);
+// static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Int32(
+//     {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,1>);
+// static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float64(
+//     {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,1>);
+
+// // DIM = 2
+// static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float32(
+//     {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,2>);
+// static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Int32(
+//     {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,2>);
+// static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float64(
+//     {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,2>);
+
+// // DIM = 3
+// static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float32(
+//     {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,3>);
+// static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Int32(
+//     {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,3>);
+// static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float64(
+//     {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,3>);
 }  // namespace
 }  // namespace Aidge
diff --git a/include/aidge/backend/cpu/operator/ReshapeImpl.hpp b/include/aidge/backend/cpu/operator/ReshapeImpl.hpp
index d5754b34e952d52b2071744e9f8e863074ef9fa3..d2d819e8d56df59437904aa9b4ae91185c8288f2 100644
--- a/include/aidge/backend/cpu/operator/ReshapeImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReshapeImpl.hpp
@@ -32,7 +32,7 @@ class ReshapeImplBackward_cpu
 
 class ReshapeImpl_cpu : public OperatorImpl {
 public:
-    ReshapeImpl_cpu(const Reshape_Op& op) : OperatorImpl(op) {}
+    ReshapeImpl_cpu(const Reshape_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ReshapeImpl_cpu> create(const Reshape_Op& op) {
         return std::make_unique<ReshapeImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/ScalingImpl.hpp b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
index bbcb4553d7aa4b17d733e0f455373bebb9c3581c..088625e963b158811aad85665a25b68bf2892bb9 100644
--- a/include/aidge/backend/cpu/operator/ScalingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
@@ -34,7 +34,7 @@ class ScalingImplBackward_cpu
 
 class ScalingImpl_cpu : public OperatorImpl {
 public:
-    ScalingImpl_cpu(const Scaling_Op& op) : OperatorImpl(op) {}
+    ScalingImpl_cpu(const Scaling_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ScalingImpl_cpu> create(const Scaling_Op& op) {
         return std::make_unique<ScalingImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
index 8678a5a56500ec9e37689df7a37ae72bfb3f74d4..f54a6c84aa83414cbe8a7a1713f36dd3311dda3f 100644
--- a/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
@@ -33,7 +33,7 @@ class SigmoidImplBackward_cpu
 
 class SigmoidImpl_cpu : public OperatorImpl {
 public:
-    SigmoidImpl_cpu(const Sigmoid_Op& op) : OperatorImpl(op) {}
+    SigmoidImpl_cpu(const Sigmoid_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<SigmoidImpl_cpu> create(const Sigmoid_Op& op) {
        return std::make_unique<SigmoidImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/SliceImpl.hpp b/include/aidge/backend/cpu/operator/SliceImpl.hpp
index 1cba5906064c51a4f0da2f1f3682b0828a080d43..72d6105388924dc1553cbeba2124da66d804980f 100644
--- a/include/aidge/backend/cpu/operator/SliceImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SliceImpl.hpp
@@ -40,7 +40,7 @@ class SliceImplBackward_cpu
 
 class SliceImpl_cpu : public OperatorImpl {
 public:
-    SliceImpl_cpu(const Slice_Op& op) : OperatorImpl(op) {}
+    SliceImpl_cpu(const Slice_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<SliceImpl_cpu> create(const Slice_Op& op) {
         return std::make_unique<SliceImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
index 005b52f646f9e9ddf14af09cc22d9e2a44ba6dd4..9eb5323702358650f3af91b46a8a1a0872b02675 100644
--- a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
@@ -33,7 +33,7 @@ class SoftmaxImplBackward_cpu
 
 class SoftmaxImpl_cpu : public OperatorImpl {
 public:
-    SoftmaxImpl_cpu(const Softmax_Op& op) : OperatorImpl(op) {}
+    SoftmaxImpl_cpu(const Softmax_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<SoftmaxImpl_cpu> create(const Softmax_Op& op) {
         return std::make_unique<SoftmaxImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/SqrtImpl.hpp b/include/aidge/backend/cpu/operator/SqrtImpl.hpp
index b3723f27b077b9d5ea7e69fd33bd012d02654ffe..33fa7b5bc802005112a2b47357312883706e43e9 100644
--- a/include/aidge/backend/cpu/operator/SqrtImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SqrtImpl.hpp
@@ -12,16 +12,17 @@
 #ifndef AIDGE_CPU_OPERATOR_SQRTIMPL_H_
 #define AIDGE_CPU_OPERATOR_SQRTIMPL_H_
 
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <tuple>
+#include <vector>
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include <memory>
-#include <vector>
 
 namespace Aidge {
-// class Sqrt_Op;
 
 // compute kernel registry for forward and backward
 class SqrtImplForward_cpu
@@ -33,14 +34,17 @@ class SqrtImplBackward_cpu
 
 class SqrtImpl_cpu : public OperatorImpl {
 public:
-    SqrtImpl_cpu(const Sqrt_Op& op) : OperatorImpl(op) {}
+    SqrtImpl_cpu(const Sqrt_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<SqrtImpl_cpu> create(const Sqrt_Op& op) {
         return std::make_unique<SqrtImpl_cpu>(op);
     }
 
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
+
+    void forward() override final;
+
+    void backward() override final;
 };
 
 namespace {
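The Sqrt backward kernel added below evaluates the derivative d/dx sqrt(x) = 1/(2*sqrt(x)) at its input, leaving any multiplication by an upstream gradient to the caller. A quick standalone check of the values it produces (inputs are illustrative):

    #include <cmath>
    #include <cstddef>
    #include <iostream>

    int main() {
        const float input[3] = {1.f, 4.f, 16.f};
        float output[3];
        for (std::size_t i = 0; i < 3; ++i)
            output[i] = 0.5f / std::sqrt(input[i]);  // same formula as the kernel below
        // d/dx sqrt(x) at 1, 4, 16:
        std::cout << output[0] << ' ' << output[1] << ' ' << output[2] << '\n';  // 0.5 0.25 0.125
    }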
diff --git a/include/aidge/backend/cpu/operator/SqrtImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/SqrtImpl_backward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9cf5118a5ac81520d7a180b6aba22417ca512890
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/SqrtImpl_backward_kernels.hpp
@@ -0,0 +1,46 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_SQRTIMPL_BACKWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_SQRTIMPL_BACKWARD_KERNEL_H_
+
+#include <cmath>    // std::sqrt
+#include <cstddef>  // std::size_t
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/SqrtImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void SqrtImpl_cpu_backward_kernel(const std::size_t inputLenght,
+                                  const void* input_,
+                                  void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
+    for (std::size_t i = 0; i < inputLenght; ++i) {
+        output[i] = static_cast<O>(0.5/(std::sqrt(static_cast<float>(input[i]))));
+    }
+}
+
+namespace {
+static Registrar<SqrtImplBackward_cpu> registrarSqrtImplBackward_cpu_Float32(
+    {DataType::Float32, DataType::Float32}, Aidge::SqrtImpl_cpu_backward_kernel<float, float>);
+static Registrar<SqrtImplBackward_cpu> registrarSqrtImplBackward_cpu_Int32(
+    {DataType::Int32, DataType::Int32}, Aidge::SqrtImpl_cpu_backward_kernel<int, int>);
+static Registrar<SqrtImplBackward_cpu> registrarSqrtImplBackward_cpu_Float64(
+    {DataType::Float64, DataType::Float64}, Aidge::SqrtImpl_cpu_backward_kernel<double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_SQRTIMPL_BACKWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp
index a180fc2cc206ef27b52d506a981f9f50f7bf8a3e..886b978c2345ce555d229d684ba83f952be9e00e 100644
--- a/include/aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp
@@ -12,14 +12,16 @@
 #ifndef AIDGE_CPU_OPERATOR_SQRTIMPL_FORWARD_KERNEL_H_
 #define AIDGE_CPU_OPERATOR_SQRTIMPL_FORWARD_KERNEL_H_
 
+#include <cmath>    // std::sqrt
+#include <cstddef>  // std::size_t
+
 #include "aidge/utils/Registrar.hpp"
-#include <cmath>
 
 #include "aidge/backend/cpu/operator/SqrtImpl.hpp"
 
 namespace Aidge {
 template <class I, class O>
-void SqrtImpl_cpu_forward_kernel(std::size_t inputLenght,
+void SqrtImpl_cpu_forward_kernel(const std::size_t inputLenght,
                                  const void* input_,
                                  void* output_) {
 
@@ -27,7 +29,7 @@ void SqrtImpl_cpu_forward_kernel(std::size_t inputLenght,
     O* output = static_cast<O*>(output_);
 
     for (std::size_t i = 0; i < inputLenght; ++i) {
-        output[i] = std::sqrt(input[i]);
+        output[i] = static_cast<O>(std::sqrt(static_cast<float>(input[i])));
     }
 }
diff --git a/include/aidge/backend/cpu/operator/SubImpl.hpp b/include/aidge/backend/cpu/operator/SubImpl.hpp
index b329ec6eb0ed7f450b62cdbe289d69acf4f4edc4..2d957aa67b3061994f7fb2bf9550e4d5338d3967 100644
--- a/include/aidge/backend/cpu/operator/SubImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SubImpl.hpp
@@ -33,7 +33,7 @@ class SubImplBackward_cpu
 
 class SubImpl_cpu : public OperatorImpl {
 public:
-    SubImpl_cpu(const Sub_Op& op) : OperatorImpl(op) {}
+    SubImpl_cpu(const Sub_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<SubImpl_cpu> create(const Sub_Op& op) {
         return std::make_unique<SubImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/TanhImpl.hpp b/include/aidge/backend/cpu/operator/TanhImpl.hpp
index 3e88a3d00b5829fc24d8dc77ce53cb358551c7e4..4169b1a533a8b2382644246ea295a683e6f83f1d 100644
--- a/include/aidge/backend/cpu/operator/TanhImpl.hpp
+++ b/include/aidge/backend/cpu/operator/TanhImpl.hpp
@@ -33,7 +33,7 @@ class TanhImplBackward_cpu
 
 class TanhImpl_cpu : public OperatorImpl {
 public:
-    TanhImpl_cpu(const Tanh_Op& op) : OperatorImpl(op) {}
+    TanhImpl_cpu(const Tanh_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TanhImpl_cpu> create(const Tanh_Op& op) {
         return std::make_unique<TanhImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/TransposeImpl.hpp b/include/aidge/backend/cpu/operator/TransposeImpl.hpp
index 712e672752648f5ff8a3c073f6c81bbe7cc85d9d..3c6913dd71d6642d8b76198a272d64bfaba833e8 100644
--- a/include/aidge/backend/cpu/operator/TransposeImpl.hpp
+++ b/include/aidge/backend/cpu/operator/TransposeImpl.hpp
@@ -57,7 +57,7 @@ class TransposeImpl6DBackward_cpu
 
 class TransposeImpl2D_cpu : public OperatorImpl {
 public:
-    TransposeImpl2D_cpu(const Transpose_Op<2>& op) : OperatorImpl(op) {}
+    TransposeImpl2D_cpu(const Transpose_Op<2>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TransposeImpl2D_cpu> create(const Transpose_Op<2>& op) {
         return std::make_unique<TransposeImpl2D_cpu>(op);
@@ -68,7 +68,7 @@ public:
 };
 class TransposeImpl3D_cpu : public OperatorImpl {
 public:
-    TransposeImpl3D_cpu(const Transpose_Op<3>& op) : OperatorImpl(op) {}
+    TransposeImpl3D_cpu(const Transpose_Op<3>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TransposeImpl3D_cpu> create(const Transpose_Op<3>& op) {
         return std::make_unique<TransposeImpl3D_cpu>(op);
@@ -79,7 +79,7 @@ public:
 };
 class TransposeImpl4D_cpu : public OperatorImpl {
 public:
-    TransposeImpl4D_cpu(const Transpose_Op<4>& op) : OperatorImpl(op) {}
+    TransposeImpl4D_cpu(const Transpose_Op<4>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TransposeImpl4D_cpu> create(const Transpose_Op<4>& op) {
         return std::make_unique<TransposeImpl4D_cpu>(op);
@@ -90,7 +90,7 @@ public:
 };
 class TransposeImpl5D_cpu : public OperatorImpl {
 public:
-    TransposeImpl5D_cpu(const Transpose_Op<5>& op) : OperatorImpl(op) {}
+    TransposeImpl5D_cpu(const Transpose_Op<5>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TransposeImpl5D_cpu> create(const Transpose_Op<5>& op) {
         return std::make_unique<TransposeImpl5D_cpu>(op);
@@ -101,7 +101,7 @@ public:
 };
 class TransposeImpl6D_cpu : public OperatorImpl {
 public:
-    TransposeImpl6D_cpu(const Transpose_Op<6>& op) : OperatorImpl(op) {}
+    TransposeImpl6D_cpu(const Transpose_Op<6>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TransposeImpl6D_cpu> create(const Transpose_Op<6>& op) {
         return std::make_unique<TransposeImpl6D_cpu>(op);
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index 7355ebcb3e8fb68bf74dbd1ce831bf471d285cb7..abd40bd6af06c52945815fd6245e661710fa1127 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -9,17 +9,18 @@
  *
  ********************************************************************************/
 
+#include "aidge/backend/cpu/operator/AddImpl.hpp"
+
 #include <cassert>
 #include <numeric> // std::accumulate
 #include <vector>
 
-#include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include "aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
-
-#include "aidge/backend/cpu/operator/AddImpl.hpp"
-#include "aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
 Aidge::NbElts_t Aidge::AddImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
@@ -27,15 +28,18 @@ Aidge::NbElts_t Aidge::AddImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex
 }
 
 void Aidge::AddImpl_cpu::forward() {
-    assert(mOp.getRawInput(0) && "missing input in Add operator");
-    DataType datatypeFirstInput = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType();
-    for (IOIndex_t i = 1; i < mOp.nbInputs(); ++i) {
-        assert(mOp.getRawInput(i) && "missing input in Add operator");
-        assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->dataType() == datatypeFirstInput);
+    const auto& opTensor = static_cast<const OperatorTensor&>(mOp);
+    AIDGE_ASSERT(opTensor.getInput(0)->hasImpl(), "cannot run Add forward because the 0-th input has no implementation.");
+    assert(opTensor.getInput(0) && "missing input in Add operator");
+    DataType datatypeFirstInput = opTensor.getInput(0)->dataType();
+    for (IOIndex_t i = 1; i < opTensor.nbInputs(); ++i) {
+        AIDGE_ASSERT(opTensor.getInput(i)->hasImpl(), "cannot run Add forward because the {}-th input has no implementation.", i);
+        assert(opTensor.getInput(i) && "missing input in Add operator");
+        assert(opTensor.getInput(i)->dataType() == datatypeFirstInput);
     }
 
     // Find the correct kernel type
-    const auto outputDataType = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType();
+    const auto outputDataType = opTensor.getOutput(0)->dataType();
     const Registrar<AddImplForward_cpu>::registrar_key registrarKey = {
         datatypeFirstInput,
         outputDataType};
@@ -55,26 +59,26 @@ void Aidge::AddImpl_cpu::forward() {
     // TODO: right now, if needed, memory will be allocated/deallocated at each
    // call to forward(). We might put the following shared_ptr as members of
     // this class to avoid that.
-    std::size_t nbDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->nbDims();
+    const std::size_t nbDims = opTensor.getOutput(0)->nbDims();
     std::vector<std::vector<std::size_t>> inputsDims;
     std::vector<const void*> opInputs;
-    std::vector<std::shared_ptr<Tensor>> inputsFallback(mOp.nbInputs());
-    for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
+    std::vector<std::shared_ptr<Tensor>> inputsFallback(opTensor.nbInputs());
+    for (IOIndex_t i = 0; i < opTensor.nbInputs(); ++i) {
         std::vector<std::size_t> inputDims(nbDims, 1);
-        auto dims = std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->dims();
+        auto dims = opTensor.getInput(i)->dims();
         for(std::size_t j=dims.size()-1; j+1>0; --j) {
             std::size_t idx = nbDims - (dims.size()-j);
             inputDims[idx] = dims[j];
         }
         inputsDims.push_back(inputDims);
-        const auto& input = std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->refCastFrom(inputsFallback[i], *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
+        const auto& input = opTensor.getInput(i)->refCastFrom(inputsFallback[i], *opTensor.getOutput(0));
         opInputs.push_back(input.getImpl()->rawPtr());
     }
 
     kernelFunc(opInputs,
                inputsDims,
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
-               getCPUPtr(mOp.getRawOutput(0)));
+               opTensor.getOutput(0)->size(),
+               opTensor.getOutput(0)->dims(),
+               getCPUPtr(opTensor.getRawOutput(0)));
 }
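AddImpl_cpu::forward() above right-aligns each input's shape against the output rank before broadcasting, padding missing leading dimensions with 1. A standalone sketch of that alignment step; the unsigned-safe `j + 1 > 0` countdown is taken from the loop above, and the shapes are illustrative:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        const std::size_t nbDims = 4;                    // output rank
        const std::vector<std::size_t> dims = {3, 4};    // one input's shape
        std::vector<std::size_t> inputDims(nbDims, 1);   // pad with leading 1s
        for (std::size_t j = dims.size() - 1; j + 1 > 0; --j)
            inputDims[nbDims - (dims.size() - j)] = dims[j];  // right-align
        for (auto d : inputDims) std::cout << d << ' ';  // prints: 1 1 3 4
        std::cout << '\n';
    }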
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index b849142dd3abe0131fb0c6c448530a7669ce27dc..34ea7b37ec9929908192bde6f31d84ae581640a2 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -28,17 +28,19 @@ Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputI
 }
 
 void Aidge::ConvImpl2D_cpu::forward() {
+    const auto& opTensor = static_cast<const OperatorTensor&>(mOp);
+
     // FIXME: uncomment the following code once memory handling works
     assert(mOp.getRawInput(0) && "missing input #0");
     assert(mOp.getRawInput(1) && "missing input #1");
     assert(mOp.getRawInput(2) && "missing input #2");
 
     // Find the correct kernel type
-    const auto outputDataType = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType();
+    const auto outputDataType = opTensor.getOutput(0)->dataType();
     const Registrar<ConvImpl2DForward_cpu>::registrar_key registrarKey = {
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->dataType(),
+        opTensor.getInput(0)->dataType(),
+        opTensor.getInput(1)->dataType(),
+        opTensor.getInput(2)->dataType(),
         outputDataType};
 
     Registrar<ConvImpl2DForward_cpu>::registrar_type kernelFunc;
@@ -57,12 +59,12 @@ void Aidge::ConvImpl2D_cpu::forward() {
     // call to forward(). We might put the following shared_ptr as members of
     // this class to avoid that.
     std::shared_ptr<Tensor> input0Fallback, input1Fallback, input2Fallback;
-    const auto& input0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->refCastFrom(input0Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
-    const auto& input1 = std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->refCastFrom(input1Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
-    const auto& input2 = std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->refCastFrom(input2Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
+    const auto& input0 = opTensor.getInput(0)->refCastFrom(input0Fallback, *opTensor.getOutput(0));
+    const auto& input1 = opTensor.getInput(1)->refCastFrom(input1Fallback, *opTensor.getOutput(0));
+    const auto& input2 = opTensor.getInput(2)->refCastFrom(input2Fallback, *opTensor.getOutput(0));
 
     // Call kernel
-    kernelFunc(dynamic_cast<const Conv_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
+    kernelFunc(dynamic_cast<const Conv_Op<2>&>(mOp).getStaticAttributes(), opTensor.getInput(0)->template dims<4>(),
                input0.getImpl()->rawPtr(), input1.getImpl()->rawPtr(), input2.getImpl()->rawPtr(),
                getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/DivImpl.cpp b/src/operator/DivImpl.cpp
index 729aff2452b46f00eb6d3e0b558c0b3d58ea2f0e..8e2118e9e78fd364189769ead2eb01f1c55b3c58 100644
--- a/src/operator/DivImpl.cpp
+++ b/src/operator/DivImpl.cpp
@@ -57,17 +57,18 @@ void Aidge::DivImpl_cpu::forward() {
     // 3. Compute the highest number of contiguous data -> 7
     // 4. Compute stride and offset step for the broadcast mechanism
     // 5. Call a simple kernel
+    const auto& opTensor = static_cast<const Div_Op&>(mOp);
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<DivImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+        opTensor.getInput(0)->dataType(),
+        opTensor.getInput(1)->dataType(),
+        opTensor.getOutput(0)->dataType()});
 
     // Compute compatible input dimensions
-    std::vector<std::size_t> dims0 = static_cast<const Div_Op&>(mOp).getInput(0)->dims();
-    std::vector<std::size_t> dims1 = static_cast<const Div_Op&>(mOp).getInput(1)->dims();
-    const std::vector<std::size_t>& outDims = static_cast<const Div_Op&>(mOp).getOutput(0)->dims();
+    std::vector<std::size_t> dims0 = opTensor.getInput(0)->dims();
+    std::vector<std::size_t> dims1 = opTensor.getInput(1)->dims();
+    const std::vector<std::size_t>& outDims = opTensor.getOutput(0)->dims();
 
     // if (dims0 == dims1) {
     //     const std::size_t input0_contiguous_size = std::accumulate(dims0.cbegin(), dims0.cend(), std::size_t(1), std::multiplies<std::size_t>());
@@ -108,24 +109,24 @@ void Aidge::DivImpl_cpu::forward() {
     const std::size_t output_contiguous_size = std::accumulate(outDims.cbegin()+contiguousIdx, outDims.cend(), std::size_t(1), std::multiplies<std::size_t>());
 
     // initialize strides to iterate through data because of broadcasting
-    std::size_t *stride_post0;
-    std::size_t *stride_post1;
+    std::int32_t *stride_post0;
+    std::int32_t *stride_post1;
     std::int32_t *stride_step0;
     std::int32_t *stride_step1;
     if (contiguousIdx > 0) {
-        stride_post0 = new std::size_t[contiguousIdx];
+        stride_post0 = new std::int32_t[contiguousIdx];
         stride_post0[contiguousIdx - 1] = 1;
-        stride_post1 = new std::size_t[contiguousIdx];
+        stride_post1 = new std::int32_t[contiguousIdx];
         stride_post1[contiguousIdx - 1] = 1;
         for (std::size_t i = contiguousIdx - 2; i != static_cast<std::size_t>(-1); --i) {
-            stride_post0[i] = stride_post0[i+1]*dims0[i+1];
-            stride_post1[i] = stride_post1[i+1]*dims1[i+1];
+            stride_post0[i] = stride_post0[i+1]*static_cast<std::int32_t>(dims0[i+1]);
+            stride_post1[i] = stride_post1[i+1]*static_cast<std::int32_t>(dims1[i+1]);
         }
         stride_step0 = new std::int32_t[contiguousIdx];
         stride_step1 = new std::int32_t[contiguousIdx];
         for (std::size_t i = 0; i != contiguousIdx; ++i) {
-            stride_step0[i] = (dims0[i] == 1) ? 1 - static_cast<std::int32_t>(stride_post0[i]) : 1;
-            stride_step1[i] = (dims1[i] == 1) ? 1 - static_cast<std::int32_t>(stride_post1[i]) : 1;
+            stride_step0[i] = (dims0[i] == 1) ? 1 - stride_post0[i] : 1;
+            stride_step1[i] = (dims1[i] == 1) ? 1 - stride_post1[i] : 1;
         }
     }
diff --git a/src/operator/ErfImpl.cpp b/src/operator/ErfImpl.cpp
index 06ec65008aee41215192cd05e126ac4f82388c1b..55752e4f5b9f798a6901e108ddcba2f61fdf9774 100644
--- a/src/operator/ErfImpl.cpp
+++ b/src/operator/ErfImpl.cpp
@@ -9,32 +9,34 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <chrono>   // std::chrono::milliseconds
-#include <numeric>  // std::accumulate
-#include <thread>   // std::this_thread::sleep_for
+#include "aidge/backend/cpu/operator/ErfImpl.hpp"
+
+#include <memory>
 #include <vector>
 
+#include "aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Erf.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/operator/ErfImpl.hpp"
-#include "aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp"
-
 Aidge::NbElts_t Aidge::ErfImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
     return 0;
 }
 
 void Aidge::ErfImpl_cpu::forward() {
+    const Erf_Op& op = static_cast<const Erf_Op&>(mOp);
     // Find the correct kernel type
     auto kernelFunc = Registrar<ErfImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+        op.getInput(0)->dataType(),
+        op.getOutput(0)->dataType()
+    });
 
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+    kernelFunc(
+        op.getInput(0)->size(),
+        op.getInput(0)->getImpl()->rawPtr(),
+        op.getOutput(0)->getImpl()->rawPtr()
+    );
 }
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index 995245907c8c87b0367c7edfa4493bd6b7faf660..eecff38afd4d4487d51a070d6c0f4c2507a2b478 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -9,31 +9,34 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <chrono>   // std::chrono::milliseconds
-#include <numeric>  // std::accumulate
-#include <thread>   // std::this_thread::sleep_for
-#include <vector>
+#include "aidge/backend/cpu/operator/FCImpl.hpp"
+
+#include <cstddef>  // std::size_t
+#include <functional>
+#include <memory>
+#include <tuple>
 
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include "aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp"
+#include "aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp"
 #include "aidge/operator/FC.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include "aidge/backend/cpu/operator/FCImpl.hpp"
-#include "aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp"
 
 void Aidge::FCImpl_cpu::forward() {
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(1)) && "missing input #1");
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(2)) && "missing input #2");
+    const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
+    AIDGE_ASSERT(op_.getInput(0), "missing input #0");
+    AIDGE_ASSERT(op_.getInput(1), "missing input #1");
+    AIDGE_ASSERT(op_.getInput(2), "missing input #2");
 
     // Find the correct kernel type
-    const auto outputDataType = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType();
+    const auto
outputDataType = op_.getOutput(0)->dataType(); const Registrar<FCImplForward_cpu>::registrar_key registrarKey = { - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->dataType(), + op_.getInput(0)->dataType(), + op_.getInput(1)->dataType(), + op_.getInput(2)->dataType(), outputDataType}; Registrar<FCImplForward_cpu>::registrar_type kernelFunc; @@ -52,9 +55,9 @@ void Aidge::FCImpl_cpu::forward() // call to forward(). We might put the following shared_ptr as members of // this class to avoid that. std::shared_ptr<Tensor> input0Fallback, input1Fallback, input2Fallback; - const auto& input0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->refCastFrom(input0Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))); - const auto& input1 = std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->refCastFrom(input1Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))); - const auto& input2 = std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->refCastFrom(input2Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))); + const auto& input0 = op_.getInput(0)->refCastFrom(input0Fallback, *(op_.getOutput(0))); + const auto& input1 = op_.getInput(1)->refCastFrom(input1Fallback, *(op_.getOutput(0))); + const auto& input2 = op_.getInput(2)->refCastFrom(input2Fallback, *(op_.getOutput(0))); // Call kernel const auto batchSize = (input0.dims().size() > 1) ? input0.dims()[0] : 1; @@ -64,3 +67,49 @@ void Aidge::FCImpl_cpu::forward() input0.getImpl()->rawPtr(), input1.getImpl()->rawPtr(), input2.getImpl()->rawPtr(), getCPUPtr(mOp.getRawOutput(0))); } + +void Aidge::FCImpl_cpu::backward() +{ + const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp); + const auto& fc_grad = op_.getOutput(0)->grad(); + assert(fc_grad && "missing output #0 gradient"); + + // Find the correct kernel type + const Registrar<FCImplBackward_cpu>::registrar_key registrarKey = { + fc_grad->dataType(), + op_.getInput(0)->grad()->dataType(), + op_.getInput(1)->grad()->dataType(), + op_.getInput(2)->grad()->dataType()}; + + Registrar<FCImplBackward_cpu>::registrar_type kernelFunc; + if (Registrar<FCImplBackward_cpu>::exists(registrarKey)) { + // One exists with the right inputs/output types + kernelFunc = Registrar<FCImplBackward_cpu>::create(registrarKey); + } + else { + // Otherwise, fall back to the kernel with all types matching output type + kernelFunc = Registrar<FCImplBackward_cpu>::create({ + fc_grad->dataType(), fc_grad->dataType(), fc_grad->dataType(), fc_grad->dataType()}); + } + + // Convert input data (no overhead if not needed!) + // TODO: right now, if needed, memory will be allocated/deallocated at each + // call to backward(). We might put the following shared_ptr as members of + // this class to avoid that. + std::shared_ptr<Tensor> input0gradFallback, input1gradFallback, input2gradFallback; + const auto& input0grad = op_.getInput(0)->grad()->refCastFrom(input0gradFallback, *(op_.getOutput(0))); + const auto& input1grad = op_.getInput(1)->grad()->refCastFrom(input1gradFallback, *(op_.getOutput(0))); + const auto& input2grad = op_.getInput(2)->grad()->refCastFrom(input2gradFallback, *(op_.getOutput(0))); + + // Call kernel + const auto batchSize = (input0grad.dims().size() > 1) ?
input0grad.dims()[0] : 1; + kernelFunc(op_.getStaticAttributes(), + batchSize, + input0grad.size() / batchSize, + getCPUPtr(fc_grad), + getCPUPtr(op_.getInput(0)), + getCPUPtr(op_.getInput(1)), + input0grad.getImpl()->rawPtr(), + input1grad.getImpl()->rawPtr(), + input2grad.getImpl()->rawPtr()); +} diff --git a/src/operator/GatherImpl.cpp b/src/operator/GatherImpl.cpp index ce98627d95e0d05541db1ccaf4896abe756431b0..d80b53e7e864faf3fca289f94aba4f511bcba161 100644 --- a/src/operator/GatherImpl.cpp +++ b/src/operator/GatherImpl.cpp @@ -9,32 +9,34 @@ * ********************************************************************************/ -#include <cassert> -#include <chrono> // std::chrono::milliseconds -#include <numeric> // std::accumulate -#include <thread> // std::this_thread::sleep_for +#include "aidge/backend/cpu/operator/GatherImpl.hpp" + +#include <memory> #include <vector> +#include "aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp" +#include "aidge/data/Data.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Gather.hpp" #include "aidge/utils/Types.h" -#include "aidge/backend/cpu/operator/GatherImpl.hpp" -#include "aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp" - Aidge::NbElts_t Aidge::GatherImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { // this implementation can be in-place return 0; } void Aidge::GatherImpl_cpu::forward() { + const Gather_Op& op = static_cast<const Gather_Op&>(mOp); auto kernelFunc = Registrar<GatherImplForward_cpu>::create({ - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), - std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + op.getInput(0)->dataType(), + op.getOutput(0)->dataType() + }); // Call kernel kernelFunc(dynamic_cast<const Gather_Op&>(mOp).getStaticAttributes(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), - std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); + op.getInput(0)->dims(), + op.getInput(0)->getImpl()->rawPtr(), + op.getOutput(0)->getImpl()->rawPtr() + ); } diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp index 17912eb1dc75930eaf7595eb189af39df4d4fa2e..67847429eb06b24eac9ac43893a0bc24b934f655 100644 --- a/src/operator/LeakyReLUImpl.cpp +++ b/src/operator/LeakyReLUImpl.cpp @@ -10,17 +10,17 @@ ********************************************************************************/ #include <cassert> -#include <chrono> // std::chrono::milliseconds -#include <numeric> // std::accumulate -#include <thread> // std::this_thread::sleep_for #include <vector> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/LeakyReLU.hpp" #include "aidge/utils/Types.h" +#include "aidge/utils/Registrar.hpp" #include "aidge/backend/cpu/data/GetCPUPtr.h" #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp" #include "aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp" +#include "aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp" Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { // this implementation can be in-place @@ -28,16 +28,38 @@ Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(const Aidge::IO } void Aidge::LeakyReLUImpl_cpu::forward() { - assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0"); + const LeakyReLU_Op& op_ = dynamic_cast<const LeakyReLU_Op&>(mOp); +
std::shared_ptr<Tensor> in0 = op_.getInput(0); + std::shared_ptr<Tensor> out0 = op_.getOutput(0); + AIDGE_ASSERT(in0, "missing input #0"); // Find the correct kernel type auto kernelFunc = Registrar<LeakyReLUImplForward_cpu>::create({ - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), - std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + in0->dataType(), + out0->dataType()}); // Call kernel kernelFunc(dynamic_cast<const LeakyReLU_Op&>(mOp).getStaticAttributes(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(), + in0->size(), getCPUPtr(mOp.getRawInput(0)), getCPUPtr(mOp.getRawOutput(0))); } + +void Aidge::LeakyReLUImpl_cpu::backward() { + // reversing in and out Data for backprop + const LeakyReLU_Op& op_ = dynamic_cast<const LeakyReLU_Op&>(mOp); + std::shared_ptr<Tensor> in0 = op_.getOutput(0)->grad(); + std::shared_ptr<Tensor> out0 = op_.getInput(0)->grad(); + AIDGE_ASSERT(in0, "missing output #0 gradient"); + + // Find the correct kernel type + auto kernelFunc = Registrar<LeakyReLUImplBackward_cpu>::create({ + in0->dataType(), + out0->dataType()}); + + // Call kernel + kernelFunc(op_.getStaticAttributes(), + in0->size(), + getCPUPtr(in0), + getCPUPtr(out0)); +} \ No newline at end of file diff --git a/src/operator/PowImpl.cpp b/src/operator/PowImpl.cpp index 22b4e27afd4e327c42be066bf7eeb6effdd8b2a9..de79e1978c61387019c7f5fa69932e4bbd52b5bc 100644 --- a/src/operator/PowImpl.cpp +++ b/src/operator/PowImpl.cpp @@ -48,3 +48,25 @@ void Aidge::PowImpl_cpu::forward() { getCPUPtr(mOp.getRawInput(1)), getCPUPtr(mOp.getRawOutput(0))); } + +void Aidge::PowImpl_cpu::backward() { + // Find the correct kernel type + const Pow_Op& op_ = dynamic_cast<const Pow_Op&>(mOp); + auto kernelFunc = Registrar<PowImplForward_cpu>::create({ + op_.getOutput(0)->grad()->dataType(), + op_.getInput(0)->grad()->dataType(), + op_.getInput(1)->grad()->dataType()}); + + const std::vector<std::size_t> input0gradDims = getBroadcastedDims(op_.getInput(0)->grad()->dims(), + op_.getOutput(0)->grad()->dims()); + const std::vector<std::size_t> input1gradDims = getBroadcastedDims(op_.getInput(1)->grad()->dims(), + op_.getOutput(0)->grad()->dims()); + + // Call kernel + kernelFunc(op_.getOutput(0)->grad()->dims(), + input0gradDims, + input1gradDims, + getCPUPtr(mOp.getRawOutput(0)), + getCPUPtr(mOp.getRawInput(0)), + getCPUPtr(mOp.getRawInput(1))); +} \ No newline at end of file diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp index 8863be282ce0c7b7bfbfb938372cf304bc4cc4bd..005521461a40cf36547953ae9bcf5dbb2b0e1094 100644 --- a/src/operator/ReLUImpl.cpp +++ b/src/operator/ReLUImpl.cpp @@ -9,18 +9,18 @@ * ********************************************************************************/ -#include <cassert> -#include <chrono> // std::chrono::milliseconds -#include <numeric> // std::accumulate -#include <thread> // std::this_thread::sleep_for +#include <memory> #include <vector> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/ReLU.hpp" #include "aidge/utils/Types.h" #include "aidge/backend/cpu/data/GetCPUPtr.h" +#include "aidge/utils/ErrorHandling.hpp" #include "aidge/backend/cpu/operator/ReLUImpl.hpp" #include "aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp" +#include "aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp" Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { // this implementation can be in-place @@ -28,15 +28,33 @@ Aidge::NbElts_t
Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex } void Aidge::ReLUImpl_cpu::forward() { - assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0"); + std::shared_ptr<Tensor> in0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0)); + AIDGE_ASSERT(in0, "missing input #0"); // Find the correct kernel type auto kernelFunc = Registrar<ReLUImplForward_cpu>::create({ - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + in0->dataType(), std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); // Call kernel - kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(), + kernelFunc(in0->size(), getCPUPtr(mOp.getRawInput(0)), getCPUPtr(mOp.getRawOutput(0))); } + +void Aidge::ReLUImpl_cpu::backward() { + // reversing in and out Tensors + const ReLU_Op& op_ = dynamic_cast<const ReLU_Op&>(mOp); + std::shared_ptr<Tensor> in0 = op_.getOutput(0)->grad(); + std::shared_ptr<Tensor> out0 = op_.getInput(0)->grad(); + AIDGE_ASSERT(out0, "current {} operator output#0 has no gradient Tensor.", op_.type()); + + // Find the correct kernel type + auto kernelFunc = Registrar<ReLUImplBackward_cpu>::create({ + in0->dataType(), + out0->dataType() + }); + + // Call kernel + kernelFunc(in0->size(), getCPUPtr(in0), getCPUPtr(out0)); +} diff --git a/src/operator/ReduceMeanImpl.cpp b/src/operator/ReduceMeanImpl.cpp index e31a53d84947e5b2ced14ee9ee6e2badaef07071..82f96f112016d0498d241ee9ed14989066cbc979 100644 --- a/src/operator/ReduceMeanImpl.cpp +++ b/src/operator/ReduceMeanImpl.cpp @@ -9,71 +9,87 @@ * ********************************************************************************/ -#include <cassert> -#include <chrono> // std::chrono::milliseconds -#include <numeric> // std::accumulate -#include <thread> // std::this_thread::sleep_for +#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp" + +#include <memory> #include <vector> #include "aidge/utils/Types.h" #include "aidge/operator/ReduceMean.hpp" - -#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp" #include "aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp" -Aidge::NbElts_t Aidge::ReduceMeanImpl1D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { - // this implementation can be in-place - return 0; -} -Aidge::NbElts_t Aidge::ReduceMeanImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { - // this implementation can be in-place - return 0; -} -Aidge::NbElts_t Aidge::ReduceMeanImpl3D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { + +Aidge::NbElts_t Aidge::ReduceMeanImpl_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { // this implementation can be in-place return 0; } +// Aidge::NbElts_t Aidge::ReduceMeanImpl1D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { +// // this implementation can be in-place +// return 0; +// } +// Aidge::NbElts_t Aidge::ReduceMeanImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { +// // this implementation can be in-place +// return 0; +// } +// Aidge::NbElts_t Aidge::ReduceMeanImpl3D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { +// // this implementation can be in-place +// return 0; +// } -void Aidge::ReduceMeanImpl1D_cpu::forward() { - +void Aidge::ReduceMeanImpl_cpu::forward() { + const ReduceMean_Op& op_ = dynamic_cast<const ReduceMean_Op&>(mOp); // Find the correct kernel type - auto kernelFunc = - Registrar<ReduceMeanImpl1DForward_cpu>::create({ - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), -
std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + auto kernelFunc = Registrar<ReduceMeanImplForward_cpu>::create({ + op_.getInput(0)->dataType(), + op_.getOutput(0)->dataType()}); // Call kernel - kernelFunc(dynamic_cast<const ReduceMean_Op<1>&>(mOp).getStaticAttributes(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), - std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); + kernelFunc(op_.getStaticAttributes(), + op_.getInput(0)->dims(), + op_.getInput(0)->getImpl()->rawPtr(), + op_.getOutput(0)->getImpl()->rawPtr()); } -void Aidge::ReduceMeanImpl2D_cpu::forward() { +// void Aidge::ReduceMeanImpl1D_cpu::forward() { - // Find the correct kernel type - auto kernelFunc = - Registrar<ReduceMeanImpl2DForward_cpu>::create({ - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), - std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); +// // Find the correct kernel type +// auto kernelFunc = +// Registrar<ReduceMeanImpl1DForward_cpu>::create({ +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), +// std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); - // Call kernel - kernelFunc(dynamic_cast<const ReduceMean_Op<2>&>(mOp).getStaticAttributes(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), - std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); -} +// // Call kernel +// kernelFunc(dynamic_cast<const ReduceMean_Op<1>&>(mOp).getStaticAttributes(), +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), +// std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +// } -void Aidge::ReduceMeanImpl3D_cpu::forward() { +// void Aidge::ReduceMeanImpl2D_cpu::forward() { - // Find the correct kernel type - auto kernelFunc = - Registrar<ReduceMeanImpl3DForward_cpu>::create({ - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), - std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); +// // Find the correct kernel type +// auto kernelFunc = +// Registrar<ReduceMeanImpl2DForward_cpu>::create({ +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), +// std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); - // Call kernel - kernelFunc(dynamic_cast<const ReduceMean_Op<3>&>(mOp).getStaticAttributes(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), - std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); -} \ No newline at end of file +// // Call kernel +// kernelFunc(dynamic_cast<const ReduceMean_Op<2>&>(mOp).getStaticAttributes(), +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), +// std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +// } + +// void Aidge::ReduceMeanImpl3D_cpu::forward() { + +// // Find the correct kernel type +// auto kernelFunc = +// Registrar<ReduceMeanImpl3DForward_cpu>::create({ +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), +// std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + +// // Call kernel +// kernelFunc(dynamic_cast<const 
ReduceMean_Op<3>&>(mOp).getStaticAttributes(), +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), +// std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +// } \ No newline at end of file diff --git a/src/operator/ReshapeImpl.cpp b/src/operator/ReshapeImpl.cpp index 02dea1da3d4422abf37b62193bba83e83c87a83f..11df6f663d9a78476103d9671d9d428719c0126d 100644 --- a/src/operator/ReshapeImpl.cpp +++ b/src/operator/ReshapeImpl.cpp @@ -9,13 +9,13 @@ * ********************************************************************************/ -#include <cassert> +#include "aidge/backend/cpu/operator/ReshapeImpl.hpp" +#include "aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Reshape.hpp" #include "aidge/utils/Types.h" - -#include "aidge/backend/cpu/operator/ReshapeImpl.hpp" -#include "aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp" +#include "aidge/utils/ErrorHandling.hpp" Aidge::NbElts_t Aidge::ReshapeImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { // this implementation can be in-place @@ -23,17 +23,17 @@ Aidge::NbElts_t Aidge::ReshapeImpl_cpu::getNbRequiredProtected(const Aidge::IOIn } void Aidge::ReshapeImpl_cpu::forward() { - assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size() == - std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size() - && "input must have the same overall size as shape"); + const Reshape_Op& op_ = static_cast<const Reshape_Op&>(mOp); + AIDGE_ASSERT(op_.getInput(0)->size() == op_.getOutput(0)->size(), + "input must have the same overall size as shape"); // Find the correct kernel type auto kernelFunc = Registrar<ReshapeImplForward_cpu>::create({ - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), - std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + op_.getInput(0)->dataType(), + op_.getOutput(0)->dataType()}); // Call kernel - kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), - std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); + kernelFunc(op_.getInput(0)->size(), + op_.getInput(0)->getImpl()->rawPtr(), + op_.getOutput(0)->getImpl()->rawPtr()); } diff --git a/src/operator/SqrtImpl.cpp b/src/operator/SqrtImpl.cpp index 2766e8ae21738775aadad86629a99d0a180e537e..cb635cce517ef0fc6494e7570bad66e19da89aa2 100644 --- a/src/operator/SqrtImpl.cpp +++ b/src/operator/SqrtImpl.cpp @@ -9,18 +9,18 @@ * ********************************************************************************/ -#include <cassert> -#include <chrono> // std::chrono::milliseconds -#include <numeric> // std::accumulate -#include <thread> // std::this_thread::sleep_for +#include <memory> #include <vector> +#include "aidge/backend/cpu/data/GetCPUPtr.h" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Sqrt.hpp" +#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/Types.h" -#include "aidge/backend/cpu/data/GetCPUPtr.h" #include "aidge/backend/cpu/operator/SqrtImpl.hpp" #include "aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp" +#include "aidge/backend/cpu/operator/SqrtImpl_backward_kernels.hpp" Aidge::NbElts_t Aidge::SqrtImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { // this implementation can be in-place @@ -28,15 +28,35 @@ Aidge::NbElts_t 
Aidge::SqrtImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex } void Aidge::SqrtImpl_cpu::forward() { - assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0"); + std::shared_ptr<Tensor> in0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0)); + std::shared_ptr<Tensor> out0 = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)); + AIDGE_ASSERT(in0, "missing input #0"); // Find the correct kernel type auto kernelFunc = Registrar<SqrtImplForward_cpu>::create({ - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), - std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + in0->dataType(), + out0->dataType()}); // Call kernel - kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(), + kernelFunc(in0->size(), getCPUPtr(mOp.getRawInput(0)), getCPUPtr(mOp.getRawOutput(0))); +} + +void Aidge::SqrtImpl_cpu::backward() { + // reversing in and out Data for backprop + const Sqrt_Op& op_ = dynamic_cast<const Sqrt_Op&>(mOp); + std::shared_ptr<Tensor> out0grad = op_.getOutput(0)->grad(); + std::shared_ptr<Tensor> in0grad = op_.getInput(0)->grad(); + AIDGE_ASSERT(out0grad, "missing output #0 gradient"); + + // Find the correct kernel type + auto kernelFunc = Registrar<SqrtImplBackward_cpu>::create({ + out0grad->dataType(), + in0grad->dataType()}); + + // Call kernel + kernelFunc(out0grad->size(), + getCPUPtr(out0grad), + getCPUPtr(in0grad)); } \ No newline at end of file diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..31fbed4c090f5e4848df12f2bc2ccd36e3aedf9d --- /dev/null +++ b/unit_tests/data/Test_TensorImpl.cpp @@ -0,0 +1,192 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0.
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> +#include <cstddef> // std::size_t +#include <cstdint> // std::uint16_t +#include <chrono> +#include <iostream> +#include <memory> +#include <numeric> // std::accumulate +#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution + +#include "aidge/data/Tensor.hpp" +#include "aidge/backend/cpu/data/TensorImpl.hpp" +#include "aidge/operator/Add.hpp" +#include "aidge/backend/cpu/operator/AddImpl.hpp" + +namespace Aidge { + +TEST_CASE("Test addition of Tensors","[TensorImpl][Add]") { + constexpr std::uint16_t NBTRIALS = 10; + // Create a random number generator + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0.1 and 1.1 + std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10)); + std::uniform_int_distribution<int> boolDist(0,1); + + // Create Add Operator + std::shared_ptr<Node> myAdd = Add(2); + auto op = std::static_pointer_cast<OperatorTensor>(myAdd->getOperator()); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + + // Create 2 input Tensors + std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(); + op->associateInput(0,T0); + T0->setDataType(DataType::Float32); + T0->setBackend("cpu"); + std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(); + op->associateInput(1,T1); + T1->setDataType(DataType::Float32); + T1->setBackend("cpu"); + + // Create results Tensor + Tensor Tres{}; + Tres.setDataType(DataType::Float32); + Tres.setBackend("cpu"); + + // To measure execution time of 'Add_Op::forward()' member function call + std::chrono::time_point<std::chrono::system_clock> start; + std::chrono::time_point<std::chrono::system_clock> end; + std::chrono::duration<double, std::micro> duration{}; + + std::size_t number_of_operation = 0; + + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate 2 random Tensors + // handle dimensions, replace some dimensions with '1' to get broadcasting + constexpr std::size_t nbDims = 4; + std::vector<std::size_t> dims; + for (std::size_t i = 0; i < nbDims; ++i) { + dims.push_back(dimSizeDist(gen)); + } + std::vector<std::size_t> dims0 = dims; + std::vector<std::size_t> dims1 = dims; + std::vector<std::size_t> dimsOut = dims; + for (std::size_t i = 0; i < nbDims; ++i) { + if (boolDist(gen)) { + dims0[i] = 1; + } + if (boolDist(gen)) { + dims1[i] = 1; + } + dimsOut[i] = (dims0[i] == 1) ? dims1[i] : dims0[i]; + } + + // create arrays and fill them with random values + float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]]; + float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]]; + float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]]; + + for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) { + array0[i] = valueDist(gen); + } + for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) { + array1[i] = valueDist(gen); + } + + // compute true result + const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1}; + const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1}; + for (std::size_t a = 0; a < dimsOut[0]; ++a) { + for (std::size_t b = 0; b < dimsOut[1]; ++b) { + const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ?
a : 0) + + strides0[1] * ((dims0[1] > 1) ? b : 0); + const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0) + + strides1[1] * ((dims1[1] > 1) ? b : 0); + for (std::size_t c = 0; c < dimsOut[2]; ++c) { + const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a)); + for (std::size_t d = 0; d < dimsOut[3]; ++d) { + std::size_t idx0 = idx0_0 + + strides0[2] * ((dims0[2] > 1) ? c : 0) + + ((dims0[3] > 1) ? d : 0); + std::size_t idx1 = idx1_0 + + strides1[2] * ((dims1[2] > 1) ? c : 0) + + ((dims1[3] > 1) ? d : 0); + result[idx_out + d] = array0[idx0] + array1[idx1]; + // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " - " << array1[idx1] << " -> " << idx_out + d << std::endl; + } + } + } + } + + // conversion to Aidge::Tensors + // input0 + T0->resize(dims0); + T0->getImpl()->setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]); + + // input1 + T1->resize(dims1); + T1->getImpl()->setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]); + + // results + Tres.resize(dimsOut); + Tres.getImpl()->setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]); + + Tensor T2 = *T0 + *T1; + REQUIRE(T2 == Tres); + + // no implementation + Tensor T3(T1->dims()); + REQUIRE_THROWS(*T0 + T3); + + // // wrong backend + // static Registrar<Add_Op> registrarAddImpl_custom("custom", [](const Add_Op& op) { return std::make_unique<AddImpl_cpu>(op); } ); + // static Registrar<Tensor> registrarTensorImpl_custom_Int32({"custom", DataType::Int32}, + // [] (DeviceIdx_t device, std::vector<DimSize_t> dims) { + // return std::make_shared<TensorImpl_cpu<int>>(device, dims); + // } + // ); + // T1.setBackend("custom"); + // REQUIRE_THROWS(T0 + T1); + + // wrong datatype + Tensor T4(T1->dims()); + T4.setDataType(DataType::Float64); + REQUIRE_THROWS(*T0 + T4); + } +} + +TEST_CASE("Test subtraction of Tensors","[TensorImpl][Sub]") { + Tensor T0 = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; + Tensor T1 = Array3D<int, 2, 2, 2>{{{{7, 1}, {3, 7}}, {{54, 0}, {7, 12}}}}; + Tensor T2 = T0 - T1; + T2.print(); + REQUIRE(T2 == Tensor(Array3D<int, 2, 2, 2>{{{{-6,1},{0,-3}},{{-49,6},{0,-4}}}})); + + Tensor T3(T1.dims()); + REQUIRE_THROWS(T0 - T3); +} + +TEST_CASE("Test multiplication of Tensors","[TensorImpl][Mul]") { + Tensor T0 = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; + Tensor T1 = Array3D<int, 2, 2, 2>{{{{7, 2}, {3, 7}}, {{5, 6}, {7, 8}}}}; + Tensor T2 = T0 * T1; + T2.print(); + REQUIRE(T2 == Tensor(Array3D<int, 2, 2, 2>{{{{7,4},{9,28}},{{25,36},{49,64}}}})); + + Tensor T3(T1.dims()); + REQUIRE_THROWS(T0 * T3); +} + +TEST_CASE("Test division of Tensors","[TensorImpl][Div]") { + Tensor T0 = Array3D<int, 2, 2, 2>{{{{7,4},{9,28}},{{25,36},{49,64}}}}; + Tensor T1 = Array3D<int, 2, 2, 2>{{{{7, 2}, {3, 7}}, {{5, 6}, {7, 8}}}}; + Tensor T2 = T0 / T1; + T2.print(); + REQUIRE(T2 == Tensor(Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}})); + + Tensor T3(T1.dims()); + REQUIRE_THROWS(T0 / T3); +} +} // namespace Aidge diff --git a/unit_tests/operator/Test_ReduceMeanImpl.cpp b/unit_tests/operator/Test_ReduceMeanImpl.cpp index 494b7a6ace17173ef7b956bc9dabf4d27e665e5a..d9bf68b78d1ece371cbfb5cda3c502f82eaf97de 100644 --- a/unit_tests/operator/Test_ReduceMeanImpl.cpp +++ b/unit_tests/operator/Test_ReduceMeanImpl.cpp @@ -17,6 +17,7 @@ #include "aidge/operator/Conv.hpp" #include "aidge/backend/cpu.hpp" +#include "aidge/utils/TensorUtils.hpp" using namespace Aidge; @@ -138,35 +139,60 @@ TEST_CASE("[cpu/operator] ReduceMean(forward)",
"[ReduceMean][CPU]") { } SECTION("all_axes") { - std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> { - { - { - { 5.0, 1.0 }, - { 20.0, 2.0 } - }, - { - { 30.0, 1.0 }, - { 40.0, 2.0 } - }, + SECTION("1") { + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> { { - { 55.0, 1.0 }, - { 60.0, 2.0 } + { + { 5.0, 1.0 }, + { 20.0, 2.0 } + }, + { + { 30.0, 1.0 }, + { 40.0, 2.0 } + }, + { + { 55.0, 1.0 }, + { 60.0, 2.0 } + } } - } - }); - std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> { - {18.25} - }); + }); + std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> { + {18.25} + }); - std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1, 2}, 0); - auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator()); - op->associateInput(0,myInput); - op->setDataType(DataType::Float32); - op->setBackend("cpu"); - op->computeOutputDims(); - myReduceMean->forward(); - op->getOutput(0)->print(); + std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1, 2}, 0); + auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator()); + op->associateInput(0,myInput); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myReduceMean->forward(); + op->getOutput(0)->print(); - REQUIRE(*(op->getOutput(0)) == *myOutput); + REQUIRE(*(op->getOutput(0)) == *myOutput); + } + SECTION("2") { + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<float,5,4> { + {{ 0.004232f, 0.105120f, 0.045124f, 0.009205f}, + { 0.000766f, 0.272162f, 0.503560f, 0.044163f}, + { 0.049755f, 0.000305f, 0.143634f, 0.013253f}, + { 0.096258f, 0.311231f, 0.358143f, 0.000452f}, + { 0.468617f, 0.015693f, 0.145316f, 0.000105f}} + }); + std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> { + {0.1293547f} + }); + + std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1}, 0); + auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator()); + op->associateInput(0,myInput); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myReduceMean->forward(); + op->getOutput(0)->print(); + // approxEq<float>(*(op->getOutput(0)), *myOutput); + REQUIRE(approxEq<float>(*(op->getOutput(0)), *myOutput)); + } } } \ No newline at end of file diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp index 025ca8ba067297ff3232e05ea9142899dca8ddef..525dbf43fe9d80550fbd5e089efa4f2cf56cf5f1 100644 --- a/unit_tests/scheduler/Test_Scheduler.cpp +++ b/unit_tests/scheduler/Test_Scheduler.cpp @@ -20,6 +20,7 @@ #include "aidge/scheduler/Scheduler.hpp" #include "aidge/backend/cpu.hpp" +#include "aidge/recipes/GraphViewHelper.hpp" using namespace Aidge; @@ -300,7 +301,7 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") { std::vector<std::shared_ptr<Aidge::Tensor>> dataIn = {inputTensor}; REQUIRE_NOTHROW(scheduler.forward(true, false, dataIn)); - + scheduler.saveSchedulingDiagram("schedulingSequential"); std::shared_ptr<Tensor> expectedOutput1 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{ @@ -345,4 +346,45 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") { bool equal4 = (*other4 == expectedOutput4); REQUIRE(equal4); } +} + +TEST_CASE("[cpu/scheduler] SequentialScheduler(backward)", "[scheduler][backward]") { + + // create GraphView + std::shared_ptr<GraphView> gv = Sequential({ReLU("relu0"), Sqrt("srqt0"), ReLU("relu1")}); + + 
std::shared_ptr<Tensor> inputTensor = + std::make_shared<Tensor>(Array4D<float, 2, 1, 5, 5>{{{{{0.0f, 1.0f, 2.0f, 3.0f, 4.0f}, + {5.0f, 6.0f, 7.0f, 8.0f, 9.0f}, + {10.0f, 11.0f, 12.0f, 13.0f, 14.0f}, + {15.0f, 16.0f, 17.0f, 18.0f, 19.0f}, + {20.0f, 21.0f, 22.0f, 23.0f, 24.0f}}}, + {{{25.0f, 26.0f, 27.0f, 28.0f, 29.0f}, + {30.0f, 31.0f, 32.0f, 33.0f, 34.0f}, + {35.0f, 36.0f, 37.0f, 38.0f, 39.0f}, + {40.0f, 41.0f, 42.0f, 43.0f, 44.0f}, + {45.0f, 46.0f, 47.0f, 48.0f, 49.0f}}}}}); + auto label = inputTensor; + // implem already set to default + auto myProd = Producer(inputTensor, "prod"); + myProd -> addChild(gv); + gv -> compile("cpu", DataType::Float32); + compile_gradient(gv); + SequentialScheduler scheduler(gv); + scheduler.forward(); + auto predictedOutput = gv->getOrderedOutputs()[0].first; + + std::shared_ptr<Tensor> targetOutput = + std::make_shared<Tensor>(Array4D<float, 2, 1, 5, 5>{{{{{0.0f, 1.0f, 1.0f, 2.0f, 2.0f}, + {2.0f, 2.0f, 3.0f, 3.0f, 3.0f}, + {3.0f, 3.0f, 3.0f, 4.0f, 4.0f}, + {4.0f, 4.0f, 4.0f, 4.0f, 4.0f}, + {4.0f, 5.0f, 5.0f, 5.0f, 5.0f}}}, + {{{5.0f, 5.0f, 5.0f, 5.0f, 5.0f}, + {5.0f, 6.0f, 6.0f, 6.0f, 6.0f}, + {6.0f, 6.0f, 6.0f, 6.0f, 6.0f}, + {6.0f, 6.0f, 6.0f, 7.0f, 7.0f}, + {7.0f, 7.0f, 7.0f, 7.0f, 7.0f}}}}}); + + REQUIRE_NOTHROW(scheduler.backward({targetOutput})); } \ No newline at end of file
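
Editor's note on the DivImpl.cpp broadcast mechanism above: the numbered comments (1-5) describe splitting the output into a trailing block that is contiguous in both inputs, then walking the leading dimensions with precomputed strides so that a size-1 (broadcast) dimension never advances the corresponding input offset. The sketch below illustrates the same broadcasting arithmetic with the simpler zero-stride variant (a size-1 dimension simply gets stride 0) instead of the stride_post/stride_step bookkeeping of the actual kernel; all names here are illustrative, none of this is Aidge API.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        // in0 has shape {2,1}, in1 has shape {1,3}; broadcast output shape is {2,3}
        const std::vector<float> in0{10.f, 20.f};
        const std::vector<float> in1{1.f, 2.f, 4.f};
        const std::size_t outDims[2] = {2, 3};
        // a size-1 dimension gets stride 0, so its index never moves the offset
        const std::size_t s0[2] = {1, 0};  // strides of in0 (dim 1 is broadcast)
        const std::size_t s1[2] = {0, 1};  // strides of in1 (dim 0 is broadcast)
        std::vector<float> out(outDims[0] * outDims[1]);
        for (std::size_t i = 0; i < outDims[0]; ++i) {
            for (std::size_t j = 0; j < outDims[1]; ++j) {
                out[i * outDims[1] + j] = in0[i * s0[0] + j * s0[1]]
                                        / in1[i * s1[0] + j * s1[1]];
            }
        }
        for (float v : out) { std::cout << v << ' '; }  // prints: 10 5 2.5 20 10 5
        std::cout << '\n';
        return 0;
    }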
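Editor's note on the FCImpl.cpp kernel selection: both forward() and the new backward() first probe the registry for a kernel under the exact tuple of input/output data types with exists(), and only then fall back to the kernel whose types all match the output type. A simplified stand-in for that selection logic, using a plain std::map in place of the real Registrar (all names hypothetical):

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    using Kernel = std::function<void()>;

    // toy registry keyed by a type-signature string
    std::map<std::string, Kernel> gRegistry = {
        {"float32/float32", [] { std::cout << "float32 kernel\n"; }}};

    Kernel select(const std::string& requested, const std::string& fallback) {
        const auto it = gRegistry.find(requested);  // like Registrar::exists()
        if (it != gRegistry.end()) {
            return it->second;          // exact input/output type match
        }
        return gRegistry.at(fallback);  // all types match the output type
    }

    int main() {
        // no float64/float32 kernel registered: falls back to the float32 kernel
        select("float64/float32", "float32/float32")();
        return 0;
    }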
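Editor's note on the element-wise backward() additions (ReLU, LeakyReLU, Sqrt): they all reverse in and out, reading the gradient of the output and writing the gradient of the input. For LeakyReLU the chain rule gives dL/dx = dL/dy when x > 0 and dL/dx = alpha * dL/dy otherwise, so a complete backward kernel also needs the forward input (or equivalently the sign of the output) to choose the slope. A sketch of that computation with a hypothetical signature, not the registered Aidge kernel:

    #include <cstddef>

    // dL/dx = dL/dy for x > 0, alpha * dL/dy otherwise
    void leakyReLUGrad(float alpha, std::size_t size, const float* input,
                       const float* gradOutput, float* gradInput) {
        for (std::size_t i = 0; i < size; ++i) {
            gradInput[i] = (input[i] > 0.0f) ? gradOutput[i]
                                             : alpha * gradOutput[i];
        }
    }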