diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml index 1063298f053d6fffeead06c862d9f68bbc855bba..39d5a378b7ed94b0455ad5cd36464b180c52c535 100644 --- a/.gitlab/ci/build.gitlab-ci.yml +++ b/.gitlab/ci/build.gitlab-ci.yml @@ -136,6 +136,8 @@ build:ubuntu_python: - source venv/bin/activate - python3 -m pip install -r requirements.txt - python3 -m pip install . + - python3 -m pip install numpy unittest-xml-reporting + - python3 -m pip list artifacts: expire_in: 1 week paths: @@ -160,9 +162,10 @@ build:ubuntu_python: # script: # # Download dependencies # # aidge_core -# - 'curl "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:windows_cpp" -o build_artifacts.zip' -# - Expand-Archive -Path .\build_artifacts.zip -DestinationPath . -Force -# - Remove-Item .\build_cpp\ -Recurse +# - $DEPENDENCY_NAME="aidge_core" +# - $DEPENDENCY_JOB="build:windows_cpp" +# - !reference [.download_dependency_windows, script] +# - Remove-Item .\build_cpp\ -Recurse -Force -ErrorAction Ignore # - $env:CMAKE_PREFIX_PATH = '../install_cpp' # - mkdir -p build_cpp @@ -196,8 +199,9 @@ build:ubuntu_python: # script: # # Download dependencies # # aidge_core (Python) -# - 'curl "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:windows_python" -o build_artifacts.zip' -# - Expand-Archive -Path .\build_artifacts.zip -DestinationPath . -Force +# - $DEPENDENCY_NAME="aidge_core" +# - $DEPENDENCY_JOB="build:windows_python" +# - !reference [.download_dependency_windows, script] # - python -m pip install virtualenv # - virtualenv venv diff --git a/.gitlab/ci/test.gitlab-ci.yml b/.gitlab/ci/test.gitlab-ci.yml index 8f6b1e54109c4c2dcfa026fd477a93b6c0a1c641..d0c94c2a3bcbb2908863b15b2b52ef068a55ff94 100644 --- a/.gitlab/ci/test.gitlab-ci.yml +++ b/.gitlab/ci/test.gitlab-ci.yml @@ -18,9 +18,8 @@ test:ubuntu_python: script: - source venv/bin/activate - cd ${CI_PROJECT_NAME} - - python3 -m pip install numpy unittest-xml-reporting - - python3 -m pip list - # Run on discovery all tests located in core/unit_tests/python and discard the stdout + + # Run on discovery all tests located in core/unit_tests/python and discard the stdout # only to show the errors/warnings and the results of the tests - python3 -m xmlrunner discover -s unit_tests/ -v -b --output-file xmlrunner-results.xml artifacts: diff --git a/aidge_backend_cpu/unit_tests/test_tensor.py b/aidge_backend_cpu/unit_tests/test_tensor.py deleted file mode 100644 index 37531b43cf7755dfb760e575450b70bfa9a6ff68..0000000000000000000000000000000000000000 --- a/aidge_backend_cpu/unit_tests/test_tensor.py +++ /dev/null @@ -1,71 +0,0 @@ -import unittest -import aidge_core -import aidge_backend_cpu -import numpy as np - - -class test_tensor(unittest.TestCase): - """Test tensor binding - """ - def setUp(self): - pass - def tearDown(self): - pass - - def test_getavailable_backends(self): - self.assertTrue("cpu" in aidge_core.Tensor.get_available_backends()) - - def test_numpy_int_to_tensor(self): - np_array = np.arange(9).reshape(1,1,3,3).astype(np.int32) - # Numpy -> Tensor - t = aidge_core.Tensor(np_array) - self.assertEqual(t.dtype(), aidge_core.DataType.Int32) - for i_t, i_n in zip(t, np_array.flatten()): - self.assertTrue(i_t == i_n) - for i,j in zip(t.dims(), np_array.shape): - self.assertEqual(i,j) - def test_tensor_int_to_numpy(self): - np_array = np.arange(9).reshape(1,1,3,3) - # Numpy -> Tensor - t = aidge_core.Tensor(np_array) - # Tensor -> Numpy - nnarray = np.array(t) - for i_nn, i_n in 
zip(nnarray.flatten(), np_array.flatten()): - self.assertTrue(i_nn == i_n) - for i,j in zip(t.dims(), nnarray.shape): - self.assertEqual(i,j) - - def test_numpy_int64_to_tensor(self): - np_array = np.arange(9).reshape(1,1,3,3).astype(np.int64) - # Numpy -> Tensor - t = aidge_core.Tensor(np_array) - self.assertEqual(t.dtype(), aidge_core.DataType.Int64) - for i_t, i_n in zip(t, np_array.flatten()): - self.assertTrue(i_t == i_n) - for i,j in zip(t.dims(), np_array.shape): - self.assertEqual(i,j) - - def test_numpy_float_to_tensor(self): - t = aidge_core.Tensor() - np_array = np.random.rand(1, 1, 3, 3).astype(np.float32) - # Numpy -> Tensor - t = aidge_core.Tensor(np_array) - self.assertEqual(t.dtype(), aidge_core.DataType.Float32) - for i_t, i_n in zip(t, np_array.flatten()): - self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference - for i,j in zip(t.dims(), np_array.shape): - self.assertEqual(i,j) - - def test_get_set(self): - dims = [2,2,2] - - np_array = np.arange(8).reshape(dims).astype(np.int32) - # Numpy -> Tensor - t = aidge_core.Tensor(np_array) - for i in range(8): - self.assertEqual(t[i], i) - t[i] = 5 - self.assertEqual(t[i], 5) - -if __name__ == '__main__': - unittest.main() diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp index 0237962b503a52a2b7338ab81f285ef05a746e81..78a317281475bd05ee317127b02cfeddcfd07e49 100644 --- a/include/aidge/backend/cpu.hpp +++ b/include/aidge/backend/cpu.hpp @@ -12,7 +12,6 @@ #ifndef AIDGE_CPU_IMPORTS_H_ #define AIDGE_CPU_IMPORTS_H_ -#include "aidge/backend/cpu/data/TensorImpl.hpp" #include "aidge/backend/cpu/operator/AddImpl.hpp" #include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp" #include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp" @@ -21,7 +20,9 @@ #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp" #include "aidge/backend/cpu/operator/ConvImpl.hpp" #include "aidge/backend/cpu/operator/DivImpl.hpp" +#include "aidge/backend/cpu/operator/ErfImpl.hpp" #include "aidge/backend/cpu/operator/FCImpl.hpp" +#include "aidge/backend/cpu/operator/GatherImpl.hpp" #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp" #include "aidge/backend/cpu/operator/MatMulImpl.hpp" #include "aidge/backend/cpu/operator/MemorizeImpl.hpp" @@ -29,7 +30,9 @@ #include "aidge/backend/cpu/operator/PadImpl.hpp" #include "aidge/backend/cpu/operator/PopImpl.hpp" #include "aidge/backend/cpu/operator/PowImpl.hpp" +#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp" #include "aidge/backend/cpu/operator/ReLUImpl.hpp" +#include "aidge/backend/cpu/operator/ReshapeImpl.hpp" #include "aidge/backend/cpu/operator/ScalingImpl.hpp" #include "aidge/backend/cpu/operator/SigmoidImpl.hpp" #include "aidge/backend/cpu/operator/SliceImpl.hpp" @@ -37,5 +40,9 @@ #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp" #include "aidge/backend/cpu/operator/SubImpl.hpp" #include "aidge/backend/cpu/operator/TanhImpl.hpp" +#include "aidge/backend/cpu/operator/TransposeImpl.hpp" + +#include "aidge/backend/cpu/data/TensorImpl.hpp" + +#endif /* AIDGE_CPU_IMPORTS_H_ */ -#endif /* AIDGE_CPU_IMPORTS_H_ */ \ No newline at end of file diff --git a/include/aidge/backend/cpu/data/GetCPUPtr.h b/include/aidge/backend/cpu/data/GetCPUPtr.h deleted file mode 100644 index 47e3b07e8fa08cdcd714745a9a49bb03e30f79f5..0000000000000000000000000000000000000000 --- a/include/aidge/backend/cpu/data/GetCPUPtr.h +++ /dev/null @@ -1,24 +0,0 @@ -/******************************************************************************** - * Copyright (c) 2023 CEA-List - * - * This 
program and the accompanying materials are made available under the - * terms of the Eclipse Public License 2.0 which is available at - * http://www.eclipse.org/legal/epl-2.0. - * - * SPDX-License-Identifier: EPL-2.0 - * - ********************************************************************************/ - -#ifndef AIDGE_CPU_DATA_GETCPUPTR_H_ -#define AIDGE_CPU_DATA_GETCPUPTR_H_ - -#include "aidge/data/Tensor.hpp" - -namespace Aidge { -inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) { - const auto tensor = std::static_pointer_cast<Tensor>(data); - return tensor->getImpl()->hostPtr(tensor->getImplOffset()); -} -} // namespace Aidge - -#endif // AIDGE_CPU_DATA_GETCPUPTR_H_ \ No newline at end of file diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp deleted file mode 100644 index 6c92a925a592b69e7dc7b70c38f0f5a363d88601..0000000000000000000000000000000000000000 --- a/include/aidge/backend/cpu/data/TensorImpl.hpp +++ /dev/null @@ -1,193 +0,0 @@ -/******************************************************************************** - * Copyright (c) 2023 CEA-List - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License 2.0 which is available at - * http://www.eclipse.org/legal/epl-2.0. - * - * SPDX-License-Identifier: EPL-2.0 - * - ********************************************************************************/ - -#ifndef AIDGE_CPU_DATA_TENSORIMPL_H_ -#define AIDGE_CPU_DATA_TENSORIMPL_H_ - -#include "aidge/backend/TensorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/half.hpp" -#include "aidge/utils/Registrar.hpp" -#include "aidge/utils/Types.h" -#include "aidge/utils/ErrorHandling.hpp" -#include "aidge/utils/future_std/span.hpp" - -namespace Aidge { - -template <class T> -class TensorImpl_cpu : public TensorImpl { -private: - /// Pointer to the data and its capacity - future_std::span<T> mData; - /// If this instance own the data, std::unique_ptr manages it - std::unique_ptr<T[]> mDataOwner; - -public: - static constexpr const char *Backend = "cpu"; - - TensorImpl_cpu(DeviceIdx_t device, NbElts_t length) : TensorImpl(Backend, device, length) {} - - bool operator==(const TensorImpl &otherImpl) const override final { - const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl); - AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts); - - std::size_t i = 0; - for (; i < mNbElts && - *static_cast<const T*>(rawPtr(i)) == *static_cast<const T*>(typedOtherImpl.rawPtr(i)); - ++i) { - } - return i == mNbElts; - } - - static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, NbElts_t length) { - return std::make_shared<TensorImpl_cpu<T>>(device, length); - } - - inline std::size_t scalarSize() const noexcept override final { return sizeof(T); } - - void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final { - const T* srcT = static_cast<const T *>(src); - T* dstT = static_cast<T *>(rawPtr(offset)); - - AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity"); - AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported"); - std::copy(srcT, srcT + length, dstT); - } - - void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final { - if (length == 0) { - return; - } - - T* dstT = static_cast<T *>(rawPtr(offset)); - AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above 
capacity"); - switch (srcDt) - { - case DataType::Float64: - std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length, - dstT); - break; - case DataType::Float32: - std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length, - dstT); - break; - case DataType::Float16: - std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length, - dstT); - break; - case DataType::Int64: - std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length, - dstT); - break; - case DataType::UInt64: - std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length, - dstT); - break; - case DataType::Int32: - std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length, - dstT); - break; - case DataType::UInt32: - std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length, - dstT); - break; - case DataType::Int16: - std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length, - dstT); - break; - case DataType::UInt16: - std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length, - dstT); - break; - case DataType::Int8: - std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length, - dstT); - break; - case DataType::UInt8: - std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length, - dstT); - break; - default: - AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type."); - break; - } - } - - void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final { - AIDGE_ASSERT(device.first == Backend, "backend must match"); - AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend"); - copy(src, length, offset); - } - - inline void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final { - copy(src, length, offset); - } - - void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final { - const T* src = static_cast<const T*>(rawPtr(offset)); - AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity"); - std::copy(src, src + length, static_cast<T *>(dst)); - } - - void *rawPtr(NbElts_t offset = 0) override final { - lazyInit(); - return (mData.data() + offset); - }; - - const void *rawPtr(NbElts_t offset = 0) const override final { - AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const rawPtr"); - return (mData.data() + offset); - }; - - void *hostPtr(NbElts_t offset = 0) override final { - lazyInit(); - return (mData.data() + offset); - }; - - const void *hostPtr(NbElts_t offset = 0) const override final { - AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const hostPtr"); - return (mData.data() + offset); - }; - - void setRawPtr(void *ptr, NbElts_t length) override final { - AIDGE_ASSERT(length >= mNbElts, "trying to set raw pointer of insufficient capacity"); - mData = future_std::span<T>(static_cast<T *>(ptr), length); - mDataOwner.reset(); - }; - - virtual ~TensorImpl_cpu() = default; - -private: - void lazyInit() { - if (mData.size() < mNbElts) { - // Need more data, a re-allocation will occur - AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data"); - mDataOwner.reset(new T[mNbElts]); - mData = future_std::span<T>(mDataOwner.get(), mNbElts); - } - } -}; - -namespace { 
-static Registrar<Tensor> registrarTensorImpl_cpu_Float64( - {"cpu", DataType::Float64}, Aidge::TensorImpl_cpu<double>::create); -static Registrar<Tensor> registrarTensorImpl_cpu_Float32( - {"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create); -static Registrar<Tensor> registrarTensorImpl_cpu_Float16( - {"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create); -static Registrar<Tensor> registrarTensorImpl_cpu_Int32( - {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create); -static Registrar<Tensor> registrarTensorImpl_cpu_Int64( - {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<long>::create); -} // namespace -} // namespace Aidge - -#endif /* AIDGE_CPU_DATA_TENSORIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/ErfImpl.hpp b/include/aidge/backend/cpu/operator/ErfImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..5c0a6fd49f4e2d435eed8e8baa979f59dbd84e68 --- /dev/null +++ b/include/aidge/backend/cpu/operator/ErfImpl.hpp @@ -0,0 +1,50 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_ERFIMPL_H_ +#define AIDGE_CPU_OPERATOR_ERFIMPL_H_ + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/Erf.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" +#include <memory> +#include <vector> + +namespace Aidge { +// class Erf_Op; + +// compute kernel registry for forward and backward +class ErfImplForward_cpu + : public Registrable<ErfImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> { +}; +class ErfImplBackward_cpu + : public Registrable<ErfImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> { +}; + +class ErfImpl_cpu : public OperatorImpl { +public: + ErfImpl_cpu(const Erf_Op& op) : OperatorImpl(op) {} + + static std::unique_ptr<ErfImpl_cpu> create(const Erf_Op& op) { + return std::make_unique<ErfImpl_cpu>(op); + } + + NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + void forward() override; +}; + +namespace { +static Registrar<Erf_Op> registrarErfImpl_cpu("cpu", Aidge::ErfImpl_cpu::create); +} +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_ERFIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..bb92401b6e72b1528d0342474bf394a7c29a4042 --- /dev/null +++ b/include/aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp @@ -0,0 +1,45 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_ERFIMPL_FORWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_ERFIMPL_FORWARD_KERNEL_H_ + +#include <cmath> + +#include "aidge/utils/Registrar.hpp" + +#include "aidge/backend/cpu/operator/ErfImpl.hpp" + +namespace Aidge { +template <class I, class O> +void ErfImpl_cpu_forward_kernel(std::size_t inputLength, + const void* input_, + void* output_) { + + const I* input = static_cast<const I*>(input_); + O* output = static_cast<O*>(output_); + + for (std::size_t i = 0; i < inputLength; ++i) { + output[i] = std::erf(input[i]); + } +} + +namespace { +static Registrar<ErfImplForward_cpu> registrarErfImplForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::ErfImpl_cpu_forward_kernel<float, float>); +static Registrar<ErfImplForward_cpu> registrarErfImplForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::ErfImpl_cpu_forward_kernel<int, int>); +static Registrar<ErfImplForward_cpu> registrarErfImplForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::ErfImpl_cpu_forward_kernel<double, double>); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_ERFIMPL_FORWARD_KERNEL_H_ */ diff --git a/include/aidge/backend/cpu/operator/GatherImpl.hpp b/include/aidge/backend/cpu/operator/GatherImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..1d235ff14ca01955c268a7b061e6ecb7b2bbbb2a --- /dev/null +++ b/include/aidge/backend/cpu/operator/GatherImpl.hpp @@ -0,0 +1,50 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0.
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_GATHERIMPL_H_ +#define AIDGE_CPU_OPERATOR_GATHERIMPL_H_ + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/Gather.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" +#include <memory> +#include <vector> + +namespace Aidge { +// class Gather_Op; + +// compute kernel registry for forward and backward +class GatherImplForward_cpu + : public Registrable<GatherImplForward_cpu, std::tuple<DataType, DataType>, void(const typename Gather_Op::Attrs&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class GatherImplBackward_cpu + : public Registrable<GatherImplBackward_cpu, std::tuple<DataType, DataType>, void(const typename Gather_Op::Attrs&, const std::vector<DimSize_t>&, const void*, void*)> { +}; + +class GatherImpl_cpu : public OperatorImpl { +public: + GatherImpl_cpu(const Gather_Op& op) : OperatorImpl(op) {} + + static std::unique_ptr<GatherImpl_cpu> create(const Gather_Op& op) { + return std::make_unique<GatherImpl_cpu>(op); + } + + NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + void forward() override; +}; + +namespace { +static Registrar<Gather_Op> registrarGatherImpl_cpu("cpu", Aidge::GatherImpl_cpu::create); +} +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_GATHERIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..0d312e3c143720c7d920128c8d484d4c68439a24 --- /dev/null +++ b/include/aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp @@ -0,0 +1,66 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_GATHERIMPL_FORWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_GATHERIMPL_FORWARD_KERNEL_H_ + +#include "aidge/utils/Registrar.hpp" +#include <cstddef> +#include <cmath> +#include "aidge/data/Data.hpp" +#include "aidge/utils/Types.h" + +#include "aidge/backend/cpu/operator/GatherImpl.hpp" + +namespace Aidge { +template <class I, class O> +void GatherImpl_cpu_forward_kernel(const typename Gather_Op::Attrs& attrs, const std::vector<DimSize_t>& inputDims, const void* input_, void* output_) +{ + const I* input = static_cast<const I*>(input_); + O* output = static_cast<O*>(output_); + + const std::size_t axisIdx = std::get<2>(attrs)>=0 ? + std::get<2>(attrs) : + static_cast<std::size_t>(std::get<2>(attrs)) + inputDims.size(); + + std::size_t postAxisElems = 1; + for (std::size_t i = axisIdx + 1; i < inputDims.size(); ++i) { + postAxisElems *= inputDims[i]; + } + std::size_t preAxisElems = 1; + for (std::size_t i = 0; i < axisIdx; ++i) { + preAxisElems *= inputDims[i]; + } + + const std::vector<std::int64_t> indices = std::get<0>(attrs); + for (std::size_t i=0; i<preAxisElems; ++i) + { + for(std::size_t j=0; j<indices.size(); ++j) + { + const std::size_t idx = indices[j] >= 0 ? 
indices[j] : static_cast<std::size_t>(indices[j]) + inputDims[axisIdx]; + const I* startPtr = std::next(input, i * postAxisElems * inputDims[axisIdx] + idx * postAxisElems); + std::copy_n(startPtr, postAxisElems, output); + output += postAxisElems; + } + } +} + +namespace { +static Registrar<GatherImplForward_cpu> registrarGatherImplForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::GatherImpl_cpu_forward_kernel<float, float>); +static Registrar<GatherImplForward_cpu> registrarGatherImplForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::GatherImpl_cpu_forward_kernel<int, int>); +static Registrar<GatherImplForward_cpu> registrarGatherImplForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::GatherImpl_cpu_forward_kernel<double, double>); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_GATHERIMPL_FORWARD_KERNEL_H_ */ diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp index 761b9579c3c3dc187e4b0fac24812fa77f916e65..d10b32e18ee983fc1270bc4a7cce35e18f601071 100644 --- a/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp @@ -25,7 +25,7 @@ void LeakyReLUImpl_cpu_forward_kernel(const LeakyReLU_Op::Attrs& attrs, const I* input = static_cast<const I*>(input_); O* output = static_cast<O*>(output_); - I negativeSlope = static_cast<I>(std::get<0>(attrs)); + const I negativeSlope = static_cast<const I>(std::get<0>(attrs)); for (std::size_t i = 0; i < inputLenght; ++i) { output[i] = input[i] >= 0 ? input[i] : input[i] * negativeSlope; diff --git a/include/aidge/backend/cpu/operator/MatMulImpl.hpp b/include/aidge/backend/cpu/operator/MatMulImpl.hpp index e8654c6e9cc8fab9080bbb5ed57ea78ee0b7978c..437ba404b1cc39973448f3c5567aec2fe35994e3 100644 --- a/include/aidge/backend/cpu/operator/MatMulImpl.hpp +++ b/include/aidge/backend/cpu/operator/MatMulImpl.hpp @@ -23,16 +23,14 @@ #include "aidge/backend/cpu/data/GetCPUPtr.h" namespace Aidge { -// class MatMul_Op; -// compute kernel registry for forward and backward class MatMulImplForward_cpu - : public Registrable<MatMulImplForward_cpu, std::tuple<DataType, DataType, DataType>, - void(const MatMul_Op::Attrs &, const DimSize_t, const DimSize_t, + : public Registrable<MatMulImplForward_cpu, std::tuple<DataType, DataType>, + void(const std::size_t, const std::size_t, const std::size_t, const void *, const void *, void *)> {}; class MatMulImplBackward_cpu - : public Registrable<MatMulImplBackward_cpu, std::tuple<DataType, DataType, DataType>, - void(const MatMul_Op::Attrs &, const DimSize_t, const DimSize_t, + : public Registrable<MatMulImplBackward_cpu, std::tuple<DataType, DataType>, + void(const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void *, const void *, void *)> {}; class MatMulImpl_cpu : public OperatorImpl { diff --git a/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp index bc52779eff274379a853ea84fb839c9486652433..5045580fa599aac64f2c1414bfdf2b87ea57e313 100644 --- a/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp @@ -12,45 +12,39 @@ #ifndef AIDGE_CPU_OPERATOR_MATMULIMPL_FORWARD_KERNEL_H_ #define AIDGE_CPU_OPERATOR_MATMULIMPL_FORWARD_KERNEL_H_ -#include "aidge/utils/Registrar.hpp" -#include <algorithm> 
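+#include <cstddef>  // std::size_t, used by the new kernel signature below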
- #include "aidge/backend/cpu/operator/MatMulImpl.hpp" namespace Aidge { -template <class I, class W, class O> -void MatMulImpl_cpu_forward_kernel(const MatMul_Op::Attrs& attrs, const DimSize_t batchSize, const DimSize_t oneInputSize, - const void* input_, const void* weights_, void* output_) { +template <class I, class O> +void MatMulImpl_cpu_forward_kernel(const std::size_t n, const std::size_t k, const std::size_t m, + const void* input1_, const void* input2_, void* output_) { // FIXME: missing MatMul parameters as arguments - const I* input = static_cast<const I*>(input_); - const W* weights = static_cast<const W*>(weights_); + const I* input1 = static_cast<const I*>(input1_); + const I* input2 = static_cast<const I*>(input2_); O* output = static_cast<O*>(output_); - - std::fill(output, output+(batchSize*std::get<0>(attrs)), O(0)); - - for (std::size_t batch = 0; batch < batchSize; ++batch) { - for (std::size_t out = 0; out < std::get<0>(attrs); ++out) { - output[out + batch*std::get<0>(attrs)] = std::inner_product(input + batch*oneInputSize, - input + (batch + 1)*oneInputSize, - weights + out*oneInputSize, - output[out + batch*std::get<0>(attrs)]); + for (std::size_t i = 0; i < n; ++i) { + for (std::size_t j = 0; j < m; ++j) { + O sum = O(0); + for (std::size_t l = 0; l < k; ++l) { + sum += static_cast<O>(input1[i*k + l] * input2[l*m + j]); + } + output[i*m + j] = sum; } } } - namespace { static Registrar<MatMulImplForward_cpu> registrarMatMulImpl2DForward_cpu_Float32( - {DataType::Float32, DataType::Float32, DataType::Float32}, - Aidge::MatMulImpl_cpu_forward_kernel<float, float, float>); + {DataType::Float32, DataType::Float32}, + Aidge::MatMulImpl_cpu_forward_kernel<float, float>); static Registrar<MatMulImplForward_cpu> registrarMatMulImpl2DForward_cpu_Int32( - {DataType::Int32, DataType::Int32, DataType::Int32}, - Aidge::MatMulImpl_cpu_forward_kernel<int, int, int>); + {DataType::Int32, DataType::Int32}, + Aidge::MatMulImpl_cpu_forward_kernel<int, int>); static Registrar<MatMulImplForward_cpu> registrarMatMulImpl2DForward_cpu_Float64( - {DataType::Float64, DataType::Float64, DataType::Float64}, - Aidge::MatMulImpl_cpu_forward_kernel<double, double, double>); + {DataType::Float64, DataType::Float64}, + Aidge::MatMulImpl_cpu_forward_kernel<double, double>); } // namespace } // namespace Aidge diff --git a/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9b85eb812caffca3820a711d46775e1134db863f --- /dev/null +++ b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp @@ -0,0 +1,104 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_H_ +#define AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_H_ + +#include <array> +#include <memory> +#include <tuple> +#include <vector> + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/ReduceMean.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +namespace Aidge { +// class ReduceMean_Op; + +// compute kernel registry for forward and backward +// DIM 1 +class ReduceMeanImpl1DForward_cpu + : public Registrable<ReduceMeanImpl1DForward_cpu, + std::tuple<DataType, DataType>, + void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {}; +class ReduceMeanImpl1DBackward_cpu + : public Registrable<ReduceMeanImpl1DBackward_cpu, + std::tuple<DataType, DataType>, + void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {}; + +// DIM 2 +class ReduceMeanImpl2DForward_cpu + : public Registrable<ReduceMeanImpl2DForward_cpu, + std::tuple<DataType, DataType>, + void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {}; +class ReduceMeanImpl2DBackward_cpu + : public Registrable<ReduceMeanImpl2DBackward_cpu, + std::tuple<DataType, DataType>, + void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {}; +// DIM 3 +class ReduceMeanImpl3DForward_cpu + : public Registrable<ReduceMeanImpl3DForward_cpu, + std::tuple<DataType, DataType>, + void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {}; +class ReduceMeanImpl3DBackward_cpu + : public Registrable<ReduceMeanImpl3DBackward_cpu, + std::tuple<DataType, DataType>, + void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {}; + +class ReduceMeanImpl1D_cpu : public OperatorImpl { + public: + ReduceMeanImpl1D_cpu(const ReduceMean_Op<1>& op) : OperatorImpl(op) {} + + static std::unique_ptr<ReduceMeanImpl1D_cpu> create(const ReduceMean_Op<1> &op) { + return std::make_unique<ReduceMeanImpl1D_cpu>(op); + } + + public: + NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + void forward() override; +}; + +class ReduceMeanImpl2D_cpu : public OperatorImpl { + public: + ReduceMeanImpl2D_cpu(const ReduceMean_Op<2>& op) : OperatorImpl(op) {} + + static std::unique_ptr<ReduceMeanImpl2D_cpu> create(const ReduceMean_Op<2> &op) { + return std::make_unique<ReduceMeanImpl2D_cpu>(op); + } + + public: + NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + void forward() override; +}; + +class ReduceMeanImpl3D_cpu : public OperatorImpl { + public: + ReduceMeanImpl3D_cpu(const ReduceMean_Op<3>& op) : OperatorImpl(op) {} + + static std::unique_ptr<ReduceMeanImpl3D_cpu> create(const ReduceMean_Op<3> &op) { + return std::make_unique<ReduceMeanImpl3D_cpu>(op); + } + + public: + NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + void forward() override; +}; +namespace { +// add cpu backend to ReduceMean_Op<2> implementation registry +static Registrar<ReduceMean_Op<1>> registrarReduceMeanImpl1D_cpu("cpu", Aidge::ReduceMeanImpl1D_cpu::create); +static Registrar<ReduceMean_Op<2>> registrarReduceMeanImpl2D_cpu("cpu", Aidge::ReduceMeanImpl2D_cpu::create); +static Registrar<ReduceMean_Op<3>> registrarReduceMeanImpl3D_cpu("cpu", Aidge::ReduceMeanImpl3D_cpu::create); +} // namespace +} 
// namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..46eb61f2f03acd47d74725ade1425a92f028690c --- /dev/null +++ b/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp @@ -0,0 +1,132 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_FORWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_FORWARD_KERNEL_H_ + +#include <cstddef> +#include <algorithm> // std::copy, std::for_each +#include <numeric> //std::accumulate +#include <functional> //std::multiplies + +#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp" +#include "aidge/data/Data.hpp" +#include "aidge/operator/ReduceMean.hpp" +#include "aidge/utils/Registrar.hpp" + +namespace Aidge { +template <class I, class O, DimSize_t DIM> +void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs& attrs, + const std::vector<DimSize_t>& inputDims, + const void* input_, + void* output_) { + + const I* input = static_cast<const I*>(input_); + O* output = static_cast<O*>(output_); + + const std::size_t nb_dims = inputDims.size(); + const std::size_t totalElements = std::accumulate(inputDims.cbegin(), inputDims.cend(), 1, std::multiplies<std::size_t>()); + + if (DIM == 1) { + const std::size_t stride_pre = std::accumulate(inputDims.cbegin(), inputDims.cbegin() + std::get<0>(attrs)[0], 1, std::multiplies<std::size_t>()); + const std::size_t stride_post = std::accumulate(inputDims.crbegin(), inputDims.crbegin() + nb_dims -1 - std::get<0>(attrs)[0], 1, std::multiplies<std::size_t>()); + + const std::size_t dim_i = inputDims[std::get<0>(attrs)[0]]; + for (std::size_t pre = 0; pre < stride_pre; ++pre) { + for (std::size_t post = 0; post < stride_post; ++post) { + const std::size_t idx_i = pre * dim_i * stride_post + post; + const std::size_t idx_o = pre * stride_post + post; + output[idx_o] = input[idx_i]; + for (std::size_t i = 1; i < dim_i; ++i) { + output[idx_o] += input[idx_i + i*stride_post]; + } + output[idx_o] /= dim_i; + } + } + } else { + std::size_t outputElements = totalElements; + + std::size_t *stride_post = new std::size_t[nb_dims]; + stride_post[nb_dims - 1] = 1; + for (std::size_t i = nb_dims-2; i != static_cast<std::size_t>(-1); --i) { + stride_post[i] = stride_post[i+1]*inputDims[i+1]; + } + std::size_t *stride_pre = new std::size_t[nb_dims]; + stride_pre[0] = 1; + for (std::size_t i = 1; i < nb_dims; ++i) { + stride_pre[i] = stride_pre[i-1]*inputDims[i-1]; + } + + const I* inputAccumulation = input; + I* outputAccumulation = nullptr; + + for (const auto& axisInt : std::get<0>(attrs)) { + const std::size_t a = static_cast<std::size_t>(axisInt); + outputElements /= inputDims[a]; + outputAccumulation = new I[outputElements]; + const std::size_t dim_i = inputDims[a]; + for (std::size_t pre = 0; pre < stride_pre[a]; ++pre) { + for (std::size_t post = 0; post < stride_post[a]; ++post) { + const std::size_t idx_i = pre * dim_i * 
stride_post[a] + post; + const std::size_t idx_o = pre * stride_post[a] + post; + outputAccumulation[idx_o] = inputAccumulation[idx_i]; + for (std::size_t i = 1; i < dim_i; ++i) { + outputAccumulation[idx_o] += inputAccumulation[idx_i + i*stride_post[a]]; + } + } + } + std::for_each(stride_pre+a+1, stride_pre+nb_dims, [dim_i] (std::size_t& val) { val /= dim_i; }); + if (inputAccumulation != input) { + delete[] inputAccumulation; + } + inputAccumulation = outputAccumulation; + } + + // Copy elements from inputAccumulation to output while dividing by divisor + I divisor = totalElements / outputElements; + std::transform(inputAccumulation, inputAccumulation + outputElements, output, + [divisor](I element) { return element / divisor; }); + if (outputAccumulation) { + delete[] outputAccumulation; + } + delete[] stride_post; + delete[] stride_pre; + } +} + +namespace { +// DIM = 1 +static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,1>); +static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,1>); +static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,1>); + +// DIM = 2 +static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,2>); +static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,2>); +static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,2>); + +// DIM = 3 +static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,3>); +static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,3>); +static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,3>); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_FORWARD_KERNEL_H_ */ diff --git a/include/aidge/backend/cpu/operator/ReshapeImpl.hpp b/include/aidge/backend/cpu/operator/ReshapeImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..d5754b34e952d52b2071744e9f8e863074ef9fa3 --- /dev/null +++ b/include/aidge/backend/cpu/operator/ReshapeImpl.hpp @@ -0,0 +1,50 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0.
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_RESHAPEIMPL_H_ +#define AIDGE_CPU_OPERATOR_RESHAPEIMPL_H_ + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/Reshape.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" +#include <memory> +#include <vector> + +namespace Aidge { +// class Reshape_Op; + +// compute kernel registry for forward and backward +class ReshapeImplForward_cpu + : public Registrable<ReshapeImplForward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const void*, void*)> { +}; +class ReshapeImplBackward_cpu + : public Registrable<ReshapeImplBackward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const void*, void*)> { +}; + +class ReshapeImpl_cpu : public OperatorImpl { +public: + ReshapeImpl_cpu(const Reshape_Op& op) : OperatorImpl(op) {} + + static std::unique_ptr<ReshapeImpl_cpu> create(const Reshape_Op& op) { + return std::make_unique<ReshapeImpl_cpu>(op); + } + + NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + void forward() override; +}; + +namespace { +static Registrar<Reshape_Op> registrarReshapeImpl_cpu("cpu", Aidge::ReshapeImpl_cpu::create); +} +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_RESHAPEIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..cefdab57ee41ffab0b98a87698d95f5d89a0206d --- /dev/null +++ b/include/aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp @@ -0,0 +1,45 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_RESHAPEIMPL_FORWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_RESHAPEIMPL_FORWARD_KERNEL_H_ + +#include "aidge/utils/Registrar.hpp" +#include <cmath> + +#include "aidge/backend/cpu/operator/ReshapeImpl.hpp" + +namespace Aidge { +template <class I, class O> +void ReshapeImpl_cpu_forward_kernel(std::size_t inputLength, + const void* input_, + void* output_) { + + const I* input = static_cast<const I*>(input_); + O* output = static_cast<O*>(output_); + + std::copy_n(input, inputLength, output); +} + +namespace { +static Registrar<ReshapeImplForward_cpu> registrarReshapeImplForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, + Aidge::ReshapeImpl_cpu_forward_kernel<float, float>); +static Registrar<ReshapeImplForward_cpu> registrarReshapeImplForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, + Aidge::ReshapeImpl_cpu_forward_kernel<int, int>); +static Registrar<ReshapeImplForward_cpu> registrarReshapeImplForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, + Aidge::ReshapeImpl_cpu_forward_kernel<double, double>); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_RESHAPEIMPL_FORWARD_KERNEL_H_ */ diff --git a/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp index 9f08fab758a1d8c717ccb5f0a0357f94fd86e5e4..d92e9008aff2a4e3c9e392fcc51871001020ce5a 100644 --- a/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp @@ -35,7 +35,7 @@ void SliceImpl_cpu_forward_kernel(const typename Slice_Op::Attrs& attrs, const std::int64_t axis_ = std::get<2>(attrs)[i]; const std::int64_t start_ = std::get<0>(attrs)[i]; const std::int64_t end_ = std::get<1>(attrs)[i]; - const std::size_t axis = axis_ >= 0 ? axis_ : static_cast<std::size_t>(axis_ + static_cast<std::int32_t>(inputDims.size())); + const std::size_t axis = axis_ >= 0 ? axis_ : static_cast<std::size_t>(axis_) + inputDims.size(); const std::size_t start = start_ >= 0 ? start_ : start_ + inputDims[axis]; const std::size_t end = end_ >= 0 ? 
end_ : end_ + inputDims[axis]; std::size_t stride = 1; diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp index 15fb2b5d30e32febca7c8028c8b5212e5b96775f..005b52f646f9e9ddf14af09cc22d9e2a44ba6dd4 100644 --- a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp +++ b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp @@ -25,10 +25,10 @@ namespace Aidge { // compute kernel registry for forward and backward class SoftmaxImplForward_cpu - : public Registrable<SoftmaxImplForward_cpu, std::tuple<DataType, DataType>, void(const DimSize_t, const DimSize_t, const DimSize_t, const void*, void*)> { + : public Registrable<SoftmaxImplForward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const std::vector<DimSize_t>&, const void*, void*)> { }; class SoftmaxImplBackward_cpu - : public Registrable<SoftmaxImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> { + : public Registrable<SoftmaxImplBackward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const std::vector<DimSize_t>&, const void*, void*)> { }; class SoftmaxImpl_cpu : public OperatorImpl { diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp index a5a168a08cf85e952cffd556e0cc34d29d35fffa..cc384c38e34d01887fc328d11de383aeef39fb8e 100644 --- a/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp @@ -23,30 +23,33 @@ namespace Aidge { template <class I, class O> -void SoftmaxImpl_cpu_forward_kernel(const DimSize_t batchSize, - const DimSize_t channelSize, - const DimSize_t featureSize, - const void* input_, - void* output_) { - +void SoftmaxImpl_cpu_forward_kernel(std::size_t axisIdx, const std::vector<DimSize_t>& inputDims, const void* input_, void* output_) +{ const I* input = static_cast<const I*>(input_); O* output = static_cast<O*>(output_); - for (std::size_t batch = 0; batch < batchSize; ++batch) { - for (std::size_t feature = 0; feature < featureSize; ++feature) { - std::size_t ioIndex = batch*channelSize*featureSize + feature; + std::size_t postAxisElems = 1; + for (std::size_t i = axisIdx + 1; i < inputDims.size(); ++i) { + postAxisElems *= inputDims[i]; + } + std::size_t preAxisElems = 1; + for (std::size_t i = 0; i < axisIdx; ++i) { + preAxisElems *= inputDims[i]; + } - I sum(0.0); - for (std::size_t ch = 0; ch < channelSize; ++ch) { - output[ioIndex] = std::exp(input[ioIndex]); - sum += output[ioIndex]; - ioIndex+=featureSize; + for (std::size_t i = 0; i < preAxisElems; ++i) { + for (std::size_t j = 0; j < postAxisElems; ++j) { + // Calculate sum of exponentials within the axis + I sumExp = 0; + for (std::size_t k = 0; k < inputDims[axisIdx]; ++k) { + std::size_t inIdx = i * inputDims[axisIdx] * postAxisElems + k * postAxisElems + j; + sumExp += std::exp(input[inIdx]); } - ioIndex = batch*channelSize*featureSize + feature; - for (std::size_t ch = 0; ch < channelSize; ++ch) { - output[ioIndex] /= sum; - ioIndex += featureSize; + // Calculate softmax for the current slice along the axis + for (std::size_t k = 0; k < inputDims[axisIdx]; ++k) { + std::size_t inIdx = i * inputDims[axisIdx] * postAxisElems + k * postAxisElems + j; + output[inIdx] = std::exp(input[inIdx]) / sumExp; } } } diff --git a/include/aidge/backend/cpu/operator/TransposeImpl.hpp b/include/aidge/backend/cpu/operator/TransposeImpl.hpp new file mode 100644 index 
0000000000000000000000000000000000000000..712e672752648f5ff8a3c073f6c81bbe7cc85d9d --- /dev/null +++ b/include/aidge/backend/cpu/operator/TransposeImpl.hpp @@ -0,0 +1,123 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_TransposeIMPL_H_ +#define AIDGE_CPU_OPERATOR_TransposeIMPL_H_ + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/Transpose.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" +#include <memory> +#include <vector> + +namespace Aidge { +// class Transpose_Op; + +// compute kernel registry for forward and backward +class TransposeImpl2DForward_cpu + : public Registrable<TransposeImpl2DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<2>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl3DForward_cpu + : public Registrable<TransposeImpl3DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<3>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl4DForward_cpu + : public Registrable<TransposeImpl4DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<4>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl5DForward_cpu + : public Registrable<TransposeImpl5DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<5>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl6DForward_cpu + : public Registrable<TransposeImpl6DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<6>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl2DBackward_cpu + : public Registrable<TransposeImpl2DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<2>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl3DBackward_cpu + : public Registrable<TransposeImpl3DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<3>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl4DBackward_cpu + : public Registrable<TransposeImpl4DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<4>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl5DBackward_cpu + : public Registrable<TransposeImpl5DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<5>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl6DBackward_cpu + : public Registrable<TransposeImpl6DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<6>::Attrs& attrs, const std::vector<DimSize_t>&, const 
std::vector<DimSize_t>&, const void*, void*)> { +}; + + +class TransposeImpl2D_cpu : public OperatorImpl { +public: + TransposeImpl2D_cpu(const Transpose_Op<2>& op) : OperatorImpl(op) {} + + static std::unique_ptr<TransposeImpl2D_cpu> create(const Transpose_Op<2>& op) { + return std::make_unique<TransposeImpl2D_cpu>(op); + } + + NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + void forward() override; +}; +class TransposeImpl3D_cpu : public OperatorImpl { +public: + TransposeImpl3D_cpu(const Transpose_Op<3>& op) : OperatorImpl(op) {} + + static std::unique_ptr<TransposeImpl3D_cpu> create(const Transpose_Op<3>& op) { + return std::make_unique<TransposeImpl3D_cpu>(op); + } + + NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + void forward() override; +}; +class TransposeImpl4D_cpu : public OperatorImpl { +public: + TransposeImpl4D_cpu(const Transpose_Op<4>& op) : OperatorImpl(op) {} + + static std::unique_ptr<TransposeImpl4D_cpu> create(const Transpose_Op<4>& op) { + return std::make_unique<TransposeImpl4D_cpu>(op); + } + + NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + void forward() override; +}; +class TransposeImpl5D_cpu : public OperatorImpl { +public: + TransposeImpl5D_cpu(const Transpose_Op<5>& op) : OperatorImpl(op) {} + + static std::unique_ptr<TransposeImpl5D_cpu> create(const Transpose_Op<5>& op) { + return std::make_unique<TransposeImpl5D_cpu>(op); + } + + NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + void forward() override; +}; +class TransposeImpl6D_cpu : public OperatorImpl { +public: + TransposeImpl6D_cpu(const Transpose_Op<6>& op) : OperatorImpl(op) {} + + static std::unique_ptr<TransposeImpl6D_cpu> create(const Transpose_Op<6>& op) { + return std::make_unique<TransposeImpl6D_cpu>(op); + } + + NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + void forward() override; +}; + +namespace { +static Registrar<Transpose_Op<2>> registrarTransposeImpl2D_cpu("cpu", Aidge::TransposeImpl2D_cpu::create); +static Registrar<Transpose_Op<3>> registrarTransposeImpl3D_cpu("cpu", Aidge::TransposeImpl3D_cpu::create); +static Registrar<Transpose_Op<4>> registrarTransposeImpl4D_cpu("cpu", Aidge::TransposeImpl4D_cpu::create); +static Registrar<Transpose_Op<5>> registrarTransposeImpl5D_cpu("cpu", Aidge::TransposeImpl5D_cpu::create); +static Registrar<Transpose_Op<6>> registrarTransposeImpl6D_cpu("cpu", Aidge::TransposeImpl6D_cpu::create); +} +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_TransposeIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9fd5e5b58ed8e850c0a902e2de93b65cc75d274a --- /dev/null +++ b/include/aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp @@ -0,0 +1,110 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_TRANSPOSEIMPL_FORWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_TRANSPOSEIMPL_FORWARD_KERNEL_H_ + +#include "aidge/utils/Registrar.hpp" +#include <cstddef> +#include <cmath> +#include "aidge/data/Data.hpp" +#include "aidge/utils/Types.h" + +#include "aidge/backend/cpu/operator/TransposeImpl.hpp" + +namespace Aidge { +template <class I, class O, DimSize_t DIM> +void TransposeImpl_cpu_forward_kernel( const typename Transpose_Op<DIM>::Attrs& attrs, const std::vector<DimSize_t>& inputDims, const std::vector<DimSize_t>& outputDims, const void* input_, void* output_) +{ + O* output = static_cast<O*>(output_); + const I* input = static_cast<const I*>(input_); + + // Compute total number of elements in the input array + size_t totalElements = 1; + for (size_t dimSize : inputDims) { + totalElements *= dimSize; + } + + std::vector<std::size_t> outStrides(DIM, 1); + for (size_t i = 0; i < DIM; ++i) { + for (size_t j = i+1; j < DIM; ++j) + { + outStrides[i] *= outputDims[j]; + } + } + + std::vector<size_t> indices(outputDims.size(), 0); + for (size_t i = 0; i < totalElements; ++i) { + size_t idx = 0; + // Permute indices based on OutputDimsOrder attr + std::vector<size_t> permutedIndices(DIM); + for (size_t j = 0; j < DIM; ++j) { + permutedIndices[j] = indices[std::get<0>(attrs)[j]]; + } + + for (int j = DIM -1; j >=0; --j) { + idx += permutedIndices[j] * outStrides[j]; + } + // Copy the value in output + output[idx] = input[i]; + + // Update indices for the next iteration + for (int j = DIM - 1; j >= 0; --j) { + if (indices[j] < inputDims[j] - 1) { + indices[j]++; + break; + } else { + indices[j] = 0; + } + } + } + +} +namespace { +// DIM = 2 +static Registrar<TransposeImpl2DForward_cpu> registrarTransposeImpl2DForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 2>); +static Registrar<TransposeImpl2DForward_cpu> registrarTransposeImpl2DForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 2>); +static Registrar<TransposeImpl2DForward_cpu> registrarTransposeImpl2DForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 2>); +// DIM = 3 +static Registrar<TransposeImpl3DForward_cpu> registrarTransposeImpl3DForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 3>); +static Registrar<TransposeImpl3DForward_cpu> registrarTransposeImpl3DForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 3>); +static Registrar<TransposeImpl3DForward_cpu> registrarTransposeImpl3DForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 3>); +// DIM = 4 +static Registrar<TransposeImpl4DForward_cpu> registrarTransposeImpl4DForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 4>); +static Registrar<TransposeImpl4DForward_cpu> registrarTransposeImpl4DForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 4>); +static Registrar<TransposeImpl4DForward_cpu> registrarTransposeImpl4DForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 4>); +// DIM = 5 
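+// (same pattern as DIM = 2..4 above: one registrar per (input, output) DataType pair,
+// each binding the rank-5 instantiation of TransposeImpl_cpu_forward_kernel)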
+static Registrar<TransposeImpl5DForward_cpu> registrarTransposeImpl5DForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 5>); +static Registrar<TransposeImpl5DForward_cpu> registrarTransposeImpl5DForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 5>); +static Registrar<TransposeImpl5DForward_cpu> registrarTransposeImpl5DForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 5>); +// DIM = 6 +static Registrar<TransposeImpl6DForward_cpu> registrarTransposeImpl6DForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 6>); +static Registrar<TransposeImpl6DForward_cpu> registrarTransposeImpl6DForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 6>); +static Registrar<TransposeImpl6DForward_cpu> registrarTransposeImpl6DForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 6>); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_TRANSPOSEIMPL_FORWARD_KERNEL_H_ */ diff --git a/src/operator/ErfImpl.cpp b/src/operator/ErfImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..06ec65008aee41215192cd05e126ac4f82388c1b --- /dev/null +++ b/src/operator/ErfImpl.cpp @@ -0,0 +1,40 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <cassert> +#include <chrono> // std::chrono::milliseconds +#include <numeric> // std::accumulate +#include <thread> // std::this_thread::sleep_for +#include <vector> + +#include "aidge/operator/Erf.hpp" +#include "aidge/utils/Types.h" + +#include "aidge/backend/cpu/operator/ErfImpl.hpp" +#include "aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp" + +Aidge::NbElts_t Aidge::ErfImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { + // this implementation can be in-place + return 0; +} + +void Aidge::ErfImpl_cpu::forward() { + + // Find the correct kernel type + auto kernelFunc = Registrar<ErfImplForward_cpu>::create({ + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + + // Call kernel + kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +} diff --git a/src/operator/GatherImpl.cpp b/src/operator/GatherImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ce98627d95e0d05541db1ccaf4896abe756431b0 --- /dev/null +++ b/src/operator/GatherImpl.cpp @@ -0,0 +1,40 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <cassert> +#include <chrono> // std::chrono::milliseconds +#include <numeric> // std::accumulate +#include <thread> // std::this_thread::sleep_for +#include <vector> + +#include "aidge/operator/Gather.hpp" +#include "aidge/utils/Types.h" + +#include "aidge/backend/cpu/operator/GatherImpl.hpp" +#include "aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp" + +Aidge::NbElts_t Aidge::GatherImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { + // this implementation can be in-place + return 0; +} + +void Aidge::GatherImpl_cpu::forward() { + + auto kernelFunc = Registrar<GatherImplForward_cpu>::create({ + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + + // Call kernel + kernelFunc(dynamic_cast<const Gather_Op&>(mOp).getStaticAttributes(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +} diff --git a/src/operator/MatMulImpl.cpp b/src/operator/MatMulImpl.cpp index f02effb3172e2c0624c6c7532513a2b794ee3a89..488af17617d556ad7a9d9b73909324d67a672459 100644 --- a/src/operator/MatMulImpl.cpp +++ b/src/operator/MatMulImpl.cpp @@ -9,15 +9,14 @@ * ********************************************************************************/ -#include <cassert> -#include <chrono> // std::chrono::milliseconds -#include <numeric> // std::accumulate -#include <thread> // std::this_thread::sleep_for +#include <cstddef> // std::size_t +#include <cstdint> // std::int32_t +#include <numeric> // std::accumulate #include <vector> +#include "aidge/backend/cpu/data/GetCPUPtr.h" #include "aidge/operator/MatMul.hpp" #include "aidge/utils/Types.h" -#include "aidge/backend/cpu/data/GetCPUPtr.h" #include "aidge/backend/cpu/operator/MatMulImpl.hpp" #include "aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp" @@ -30,27 +29,110 @@ void Aidge::MatMulImpl_cpu::forward() // Find the correct kernel type auto kernelFunc = Registrar<MatMulImplForward_cpu>::create( {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(), std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); - // Call kernel - // if (mOp.getInput(0)->nbDims() == 4) { - // kernelFunc( - // mOp.getStaticAttributes(), - // std::static_pointer_cast<Tensor>(mOp.getInput(0))->template dims<4>(), - // mOp.getInput(0))->getImpl()->rawPtr(), - // mOp.mInputs[1]->getImpl()->rawPtr(), - // mOp.mInputs[2]->getImpl()->rawPtr(), - // getCPUPtr(mOp.getRawOutput(0)); - // } - // else - kernelFunc( - dynamic_cast<const MatMul_Op&>(mOp).getStaticAttributes(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0], - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size() / std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0], - getCPUPtr(mOp.getRawInput(0)), - getCPUPtr(mOp.getRawInput(1)), - getCPUPtr(mOp.getRawOutput(0))); + // Compute compatible input dimensions + std::vector<std::size_t> dims0 = static_cast<const MatMul_Op&>(mOp).getInput(0)->dims(); + std::vector<std::size_t> dims1 = static_cast<const MatMul_Op&>(mOp).getInput(1)->dims(); + + // keep second-to-last dimension of dims0 + const std::size_t keepDim0 = (dims0.size() > 1) ? 
1 : 0; + // keep last dimension of dims1 + const std::size_t keepDim1 = (dims1.size() > 1) ? 1 : 0; + + if (dims0.size() == 1) { + dims0.insert(dims0.cbegin(), 1); + } + if (dims1.size() == 1) { + dims1.push_back(1); + } + + if (dims0.size() > dims1.size()) { + dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1)); + } + else if (dims1.size() > dims0.size()) { + dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1)); + } + // const std::size_t dims_size = std::max(dims0.size(), dims1.size()); + // at this point, dims0.size() == dims1.size() + const std::size_t nbDims = dims0.size(); + // initialize strides to iterate through data because of broadcasting + std::size_t *stride_post0; + std::size_t *stride_post1; + std::int32_t *stride_step0; + std::int32_t *stride_step1; + if (nbDims > 2) { + stride_post0 = new std::size_t[nbDims-2]; + stride_post0[nbDims - 3] = 1; + stride_post1 = new std::size_t[nbDims-2]; + stride_post1[nbDims - 3] = 1; + for (std::size_t i = nbDims-4; i != static_cast<std::size_t>(-1); --i) { + stride_post0[i] = stride_post0[i+1]*dims0[i+1]; + stride_post1[i] = stride_post1[i+1]*dims1[i+1]; + } + stride_step0 = new std::int32_t[nbDims-2]; + stride_step1 = new std::int32_t[nbDims-2]; + for (std::size_t i = 0; i != nbDims-2; ++i) { + stride_step0[i] = (dims0[i] == 1) ? 1 - static_cast<std::int32_t>(stride_post0[i]) : 1; + stride_step1[i] = (dims1[i] == 1) ? 1 - static_cast<std::int32_t>(stride_post1[i]) : 1; + } + } + + const std::vector<std::size_t>& outDims = static_cast<const MatMul_Op&>(mOp).getOutput(0)->dims(); + const std::size_t nbMatrices = std::accumulate(outDims.cbegin(), outDims.cend() - keepDim0 - keepDim1, 1, std::multiplies<std::size_t>()); + std::size_t dim = outDims.size() - 1 - keepDim0 - keepDim1; + + // variables for arrays offsets + std::size_t offsetIn0 = 0; + std::size_t offsetIn1 = 0; + std::size_t offsetOut = 0; + const std::size_t n = dims0[nbDims - 2]; + const std::size_t k = dims0[nbDims - 1]; + const std::size_t m = dims1[nbDims - 1]; + const std::size_t matrix0Size = n*k; + const std::size_t matrix1Size = k*m; + const std::size_t matrixOutSize = n*m; + for (std::size_t stack = 0; stack < nbMatrices;) { + kernelFunc(n, k, m, + getCPUPtr(mOp.getRawInput(0), offsetIn0*matrix0Size), + getCPUPtr(mOp.getRawInput(1), offsetIn1*matrix1Size), + getCPUPtr(mOp.getRawOutput(0), offsetOut*matrixOutSize)); + if (++stack < nbMatrices) { + std::size_t tmp_stack = stack; + while(tmp_stack % outDims[dim] == 0) { + tmp_stack /= outDims[dim]; + dim--; + } + offsetIn0 += stride_step0[dim]; + offsetIn1 += stride_step1[dim]; + ++offsetOut; + dim = outDims.size() - 1 - keepDim0 - keepDim1; + } + } + if (nbDims > 2) { + delete[] stride_post0; + delete[] stride_post1; + delete[] stride_step0; + delete[] stride_step1; + } } + +// void Aidge::MatMulImpl_cpu::forward() +// { +// assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0"); +// assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(1)) && "missing input #1"); + +// // Find the correct kernel type +// auto kernelFunc = Registrar<MatMulImplForward_cpu>::create( +// {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), +// std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + +// kernelFunc( +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), +// std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims(), +// getCPUPtr(mOp.getRawInput(0)), +// getCPUPtr(mOp.getRawInput(1)), +// 
getCPUPtr(mOp.getRawOutput(0))); +// } diff --git a/src/operator/ReduceMeanImpl.cpp b/src/operator/ReduceMeanImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e31a53d84947e5b2ced14ee9ee6e2badaef07071 --- /dev/null +++ b/src/operator/ReduceMeanImpl.cpp @@ -0,0 +1,79 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <cassert> +#include <chrono> // std::chrono::milliseconds +#include <numeric> // std::accumulate +#include <thread> // std::this_thread::sleep_for +#include <vector> + +#include "aidge/utils/Types.h" +#include "aidge/operator/ReduceMean.hpp" + +#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp" +#include "aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp" +Aidge::NbElts_t Aidge::ReduceMeanImpl1D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { + // this implementation can be in-place + return 0; +} +Aidge::NbElts_t Aidge::ReduceMeanImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { + // this implementation can be in-place + return 0; +} +Aidge::NbElts_t Aidge::ReduceMeanImpl3D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { + // this implementation can be in-place + return 0; +} + +void Aidge::ReduceMeanImpl1D_cpu::forward() { + + // Find the correct kernel type + auto kernelFunc = + Registrar<ReduceMeanImpl1DForward_cpu>::create({ + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + + // Call kernel + kernelFunc(dynamic_cast<const ReduceMean_Op<1>&>(mOp).getStaticAttributes(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +} + +void Aidge::ReduceMeanImpl2D_cpu::forward() { + + // Find the correct kernel type + auto kernelFunc = + Registrar<ReduceMeanImpl2DForward_cpu>::create({ + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + + // Call kernel + kernelFunc(dynamic_cast<const ReduceMean_Op<2>&>(mOp).getStaticAttributes(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +} + +void Aidge::ReduceMeanImpl3D_cpu::forward() { + + // Find the correct kernel type + auto kernelFunc = + Registrar<ReduceMeanImpl3DForward_cpu>::create({ + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + + // Call kernel + kernelFunc(dynamic_cast<const ReduceMean_Op<3>&>(mOp).getStaticAttributes(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +} \ No newline at end of file diff --git a/src/operator/ReshapeImpl.cpp b/src/operator/ReshapeImpl.cpp new file mode 
100644 index 0000000000000000000000000000000000000000..02dea1da3d4422abf37b62193bba83e83c87a83f --- /dev/null +++ b/src/operator/ReshapeImpl.cpp @@ -0,0 +1,39 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <cassert> + +#include "aidge/operator/Reshape.hpp" +#include "aidge/utils/Types.h" + +#include "aidge/backend/cpu/operator/ReshapeImpl.hpp" +#include "aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp" + +Aidge::NbElts_t Aidge::ReshapeImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { + // this implementation can be in-place + return 0; +} + +void Aidge::ReshapeImpl_cpu::forward() { + assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size() == + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size() + && "input must have the same overall size as shape"); + + // Find the correct kernel type + auto kernelFunc = Registrar<ReshapeImplForward_cpu>::create({ + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + + // Call kernel + kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +} diff --git a/src/operator/SliceImpl.cpp b/src/operator/SliceImpl.cpp index b60bbe60188f416f28ff2562875dce6e5ee15bd5..32d31f046465425a269d6f8e3fc52eaad31c663a 100644 --- a/src/operator/SliceImpl.cpp +++ b/src/operator/SliceImpl.cpp @@ -79,4 +79,4 @@ void Aidge::SliceImpl_cpu::forward() { mNbProducedData[0] += getRequiredMemory(0, {}); } -void Aidge::SliceImpl_cpu::backward() { printf("Not implemented yet.\n"); } \ No newline at end of file +void Aidge::SliceImpl_cpu::backward() { printf("Not implemented yet.\n"); } diff --git a/src/operator/SoftmaxImpl.cpp b/src/operator/SoftmaxImpl.cpp index c3086d8f9067996b9b0a8546b6deb3e281c777b4..5f5d7411b7bb28ae28480b39c8bfdf5674f877ed 100644 --- a/src/operator/SoftmaxImpl.cpp +++ b/src/operator/SoftmaxImpl.cpp @@ -36,13 +36,12 @@ void Aidge::SoftmaxImpl_cpu::forward() { std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); - DimSize_t batchSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0]; - DimSize_t channelSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[1]; - DimSize_t featureSize = (std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size()/batchSize)/channelSize; + Softmax_Op::Attrs attr = dynamic_cast<const Softmax_Op&>(mOp).getStaticAttributes(); + const int& axisIdx = static_cast<const int&>(std::get<0>(attr)); + // Call kernel - kernelFunc(batchSize, - channelSize, - featureSize, - getCPUPtr(mOp.getRawInput(0)), - getCPUPtr(mOp.getRawOutput(0))); + kernelFunc(axisIdx, + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); } diff --git a/src/operator/TransposeImpl.cpp 
b/src/operator/TransposeImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1fc4458ccb85e4776228a2bf9e1c73589c201a35 --- /dev/null +++ b/src/operator/TransposeImpl.cpp @@ -0,0 +1,123 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <cassert> +#include <chrono> // std::chrono::milliseconds +#include <numeric> // std::accumulate +#include <thread> // std::this_thread::sleep_for +#include <vector> + +#include "aidge/utils/Types.h" +#include "aidge/operator/Transpose.hpp" + +#include "aidge/backend/cpu/operator/TransposeImpl.hpp" +#include "aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp" + +Aidge::NbElts_t Aidge::TransposeImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { + // this implementation can be in-place + return 0; +} +Aidge::NbElts_t Aidge::TransposeImpl3D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { + // this implementation can be in-place + return 0; +} +Aidge::NbElts_t Aidge::TransposeImpl4D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { + // this implementation can be in-place + return 0; +} +Aidge::NbElts_t Aidge::TransposeImpl5D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { + // this implementation can be in-place + return 0; +} +Aidge::NbElts_t Aidge::TransposeImpl6D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { + // this implementation can be in-place + return 0; +} + +void Aidge::TransposeImpl2D_cpu::forward() { + // Find the correct kernel type + auto kernelFunc = + Registrar<TransposeImpl2DForward_cpu>::create({ + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + + // auto attr = dynamic_cast<const Transpose_Op<2>&>(mOp).getStaticAttributes(); + // std::vector<DimIdx_t> outDimsOrder; + // outDimsOrder.reserve(std::get<0>(attr).size()); // Reserve space for the new vector + + // std::transform(std::get<0>(attr).begin(), std::get<0>(attr).end(), std::back_inserter(outDimsOrder), + // [](int intValue) { return static_cast<DimIdx_t>(intValue); }); + + // Call kernel + kernelFunc(dynamic_cast<const Transpose_Op<2>&>(mOp).getStaticAttributes(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +} + +void Aidge::TransposeImpl3D_cpu::forward() { + // Find the correct kernel type + auto kernelFunc = + Registrar<TransposeImpl3DForward_cpu>::create({ + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + + // Call kernel + kernelFunc(dynamic_cast<const Transpose_Op<3>&>(mOp).getStaticAttributes(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +} 
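+
+// Illustrative walk-through of the index arithmetic performed by
+// TransposeImpl_cpu_forward_kernel (editorial note, not part of the dispatch
+// logic): the kernel walks the input linearly, permutes the running input
+// multi-index with the OutputDimsOrder attribute, then re-linearizes it with
+// the output strides. For example, with order {0, 2, 1} on a 2x3x4 input,
+// outputDims = {2, 4, 3} and outStrides = {12, 3, 1}; the input element at
+// indices {0, 1, 2} (linear offset 0*12 + 1*4 + 2 = 6) yields
+// permutedIndices = {0, 2, 1} and is written to output offset
+// 0*12 + 2*3 + 1 = 7, i.e. output[0][2][1] == input[0][1][2], as expected
+// for a {0, 2, 1} transpose.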
+
+void Aidge::TransposeImpl4D_cpu::forward() {
+    // Find the correct kernel type
+    auto kernelFunc =
+        Registrar<TransposeImpl4DForward_cpu>::create({
+            std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+            std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const Transpose_Op<4>&>(mOp).getStaticAttributes(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
+void Aidge::TransposeImpl5D_cpu::forward() {
+    // Find the correct kernel type
+    auto kernelFunc =
+        Registrar<TransposeImpl5DForward_cpu>::create({
+            std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+            std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const Transpose_Op<5>&>(mOp).getStaticAttributes(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
+void Aidge::TransposeImpl6D_cpu::forward() {
+    // Find the correct kernel type
+    auto kernelFunc =
+        Registrar<TransposeImpl6DForward_cpu>::create({
+            std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+            std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const Transpose_Op<6>&>(mOp).getStaticAttributes(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
\ No newline at end of file
diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp
deleted file mode 100644
index cfcfb45e3735538c1650cfd990ea85e2333916ad..0000000000000000000000000000000000000000
--- a/unit_tests/data/Test_TensorImpl.cpp
+++ /dev/null
@@ -1,100 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- * - * SPDX-License-Identifier: EPL-2.0 - * - ********************************************************************************/ - -#include <array> - -#include <catch2/catch_test_macros.hpp> - -#include "aidge/data/Tensor.hpp" -#include "aidge/utils/TensorUtils.hpp" -#include "aidge/backend/cpu/data/TensorImpl.hpp" - -using namespace Aidge; - -TEST_CASE("Tensor creation") { - SECTION("from const array") { - Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; - - Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; - - Tensor xFloat = - Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}}; - - SECTION("Tensor features") { - REQUIRE(x.nbDims() == 3); - REQUIRE(x.dims()[0] == 2); - REQUIRE(x.dims()[1] == 2); - REQUIRE(x.dims()[2] == 2); - REQUIRE(x.size() == 8); - } - - SECTION("Access to array") { - REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1); - REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8); - } - - SECTION("get function") { - REQUIRE(x.get<int>({0, 0, 0}) == 1); - REQUIRE(x.get<int>({0, 0, 1}) == 2); - REQUIRE(x.get<int>({0, 1, 1}) == 4); - REQUIRE(x.get<int>({1, 1, 0}) == 7); - x.set<int>({1, 1, 1}, 36); - REQUIRE(x.get<int>({1, 1, 1}) == 36); - } - - SECTION("Pretty printing for debug") { REQUIRE_NOTHROW(x.print()); } - - SECTION("Tensor (in)equality") { - REQUIRE(x == xCopy); - REQUIRE_FALSE(x == xFloat); - } - } -} - -TEST_CASE("Tensor methods") { - Tensor x = Array3D<int, 2, 2, 2>{{ - {{1, 2}, - {3, 4}}, - {{5, 6}, - {7, 8}} - }}; - - Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; - - Tensor xFloat = - Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}}; - - SECTION("Tensor sharing") { - Tensor xCopyCtor(x); - REQUIRE(xCopyCtor.getImpl() == x.getImpl()); - - Tensor xEqOp = x; - REQUIRE(xEqOp.getImpl() == x.getImpl()); - - Tensor xCloned = x.clone(); - REQUIRE(xCloned.getImpl() != x.getImpl()); - REQUIRE(xCloned == x); - } - - SECTION("Tensor extract") { - Tensor y = x.extract({0, 1}); - REQUIRE(y.getImpl() == x.getImpl()); - REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}})); - REQUIRE(y.isContiguous()); - - Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1}); - REQUIRE(y2.getImpl() == x.getImpl()); - REQUIRE(!y2.isContiguous()); - Tensor y3 = y2.clone(); - REQUIRE(y3.isContiguous()); - REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}})); - } -} diff --git a/unit_tests/operator/Test_ErfImpl.cpp b/unit_tests/operator/Test_ErfImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..db2ae0437742d1cd1b298d62f5bdd7241b755ec4 --- /dev/null +++ b/unit_tests/operator/Test_ErfImpl.cpp @@ -0,0 +1,90 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Erf.hpp" + +#include "aidge/backend/cpu.hpp" + +#include <memory> + + +using namespace Aidge; + +TEST_CASE("[cpu/operator] Erf(forward)") { + SECTION("1D Tensor") { + std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<float,10> { + {0.41384590, 0.43120754, 0.93762982, 0.31049860, 0.77547199, 0.09514862, + 0.16145366, 0.42776686, 0.43487436, 0.41170865} + }); + std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<float,10> { + {0.44163144, 0.45801866, 0.81516320, 0.33941913, 0.72722000, 0.10704061, + 0.18061027, 0.45479023, 0.46144873, 0.43959764} + }); + + std::shared_ptr<Node> myErf = Erf(); + auto op = std::static_pointer_cast<OperatorTensor>(myErf -> getOperator()); + op->associateInput(0,input0); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myErf->forward(); + + float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); + float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); + for (std::size_t i = 0; i< expectedOutput->size(); ++i) { + REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); + } + } + + SECTION("3D Tensor") { + std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array3D<float,2,2,3> { + { + { + {0.97037154, 0.86208081, 0.77767169}, + {0.38160080, 0.11422747, 0.77284443}, + }, + { + {0.51592529, 0.72543722, 0.54641193}, + {0.93866944, 0.97767913, 0.34172094} + } + } + }); + std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> { + { + { + {0.83003384, 0.77721894, 0.72857803}, + {0.41057193, 0.12833349, 0.72559172}, + }, + { + {0.53438270, 0.69507217, 0.56032562}, + {0.81564975, 0.83322692, 0.37109339} + } + } + }); + + std::shared_ptr<Node> myErf = Erf(); + auto op = std::static_pointer_cast<OperatorTensor>(myErf -> getOperator()); + op->associateInput(0,input0); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myErf->forward(); + + float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); + float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); + for (std::size_t i = 0; i< expectedOutput->size(); ++i) { + REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); + } + } +} \ No newline at end of file diff --git a/unit_tests/operator/Test_GatherImpl.cpp b/unit_tests/operator/Test_GatherImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a8345917ab0a141065e86638c09b2689902679ec --- /dev/null +++ b/unit_tests/operator/Test_GatherImpl.cpp @@ -0,0 +1,100 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Gather.hpp" + +#include "aidge/backend/cpu.hpp" + +#include <memory> + + +using namespace Aidge; + +TEST_CASE("[cpu/operator] Gather(forward)") { + SECTION("2D Tensor axis 0") { + std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,3,3> { + { + {1, 2, 3}, + {4, 5, 6}, + {7, 8, 9} + } + }); + std::shared_ptr<Tensor> indexes = std::make_shared<Tensor>(Array2D<int,1,2> { + { + {1, 2} + } + }); + std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,1,2,3> { + { + { + {4, 5, 6}, + {7, 8, 9} + } + } + }); + + std::shared_ptr<Node> myGather = Gather({1, 2}, {1, 2}, 0); + auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator()); + op->associateInput(0,input); + // op->associateInput(1,indexes); + op->setDataType(DataType::Int32); + op->setBackend("cpu"); + op->computeOutputDims(); + myGather->forward(); + op->getOutput(0)->print(); + expectedOutput->print(); + + REQUIRE(*(op->getOutput(0)) == *expectedOutput); + + } + SECTION("2D Tensor axis 1") { + std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,3,3> { + { + {1, 2, 3}, + {4, 5, 6}, + {7, 8, 9} + } + }); + std::shared_ptr<Tensor> indexes = std::make_shared<Tensor>(Array2D<int,1,2> { + { + {0, 2} + } + }); + std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,3,1,2> { + { + { + {1, 3} + }, + { + {4, 6} + }, + { + {7, 9} + } + } + }); + + std::shared_ptr<Node> myGather = Gather({0, 2}, {1, 2}, 1); + auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator()); + op->associateInput(0,input); + // op->associateInput(1,indexes); + op->setDataType(DataType::Int32); + op->setBackend("cpu"); + op->computeOutputDims(); + myGather->forward(); + + REQUIRE(*(op->getOutput(0)) == *expectedOutput); + + } +} \ No newline at end of file diff --git a/unit_tests/operator/Test_MatMulImpl.cpp b/unit_tests/operator/Test_MatMulImpl.cpp index 1edb915fb78e3e056f455ddecb8e704eee068cd9..5df0528b5d24be04b324cd05d1f964a57c35b3ea 100644 --- a/unit_tests/operator/Test_MatMulImpl.cpp +++ b/unit_tests/operator/Test_MatMulImpl.cpp @@ -10,102 +10,281 @@ ********************************************************************************/ #include <catch2/catch_test_macros.hpp> +#include <cstddef> // std::size_t +#include <cstdint> // std::uint16_t +#include <chrono> +#include <iostream> #include <memory> +#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution #include "aidge/data/Tensor.hpp" #include "aidge/operator/MatMul.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/TensorUtils.hpp" #include "aidge/backend/cpu/operator/MatMulImpl.hpp" -using namespace Aidge; +namespace Aidge { TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") { - // Test MatMul forward with batch size = 2 and feature size = 75 - std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array2D<int, 5, 75>{ - {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, - 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, - 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 
7, 8, - 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, - 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, - 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, - 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}}); - std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<int, 2, 5>{ - {{23600, 23600, 23600, 23600, 23600}, {68600, 68600, 68600, 68600, 68600}}}); - - std::shared_ptr<Node> myMatMul = MatMul(75, 5, "mymatmul"); + const std::uint16_t NBTRIALS = 10; + // Create a random number generator + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution<float> dis(0.0, 1.0); // Random float distribution between 0 and 1 + std::uniform_int_distribution<std::size_t> distDims(10, 100); + std::uniform_int_distribution<std::size_t> distNbMatrix(1, 5); + + // Create MatMul Operator + std::shared_ptr<Node> myMatMul = MatMul(); auto op = std::static_pointer_cast<OperatorTensor>(myMatMul -> getOperator()); - op->associateInput(1, myWeights); - - SECTION("2D input") { - std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<int, 2, 75>{ - {{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74}, - {75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, - 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, - 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, - 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149}}}); - op->associateInput(0, myInput); - op->setDataType(DataType::Int32); - op->setBackend("cpu"); - op->computeOutputDims(); - myMatMul->forward(); - REQUIRE(*(op->getOutput(0)) == *myOutput); + + // To measure execution time of 'MatMul_Op::forward()' member function call + std::chrono::time_point<std::chrono::system_clock> start; + std::chrono::time_point<std::chrono::system_clock> end; + std::chrono::duration<double, std::micro> duration; + + SECTION("2-D Tensors") { + std::size_t totalComputation = 0; + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate Tensors dimensions + const std::size_t dim0 = distDims(gen); + const std::size_t dim1 = distDims(gen); + const std::size_t dim2 = distDims(gen); + totalComputation += dim0*dim1*dim2; + + // Create and populate the array with random float values + float bigArray1[dim0][dim1]; + for (int i = 0; i < dim0; ++i) { + for (int j = 0; j < dim1; ++j) { + bigArray1[i][j] = dis(gen); // Generate random float value + } + } + float bigArray2[dim1][dim2]; + for (int i = 0; i < dim1; ++i) { + for 
(int j = 0; j < dim2; ++j) { + bigArray2[i][j] = dis(gen); // Generate random float value + } + } + float res[dim0][dim2]; + for (int i = 0; i < dim0; ++i) { + for (int j = 0; j < dim2; ++j) { + float sum = 0.0; + for (int k = 0; k < dim1; ++k) { + sum += bigArray1[i][k] * bigArray2[k][j]; + } + res[i][j] = sum; + } + } + + + // Convert bigArray1 to Tensor + std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(DataType::Float32); + T1 -> resize({dim0,dim1}); + T1 -> setBackend("cpu"); + T1 -> getImpl() -> setRawPtr(&bigArray1[0][0], dim0*dim1); + // Convert bigArray2 to Tensor + std::shared_ptr<Tensor> T2 = std::make_shared<Tensor>(DataType::Float32); + T2 -> resize({dim1,dim2}); + T2 -> setBackend("cpu"); + T2 -> getImpl() -> setRawPtr(&bigArray2[0][0], dim1*dim2); + // convert res to Tensor + std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(DataType::Float32); + Tres -> resize({dim0,dim2}); + Tres -> setBackend("cpu"); + Tres -> getImpl() -> setRawPtr(&res[0][0], dim0*dim2); + + op->associateInput(0, T1); + op->associateInput(1, T2); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + myMatMul->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + } + std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl; + std::cout << "total time: " << duration.count() << std::endl; } - SECTION("4D input") { - std::shared_ptr<Tensor> myInput = - std::make_shared<Tensor>(Array4D<int, 2, 3, 5, 5>{{{{{0, 1, 2, 3, 4}, - {5, 6, 7, 8, 9}, - {10, 11, 12, 13, 14}, - {15, 16, 17, 18, 19}, - {20, 21, 22, 23, 24}}, - {{25, 26, 27, 28, 29}, - {30, 31, 32, 33, 34}, - {35, 36, 37, 38, 39}, - {40, 41, 42, 43, 44}, - {45, 46, 47, 48, 49}}, - {{50, 51, 52, 53, 54}, - {55, 56, 57, 58, 59}, - {60, 61, 62, 63, 64}, - {65, 66, 67, 68, 69}, - {70, 71, 72, 73, 74}}}, - {{{75, 76, 77, 78, 79}, - {80, 81, 82, 83, 84}, - {85, 86, 87, 88, 89}, - {90, 91, 92, 93, 94}, - {95, 96, 97, 98, 99}}, - {{100, 101, 102, 103, 104}, - {105, 106, 107, 108, 109}, - {110, 111, 112, 113, 114}, - {115, 116, 117, 118, 119}, - {120, 121, 122, 123, 124}}, - {{125, 126, 127, 128, 129}, - {130, 131, 132, 133, 134}, - {135, 136, 137, 138, 139}, - {140, 141, 142, 143, 144}, - {145, 146, 147, 148, 149}}}}}); - op->associateInput(0, myInput); - op->setDataType(DataType::Int32); + + SECTION("3-D Tensors") { + std::size_t totalComputation = 0; + duration = std::chrono::duration<double, std::micro>::zero(); + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate Tensors dimensions + const std::size_t dimNb = distNbMatrix(gen); + const std::size_t dim0 = distDims(gen); + const std::size_t dim1 = distDims(gen); + const std::size_t dim2 = distDims(gen); + totalComputation += dim0*dim1*dim2*dimNb; + + // Create and populate the array with random float values + float bigArray1[dimNb][dim0][dim1]; + for (std::size_t n = 0; n < dimNb; ++n) { + for (std::size_t i = 0; i < dim0; ++i) { + for (std::size_t j = 0; j < dim1; ++j) { + bigArray1[n][i][j] = dis(gen); // Generate random float value + } + } + } + float bigArray2[dimNb][dim1][dim2]; + for (std::size_t n = 0; n < dimNb; ++n) { + for (int i = 0; i < dim1; ++i) { + for (int j = 0; j < dim2; ++j) { + bigArray2[n][i][j] = dis(gen); // Generate random float value + } + } + } + float res[dimNb][dim0][dim2]; + for 
(std::size_t n = 0; n < dimNb; ++n) { + for (int i = 0; i < dim0; ++i) { + for (int j = 0; j < dim2; ++j) { + float sum = 0.0; + for (int k = 0; k < dim1; ++k) { + sum += bigArray1[n][i][k] * bigArray2[n][k][j]; + } + res[n][i][j] = sum; + } + } + } + // Convert bigArray1 to Tensor + std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(DataType::Float32); + T1 -> resize({dimNb,dim0,dim1}); + T1 -> setBackend("cpu"); + T1 -> getImpl() -> setRawPtr(&bigArray1[0][0], dimNb*dim0*dim1); + // Convert bigArray2 to Tensor + std::shared_ptr<Tensor> T2 = std::make_shared<Tensor>(DataType::Float32); + T2 -> resize({dimNb,dim1,dim2}); + T2 -> setBackend("cpu"); + T2 -> getImpl() -> setRawPtr(&bigArray2[0][0], dimNb*dim1*dim2); + // convert res to Tensor + std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(DataType::Float32); + Tres -> resize({dimNb,dim0,dim2}); + Tres -> setBackend("cpu"); + Tres -> getImpl() -> setRawPtr(&res[0][0], dimNb*dim0*dim2); + + op->associateInput(0, T1); + op->associateInput(1, T2); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + myMatMul->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + } + std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl; + std::cout << "total time: " << duration.count() << std::endl; + } + + SECTION("4-D Tensors") { + std::size_t totalComputation = 0; + duration = std::chrono::duration<double, std::micro>::zero(); + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate Tensors dimensions + const std::size_t dimNb1 = distNbMatrix(gen); + const std::size_t dimNb2 = distNbMatrix(gen); + const std::size_t dim0 = distDims(gen); + const std::size_t dim1 = distDims(gen); + const std::size_t dim2 = distDims(gen); + totalComputation += dim0*dim1*dim2*dimNb1*dimNb2; + + // Create and populate the array with random float values + float bigArray1[dimNb1][dimNb2][dim0][dim1]; + for (std::size_t n1 = 0; n1 < dimNb1; ++n1) { + for (std::size_t n2 = 0; n2 < dimNb2; ++n2) { + for (std::size_t i = 0; i < dim0; ++i) { + for (std::size_t j = 0; j < dim1; ++j) { + bigArray1[n1][n2][i][j] = dis(gen); // Generate random float value + } + } + } + } + float bigArray2[dimNb1][dimNb2][dim1][dim2]; + for (std::size_t n1 = 0; n1 < dimNb1; ++n1) { + for (std::size_t n2 = 0; n2 < dimNb2; ++n2) { + for (std::size_t i = 0; i < dim1; ++i) { + for (std::size_t j = 0; j < dim2; ++j) { + bigArray2[n1][n2][i][j] = dis(gen); // Generate random float value + } + } + } + } + float res[dimNb1][dimNb2][dim0][dim2]; + for (std::size_t n1 = 0; n1 < dimNb1; ++n1) { + for (std::size_t n2 = 0; n2 < dimNb2; ++n2) { + for (int i = 0; i < dim0; ++i) { + for (int j = 0; j < dim2; ++j) { + float sum = 0.0; + for (int k = 0; k < dim1; ++k) { + sum += bigArray1[n1][n2][i][k] * bigArray2[n1][n2][k][j]; + } + res[n1][n2][i][j] = sum; + } + } + } + } + // Convert bigArray1 to Tensor + std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(DataType::Float32); + T1 -> resize({dimNb1,dimNb2,dim0,dim1}); + T1 -> setBackend("cpu"); + T1 -> getImpl() -> setRawPtr(&bigArray1[0][0], dimNb1*dimNb2*dim0*dim1); + // Convert bigArray2 to Tensor + std::shared_ptr<Tensor> T2 = std::make_shared<Tensor>(DataType::Float32); + T2 -> resize({dimNb1,dimNb2,dim1,dim2}); + T2 -> setBackend("cpu"); + T2 -> getImpl() -> 
setRawPtr(&bigArray2[0][0], dimNb1*dimNb2*dim1*dim2); + // convert res to Tensor + std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(DataType::Float32); + Tres -> resize({dimNb1,dimNb2,dim0,dim2}); + Tres -> setBackend("cpu"); + Tres -> getImpl() -> setRawPtr(&res[0][0], dimNb1*dimNb2*dim0*dim2); + + op->associateInput(0, T1); + op->associateInput(1, T2); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + myMatMul->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + } + std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl; + std::cout << "total time: " << duration.count() << std::endl; + } + + SECTION("+2-D / 1-D") { + // allows to test both computation with a 1-D Tensor and broadcasting + // input_0 + std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(); + op->associateInput(0,T0); + const std::size_t dim0 = distNbMatrix(gen); + const std::size_t dim1 = distNbMatrix(gen) + 1; + const std::size_t dim2 = distNbMatrix(gen); + const std::size_t dim3 = distNbMatrix(gen); + T0->resize({dim0,dim1,dim2,dim3}); + T0->setDataType(DataType::Float32); + T0->setBackend("cpu"); + + // input_1 + std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(); + op -> associateInput(1,T1); + T1->resize({dim3}); + T1->setDataType(DataType::Float32); + T1->setBackend("cpu"); + + op->setDataType(DataType::Float32); op->setBackend("cpu"); op->computeOutputDims(); myMatMul->forward(); - REQUIRE(*(op->getOutput(0)) == *myOutput); - } - // std::cout << static_cast<Tensor>((*myMatMul->getOperator())["weight"])[0][0][0][0] << std::endl; -} \ No newline at end of file + } +} +} // namespace Aidge \ No newline at end of file diff --git a/unit_tests/operator/Test_ReduceMeanImpl.cpp b/unit_tests/operator/Test_ReduceMeanImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..494b7a6ace17173ef7b956bc9dabf4d27e665e5a --- /dev/null +++ b/unit_tests/operator/Test_ReduceMeanImpl.cpp @@ -0,0 +1,172 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> +#include <memory> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/ReduceMean.hpp" +#include "aidge/operator/Conv.hpp" + +#include "aidge/backend/cpu.hpp" + +using namespace Aidge; + +TEST_CASE("[cpu/operator] ReduceMean(forward)", "[ReduceMean][CPU]") { + SECTION("KeepDims") { + SECTION("test 1") { + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> { + { + { + { 5.0, 1.0 }, + { 20.0, 2.0 } + }, + { + { 30.0, 1.0 }, + { 40.0, 2.0 } + }, + { + { 55.0, 1.0 }, + { 60.0, 2.0 } + } + } + }); + Tensor myOutput = Tensor(Array3D<float,3,1,2> { + { + + {{ 12.5, 1.5 }}, + {{ 35.0, 1.5 }}, + {{ 57.5, 1.5 }} + } + }); + + std::shared_ptr<Node> myReduceMean = ReduceMean({1}, 1); + auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator()); + op->associateInput(0,myInput); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myReduceMean->forward(); + op->getOutput(0)->print(); + + REQUIRE(*(op->getOutput(0)) == myOutput); + } + SECTION("test 2") { + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,3,2> { + { + { + { 0.0, 0.0 }, + { 1.0, 1.0 }, + { 2.0, 2.0 } + }, + { + { 3.0, 3.0 }, + { 4.0, 4.0 }, + { 5.0, 5.0 } + }, + { + { 6.0, 6.0 }, + { 7.0, 7.0 }, + { 8.0, 8.0 } + } + } + }); + Tensor myOutput = Tensor(Array3D<float,3,1,1> { + { + + {{ 1.0 }}, + {{ 4.0 }}, + {{ 7.0 }} + } + }); + + std::shared_ptr<Node> myReduceMean = ReduceMean({1, 2}, 1); + auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator()); + op->associateInput(0,myInput); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myReduceMean->forward(); + myOutput.print(); + op->getOutput(0)->print(); + REQUIRE(*(op->getOutput(0)) == myOutput); + } + } + SECTION("not_KeepDims") { + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> { + { + { + { 5.0, 1.0 }, + { 20.0, 2.0 } + }, + { + { 30.0, 1.0 }, + { 40.0, 2.0 } + }, + { + { 55.0, 1.0 }, + { 60.0, 2.0 } + } + } + }); + std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<float,3,2> { + { + { 12.5, 1.5 }, + { 35.0, 1.5 }, + { 57.5, 1.5 } + } + }); + + std::shared_ptr<Node> myReduceMean = ReduceMean({1}, 0); + auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator()); + op->associateInput(0,myInput); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myReduceMean->forward(); + op->getOutput(0)->print(); + + REQUIRE(*(op->getOutput(0)) == *myOutput); + + } + SECTION("all_axes") { + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> { + { + { + { 5.0, 1.0 }, + { 20.0, 2.0 } + }, + { + { 30.0, 1.0 }, + { 40.0, 2.0 } + }, + { + { 55.0, 1.0 }, + { 60.0, 2.0 } + } + } + }); + std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> { + {18.25} + }); + + std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1, 2}, 0); + auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator()); + op->associateInput(0,myInput); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myReduceMean->forward(); + op->getOutput(0)->print(); + + REQUIRE(*(op->getOutput(0)) == *myOutput); + } +} \ No newline at end of file diff --git 
a/unit_tests/operator/Test_ReshapeImpl.cpp b/unit_tests/operator/Test_ReshapeImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1fee1f4cd132acf9ee39a86759f2e628317fce19 --- /dev/null +++ b/unit_tests/operator/Test_ReshapeImpl.cpp @@ -0,0 +1,71 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Reshape.hpp" + +#include "aidge/backend/cpu.hpp" + +#include <memory> + +using namespace Aidge; + +TEST_CASE("[cpu/operator] Reshape(forward)") { + SECTION("1D Tensor") { + std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array1D<float,6> { + {1.0, 2.0, 3.0, 4.0, 5.0, 6.0} + }); + std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,3> { + { + {1.0, 2.0, 3.0}, + {4.0, 5.0, 6.0} + } + }); + + std::shared_ptr<Node> myReshape = Reshape({2, 3}); + auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator()); + op->associateInput(0, input); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myReshape->forward(); + + REQUIRE(*(op->getOutput(0)) == *expectedOutput); + } + SECTION("2D Tensor") { + std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<float,2,3> { + { + {1.0, 2.0, 3.0}, + {4.0, 5.0, 6.0} + } + + }); + std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,3,2> { + { + {1.0, 2.0}, + {3.0, 4.0}, + {5.0, 6.0} + } + }); + + std::shared_ptr<Node> myReshape = Reshape({3, 2}); + auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator()); + op->associateInput(0, input); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myReshape->forward(); + + REQUIRE(*(op->getOutput(0)) == *expectedOutput); + } +} \ No newline at end of file diff --git a/unit_tests/operator/Test_SliceImpl.cpp b/unit_tests/operator/Test_SliceImpl.cpp index 7a71f31e9850852cadd659c91683c30ddcbe9849..0b5ae682c659bf5a0f8d50448733b9ec18a4c36e 100644 --- a/unit_tests/operator/Test_SliceImpl.cpp +++ b/unit_tests/operator/Test_SliceImpl.cpp @@ -163,4 +163,4 @@ TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") { REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims()); REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType()); } -} \ No newline at end of file +} diff --git a/unit_tests/operator/Test_SoftmaxImpl.cpp b/unit_tests/operator/Test_SoftmaxImpl.cpp index 360b7440599030dbd93954e345f0d5986eb83b15..7459a45e48cad74e722dc881e4653d34b7f549d0 100644 --- a/unit_tests/operator/Test_SoftmaxImpl.cpp +++ b/unit_tests/operator/Test_SoftmaxImpl.cpp @@ -41,15 +41,15 @@ TEST_CASE("[cpu/operator] Softmax(forward)", "[Softmax][CPU]") { std::shared_ptr<Node> mySoftmax = Softmax(1); auto op = std::static_pointer_cast<OperatorTensor>(mySoftmax -> getOperator()); - mySoftmax->getOperator()->associateInput(0,input); - mySoftmax->getOperator()->setDataType(DataType::Float32); - mySoftmax->getOperator()->setBackend("cpu"); + op->associateInput(0,input); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); 
op->computeOutputDims(); mySoftmax->forward(); float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 20; ++i) { + for (std::size_t i = 0; i< expectedOutput->size(); ++i) { REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); } @@ -110,17 +110,16 @@ TEST_CASE("[cpu/operator] Softmax(forward)", "[Softmax][CPU]") { std::shared_ptr<Node> mySoftmax = Softmax(1); auto op = std::static_pointer_cast<OperatorTensor>(mySoftmax -> getOperator()); - mySoftmax->getOperator()->associateInput(0,input); - mySoftmax->getOperator()->setDataType(DataType::Float32); - mySoftmax->getOperator()->setBackend("cpu"); + op->associateInput(0,input); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); op->computeOutputDims(); mySoftmax->forward(); float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 54; ++i) { + for (std::size_t i = 0; i< expectedOutput->size(); ++i) { REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); } - // REQUIRE(*mySoftmax->getOperator()->getOutput(0) == *expectedOutput); } } \ No newline at end of file diff --git a/unit_tests/operator/Test_TransposeImpl.cpp b/unit_tests/operator/Test_TransposeImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d381faadd7750f6a9a48fe9371f98e813b94a310 --- /dev/null +++ b/unit_tests/operator/Test_TransposeImpl.cpp @@ -0,0 +1,127 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> +#include <memory> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Transpose.hpp" + +#include "aidge/backend/cpu.hpp" + +using namespace Aidge; + +TEST_CASE("[cpu/operator] Transpose(forward)") { + SECTION("3D Tensor") { + std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array3D<float,2,3,4> { + { + {{0.42507452, 0.11244237, 0.43243718, 0.62354952}, + {0.90250170, 0.48719984, 0.45781207, 0.92536664}, + {0.06348717, 0.91678733, 0.64452291, 0.00484818}}, + + {{0.66873497, 0.99508536, 0.55714869, 0.84887981}, + {0.41666120, 0.92365038, 0.80034822, 0.38721532}, + {0.52037925, 0.53937608, 0.66380072, 0.36330253}} + } + }); + std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array3D<float,2,4,3> { + { + {{0.42507452, 0.90250170, 0.06348717}, + {0.11244237, 0.48719984, 0.91678733}, + {0.43243718, 0.45781207, 0.64452291}, + {0.62354952, 0.92536664, 0.00484818}}, + + {{0.66873497, 0.41666120, 0.52037925}, + {0.99508536, 0.92365038, 0.53937608}, + {0.55714869, 0.80034822, 0.66380072}, + {0.84887981, 0.38721532, 0.36330253}} + } + }); + std::shared_ptr<Node> myTranspose = Transpose<3>(std::array<DimSize_t,3>{{0,2,1}}); + auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator()); + op->associateInput(0,input); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myTranspose->forward(); + + REQUIRE(*(op->getOutput(0)) == *output); + } + SECTION("4D Tensor") { + std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,2,3,1,4> { + { + { + { + {1, 2, 3, 4} + }, + { + {5, 6, 7, 8} + }, + { + {9, 10, 11, 12} + } + }, + { + { + {13, 14, 15, 16} + }, + { + {17, 18, 19, 20} + }, + { + {21, 22, 23, 24} + } + } + } + }); + std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array4D<int,2,4,1,3> { + { + { + { + {1, 5, 9} + }, + { + {2, 6, 10} + }, + { + {3, 7, 11} + }, + { + {4, 8, 12} + } + }, + { + { + {13, 17, 21} + }, + { + {14, 18, 22} + }, + { + {15, 19, 23} + }, + { + {16, 20, 24} + } + } + } + }); + std::shared_ptr<Node> myTranspose = Transpose<4>(std::array<DimSize_t,4>{{0,3,2,1}}); + auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator()); + op->associateInput(0,input); + op->setDataType(DataType::Int32); + op->setBackend("cpu"); + op->computeOutputDims(); + myTranspose->forward(); + + REQUIRE(*(op->getOutput(0)) == *output); + } +} \ No newline at end of file diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp index 8779933fc7fc7c07305f1018f9469895026f05e4..025ca8ba067297ff3232e05ea9142899dca8ddef 100644 --- a/unit_tests/scheduler/Test_Scheduler.cpp +++ b/unit_tests/scheduler/Test_Scheduler.cpp @@ -245,4 +245,104 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") { bool equal = (*result == *expectedOutput); REQUIRE(equal); } -} + + SECTION("Test ConnectInput graph") { + std::shared_ptr<GraphView> g = + Sequential({ + Conv(1, 3, {3, 3}, "conv1"), + Conv(3, 4, {1, 1}, "conv2"), + Conv(4, 3, {1, 1}, "conv3"), + FC(27, 5, false, "fc")}); + + // g->getNode("conv1")->getOperator()->setInput(0, inputTensor); + g->getNode("conv1")->getOperator()->setInput(1, weight1); + g->getNode("conv1")->getOperator()->setInput(2, bias1); + + std::shared_ptr<Tensor> weight2 = + std::make_shared<Tensor>(Array4D<int, 4, 3, 1, 1>{{{{{1}}, {{2}}, {{3}}}, + {{{4}}, {{5}}, 
diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp
index 8779933fc7fc7c07305f1018f9469895026f05e4..025ca8ba067297ff3232e05ea9142899dca8ddef 100644
--- a/unit_tests/scheduler/Test_Scheduler.cpp
+++ b/unit_tests/scheduler/Test_Scheduler.cpp
@@ -245,4 +245,104 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
         bool equal = (*result == *expectedOutput);
         REQUIRE(equal);
     }
-}
+
+    SECTION("Test ConnectInput graph") {
+        std::shared_ptr<GraphView> g =
+            Sequential({
+                Conv(1, 3, {3, 3}, "conv1"),
+                Conv(3, 4, {1, 1}, "conv2"),
+                Conv(4, 3, {1, 1}, "conv3"),
+                FC(27, 5, false, "fc")});
+
+        // g->getNode("conv1")->getOperator()->setInput(0, inputTensor);
+        g->getNode("conv1")->getOperator()->setInput(1, weight1);
+        g->getNode("conv1")->getOperator()->setInput(2, bias1);
+
+        std::shared_ptr<Tensor> weight2 =
+            std::make_shared<Tensor>(Array4D<int, 4, 3, 1, 1>{{{{{1}}, {{2}}, {{3}}},
+                                                               {{{4}}, {{5}}, {{6}}},
+                                                               {{{7}}, {{8}}, {{9}}},
+                                                               {{{10}}, {{11}}, {{12}}}}});
+        std::shared_ptr<Tensor> bias2 = std::make_shared<Tensor>(Array1D<int, 4>{{1, 2, 3, 4}});
+        g->getNode("conv2")->getOperator()->setInput(1, weight2);
+        g->getNode("conv2")->getOperator()->setInput(2, bias2);
+        // *(g->getNode("conv2")->getOperator()->input(1, weight2);
+
+        std::shared_ptr<Tensor> weight3 = std::make_shared<Tensor>(
+            Array4D<int, 3, 4, 1, 1>{{{{{1}}, {{2}}, {{3}}, {{4}}},
+                                      {{{5}}, {{6}}, {{7}}, {{8}}},
+                                      {{{9}}, {{10}}, {{11}}, {{12}}}}});
+        std::shared_ptr<Tensor> bias3 = std::make_shared<Tensor>(Array1D<int, 3>{{1, 2, 3}});
+        g->getNode("conv3")->getOperator()->setInput(1, weight3);
+        g->getNode("conv3")->getOperator()->setInput(2, bias3);
+
+        std::shared_ptr<Tensor> weightfc = std::make_shared<Tensor>(
+            Array2D<int, 5, 27>{{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+                                  15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
+                                 {13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+                                  12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9},
+                                 {10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8,
+                                  9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6},
+                                 {7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5,
+                                  6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3},
+                                 {4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2,
+                                  3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}});
+        std::shared_ptr<Tensor> biasfc = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}});
+        g->getNode("fc")->getOperator()->setInput(1, weightfc);
+        g->getNode("fc")->getOperator()->setInput(2, biasfc);
+
+        // input->addChild(g);
+        g->setDataType(Aidge::DataType::Int32);
+        g->setBackend("cpu");
+        std::vector<std::vector<Aidge::DimSize_t>> dims = {inputTensor->dims()};
+        g->forwardDims(dims);
+        SequentialScheduler scheduler(g);
+
+        std::vector<std::shared_ptr<Aidge::Tensor>> dataIn = {inputTensor};
+        REQUIRE_NOTHROW(scheduler.forward(true, false, dataIn));
+
+        scheduler.saveSchedulingDiagram("schedulingSequential");
+
+        std::shared_ptr<Tensor> expectedOutput1 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{
+            {{{{367, 412, 457}, {592, 637, 682}, {817, 862, 907}},
+              {{854, 980, 1106}, {1484, 1610, 1736}, {2114, 2240, 2366}},
+              {{1341, 1548, 1755}, {2376, 2583, 2790}, {3411, 3618, 3825}}},
+             {{{1492, 1537, 1582}, {1717, 1762, 1807}, {1942, 1987, 2032}},
+              {{4004, 4130, 4256}, {4634, 4760, 4886}, {5264, 5390, 5516}},
+              {{6516, 6723, 6930}, {7551, 7758, 7965}, {8586, 8793, 9000}}}}});
+
+        std::shared_ptr<Tensor> expectedOutput2 = std::make_shared<Tensor>(Array4D<int, 2, 4, 3, 3>{
+            {{{{6099, 7017, 7935}, {10689, 11607, 12525}, {15279, 16197, 17115}},
+              {{13786, 15838, 17890}, {24046, 26098, 28150}, {34306, 36358, 38410}},
+              {{21473, 24659, 27845}, {37403, 40589, 43775}, {53333, 56519, 59705}},
+              {{29160, 33480, 37800}, {50760, 55080, 59400}, {72360, 76680, 81000}}},
+             {{{29049, 29967, 30885}, {33639, 34557, 35475}, {38229, 39147, 40065}},
+              {{65086, 67138, 69190}, {75346, 77398, 79450}, {85606, 87658, 89710}},
+              {{101123, 104309, 107495}, {117053, 120239, 123425}, {132983, 136169, 139355}},
+              {{137160, 141480, 145800}, {158760, 163080, 167400}, {180360, 184680, 189000}}}}});
+
+        std::shared_ptr<Tensor> expectedOutput3 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{
+            {{{{214731, 246591, 278451}, {374031, 405891, 437751}, {533331, 565191, 597051}},
+              {{496804, 570568, 644332}, {865624, 939388, 1013152}, {1234444, 1308208, 1381972}},
+              {{778877, 894545, 1010213}, {1357217, 1472885, 1588553}, {1935557, 2051225, 2166893}}},
+             {{{1011231, 1043091, 1074951}, {1170531, 1202391, 1234251}, {1329831, 1361691, 1393551}},
+              {{2340904, 2414668, 2488432}, {2709724, 2783488, 2857252}, {3078544, 3152308, 3226072}},
+              {{3670577, 3786245, 3901913}, {4248917, 4364585, 4480253}, {4827257, 4942925, 5058593}}}}});
+
+        Tensor expectedOutput4 = Array2D<int, 2, 5>{
+            {{205050376, 198925904, 181355097, 196978090, 238868348},
+             {598467376, 561797804, 560823897, 593043790, 698672948}}};
+        std::shared_ptr<Tensor> other1 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv1")->getOperator())->getOutput(0);
+        bool equal1 = (*other1 == *expectedOutput1);
+        REQUIRE(equal1);
+        std::shared_ptr<Tensor> other2 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv2")->getOperator())->getOutput(0);
+        bool equal2 = (*other2 == *expectedOutput2);
+        REQUIRE(equal2);
+        std::shared_ptr<Tensor> other3 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv3")->getOperator())->getOutput(0);
+        bool equal3 = (*other3 == *expectedOutput3);
+        REQUIRE(equal3);
+        std::shared_ptr<Tensor> other4 = std::static_pointer_cast<OperatorTensor>(g->getNode("fc")->getOperator())->getOutput(0);
+        bool equal4 = (*other4 == expectedOutput4);
+        REQUIRE(equal4);
+    }
+}
\ No newline at end of file
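
The added SECTION exercises the scheduler path where the graph input is not bound to a Producer node: shapes are propagated from the tensor's dims via forwardDims(), and the data itself is handed to forward() at execution time. Condensed to its call pattern, it looks like the sketch below; this reuses only the calls that appear in the test, runWithConnectedInput is a hypothetical wrapper, and the aidge_core headers for GraphView/SequentialScheduler are assumed to be in scope.

    #include <memory>
    #include <vector>

    #include "aidge/backend/cpu.hpp"  // CPU kernels; core headers assumed available

    using namespace Aidge;

    // Hypothetical wrapper around the pattern used in the SECTION above:
    // no Producer is attached to input 0; the tensor is supplied at forward() time.
    void runWithConnectedInput(std::shared_ptr<GraphView> g,
                               std::shared_ptr<Tensor> inputTensor) {
        g->setDataType(DataType::Int32);
        g->setBackend("cpu");
        g->forwardDims({inputTensor->dims()});         // shapes come from the data tensor
        SequentialScheduler scheduler(g);
        scheduler.forward(true, false, {inputTensor}); // data injected here, not via a Producer
    }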