From c5f13c8e18534222d3e410cb52e986a21374f4f0 Mon Sep 17 00:00:00 2001 From: Jerome Hue <jerome.hue@cea.fr> Date: Thu, 14 Nov 2024 16:28:56 +0100 Subject: [PATCH] chore: Update stack operator tests --- include/aidge/operator/Stack.hpp | 21 +++- src/operator/Stack.cpp | 28 +++++- unit_tests/operator/Test_StackImpl.cpp | 133 ++++++++++++++++++++++++- 3 files changed, 172 insertions(+), 10 deletions(-) diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp index 24ac075cf..cf85bbe72 100644 --- a/include/aidge/operator/Stack.hpp +++ b/include/aidge/operator/Stack.hpp @@ -12,15 +12,26 @@ #include "aidge/utils/Types.h" namespace Aidge { +class StackProdConso : public ProdConso { + public: + StackProdConso(const Operator &op) : ProdConso(op) {} + Elts_t getRequiredMemory( + const IOIndex_t outputIdx, + const std::vector<DimSize_t> &inputsSize) const override final; +}; class StackOpImpl : public OperatorImpl { public: StackOpImpl(const Operator &op, const std::string &backend = "") : OperatorImpl(op, backend) {} + + std::shared_ptr<ProdConso> getProdConso() const override { + return std::make_shared<StackProdConso>(mOp); + }; void forward() override; }; -enum class StackAttr { MaxElements }; +enum class StackAttr { ForwardStep, MaxElements }; class StackOp : public OperatorTensor, @@ -30,7 +41,8 @@ class StackOp std::function<std::unique_ptr<OperatorImpl>(const StackOp &)>> { private: - using Attributes_ = StaticAttributes<StackAttr, std::uint32_t>; + using Attributes_ = + StaticAttributes<StackAttr, std::uint32_t, std::uint32_t>; template <StackAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; @@ -64,10 +76,15 @@ class StackOp inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } + inline std::uint32_t &maxElements() const { return mAttributes->template getAttr<StackAttr::MaxElements>(); } + inline std::uint32_t &forwardStep() const { + return 
mAttributes->template getAttr<StackAttr::ForwardStep>(); + } + static const std::vector<std::string> getInputsName() { return {"data_input"}; } diff --git a/src/operator/Stack.cpp b/src/operator/Stack.cpp index 9bdfadd08..183e43c84 100644 --- a/src/operator/Stack.cpp +++ b/src/operator/Stack.cpp @@ -22,18 +22,38 @@ namespace Aidge { +Aidge::Elts_t Aidge::StackProdConso::getRequiredMemory( + const Aidge::IOIndex_t inputIdx, + const std::vector<DimSize_t> &inputsSize) const { + assert(mOp.getRawInput(inputIdx) && "requires valid input"); + + const StackOp &op = dynamic_cast<const StackOp &>(mOp); + // The data produced after one forward pass is simply the input size; + // we do not produce the whole output tensor every time. + // The output tensor is set to its max dimensions only to allow dimensions + // to forward. + return Elts_t::DataElts(op.getInput(inputIdx)->size()); +} + const std::string StackOp::s_type = "Stack"; void StackOpImpl::forward() { const StackOp &op = dynamic_cast<const StackOp &>(mOp); - assert(op.getInput(0) && "missing input #0"); - //*op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()}); + AIDGE_ASSERT(op.getInput(0), "missing input #0"); + AIDGE_ASSERT((op.forwardStep() < op.maxElements()), + "cannot forward anymore, number of cycles exceeded"); + + op.getOutput(0)->getImpl()->copy( + op.getInput(0)->getImpl()->rawPtr(), + op.getInput(0)->size(), + op.forwardStep() * op.getInput(0)->size()); } StackOp::StackOp(std::uint32_t maxElements) : OperatorTensor(s_type, {InputCategory::Data}, 1), mAttributes(std::make_shared<Attributes_>( - attr<StackAttr::MaxElements>(maxElements))) { + attr<StackAttr::MaxElements>(maxElements), + attr<StackAttr::ForwardStep>(0))) { mImpl = std::make_shared<StackOpImpl>(*this); } @@ -75,7 +95,9 @@ std::set<std::string> StackOp::getAvailableBackends() const { } void StackOp::forward() { + Log::info("fw step {}", forwardStep()); Operator::forward(); + ++forwardStep(); } std::shared_ptr<Node> 
stack(std::uint32_t maxElements, diff --git a/unit_tests/operator/Test_StackImpl.cpp b/unit_tests/operator/Test_StackImpl.cpp index 7652c03a7..f65ea4005 100644 --- a/unit_tests/operator/Test_StackImpl.cpp +++ b/unit_tests/operator/Test_StackImpl.cpp @@ -14,11 +14,12 @@ #include <catch2/matchers/catch_matchers_string.hpp> #include <cstddef> #include <memory> -#include <random> +#include <random> #include <vector> -#include "aidge/operator/Stack.hpp" #include "aidge/data/Tensor.hpp" +#include "aidge/operator/Stack.hpp" +#include "aidge/utils/TensorUtils.hpp" using Catch::Matchers::Equals; @@ -33,10 +34,13 @@ TEST_CASE("[core/operator] Stack(forwardDims)", "[Stack][forwardDims]") { auto stackNode = stack(maxElementsToStack); auto op = std::dynamic_pointer_cast<StackOp>(stackNode->getOperator()); - std::shared_ptr<Tensor> t0 = std::make_shared<Tensor>(Aidge::Array1D<int,3>{{4,5,6}}); + std::shared_ptr<Tensor> t0 = + std::make_shared<Tensor>(Aidge::Array1D<int, 3>{{4, 5, 6}}); // input #0 should be associated with a Tensor - REQUIRE_THROWS_WITH(op->forwardDims(), Equals("Stack: input #0 should be associated with a Tensor")); + REQUIRE_THROWS_WITH( + op->forwardDims(), + Equals("Stack: input #0 should be associated with a Tensor")); op->associateInput(0, t0); REQUIRE_NOTHROW(op->forwardDims()); @@ -44,4 +48,123 @@ TEST_CASE("[core/operator] Stack(forwardDims)", "[Stack][forwardDims]") { auto dims = op->getOutput(0)->dims(); REQUIRE(dims[0] == maxElementsToStack); } -} // namespace Aidge + +TEST_CASE("[core/operator] Stack(forward)", "[Stack][forward]") { + + auto rd = Catch::Generators::Detail::getSeed; + std::mt19937 gen(rd()); + std::uniform_int_distribution<std::size_t> dimsDist(5, 10); + std::uniform_real_distribution<float> valueDist(0.001f, 1.0f); + + auto maxElementsToStack = dimsDist(gen); + + auto stackNode = stack(maxElementsToStack); + auto op = std::dynamic_pointer_cast<StackOp>(stackNode->getOperator()); + op->setBackend("cpu"); + 
op->setDataType(DataType::Float32); + + SECTION("Stack 1 tensor") { + std::shared_ptr<Tensor> t0 = + std::make_shared<Tensor>(Aidge::Array1D<float, 3>{{4, 5, 6}}); + op->associateInput(0, t0); + REQUIRE_NOTHROW(op->forwardDims()); + + op->setBackend("cpu"); + op->setDataType(DataType::Float32); + + REQUIRE_NOTHROW(op->forward()); + + auto result = op->getOutput(0)->extract({0}); + REQUIRE(approxEq<float>(*t0, result)); + } + + SECTION("Stack many tensors") + { + std::shared_ptr<Tensor> t0 = + std::make_shared<Tensor>(Aidge::Array1D<float, 3>{{1,2,3}}); + + std::shared_ptr<Tensor> t1 = + std::make_shared<Tensor>(Aidge::Array1D<float, 3>{{4,5,6}}); + + std::shared_ptr<Tensor> t2 = + std::make_shared<Tensor>(Aidge::Array1D<float, 3>{{7,8,9}}); + + std::shared_ptr<Tensor> expectedResult = + std::make_shared<Tensor>(Aidge::Array2D<float, 3, 3>{ + {{1,2,3}, {4,5,6},{7,8,9}}}); + + op->setBackend("cpu"); + op->setDataType(DataType::Float32); + + op->associateInput(0, t0); + op->forwardDims(); + op->forward(); + + op->associateInput(0, t1); + op->forward(); + + op->associateInput(0, t2); + op->forward(); + + op->getOutput(0)->resize({3,3}); + op->getOutput(0)->print(); + REQUIRE(approxEq<float>(*(op->getOutput(0)), *expectedResult)); + } + + SECTION("Stack too many tensors") + { + std::shared_ptr<Tensor> t0 = + std::make_shared<Tensor>(Aidge::Array1D<float, 3>{{4, 5, 6}}); + op->associateInput(0, t0); + REQUIRE_NOTHROW(op->forwardDims()); + + for(auto i = 0; i < maxElementsToStack; ++i) { + op->forward(); + } + REQUIRE_THROWS(op->forward()); + } + + SECTION("Producer/Consumer model") + { + std::shared_ptr<Tensor> t0 = + std::make_shared<Tensor>(Aidge::Array1D<float, 3>{{1,2,3}}); + + std::shared_ptr<Tensor> t1 = + std::make_shared<Tensor>(Aidge::Array1D<float, 3>{{4,5,6}}); + + std::shared_ptr<Tensor> t2 = + std::make_shared<Tensor>(Aidge::Array1D<float, 3>{{7,8,9}}); + + std::shared_ptr<Tensor> expectedResult = + std::make_shared<Tensor>(Aidge::Array2D<float, 3, 3>{ + 
{{1,2,3}, {4,5,6},{7,8,9}}}); + + op->setBackend("cpu"); + op->setDataType(DataType::Float32); + + op->associateInput(0, t0); + op->forwardDims(); + op->forward(); + op->updateConsummerProducer(); + REQUIRE(op->getNbProducedData(0).data == 3); + + op->associateInput(0, t1); + op->forward(); + op->updateConsummerProducer(); + REQUIRE(op->getNbProducedData(0).data == 6); + + op->associateInput(0, t2); + op->forward(); + op->updateConsummerProducer(); + REQUIRE(op->getNbProducedData(0).data == 9); + + op->getOutput(0)->resize({3,3}); + op->getOutput(0)->print(); + REQUIRE(approxEq<float>(*(op->getOutput(0)), *expectedResult)); + } + + // TODO: Test with random data/dims + // TODO: Test with different dimensions when stacking + // TODO: Test Scheduling, with dummy meta operator +} +} // namespace Aidge -- GitLab