From beaa21574019f65ecae53de9b86be98917cfb251 Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Mon, 17 Mar 2025 09:05:03 +0100
Subject: [PATCH] Add accumulator test for Memorize

---
 src/operator/SubImpl.cpp                  |   1 -
 unit_tests/operator/Test_Memorize.cpp     | 186 +++++++++++++++++++++
 unit_tests/operator/Test_MetaOperator.cpp |   1 -
 3 files changed, 186 insertions(+), 2 deletions(-)

diff --git a/src/operator/SubImpl.cpp b/src/operator/SubImpl.cpp
index 2f829991..36016752 100644
--- a/src/operator/SubImpl.cpp
+++ b/src/operator/SubImpl.cpp
@@ -42,8 +42,7 @@ void Aidge::SubImpl_cpu::forward() {
 template <>
 void Aidge::SubImpl_cpu::backward() {
 
-    Log::info("SubImpl_cpu::backward()");
     const Sub_Op& op_ = dynamic_cast<const Sub_Op&>(mOp);
 
     auto in0 = op_.getInput(0);
     auto in1 = op_.getInput(1);
diff --git a/unit_tests/operator/Test_Memorize.cpp b/unit_tests/operator/Test_Memorize.cpp
index 6c1a617e..8f6b3042 100644
--- a/unit_tests/operator/Test_Memorize.cpp
+++ b/unit_tests/operator/Test_Memorize.cpp
@@ -22,6 +22,11 @@
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/operator/Add.hpp"
 #include "aidge/operator/Memorize.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/PerMemorize.hpp"
+#include "aidge/operator/Pop.hpp"
 #include "aidge/operator/Producer.hpp"
+#include "aidge/operator/Stack.hpp"
 #include "aidge/recipes/GraphViewHelper.hpp"
 #include "aidge/scheduler/SequentialScheduler.hpp"
@@ -63,4 +68,185 @@ TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][CPU]") {
         REQUIRE((*other == expectedOutput));
     }
 }
+
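+// Backward pass through a recurrent Memorize graph; gradients are printed but not yet asserted.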
+TEST_CASE("[cpu/operator] Memorize(backward)", "[Memorize][CPU]") {
+
+    // TODO: extend this with a more complex recurrent graph.
+    SECTION("Simple accumulation") {
+        std::shared_ptr<Tensor> inputTensor =
+                std::make_shared<Tensor>(Array1D<int, 1>{{1}});
+
+        auto input = Producer({1}, "input");
+        auto init = Producer({1}, "init");
+        auto add = Add("add");
+        auto mem = Memorize(3, "mem");
+
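+        // Recurrent loop: add writes into mem, and mem's output 1 feeds back into add.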
+        input->addChild(add, 0, 0);
+        init->addChild(mem, 0, 1);
+        add->addChild(mem, 0, 0);
+        mem->addChild(/*otherNode=*/add, /*outId=*/1, /*otherInId=*/1);
+
+        input->getOperator()->setOutput(0, inputTensor);
+        init->getOperator()->setOutput(0, inputTensor);
+
+        auto g = getConnectedGraphView(input);
+
+        g->setDataType(Aidge::DataType::Int32);
+        g->setBackend("cpu");
+        g->forwardDims();
+        g->save("simple_graph");
+
+        SequentialScheduler scheduler(g);
+        REQUIRE_NOTHROW(scheduler.forward());
+        scheduler.saveSchedulingDiagram("simple");
+
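+        // With init = 1 and input = 1, three steps accumulate 2, 3 and finally 4.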
+        const Tensor expectedOutput = Array1D<int, 1>{{4}};
+        std::shared_ptr<Tensor> other = std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0);
+        other->print();
+        REQUIRE((*other == expectedOutput));
+
+        // Gradients are printed rather than asserted: the expected values for
+        // Memorize's backward pass are not specified yet.
+        Log::notice("Gradient before backward:");
+        other->grad()->print();
+        REQUIRE_NOTHROW(scheduler.backward());
+        Log::notice("Gradient after backward:");
+        other->grad()->print();
+    }
+
+    SECTION("Test 2") {
+
+        auto mul = Mul();
+        auto mem = Memorize(/*endStep=*/3);
+        auto add = Add();
+        auto pop = Pop();
+
+        // Constant inputs, the decay factor, and the initialization tensor for Memorize.
+        auto input = Producer(std::make_shared<Tensor>(Array1D<float,2>({1.0f, 1.0f})), "input");
+        auto decay = Producer(std::make_shared<Tensor>(Array1D<float,2>({0.9f, 0.9f})), "decay");
+        auto init = Producer(std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})), "init");
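+        // Unit gradient injected at the Memorize output before the backward pass.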
+        auto back = std::make_shared<Tensor>(Array1D<float,2>({1.0f, 1.0f}));
+
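+        // Three identical two-element input steps provided to Pop.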
+        auto initTensor = std::make_shared<Tensor>(Array2D<float,3,2>({
+            {
+                {1,1},
+                {1,1},
+                {1,1},
+            }
+        }));
+
+        std::static_pointer_cast<OperatorTensor>(pop->getOperator())->setInput(0, initTensor);
+
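+        // Loop: mem's output 1 is scaled by decay, added to the Pop output, and written back into mem.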
+        init->addChild(mem, 0, 1);
+        mem->addChild(mul, 1, 0);
+        decay->addChild(mul, 0, 1);
+        mul->addChild(add, 0, 1);
+        pop->addChild(add, 0, 0);
+        add->addChild(mem, 0, 0);
+
+        auto graphView = getConnectedGraphView(mem);
+        graphView->compile();
+
+        auto scheduler = SequentialScheduler(graphView);
+        scheduler.forward();
+
+        std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->print();
+
+        graphView->save("graphSimple2");
+        // Inject the unit gradient at the Memorize output, then run the backward pass.
+        std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->setGrad(back);
+
+        REQUIRE_NOTHROW(scheduler.backward());
+
+        // Expected gradient: 0.81 (i.e. 0.9 squared).
+        std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getInput(0)->grad()->print();
+        std::static_pointer_cast<OperatorTensor>(pop->getOperator())->getInput(0)->grad()->print();
+        // TODO: assert the gradient values instead of printing them.
+    }
+}
+
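+// Forward pass of the same accumulator built around the PerMemorize operator, with a Stack collecting outputs.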
+TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][Periodic]") {
+    auto mul = Mul();
+
+    auto mem = PerMemorize(2);
+
+    auto add = Add();
+    auto pop = Pop();
+    auto stack = Stack(3);
+
+    // Initialization tensor for Memorize
+    auto input = Producer(std::make_shared<Tensor>(Array1D<float,2>({1.0f, 1.0f})), "input");
+    auto decay = Producer(std::make_shared<Tensor>(Array1D<float,2>({0.9f, 0.9f})), "decay");
+    auto init = Producer(std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})), "init");
+    auto back = std::make_shared<Tensor>(Array1D<float,2>({1.0f, 1.0f}));
+
+    auto initTensor = std::make_shared<Tensor>(Array2D<float,3,2>({
+        {
+            {1,1},
+            {1,1},
+            {1,1},
+        }
+    }));
+
+    std::static_pointer_cast<OperatorTensor>(pop->getOperator())->setInput(0, initTensor);
+
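+    // Give both PerMemorize outputs zero-valued tensors before scheduling.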
+    auto memOp = std::static_pointer_cast<OperatorTensor>(mem->getOperator());
+    memOp->setOutput(1, std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})));
+    memOp->setOutput(0, std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})));
+
+    mem->addChild(mul, 1, 0);
+    decay->addChild(mul, 0, 1);
+    mul->addChild(add, 0, 1);
+    pop->addChild(add, 0, 0);
+    add->addChild(mem, 0, 0);
+    mem->addChild(stack, 1, 0);
+
+    auto graphView = getConnectedGraphView(mem);
+    graphView->compile();
+
+    Log::info("GraphView output nodes : {}", graphView->outputNodes().size());
+    for(auto node : graphView->outputNodes())
+    {
+       Log::info("output node type : {}", node->type());
+    }
+    // TODO: Set ordered outputs for this node.
+
+    auto scheduler = SequentialScheduler(graphView);
+    REQUIRE_NOTHROW(scheduler.forward());
+
+    // TODO: assert the stacked values instead of only printing them.
+    std::static_pointer_cast<OperatorTensor>(stack->getOperator())->getOutput(0)->print();
+}
+
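+// Minimal scalar accumulator driven by the standard Memorize operator.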
+TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][Periodic2]") {
+
+
+    auto input = Producer(std::make_shared<Tensor>(1.0f));
+    auto init = Producer(std::make_shared<Tensor>(1.0f));
+    auto add = Add();
+
+    auto mem = Memorize(3);
+
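+    // Same wiring as the "Simple accumulation" section, using scalar tensors.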
+    input->addChild(add, 0, 0);
+    add->addChild(mem, 0, 0);
+    mem->addChild(add, 1, 1);
+    init->addChild(mem, 0, 1);
+
+    auto gv = getConnectedGraphView(mem);
+    gv->compile();
+
+    auto scheduler = SequentialScheduler(gv);
+    REQUIRE_NOTHROW(scheduler.forward());
+
+    // The memorized value is expected to reach 4 after three steps (1 + 1 + 1 + 1).
+    std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->print();
+}
 } // namespace Aidge
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index 234a4e70..4a4dc22d 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -881,7 +881,6 @@ TEST_CASE("[cpu/operator] MetaOperator", "[Leaky][CPU][Simple]") {
             {1,1,1,1},
     }});
 
-
     auto pop = Pop("pop");
     auto popOp = std::static_pointer_cast<OperatorTensor>(pop->getOperator());
     auto stack = Stack(nbTimeSteps, "stack");
-- 
GitLab