// Test_ReLUImpl.cpp
/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#include <catch2/catch_test_macros.hpp>

#include "aidge/data/Tensor.hpp"
#include "aidge/data/TensorImpl.hpp"
#include "aidge_cpu.hpp"
#include "aidge/operator/ReLU.hpp"

#include <memory>


using namespace Aidge;

TEST_CASE("[cpu/operator] ReLU(forward)") {
    // Forward-pass test of the cpu ReLU kernel on Int32 tensors of rank 1-4.
    // Each section follows the same pattern:
    //   1. build an input tensor containing a mix of positive, zero and
    //      negative values,
    //   2. build the expected output by hand (negatives clamped to 0,
    //      non-negatives passed through),
    //   3. wire the input into a ReLU node, run forward on the cpu backend,
    //   4. compare the produced output tensor against the expectation.

    SECTION("1D Tensor") {
        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
            {0, 1, 2,-3, 4,-5,-6, 7, 8, 9}
        });
        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,10> {
            {0, 1, 2, 0, 4, 0, 0, 7, 8, 9}
        });

        std::shared_ptr<Node> myReLU = ReLU();
        myReLU->getOperator()->setDatatype(DataType::Int32);
        myReLU->getOperator()->setBackend("cpu");
        myReLU->getOperator()->associateInput(0,input0);
        // Output dims must be propagated from the input before forward().
        myReLU->getOperator()->computeOutputDims();
        myReLU->forward();
        REQUIRE(*(myReLU->getOperator()->getOutput(0)) == *expectedOutput);
    }

    SECTION("2D Tensor") {
        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array2D<int,2,10> {
            {
                { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
                {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
            }
        });
        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<int,2,10> {
            {
                { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
                { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
            }
        });

        std::shared_ptr<Node> myReLU = ReLU();
        myReLU->getOperator()->setDatatype(DataType::Int32);
        myReLU->getOperator()->setBackend("cpu");
        myReLU->getOperator()->associateInput(0,input0);
        myReLU->getOperator()->computeOutputDims();
        myReLU->forward();
        REQUIRE(*myReLU->getOperator()->getOutput(0) == *expectedOutput);
    }

    SECTION("3D Tensor") {
        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array3D<int,2,2,10> {
            {
                {
                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
                },
                {
                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
                }
            }
        });
        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,2,2,10> {
            {
                {
                    { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
                    { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
                },
                {
                    { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
                    { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
                }
            }
        });

        std::shared_ptr<Node> myReLU = ReLU();
        myReLU->getOperator()->setDatatype(DataType::Int32);
        myReLU->getOperator()->setBackend("cpu");
        myReLU->getOperator()->associateInput(0,input0);
        myReLU->getOperator()->computeOutputDims();
        myReLU->forward();
        REQUIRE(*(myReLU->getOperator()->getOutput(0)) == *expectedOutput);
    }

    SECTION("4D Tensor") {
        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
            {
                {
                    {
                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
                    },
                    {
                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
                    }
                },
                {
                    {
                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
                    },
                    {
                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
                    }
                }
            }
        });
        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
            {
                {
                    {
                        { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
                        { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
                    },
                    {
                        { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
                        { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
                    }
                },
                {
                    {
                        { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
                        { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
                    },
                    {
                        { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
                        { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
                    }
                }
            }
        });

        std::shared_ptr<Node> myReLU = ReLU();
        myReLU->getOperator()->setDatatype(DataType::Int32);
        myReLU->getOperator()->setBackend("cpu");
        myReLU->getOperator()->associateInput(0,input0);
        myReLU->getOperator()->computeOutputDims();
        myReLU->forward();
        REQUIRE(*myReLU->getOperator()->getOutput(0) == *expectedOutput);
    }
}