/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#include <algorithm>   // std::fill
#include <cstddef>     // std::size_t
#include <cstdint>     // std::int32_t, std::uint16_t
#include <memory>
#include <random>      // std::random_device, std::mt19937
                       // std::uniform_int_distribution, std::uniform_real_distribution
#include <vector>

#include <catch2/catch_test_macros.hpp>
#include <fmt/core.h>

#include "aidge/backend/cpu/data/TensorImpl.hpp"
#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
#include "aidge/data/DataType.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/ReduceMean.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/TensorUtils.hpp"

using namespace Aidge;

TEST_CASE("[cpu/operator] ReduceMean(forward)", "[ReduceMean][CPU]") {
    SECTION("ForwardDims")
    {
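        // Shape-propagation checks on randomly generated input shapes: only the
        // dimensions computed by forwardDims() are verified, not the output values.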
        constexpr std::uint16_t NBTRIALS = 10;
        // Create a random number generator
        std::random_device rd;
        std::mt19937 gen(rd());
        std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0.1 and 1.1 (not used in these shape-only checks)
        std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
        std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5));
        std::uniform_int_distribution<int> boolDist(0,1);

        SECTION("KeepDims") {
            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
                DimSize_t nbDims = nbDimsDist(gen);
                std::vector<DimSize_t> dims(nbDims);
                std::vector<DimSize_t> expectedOutDims(nbDims);
                std::vector<std::int32_t> axes;
                for (std::size_t i = 0; i < nbDims; i++) {
                    dims[i] = dimSizeDist(gen);
                    expectedOutDims[i] = dims[i];
                    if(boolDist(gen)) {
                        axes.push_back(i);
                        expectedOutDims[i] = 1;
                    }
                }
                if (axes.empty()) { // Default behaviour if no axes are provided is to reduce all dimensions
                   std::fill(expectedOutDims.begin(), expectedOutDims.end(), 1);
                }

                std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(dims);
                myInput->setBackend("cpu");
                myInput->setDataType(DataType::Float32);
                myInput->zeros();
                std::shared_ptr<Node> myReduceMean = ReduceMean(axes, true);
                auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
                op->associateInput(0,myInput);
                op->setDataType(DataType::Float32);
                op->setBackend("cpu");
                op->forwardDims();

                const auto outputDims = op->getOutput(0)->dims();
                REQUIRE(outputDims == expectedOutDims);
            }
        }
        SECTION("Not KeepDims") {
            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
                DimSize_t nbDims = nbDimsDist(gen);
                std::vector<DimSize_t> dims(nbDims);
                std::vector<DimSize_t> expectedOutDims;
                std::vector<std::int32_t> axes;
                for (std::size_t i = 0; i < nbDims; i++) {
                    dims[i] = dimSizeDist(gen);
                    if(boolDist(gen)) {
                        axes.push_back(i);
                    }
                    else {
                        expectedOutDims.push_back(dims[i]);
                    }
                }
                if (axes.empty() || expectedOutDims.empty()) { // If no axes are provided, or every dimension is reduced, the output collapses to a single element
                   expectedOutDims = std::vector<DimSize_t>{1};
                }

                std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(dims);
                myInput->setBackend("cpu");
                myInput->setDataType(DataType::Float32);
                std::shared_ptr<Node> myReduceMean = ReduceMean(axes, false);
                auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
                op->associateInput(0,myInput);
                op->setDataType(DataType::Float32);
                op->setBackend("cpu");

                op->forwardDims();

                const auto outputDims = op->getOutput(0)->dims();
                REQUIRE(outputDims == expectedOutDims);
            }
        }
        SECTION("NoopWithEmptyAxes") {
            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
                DimSize_t nbDims = nbDimsDist(gen);
                std::vector<DimSize_t> dims(nbDims);
                for (std::size_t i = 0; i < nbDims; i++) {
                    dims[i] = dimSizeDist(gen);
                }
                std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(dims);
                myInput->setBackend("cpu");
                myInput->setDataType(DataType::Float32);
                std::shared_ptr<Node> myReduceMean = ReduceMean(std::vector<int32_t>{}, false, true);
                auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
                op->associateInput(0,myInput);
                op->setDataType(DataType::Float32);
                op->setBackend("cpu");

                op->forwardDims();

                const auto outputDims = op->getOutput(0)->dims();
                REQUIRE(outputDims == dims);
            }
        }
        SECTION("Not NoopWithEmptyAxes") {
            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
                DimSize_t nbDims = nbDimsDist(gen);
                std::vector<DimSize_t> dims(nbDims);
                for (std::size_t i = 0; i < nbDims; i++) {
                    dims[i] = dimSizeDist(gen);
                }
                std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(dims);
                myInput->setBackend("cpu");
                myInput->setDataType(DataType::Float32);
                std::shared_ptr<Node> myReduceMean = ReduceMean({}, false, false);
                auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
                op->associateInput(0,myInput);
                op->setDataType(DataType::Float32);
                op->setBackend("cpu");

                op->forwardDims();

                REQUIRE(op->getOutput(0)->nbDims() == 1);
                REQUIRE(op->getOutput(0)->size() == 1);
            }
        }
    }
    SECTION("KeepDims") {
        SECTION("test 1") {
            std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {
                {
                    {
                        { 5.0, 1.0 },
                        { 20.0, 2.0 }
                    },
                    {
                        { 30.0, 1.0 },
                        { 40.0, 2.0 }
                    },
                    {
                        { 55.0, 1.0 },
                        { 60.0, 2.0 }
                    }
                }
            });
            Tensor myOutput = Tensor(Array3D<float,3,1,2> {
                {
                    {{ 12.5, 1.5 }},
                    {{ 35.0, 1.5 }},
                    {{ 57.5, 1.5 }}
                }
            });

            std::shared_ptr<Node> myReduceMean = ReduceMean({1}, true);
            auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
            op->associateInput(0,myInput);
            op->setDataType(DataType::Float32);
            op->setBackend("cpu");
            myReduceMean->forward();
            op->getOutput(0)->print();

            REQUIRE(*(op->getOutput(0)) == myOutput);
        }
        SECTION("test 2") {
            std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,3,2> {
                {
                    {
                        { 0.0, 0.0 },
                        { 1.0, 1.0 },
                        { 2.0, 2.0 }
                    },
                    {
                        { 3.0, 3.0 },
                        { 4.0, 4.0 },
                        { 5.0, 5.0 }
                    },
                    {
                        { 6.0, 6.0 },
                        { 7.0, 7.0 },
                        { 8.0, 8.0 }
                    }
                }
            });
            Tensor myOutput = Tensor(Array3D<float,3,1,1> {
                {
                    {{ 1.0 }},
                    {{ 4.0 }},
                    {{ 7.0 }}
                }
            });

            std::shared_ptr<Node> myReduceMean = ReduceMean({1, 2}, true);
            auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
            op->associateInput(0,myInput);
            op->setDataType(DataType::Float32);
            op->setBackend("cpu");
            myReduceMean->forward();
            myOutput.print();
            op->getOutput(0)->print();
            REQUIRE(*(op->getOutput(0)) == myOutput);
        }
    }
    SECTION("not_KeepDims") {
        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {
            {
                {
                    { 5.0, 1.0 },
                    { 20.0, 2.0 }
                },
                {
                    { 30.0, 1.0 },
                    { 40.0, 2.0 }
                },
                {
                    { 55.0, 1.0 },
                    { 60.0, 2.0 }
                }
            }
        });
        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<float,3,2> {
            {
                { 12.5, 1.5 },
                { 35.0, 1.5 },
                { 57.5, 1.5 }
            }
        });

        std::shared_ptr<Node> myReduceMean = ReduceMean({1}, false);
        auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
        op->associateInput(0,myInput);
        op->setDataType(DataType::Float32);
        op->setBackend("cpu");
        myReduceMean->forward();
        op->getOutput(0)->print();

        REQUIRE(*(op->getOutput(0)) == *myOutput);

    }
    SECTION("all_axes") {
        SECTION("1") {
            std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {
                {
                    {
                        { 5.0, 1.0 },
                        { 20.0, 2.0 }
                    },
                    {
                        { 30.0, 1.0 },
                        { 40.0, 2.0 }
                    },
                    {
                        { 55.0, 1.0 },
                        { 60.0, 2.0 }
                    }
                }
            });
            std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> {
                {18.25}
            });

            std::shared_ptr<Node> myReduceMean = ReduceMean({}, false);
            auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
            op->associateInput(0,myInput);
            op->setDataType(DataType::Float32);
            op->setBackend("cpu");
            myReduceMean->forward();
            op->getOutput(0)->print();

            REQUIRE(*(op->getOutput(0)) == *myOutput);
        }
        SECTION("2") {
            std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<float,5,4> {
               {{ 0.004232f, 0.105120f, 0.045124f, 0.009205f},
                { 0.000766f, 0.272162f, 0.503560f, 0.044163f},
                { 0.049755f, 0.000305f, 0.143634f, 0.013253f},
                { 0.096258f, 0.311231f, 0.358143f, 0.000452f},
                { 0.468617f, 0.015693f, 0.145316f, 0.000105f}}
            });
            std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> {
                {0.1293547f}
            });

            std::shared_ptr<Node> myReduceMean = ReduceMean({}, false);
            auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
            op->associateInput(0,myInput);
            op->setDataType(DataType::Float32);
            op->setBackend("cpu");
            myReduceMean->forward();

            REQUIRE(approxEq<float>(*(op->getOutput(0)), *myOutput));
        }
        SECTION("noop_with_empty_axes") {
            std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {
                {
                    {
                        { 5.0, 1.0 },
                        { 20.0, 2.0 }
                    },
                    {
                        { 30.0, 1.0 },
                        { 40.0, 2.0 }
                    },
                    {
                        { 55.0, 1.0 },
                        { 60.0, 2.0 }
                    }
                }
            });

            std::shared_ptr<Node> myReduceMean = ReduceMean({}, false, true);
            auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
            op->associateInput(0,myInput);
            op->setDataType(DataType::Float32);
            op->setBackend("cpu");
            myReduceMean->forward();
            op->getOutput(0)->print();

            REQUIRE(*(op->getOutput(0)) == *myInput);
        }
    }
}