Commit 40a34dc2 authored by Houssem ROUIS

separate fwdDims tests section from fwd section

parent b3ae66f7
3 merge requests: !166 Update 0.5.0 -> 0.6.0, !136 Add selection mechanism in graph, !130 fix failed onnx tests
@@ -19,86 +19,85 @@

using namespace Aidge;

TEST_CASE("[cpu/operator] Equal(forwardDims)", "[Equal][CPU]") {
    constexpr std::uint16_t NBTRIALS = 10;
    // Create a random number generator
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0.1 and 1.1
    std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
    std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5));
    std::uniform_int_distribution<int> boolDist(0,1);

    SECTION("Same dimensions") {
        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
            DimSize_t nbDims = nbDimsDist(gen);
            std::vector<DimSize_t> dims(nbDims);
            for (std::size_t i = 0; i < nbDims; i++) {
                dims[i] = dimSizeDist(gen);
            }

            std::shared_ptr<Tensor> myInput1 = std::make_shared<Tensor>(dims);
            myInput1->setBackend("cpu");
            myInput1->setDataType(DataType::Float32);
            myInput1->zeros();
            std::shared_ptr<Tensor> myInput2 = std::make_shared<Tensor>(dims);
            myInput2->setBackend("cpu");
            myInput2->setDataType(DataType::Float32);
            myInput2->zeros();

            std::shared_ptr<Node> myEqual = Equal();
            auto op = std::static_pointer_cast<OperatorTensor>(myEqual->getOperator());
            op->associateInput(0, myInput1);
            op->associateInput(1, myInput2);
            op->setDataType(DataType::Float32);
            op->setBackend("cpu");
            op->forwardDims();

            const auto outputDims = op->getOutput(0)->dims();
            REQUIRE(outputDims == dims);
        }
    }

    SECTION("Broadcasting") {
        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
            DimSize_t nbDims = nbDimsDist(gen);
            std::vector<DimSize_t> dims1(nbDims, 1);
            std::vector<DimSize_t> dims2(nbDims, 1);
            std::vector<DimSize_t> expectedOutDims;
            for (std::size_t i = 0; i < nbDims; i++) {
                DimSize_t dim = dimSizeDist(gen);
                if (boolDist(gen)) {
                    dims1[i] = dim;
                }
                if (boolDist(gen)) {
                    dims2[i] = dim;
                }
                expectedOutDims.push_back(std::max(dims1[i], dims2[i]));
            }

            std::shared_ptr<Tensor> myInput1 = std::make_shared<Tensor>(dims1);
            myInput1->setBackend("cpu");
            myInput1->setDataType(DataType::Float32);
            myInput1->zeros();
            std::shared_ptr<Tensor> myInput2 = std::make_shared<Tensor>(dims2);
            myInput2->setBackend("cpu");
            myInput2->setDataType(DataType::Float32);
            myInput2->zeros();

            std::shared_ptr<Node> myEqual = Equal();
            auto op = std::static_pointer_cast<OperatorTensor>(myEqual->getOperator());
            op->associateInput(0, myInput1);
            op->associateInput(1, myInput2);
            op->setDataType(DataType::Float32);
            op->setBackend("cpu");
            op->forwardDims();

            const auto outputDims = op->getOutput(0)->dims();
            REQUIRE(outputDims == expectedOutDims);
        }
    }
}

TEST_CASE("[cpu/operator] Equal(forward)", "[Equal][CPU]") {
    SECTION("Same size inputs") {
        std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
            { //
...
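For reference, the "Broadcasting" section checks a simple expected-shape rule: both inputs have the same rank, mismatched axes have size 1, and on each axis the output size is the larger of the two input sizes. A minimal standalone sketch of that rule is shown below; the helper name broadcastDims is illustrative and not part of the Aidge API.

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Illustrative helper (not from Aidge): computes the broadcast output shape
    // for two equal-rank inputs, mirroring the expectedOutDims computation in
    // the test: on each axis the output size is the larger of the two inputs.
    std::vector<std::size_t> broadcastDims(const std::vector<std::size_t>& a,
                                           const std::vector<std::size_t>& b) {
        std::vector<std::size_t> out(a.size());
        for (std::size_t i = 0; i < a.size(); ++i) {
            out[i] = std::max(a[i], b[i]);
        }
        return out;
    }

    // Example: broadcastDims({3, 1, 5}, {1, 4, 5}) returns {3, 4, 5}.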