Commit 839fa0aa authored by Maxence Naud

Merge branch 'upd_ConstantOfShape' into 'dev'

Upd constant of shape

See merge request !143
parents 5ea40966 9d9647aa
3 related merge requests: !166 Update 0.5.0 -> 0.6.0, !143 Upd constant of shape, !136 Add selection mechanism in graph
Pipeline #66557 passed
@@ -12,23 +12,21 @@
 #ifndef AIDGE_CPU_OPERATOR_CONSTANTOFSHAPEIMPL_H_
 #define AIDGE_CPU_OPERATOR_CONSTANTOFSHAPEIMPL_H_

-#include <cstddef>
 #include <memory>
-#include <vector>

 #include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/ConstantOfShape.hpp"
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/utils/Types.h"

 namespace Aidge {
+class Tensor;

 // Operator implementation entry point for the backend
 using ConstantOfShapeImpl_cpu = OperatorImpl_cpu<ConstantOfShape_Op,
-    void(const std::vector<DimSize_t>, const Tensor&, void *)>;
+    void(const std::shared_ptr<Tensor>&, const Tensor&)>;

 // Implementation entry point registration to Operator
 REGISTRAR(ConstantOfShape_Op, "cpu", Aidge::ConstantOfShapeImpl_cpu::create);
 } // namespace Aidge

 #endif /* _AIDGE_CPU_OPERATOR_CONSTANTOFSHAPEIMPL_H_ */
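Note: the change above narrows the kernel entry point. Instead of a dims vector plus a raw void* to the output storage, the kernel now receives the output tensor itself (as a shared_ptr) together with the value tensor. A minimal sketch of why that is enough, using a hypothetical FakeTensor stand-in (not the real Aidge::Tensor): the element count and the host storage travel with the tensor, so the extra parameters become redundant.

    // FakeTensor and fillLikeNewKernel are illustrative stand-ins only.
    #include <algorithm>  // std::fill_n
    #include <cstddef>
    #include <memory>
    #include <vector>

    struct FakeTensor {
        std::vector<float> data;                              // host storage
        std::size_t size() const { return data.size(); }
        void* hostPtr() { return data.data(); }
        const void* hostPtr() const { return data.data(); }
    };

    // Same parameter shape as the new entry point: (output tensor, value tensor).
    void fillLikeNewKernel(const std::shared_ptr<FakeTensor>& output,
                           const FakeTensor& value) {
        float* out = static_cast<float*>(output->hostPtr());
        const float val = *static_cast<const float*>(value.hostPtr());
        std::fill_n(out, output->size(), val);
    }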
@@ -30,20 +30,11 @@
 namespace Aidge {
 template <class O>
 void ConstantOfShapeimpl_cpu_forward_kernel(
-    const std::vector<DimSize_t> output_dims, const Tensor &value,
-    void *output_) {
-  O *output = static_cast<O *>(output_);
-  O val;
-  std::copy(static_cast<O *>(value.getImpl()->hostPtr()),
-            static_cast<O *>(value.getImpl()->hostPtr()) +
-                static_cast<NbElts_t>(1),
-            &val);
-  const size_t output_size = std::accumulate(
-      output_dims.begin(), output_dims.end(), 1, std::multiplies<DimSize_t>());
-  for (size_t i = 0; i < output_size; ++i) {
-    output[i] = val;
-  }
+    const std::shared_ptr<Tensor>& output_, const Tensor &value) {
+  O* output = static_cast<O*>(output_->getImpl()->hostPtr());
+  const O val = *reinterpret_cast<O*>(value.getImpl()->hostPtr());
+  std::fill_n(output, output_->size(), val);
 }

 // Kernels registration to implementation entry point
...
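For reference, the body change replaces the accumulate-over-dims plus element loop with a single std::fill_n over the output's element count. A self-contained sketch (plain std::vector buffers, not Aidge tensors) showing the two patterns produce the same result:

    #include <algorithm>   // std::fill_n
    #include <cassert>
    #include <cstddef>
    #include <functional>  // std::multiplies
    #include <numeric>     // std::accumulate
    #include <vector>

    int main() {
        const std::vector<std::size_t> output_dims{2, 3, 4};
        const double val = 0.5;

        // Old pattern: derive the element count from the dims vector, then loop.
        const std::size_t n = std::accumulate(output_dims.begin(), output_dims.end(),
                                              std::size_t{1}, std::multiplies<std::size_t>());
        std::vector<double> a(n);
        for (std::size_t i = 0; i < n; ++i) {
            a[i] = val;
        }

        // New pattern: a single std::fill_n over the output's element count.
        std::vector<double> b(n);
        std::fill_n(b.data(), b.size(), val);

        assert(a == b);  // both buffers hold 24 copies of 0.5
        return 0;
    }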
@@ -13,15 +13,14 @@
 #include <functional>
 #include <memory>
-#include <vector>
+#include <stdexcept>  // std::runtime_error

 #include "aidge/backend/cpu/operator/ConstantOfShapeImpl_kernels.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/ConstantOfShape.hpp"
+#include "aidge/backend/OperatorImpl.hpp"  // Aidge::getBestMatch, Aidge::getRequiredSpec
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/utils/Types.h"

 template <>
 void Aidge::ConstantOfShapeImpl_cpu::forward() {
@@ -33,9 +32,7 @@ void Aidge::ConstantOfShapeImpl_cpu::forward() {
   const auto impl = Registrar<ConstantOfShapeImpl_cpu>::create(getBestMatch(getRequiredSpec()));

   // Call kernel
-  impl.forward(op_.getOutput(0)->dims(),
-               op_.value(),
-               op_.getOutput(0)->getImpl()->rawPtr());
+  impl.forward(op_.getOutput(0), op_.value());
 }

 template <>
...
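The call site now mirrors the two-argument kernel signature registered in the header: the output tensor and the value tensor, with no dims vector or raw pointer. A minimal sketch with stand-in types (FakeTensor and ForwardKernel are hypothetical, not Aidge classes) showing how a callable of that shape is invoked:

    #include <functional>
    #include <memory>

    struct FakeTensor { double value = 0.0; };

    using ForwardKernel =
        std::function<void(const std::shared_ptr<FakeTensor>&, const FakeTensor&)>;

    int main() {
        ForwardKernel forward = [](const std::shared_ptr<FakeTensor>& output,
                                   const FakeTensor& value) {
            output->value = value.value;  // a real kernel would fill every output element
        };
        auto out = std::make_shared<FakeTensor>();
        forward(out, FakeTensor{3.0});    // mirrors impl.forward(op_.getOutput(0), op_.value())
        return 0;
    }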
@@ -27,89 +27,88 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/filler/Filler.hpp"
 #include "aidge/operator/ConstantOfShape.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/TensorUtils.hpp"
 #include "aidge/utils/Types.h"

 namespace Aidge {

-TEST_CASE("[cpu/operator] ConstantOfShape", "[ConstantOfShape][CPU]") {
+TEST_CASE("[cpu/operator] ConstantOfShape(forward)", "[ConstantOfShape][CPU][forward]") {
   constexpr std::uint16_t NBTRIALS = 10;
   // Create a random number generator
   auto random_seed = Catch::Generators::Detail::getSeed;
   std::mt19937 gen(random_seed());
   std::uniform_real_distribution<float> valueDist(
       0.1f, 1.1f); // Random float distribution between 0 and 1
   std::uniform_int_distribution<DimSize_t> input_tensor_size_dist(
       std::size_t(1), std::size_t(10));
   std::uniform_int_distribution<int64_t> input_tensor_values_dist(
       std::size_t(1), std::size_t(7));
   std::uniform_real_distribution<double> operator_attr_value_dist(-100., 100.);

   ///////////////////////////////////////////////
   // SETUP FUNCTIONS
   auto generate_input_tensor =
       [&gen, &input_tensor_size_dist,
        &input_tensor_values_dist]() -> std::shared_ptr<Tensor> {
     std::vector<DimSize_t> input_dims;
     input_dims.push_back(input_tensor_size_dist(gen));

     auto result = std::make_shared<Tensor>(input_dims);
     result->setDataType(DataType::Int64);
     result->setBackend("cpu");
     for (DimSize_t i = 0; i < result->size(); ++i) {
       result->set<std::int64_t>(i, input_tensor_values_dist(gen));
     }
     return result;
   };

   auto generate_random_operator =
       [&gen,
        &operator_attr_value_dist]() -> std::shared_ptr<ConstantOfShape_Op> {
-    auto node = ConstantOfShape(Tensor(operator_attr_value_dist(gen)));
-    auto op = std::static_pointer_cast<ConstantOfShape_Op>(node->getOperator());
+    std::shared_ptr<ConstantOfShape_Op> op = std::make_shared<ConstantOfShape_Op>(Tensor(operator_attr_value_dist(gen)));
     op->setDataType(DataType::Float64);
     op->setBackend("cpu");
     return op;
   };

   auto generate_output_tensor = [](std::shared_ptr<Tensor> input_tensor,
                                    std::shared_ptr<ConstantOfShape_Op> op) {
     std::vector<DimSize_t> output_dims;
     output_dims.reserve(input_tensor->size());
     for (DimSize_t i = 0; i < input_tensor->size(); ++i) {
-      output_dims.push_back(input_tensor->get<int64_t>(i));
+      output_dims.push_back(input_tensor->get<std::int64_t>(i));
     }
     auto result = std::make_shared<Tensor>(output_dims);
     result->setDataType(op->value().dataType());
     result->setBackend("cpu");
     constantFiller(result, op->value().get<double>(0));
     return result;
   };

   /////////////////////////////////////
   // BENCHMARKING
   std::chrono::time_point<std::chrono::system_clock> start;
   std::chrono::time_point<std::chrono::system_clock> end;
   std::chrono::duration<double, std::micro> duration{};
   int number_of_operation{0};

   SECTION("ConstantOfShapeImpl_cpu::forward()") {
     for (int i = 0; i < NBTRIALS; ++i) {
       auto input_T = generate_input_tensor();
       std::shared_ptr<ConstantOfShape_Op> op = generate_random_operator();
       auto output_T = generate_output_tensor(input_T, op);
       op->associateInput(0, input_T);

       REQUIRE(op->forwardDims(true));
       REQUIRE_NOTHROW(op->forward());

       CHECK(output_T->nbDims() == op->getOutput(0)->nbDims());
       for (DimIdx_t i = 0; i < output_T->nbDims(); ++i) {
         CHECK(output_T->dims().at(i) == op->getOutput(0)->dims().at(i));
       }
       CHECK(approxEq<double>(*output_T, *op->getOutput(0)));
     }
   }
 }
 } // namespace Aidge
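The test derives the expected result directly from the operator's definition: the Int64 input tensor holds the output shape, and every output element equals the operator's value attribute. A small stand-alone sketch of that expectation (expectedConstantOfShape is a hypothetical helper, not part of the test):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // The shape comes from the Int64 input tensor, the fill value from the
    // operator's value attribute.
    std::vector<double> expectedConstantOfShape(const std::vector<std::int64_t>& shape,
                                                double value) {
        std::size_t n = 1;
        for (std::int64_t d : shape) { n *= static_cast<std::size_t>(d); }
        return std::vector<double>(n, value);  // e.g. shape {2, 3}, value 1.25 -> six 1.25s
    }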
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/graph/GraphView.hpp"
#include "aidge/operator/Identity.hpp"
#include "aidge/recipes/Recipes.hpp"
#include <cstdint> // std::int64_t
#include <memory>
#include <catch2/catch_test_macros.hpp>
#include "aidge/graph/OpArgs.hpp"
#include "aidge/operator/ConstantOfShape.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/operator/ReLU.hpp"
#include "aidge/recipes/Recipes.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
TEST_CASE("[cpu/recipes] foldConstantOfShape",
"[ConstantOfShape][foldConstantOfShape][recipes]") {
auto input_T = std::make_shared<Tensor>(Array1D<std::int64_t, 4>({1, 1, 3, 3}));
auto model = std::make_shared<GraphView>();
SECTION("Sequential model") {
model = Sequential({
Producer(input_T, "prod_0", true),
ConstantOfShape(3, "constantOfShape_0"),
Conv(1, 1, {3, 3}, "Conv_0"),
ReLU("ReLU_1")
});
// aidge_backend_cpu loaded. Recipe should work
REQUIRE(foldConstantOfShape(model) == 1);
CHECK(model->forwardDims());
}
}
} // namespace Aidge
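For context, the recipe tested above is expected to replace the ConstantOfShape node, whose shape input comes from a constant Producer, with a Producer holding the precomputed tensor, which is why foldConstantOfShape returns 1 (one fold) and model->forwardDims() then succeeds. A stand-in sketch (not Aidge code) of the constant such a Producer would hold, assuming the 3 passed to ConstantOfShape above is the fill value:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct FoldedConstant {
        std::vector<std::int64_t> dims;  // {1, 1, 3, 3}
        std::vector<double> data;        // 9 elements, all 3.0
    };

    FoldedConstant foldConstantOfShapeValue(const std::vector<std::int64_t>& shape,
                                            double value) {
        std::size_t n = 1;
        for (std::int64_t d : shape) { n *= static_cast<std::size_t>(d); }
        return FoldedConstant{shape, std::vector<double>(n, value)};
    }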