Skip to content
Snippets Groups Projects
Commit 9c1832f9 authored by Olivier BICHLER's avatar Olivier BICHLER
Browse files

Merge branch 'dev' into topk

parents bc09c6ac 2f7af8db
No related tags found
3 merge requests!414Update version 0.5.1 -> 0.6.0,!408[Add] Dropout Operator,!377Add TopK operator
Pipeline #69195 passed
...@@ -6,7 +6,7 @@ function(generate_python_binding name target_to_bind) ...@@ -6,7 +6,7 @@ function(generate_python_binding name target_to_bind)
FetchContent_Declare( FetchContent_Declare(
PyBind11 PyBind11
GIT_REPOSITORY https://github.com/pybind/pybind11.git GIT_REPOSITORY https://github.com/pybind/pybind11.git
GIT_TAG v2.10.4 # or a later release GIT_TAG v2.13.6 # or a later release
) )
FetchContent_MakeAvailable(PyBind11) FetchContent_MakeAvailable(PyBind11)
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include <cmath> // std::abs #include <cmath> // std::abs
#if FMT_VERSION < 100000 // If fmt version is less than 10.0.0 #if FMT_VERSION < 110000 // If fmt version is less than 11.0.0
#include <fmt/core.h> #include <fmt/core.h>
#else #else
#include <fmt/base.h> #include <fmt/base.h>
......
...@@ -52,50 +52,36 @@ bool Clip_Op::dimsForwarded() const { ...@@ -52,50 +52,36 @@ bool Clip_Op::dimsForwarded() const {
bool Clip_Op::forwardDims(bool allowDataDependency) bool Clip_Op::forwardDims(bool allowDataDependency)
{ {
if (getInput(1) ) if (getInput(1))
{ {
if( this->min() != std::numeric_limits<float>::lowest())
{
Log::notice("{} : ignoring non-empty min attribute because input#1 "
"take precedence",
type());
}
if (!allowDataDependency) {
Log::warn("{} : unable to forwardDims() because output dims are data "
"dependent on input#1",
type());
return false;
}
std::shared_ptr<Tensor> fallback; std::shared_ptr<Tensor> fallback;
const auto& minV = mInputs[1]->refCastFrom(fallback, NativeType_v<float>, "cpu"); const auto& minTensor = mInputs[1]->refCastFrom(fallback, DataType::Float32, "cpu");
this->min() = *(static_cast<float*>(minV.getImpl()->hostPtr())); float minValue = *(static_cast<float*>(minTensor.getImpl()->hostPtr()));
if(this->min() != std::numeric_limits<float>::lowest() && this->min() != minValue)
Log::notice("{} : ignoring non-empty min attribute because input#1 take precedence", type());
this->min() = minValue;
} }
if (getInput(2)) if (getInput(2))
{ {
if( this->max() != std::numeric_limits<float>::max())
{
Log::notice("{} : ignoring non-empty max attribute because input#2 "
"take precedence",
type());
}
if (!allowDataDependency) {
Log::warn("{} : unable to forwardDims() because output dims are data "
"dependent on input#2",
type());
return false;
}
std::shared_ptr<Tensor> fallback; std::shared_ptr<Tensor> fallback;
const auto& maxV = mInputs[2]->refCastFrom(fallback, NativeType_v<float>, "cpu"); const auto& maxTensor = mInputs[2]->refCastFrom(fallback, DataType::Float32, "cpu");
this->max() = *(static_cast<float*>(maxV.getImpl()->hostPtr())); float maxValue = *(static_cast<float*>(maxTensor.getImpl()->hostPtr()));
if(this->max() != std::numeric_limits<float>::max() && this->max() != maxValue)
Log::notice("{} : ignoring non-empty max attribute because input#2 take precedence", type());
this->max() = maxValue;
} }
if (!inputsAssociated(false)) {
if (!inputsAssociated(false))
return false; return false;
} else if ((getInput(1) && !getInput(1)->empty()) || (getInput(2) && !getInput(2)->empty()))
else if ((getInput(1) && !getInput(1)->empty()) || (getInput(2) && !getInput(2)->empty())) AIDGE_THROW_OR_ABORT(std::runtime_error, "Expected Input#1 and Input#2 to be scalar (Tensors of empty shapes)");
{
AIDGE_THROW_OR_ABORT(std::runtime_error,"Expected Input#1 and Input#2 to be scalar (Tensors of empty shapes)"); mOutputs[0]->resize(getInput(0)->dims());
}
mOutputs[0] -> resize(getInput(0)->dims());
return true; return true;
} }
void Clip_Op::setBackend(const std::string& name, DeviceIdx_t device) { void Clip_Op::setBackend(const std::string& name, DeviceIdx_t device) {
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment