diff --git a/cmake/PybindModuleCreation.cmake b/cmake/PybindModuleCreation.cmake
index 853810e24b40eadb0830645a4373c238177ad649..e3f1cf6fa94e72c19ed46bb799c47f9cdb23e3f7 100644
--- a/cmake/PybindModuleCreation.cmake
+++ b/cmake/PybindModuleCreation.cmake
@@ -6,7 +6,7 @@ function(generate_python_binding name target_to_bind)
     FetchContent_Declare(
         PyBind11
         GIT_REPOSITORY https://github.com/pybind/pybind11.git
-        GIT_TAG v2.10.4 # or a later release
+        GIT_TAG v2.13.6 # or a later release
     )
 
     FetchContent_MakeAvailable(PyBind11)
diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp
index 0ed0da97cbc81fcdb607f74c28c320d2c259195d..c3c03ab85d65a5e15dec3cdd98e098b47e85bd1a 100644
--- a/include/aidge/utils/TensorUtils.hpp
+++ b/include/aidge/utils/TensorUtils.hpp
@@ -14,7 +14,7 @@
 #include "aidge/data/Tensor.hpp"
 #include <cmath> // std::abs
 
-#if FMT_VERSION < 100000 // If fmt version is less than 10.0.0
+#if FMT_VERSION < 110000 // If fmt version is less than 11.0.0
 #include <fmt/core.h>
 #else
 #include <fmt/base.h>
diff --git a/src/operator/Clip.cpp b/src/operator/Clip.cpp
index 87ac105daf249ef1888307d5cda14a2e432d0aaf..1b4b261246fbf1b86b8a3414fee5a7ebb66b6bef 100644
--- a/src/operator/Clip.cpp
+++ b/src/operator/Clip.cpp
@@ -52,50 +52,36 @@ bool Clip_Op::dimsForwarded() const {
 
 bool Clip_Op::forwardDims(bool allowDataDependency)
 {
-    if (getInput(1) )
+    if (getInput(1))
     {
-        if( this->min() != std::numeric_limits<float>::lowest())
-        {
-            Log::notice("{} : ignoring non-empty min attribute because input#1 "
-                        "take precedence",
-                        type());
-        }
-        if (!allowDataDependency) {
-            Log::warn("{} : unable to forwardDims() because output dims are data "
-                      "dependent on input#1",
-                      type());
-            return false;
-        }
         std::shared_ptr<Tensor> fallback;
-        const auto& minV = mInputs[1]->refCastFrom(fallback, NativeType_v<float>, "cpu");
-        this->min() = *(static_cast<float*>(minV.getImpl()->hostPtr()));
+        const auto& minTensor = mInputs[1]->refCastFrom(fallback, DataType::Float32, "cpu");
+        float minValue = *(static_cast<float*>(minTensor.getImpl()->hostPtr()));
+
+        if(this->min() != std::numeric_limits<float>::lowest() && this->min() != minValue)
+            Log::notice("{} : ignoring non-empty min attribute because input#1 take precedence", type());
+
+        this->min() = minValue;
     }
     if (getInput(2))
     {
-        if( this->max() != std::numeric_limits<float>::max())
-        {
-            Log::notice("{} : ignoring non-empty max attribute because input#2 "
-                        "take precedence",
-                        type());
-        }
-        if (!allowDataDependency) {
-            Log::warn("{} : unable to forwardDims() because output dims are data "
-                      "dependent on input#2",
-                      type());
-            return false;
-        }
         std::shared_ptr<Tensor> fallback;
-        const auto& maxV = mInputs[2]->refCastFrom(fallback, NativeType_v<float>, "cpu");
-        this->max() = *(static_cast<float*>(maxV.getImpl()->hostPtr()));
+        const auto& maxTensor = mInputs[2]->refCastFrom(fallback, DataType::Float32, "cpu");
+        float maxValue = *(static_cast<float*>(maxTensor.getImpl()->hostPtr()));
+
+        if(this->max() != std::numeric_limits<float>::max() && this->max() != maxValue)
+            Log::notice("{} : ignoring non-empty max attribute because input#2 take precedence", type());
+
+        this->max() = maxValue;
     }
-    if (!inputsAssociated(false)) {
+
+    if (!inputsAssociated(false))
         return false;
-    }
-    else if ((getInput(1) && !getInput(1)->empty()) || (getInput(2) && !getInput(2)->empty()))
-    {
-        AIDGE_THROW_OR_ABORT(std::runtime_error,"Expected Input#1 and Input#2 to be scalar (Tensors of empty shapes)");
-    }
-    mOutputs[0] -> resize(getInput(0)->dims());
+    else if ((getInput(1) && !getInput(1)->empty()) || (getInput(2) && !getInput(2)->empty()))
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Expected Input#1 and Input#2 to be scalar (Tensors of empty shapes)");
+
+    mOutputs[0]->resize(getInput(0)->dims());
+    return true;
 }
 
 void Clip_Op::setBackend(const std::string& name, DeviceIdx_t device) {