diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 9e58f2a07ca73d65f15290982ae833ac2dc3b4c9..973fc6f9a94d108d8b81c93384ef8468d8247c41 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -40,16 +40,16 @@ using NumpyDType = py::detail::npy_api::constants;
 // Map Numpy dtype ids to aidge datatypes.
 // If a numpy dtype is not present, np array of this type is rejected.
 static const std::map<NumpyDType, DataType> NumpyTypeNameAsNativeType = {
-    { NumpyDType::NPY_INT8_, NativeType<std::int8_t>::type },
-    { NumpyDType::NPY_INT16_, NativeType<std::int16_t>::type },
-    { NumpyDType::NPY_INT32_, NativeType<std::int32_t>::type },
-    { NumpyDType::NPY_INT64_, NativeType<std::int64_t>::type },
-    { NumpyDType::NPY_UINT8_, NativeType<std::uint8_t>::type },
-    { NumpyDType::NPY_UINT16_, NativeType<std::uint16_t>::type },
-    { NumpyDType::NPY_UINT32_, NativeType<std::uint32_t>::type },
-    { NumpyDType::NPY_UINT64_, NativeType<std::uint64_t>::type },
-    { NumpyDType::NPY_FLOAT_, NativeType<float>::type },
-    { NumpyDType::NPY_DOUBLE_, NativeType<double>::type },
+    { NumpyDType::NPY_INT8_, NativeType_v<std::int8_t> },
+    { NumpyDType::NPY_INT16_, NativeType_v<std::int16_t> },
+    { NumpyDType::NPY_INT32_, NativeType_v<std::int32_t> },
+    { NumpyDType::NPY_INT64_, NativeType_v<std::int64_t> },
+    { NumpyDType::NPY_UINT8_, NativeType_v<std::uint8_t> },
+    { NumpyDType::NPY_UINT16_, NativeType_v<std::uint16_t> },
+    { NumpyDType::NPY_UINT32_, NativeType_v<std::uint32_t> },
+    { NumpyDType::NPY_UINT64_, NativeType_v<std::uint64_t> },
+    { NumpyDType::NPY_FLOAT_, NativeType_v<float> },
+    { NumpyDType::NPY_DOUBLE_, NativeType_v<double> },
 };
 
 // The Numpy API indexes that we need to convert bare numpy scalars
@@ -159,20 +159,20 @@ static bool getScalarNativeVal(const py::object obj, NativeValue* val_ptr, DataT
         using caster_i64 = py::detail::type_caster<std::int64_t>;
         using caster_f32 = py::detail::type_caster<float>;
         if (caster_i32().load(obj, false)) {
-            native_dtype = NativeType<std::int32_t>::type;
+            native_dtype = NativeType_v<std::int32_t>;
             native_val.i32 = py::cast<std::int32_t>(obj);
         } else if (caster_i64().load(obj, false)) {
-            native_dtype = NativeType<std::int64_t>::type;
+            native_dtype = NativeType_v<std::int64_t>;
             native_val.i64 = py::cast<std::int64_t>(obj);
         } else {
-            native_dtype = NativeType<float>::type;
+            native_dtype = NativeType_v<float>;
             native_val.f32 = py::cast<float>(obj);
         }
         found = true;
     } else if (py::isinstance<py::float_>(obj)) {
         // Note that for native python float, we cast to float32 which may loss
         // precision as python floats are of type float64.
-        native_dtype = NativeType<float>::type;
+        native_dtype = NativeType_v<float>;
         native_val.f32 = py::cast<float>(obj);
         found = true;
     }
@@ -196,19 +196,19 @@ static void getConservativeNativeVal(const py::object obj, NativeValue *val_ptr,
         using caster_i64 = py::detail::type_caster<std::int64_t>;
         using caster_u64 = py::detail::type_caster<std::uint64_t>;
         if (caster_i64().load(obj, false)) {
-            native_dtype = NativeType<std::int64_t>::type;
+            native_dtype = NativeType_v<std::int64_t>;
             native_val.i64 = py::cast<std::int64_t>(obj);
         } else if (caster_u64().load(obj, false)) {
-            native_dtype = NativeType<std::uint64_t>::type;
+            native_dtype = NativeType_v<std::uint64_t>;
             native_val.u64 = py::cast<std::uint64_t>(obj);
         } else {
-            native_dtype = NativeType<double>::type;
+            native_dtype = NativeType_v<double>;
             native_val.f64 = py::cast<double>(obj);
         }
         found = true;
     } else if (py::isinstance<py::float_>(obj)) {
         // Note that for conservative cast we use double which is our larger float
-        native_dtype = NativeType<double>::type;
+        native_dtype = NativeType_v<double>;
         native_val.f64 = py::cast<double>(obj);
         found = true;
     }
@@ -289,7 +289,7 @@ void addArrayCtor(pyTensorClass& mTensor) {
         /* Request a buffer descriptor from Python */
         py::buffer_info info = b.request();
         Tensor* newTensor = new Tensor();
-        newTensor->setDataType(NativeType<T>::type);
+        newTensor->setDataType(NativeType_v<T>);
         const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end());
         newTensor->resize(dims);
 
diff --git a/src/filler/ConstantFiller.cpp b/src/filler/ConstantFiller.cpp
index b2118866f92290103d50290085c7675215a4d997..82ac42293bcb6956ba6bc4d2896f5ffc33286fba 100644
--- a/src/filler/ConstantFiller.cpp
+++ b/src/filler/ConstantFiller.cpp
@@ -23,7 +23,7 @@
 template<typename T>
 void Aidge::constantFiller(std::shared_ptr<Aidge::Tensor> tensor, T constantValue) {
     AIDGE_ASSERT(tensor->getImpl(), "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+    AIDGE_ASSERT(NativeType_v<T> == tensor->dataType(), "Wrong data type");
 
     std::shared_ptr<Aidge::Tensor> cpyTensor;
     // Create cpy only if tensor not on CPU
diff --git a/src/filler/HeFiller.cpp b/src/filler/HeFiller.cpp
index ff20b76183c03e7ac90b5c225b3da7a8c6ffb2df..866e6581f3245d4324f5796a571c35c9c46a703d 100644
--- a/src/filler/HeFiller.cpp
+++ b/src/filler/HeFiller.cpp
@@ -20,7 +20,7 @@ void Aidge::heFiller(std::shared_ptr<Aidge::Tensor> tensor,
                      Aidge::VarianceNorm varianceNorm, T meanNorm, T scaling) {
     AIDGE_ASSERT(tensor->getImpl(), "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+    AIDGE_ASSERT(NativeType_v<T> == tensor->dataType(), "Wrong data type");
 
     unsigned int fanIn, fanOut = 0;
     Aidge::calculateFanInFanOut(tensor, fanIn, fanOut);
diff --git a/src/filler/NormalFiller.cpp b/src/filler/NormalFiller.cpp
index f30b32431cf466b10c1b10df8e0e5ccec9f483b6..334c0c12d3e821bf371e32c818acc5e8a3c85ea0 100644
--- a/src/filler/NormalFiller.cpp
+++ b/src/filler/NormalFiller.cpp
@@ -20,7 +20,7 @@ void Aidge::normalFiller(std::shared_ptr<Aidge::Tensor> tensor, double mean,
                          double stdDev) {
     AIDGE_ASSERT(tensor->getImpl(), "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+    AIDGE_ASSERT(NativeType_v<T> == tensor->dataType(), "Wrong data type");
 
     std::normal_distribution<T> normalDist(mean, stdDev);
diff --git a/src/filler/UniformFiller.cpp b/src/filler/UniformFiller.cpp
index 1951fcc623612bd688048fcc5fb71526032b2a4a..22dc479b63045d35db8f7319b06a464f191f920e 100644
--- a/src/filler/UniformFiller.cpp
+++ b/src/filler/UniformFiller.cpp
@@ -20,7 +20,7 @@ template <typename T>
 void Aidge::uniformFiller(std::shared_ptr<Aidge::Tensor> tensor, T min, T max) {
     AIDGE_ASSERT(tensor->getImpl(), "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type {} and {}",NativeType<T>::type, tensor->dataType());
+    AIDGE_ASSERT(NativeType_v<T> == tensor->dataType(), "Wrong data type {} and {}",NativeType_v<T>, tensor->dataType());
 
     using DistType = typename std::conditional<
diff --git a/src/filler/XavierFiller.cpp b/src/filler/XavierFiller.cpp
index 734874d449c83087ca0e93df7eeb620e178ee7ba..6856ab218f7d78e9b0ed3331a1424734bf0e573a 100644
--- a/src/filler/XavierFiller.cpp
+++ b/src/filler/XavierFiller.cpp
@@ -20,7 +20,7 @@ void Aidge::xavierUniformFiller(std::shared_ptr<Aidge::Tensor> tensor, T scaling,
                                 Aidge::VarianceNorm varianceNorm) {
     AIDGE_ASSERT(tensor->getImpl(), "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+    AIDGE_ASSERT(NativeType_v<T> == tensor->dataType(), "Wrong data type");
 
     unsigned int fanIn, fanOut = 0;
     Aidge::calculateFanInFanOut(tensor, fanIn, fanOut);
@@ -54,7 +54,7 @@ void Aidge::xavierNormalFiller(std::shared_ptr<Aidge::Tensor> tensor, T scaling,
                                Aidge::VarianceNorm varianceNorm) {
     AIDGE_ASSERT(tensor->getImpl(), "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+    AIDGE_ASSERT(NativeType_v<T> == tensor->dataType(), "Wrong data type");
 
     unsigned int fanIn, fanOut = 0;
     Aidge::calculateFanInFanOut(tensor, fanIn, fanOut);
diff --git a/src/operator/Clip.cpp b/src/operator/Clip.cpp
index 10b864b54594c86ed1486611fdd91fd916f2291b..62787ebcf4e0633292cf54a3161248b5dcd30fac 100644
--- a/src/operator/Clip.cpp
+++ b/src/operator/Clip.cpp
@@ -32,10 +32,10 @@ bool Aidge::Clip_Op::dimsForwarded() const {
 }
 
-bool Aidge::Clip_Op::forwardDims(bool allowDataDependency) 
+bool Aidge::Clip_Op::forwardDims(bool allowDataDependency)
 {
-    if (getInput(1) ) 
-    { 
+    if (getInput(1) )
+    {
         if( this->min() != std::numeric_limits<float>::lowest())
         {
             Log::notice("{} : ignoring non-empty min attribute because input#1 "
@@ -49,11 +49,11 @@ bool Aidge::Clip_Op::forwardDims(bool allowDataDependency)
             return false;
         }
         std::shared_ptr<Tensor> fallback;
-        const auto& minV = mInputs[1]->refCastFrom(fallback, NativeType<float>::type, "cpu");
+        const auto& minV = mInputs[1]->refCastFrom(fallback, NativeType_v<float>, "cpu");
         this->min() = *(static_cast<float*>(minV.getImpl()->hostPtr()));
     }
-    if (getInput(2)) 
-    { 
+    if (getInput(2))
+    {
         if( this->max() != std::numeric_limits<float>::max())
         {
             Log::notice("{} : ignoring non-empty max attribute because input#2 "
@@ -67,7 +67,7 @@
             return false;
         }
         std::shared_ptr<Tensor> fallback;
-        const auto& maxV = mInputs[2]->refCastFrom(fallback, NativeType<float>::type, "cpu");
+        const auto& maxV = mInputs[2]->refCastFrom(fallback, NativeType_v<float>, "cpu");
         this->max() = *(static_cast<float*>(maxV.getImpl()->hostPtr()));
     }
     if (!inputsAssociated(false)) {
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 0ebc3e3bc81b15d9414d01f12a2768be6a7ddc42..e0990437a06d5b9fb72cf1909d78f6094120bf80 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -104,7 +104,7 @@ bool Aidge::Gather_Op::forwardDims(bool allowDataDependency) {
             this->gatheredShape() = getInput(1)->dims();
             this->indices().clear(); // If both are provided input would override attrs
             this->indices().reserve(getInput(1)->size());
-            const auto& indices = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            const auto& indices = mInputs[1]->refCastFrom(fallback, NativeType_v<int64_t>, "cpu");
             std::copy_n(static_cast<int64_t*>(indices.getImpl()->hostPtr()),
                         indices.size(),
                         std::back_inserter(this->indices()));
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 429bbe17dffd879d700efb6110d01a11e2bb140f..8b42cb51440cbc61bf8d4dbf69524adb15dbeb44 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -91,7 +91,7 @@ bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
             std::shared_ptr<Tensor> fallback;
             this->shape().clear(); // If both are provided input would override attrs
             this->shape().reserve(getInput(1)->size());
-            const auto& shape = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            const auto& shape = mInputs[1]->refCastFrom(fallback, NativeType_v<int64_t>, "cpu");
             std::copy_n(static_cast<int64_t*>(shape.getImpl()->hostPtr()),
                         shape.size(),
                         std::back_inserter(this->shape()));
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index 252f55a6abdea13cc43cf21d3e8c7ab33ddbb86e..b2ef56572a5f972cd0f5be6a276780e5f27536de 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -125,7 +125,7 @@ bool Resize_Op::forwardDims(bool allowDataDependency) {
             std::shared_ptr<Tensor> fallback;
             const auto &sizes = resizeParam
                                     ->refCastFrom(fallback,
-                                                  NativeType<DimSize_t>::type,
+                                                  NativeType_v<DimSize_t>,
                                                   resizeParam->backend());
 
             for (std::size_t dim = 0; dim < getInput(inSizesIdx)->size(); ++dim) {
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 31c7c09c9ff41c163f5d505bd4ce6b3aeaf42872..7945200aabbae23abce7d1698b5ddbe8f7ec0882 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -172,7 +172,7 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
             this->starts().clear(); // If both are provided input would override attrs
             this->starts().reserve(getInput(1)->size());
-            const auto& starts = getInput(1)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            const auto& starts = getInput(1)->refCastFrom(fallback, NativeType_v<int64_t>, "cpu");
             std::copy_n(static_cast<int64_t*>(starts.getImpl()->hostPtr()),
                         starts.size(),
                         std::back_inserter(this->starts()));
@@ -193,7 +193,7 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
             this->ends().clear(); // If both are provided input would override attrs
             this->ends().reserve(getInput(2)->size());
-            const auto& ends = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            const auto& ends = getInput(2)->refCastFrom(fallback, NativeType_v<int64_t>, "cpu");
             std::copy_n(static_cast<int64_t*>(ends.getImpl()->hostPtr()),
                         ends.size(),
                         std::back_inserter(this->ends()));
@@ -214,7 +214,7 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
             this->axes().clear(); // If both are provided input would override attrs
             this->axes().reserve(getInput(3)->size());
-            const auto& axes = getInput(3)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+            const auto& axes = getInput(3)->refCastFrom(fallback, NativeType_v<int8_t>, "cpu");
             std::copy_n(static_cast<int8_t*>(axes.getImpl()->hostPtr()),
                         axes.size(),
                         std::back_inserter(this->axes()));
@@ -235,7 +235,7 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
             this->steps().clear(); // If both are provided input would override attrs
             this->steps().reserve(getInput(4)->size());
-            const auto& steps = getInput(4)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            const auto& steps = getInput(4)->refCastFrom(fallback, NativeType_v<int64_t>, "cpu");
             std::copy_n(static_cast<int64_t*>(steps.getImpl()->hostPtr()),
                         steps.size(),
                         std::back_inserter(this->steps()));
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index 2191f14a150088dfa1d369d2ef31051e5ab16326..09aad0674bc424f50483c064cb7201bc20499faa 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -109,7 +109,7 @@ bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
             std::shared_ptr<Tensor> fallback;
             this->split().clear(); // If both are provided input would override attrs
             this->split().reserve(getInput(1)->size());
-            const auto& splits = getInput(1)->refCastFrom(fallback, NativeType<DimSize_t>::type, "cpu");
+            const auto& splits = getInput(1)->refCastFrom(fallback, NativeType_v<DimSize_t>, "cpu");
             std::copy_n(static_cast<DimSize_t*>(splits.getImpl()->hostPtr()),
                         splits.size(),
                         std::back_inserter(this->split()));
diff --git a/src/operator/Squeeze.cpp b/src/operator/Squeeze.cpp
index b51b4f346c92f778fe0a044df187cd8d0d0f7304..a44146366d5466768d937261729325697ce24f6e 100644
--- a/src/operator/Squeeze.cpp
+++ b/src/operator/Squeeze.cpp
@@ -67,7 +67,7 @@ bool Squeeze_Op::forwardDims(bool allowDataDependency) {
     this->axes().clear(); // If both are provided input would override attrs
     this->axes().reserve(getInput(1)->size());
     const auto &axes =
-        getInput(1)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+        getInput(1)->refCastFrom(fallback, NativeType_v<int8_t>, "cpu");
     if (axes.nbDims() == 0) {
       this->axes().clear();
     } else {
diff --git a/src/operator/Stack.cpp b/src/operator/Stack.cpp
index ab9ddc4f705cb00cebbe5b9ee68fb1433586a043..a938f470ded47d65a9b15b93ca66dfe186d61e9f 100644
--- a/src/operator/Stack.cpp
+++ b/src/operator/Stack.cpp
@@ -102,7 +102,7 @@ bool Aidge::StackOp::forwardDims(bool allowDataDependency) {
         }
 
         std::shared_ptr<Tensor> fallback;
-        const auto& maxElements = getInput(1)->refCastFrom(fallback, NativeType<std::uint32_t>::type, "cpu");
+        const auto& maxElements = getInput(1)->refCastFrom(fallback, NativeType_v<std::uint32_t>, "cpu");
         AIDGE_ASSERT(maxElements.size() > 0, "Input#1 size should be > 0");
         this->maxElements() = static_cast<std::uint32_t*>(maxElements.getImpl()->hostPtr())[0];
     }
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
index f3353b45cd6a732fa456ea0585ec5d040d53ef31..414afc10f5ea091ba9f30c327ccfbcfe6b3fd558 100644
--- a/src/operator/Unsqueeze.cpp
+++ b/src/operator/Unsqueeze.cpp
@@ -61,7 +61,7 @@ bool Unsqueeze_Op::forwardDims(bool allowDataDependency) {
     this->axes().clear(); // If both are provided input would override attrs
     this->axes().reserve(getInput(1)->size());
     const auto &axes =
-        getInput(1)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+        getInput(1)->refCastFrom(fallback, NativeType_v<int8_t>, "cpu");
     std::copy_n(static_cast<int8_t *>(axes.getImpl()->hostPtr()), axes.size(),
                 std::back_inserter(this->axes()));
   }
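
Note: every hunk above is the same mechanical substitution, replacing the trait
lookup NativeType<T>::type with the variable template NativeType_v<T>. The header
that introduces NativeType_v is not part of this diff, so the following is only a
sketch of the pattern; the stand-in DataType enum and the specializations are
illustrative assumptions, not Aidge's actual definitions:

    #include <cstdint>

    // Stand-in for Aidge's DataType enum (illustration only).
    enum class DataType { Float32, Float64, Int32, Int64 };

    // Trait form used before this diff: NativeType<T>::type.
    template <typename T> struct NativeType;
    template <> struct NativeType<float>        { static constexpr DataType type = DataType::Float32; };
    template <> struct NativeType<double>       { static constexpr DataType type = DataType::Float64; };
    template <> struct NativeType<std::int32_t> { static constexpr DataType type = DataType::Int32; };
    template <> struct NativeType<std::int64_t> { static constexpr DataType type = DataType::Int64; };

    // Variable-template form adopted by this diff (C++14): NativeType_v<T>
    // yields the same constant without the ::type member access, mirroring
    // the std::is_same_v-style shorthands in the standard library.
    template <typename T>
    constexpr DataType NativeType_v = NativeType<T>::type;

    static_assert(NativeType_v<float> == NativeType<float>::type,
                  "identical by construction");

Call sites get shorter and, in dependent contexts, drop the typename/::type
noise; behavior is unchanged, which is why no hunk in this diff touches logic.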