From 2fab2dd486d91d9c00cb707be5176e14f6ca4722 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Fri, 5 Jan 2024 14:38:28 +0100
Subject: [PATCH] Fixed reviewed issues

---
 include/aidge/backend/TensorImpl.hpp       | 23 +++++++---------
 include/aidge/data/Tensor.hpp              | 32 +++++++---------------
 include/aidge/graph/GraphView.hpp          |  4 +--
 include/aidge/operator/Add.hpp             |  2 +-
 include/aidge/operator/AvgPooling.hpp      |  2 +-
 include/aidge/operator/BatchNorm.hpp       |  2 +-
 include/aidge/operator/Cast.hpp            |  7 +++--
 include/aidge/operator/Concat.hpp          |  2 +-
 include/aidge/operator/Conv.hpp            |  2 +-
 include/aidge/operator/ConvDepthWise.hpp   |  2 +-
 include/aidge/operator/Div.hpp             |  2 +-
 include/aidge/operator/FC.hpp              |  2 +-
 include/aidge/operator/GenericOperator.hpp |  2 +-
 include/aidge/operator/Identity.hpp        |  2 +-
 include/aidge/operator/LeakyReLU.hpp       |  2 +-
 include/aidge/operator/MatMul.hpp          |  2 +-
 include/aidge/operator/MaxPooling.hpp      |  2 +-
 include/aidge/operator/MetaOperator.hpp    |  2 +-
 include/aidge/operator/Move.hpp            |  4 +--
 include/aidge/operator/Mul.hpp             |  2 +-
 include/aidge/operator/Operator.hpp        |  2 +-
 include/aidge/operator/Pad.hpp             |  2 +-
 include/aidge/operator/Pow.hpp             |  2 +-
 include/aidge/operator/Producer.hpp        |  2 +-
 include/aidge/operator/ReLU.hpp            |  2 +-
 include/aidge/operator/Scaling.hpp         |  2 +-
 include/aidge/operator/Slice.hpp           |  2 +-
 include/aidge/operator/Softmax.hpp         |  2 +-
 include/aidge/operator/Sqrt.hpp            |  2 +-
 include/aidge/operator/Sub.hpp             |  2 +-
 include/aidge/utils/Registrar.hpp          | 10 +++----
 include/aidge/utils/TensorUtils.hpp        |  2 +-
 include/aidge/utils/Types.h                |  4 +++
 src/data/Tensor.cpp                        |  4 +--
 src/graph/GraphView.cpp                    |  4 +--
 src/operator/Cast.cpp                      |  2 ++
 src/operator/Move.cpp                      |  2 ++
 37 files changed, 72 insertions(+), 76 deletions(-)

diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index bc33616a5..1e5c49baf 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -67,18 +67,18 @@ private:
 class TensorImpl {
 public:
     TensorImpl() = delete;
-    TensorImpl(const char *backend, int device = 0) : mBackend(backend), mDevice(device){};
+    TensorImpl(const char *backend, DeviceIdx_t device = 0) : mBackend(backend), mDevice(device){};

     /**
      * Return the (backend, device) pair for this implementation.
      */
-    std::pair<std::string, int> device() const { return std::make_pair(mBackend, mDevice); }
+    std::pair<std::string, DeviceIdx_t> device() const { return std::make_pair(mBackend, mDevice); }

     /**
      * Set the device ID for current backend.
      * @param device New device ID on current backend.
      */
-    virtual void setDevice(int device) = 0;
+    virtual void setDevice(DeviceIdx_t device) = 0;

     /**
      * Copy data from the same device.
@@ -102,7 +102,7 @@ public:
      * @param src Pointer on current implementation backend.
      * @param length Number of elements to copy.
      */
-    virtual void copyFromDevice(const void *src, NbElts_t length, const std::pair<std::string, int>& device) = 0;
+    virtual void copyFromDevice(const void *src, NbElts_t length, const std::pair<std::string, DeviceIdx_t>& device) = 0;

     /**
      * Copy data from host.
@@ -121,21 +121,18 @@ public:
     /**
      * Return the raw device pointer.
      * The raw pointer is guaranteed to be valid only on the *same* device.
+     * @param offset Offset, in number of elements.
      */
-    virtual void* rawPtr() = 0;
-    virtual const void* rawPtr() const = 0;
+    virtual void* rawPtr(NbElts_t offset = 0) = 0;
+    virtual const void* rawPtr(NbElts_t offset = 0) const = 0;

     /**
      * Return the host pointer.
      * If the implementation does not have a valid host pointer, nullptr is returned.
+     * @param offset Offset, in number of elements.
      */
-    virtual void* hostPtr() { return nullptr; };
-    virtual const void* hostPtr() const { return nullptr; };
-
-    /**
-     * Get the device pointer with an offset (in number of elements).
-     */
-    virtual void* getRawPtr(NbElts_t idx) = 0;
+    virtual void* hostPtr(NbElts_t /*offset*/ = 0) { return nullptr; };
+    virtual const void* hostPtr(NbElts_t /*offset*/ = 0) const { return nullptr; };

     /**
      * Sets the device pointer. The previously owned data is deleted.
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 3d518026f..c1458df04 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -228,7 +228,7 @@ class Tensor : public Data,
      * @param copyFrom If true (default), move data from previous backend/device
      * to the new one. Previous data is lost otherwise.
      */
-    inline void setBackend(const std::string &name, int device = 0, bool copyFrom = true) {
+    inline void setBackend(const std::string &name, DeviceIdx_t device = 0, bool copyFrom = true) {
         if (mImpl) {
             if (mImpl->device() != std::make_pair(name, device)) {
                 // Backend change: create new impl, copy from old to new and replace
@@ -363,23 +363,11 @@ class Tensor : public Data,
      */
     bool empty() const { return mDims.empty(); }

-    template <typename expectedType>
-    expectedType& get(std::size_t idx){
-        // TODO : add assert expected Type compatible with datatype
-        // TODO : add assert idx < Size
-        return *reinterpret_cast<expectedType *>(mImpl->getRawPtr(idx));
-    }
-
     template <typename expectedType>
     const expectedType& get(std::size_t idx) const {
-        // TODO : add assert expected Type compatible with datatype
-        // TODO : add assert idx < Size
-        return *reinterpret_cast<expectedType *>(mImpl->getRawPtr(idx));
-    }
-
-    template <typename expectedType>
-    expectedType& get(std::vector<std::size_t> coordIdx){
-        return get<expectedType>(getIdx(coordIdx));
+        AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
+        AIDGE_ASSERT(idx < mSize, "idx out of range");
+        return *reinterpret_cast<expectedType *>(mImpl->hostPtr(idx));
     }

     template <typename expectedType>
@@ -389,9 +377,9 @@ class Tensor : public Data,

     template <typename expectedType>
     void set(std::size_t idx, expectedType value){
-        // TODO : add assert expected Type compatible with datatype
-        // TODO : add assert idx < Size
-        expectedType* dataPtr = static_cast<expectedType*>(mImpl->getRawPtr(idx));
+        AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
+        AIDGE_ASSERT(idx < mSize, "idx out of range");
+        expectedType* dataPtr = static_cast<expectedType*>(mImpl->hostPtr(idx));
         *dataPtr = value;
     }

@@ -610,8 +598,8 @@ class Tensor : public Data,
      * @param device The desired device.
      * @return Reference to either itself or to fallback.
      */
-    Tensor& refFrom(std::shared_ptr<Tensor>& fallback, const std::string &backend, int device = 0);
-    const Tensor& refFrom(std::shared_ptr<Tensor>& fallback, const std::string &backend, int device = 0) const;
+    Tensor& refFrom(std::shared_ptr<Tensor>& fallback, const std::string &backend, DeviceIdx_t device = 0);
+    const Tensor& refFrom(std::shared_ptr<Tensor>& fallback, const std::string &backend, DeviceIdx_t device = 0) const;

     /**
      * Return a reference to a Tensor on desired data type and backend/device:
@@ -628,7 +616,7 @@ class Tensor : public Data,
      * @param device The desired device.
      * @return Reference to either itself or to fallback.
      */
-    Tensor& refCastFrom(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, int device = 0) {
+    Tensor& refCastFrom(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device = 0) {
         // First refFrom, to ensure that fallback, if required, is also on desired device
         return refFrom(fallback, backend, device).refCast(fallback, dt);
     }
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index e22bde7c6..539e8a03e 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -203,7 +203,7 @@ public:
      * If not, add a Transpose Operator.
      * 4 - Propagate Tensor dimensions through the consecutive Operators.
      */
-    void compile(const std::string& backend, const Aidge::DataType datatype, int device = 0);
+    void compile(const std::string& backend, const Aidge::DataType datatype, DeviceIdx_t device = 0);

     /**
      * @brief Compute dimensions of input/output Tensors for each Operator of the
@@ -212,7 +212,7 @@ public:
     void forwardDims();

     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
-    void setBackend(const std::string &backend, int device = 0);
+    void setBackend(const std::string &backend, DeviceIdx_t device = 0);

     /** @brief Set the same data type for each Operator of the GraphView object's Nodes. */
     void setDataType(const DataType &datatype);
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 3ecedf727..9aed8299a 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -76,7 +76,7 @@ public:
     // }


-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Add_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 2d550d173..6c0a64dc8 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -134,7 +134,7 @@ public:
     }


-    void setBackend(const std::string &name, int device = 0) override {
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 055c1b308..d3e0fceab 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -94,7 +94,7 @@ public:
         }
     }

-    void setBackend(const std::string &name, int device = 0) override {
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);

diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 38fb0b1ff..7cc398567 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
 class Cast_Op : public OperatorTensor,
     public Registrable<Cast_Op, std::string, std::unique_ptr<OperatorImpl>(const Cast_Op&)> {
 public:
-    static constexpr const char* Type = "Cast";
+    static const std::string Type;

     Cast_Op() : OperatorTensor(Type, 1, 0, 1) {}

@@ -50,7 +50,10 @@ public:
         return std::make_shared<Cast_Op>(*this);
     }

-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
+        if (Registrar<Cast_Op>::exists({name})) {
+            mImpl = Registrar<Cast_Op>::create({name})(*this);
+        }
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index ca91172f6..06cc468bd 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -101,7 +101,7 @@ public:
         }
     }

-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Concat_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index cbed859d1..6585c2d30 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -171,7 +171,7 @@ std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveFiel
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
     }

-    void setBackend(const std::string &name, int device = 0) override {
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);

diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index c9f172718..839a0ec79 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -165,7 +165,7 @@ public:
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
     }

-    void setBackend(const std::string &name, int device = 0) override {
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);

diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 323f3058b..94b755e0f 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -54,7 +54,7 @@ public:

     void computeOutputDims() override final;

-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Div_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 52297525e..36ff7106c 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -95,7 +95,7 @@ public:
     }


-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<FC_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);

diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 6adf03105..c966b5f5c 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -97,7 +97,7 @@ public:

     ~GenericOperator_Op() = default;

-    void setBackend(const std::string & /*name*/, int /*device*/ = 0) override { printf("setBackend: not available yet.\n"); }
+    void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { printf("setBackend: not available yet.\n"); }
     void setDataType(const DataType& /*datatype*/) const override { printf("setDataType: not available yet.\n"); }
     void forward() override final {
         if(mImpl){
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index 50c5ef941..7348fa10a 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -103,7 +103,7 @@ public:
         }
         return mInputs[outputIdx];
     }
-    void setBackend(const std::string& /*name*/, int /*device*/ = 0) override final {
+    void setBackend(const std::string& /*name*/, DeviceIdx_t /*device*/ = 0) override final {
         // setBackend does nothing: the Identity node has no backend, it just passes along the same Tensor
     }
     void setDataType(const DataType& /*dataType*/) const override final {
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index b8e95b07d..5976f1d88 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -67,7 +67,7 @@ public:



-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 10488ed99..3d80193be 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -83,7 +83,7 @@ public:
     }


-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<MatMul_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 1cfa29b94..467a69d73 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -104,7 +104,7 @@ public:
     }


-    void setBackend(const std::string &name, int device = 0) override {
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index ba1ed5f16..827793a1b 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -70,7 +70,7 @@ public:
     }


-    void setBackend(const std::string &name, int device = 0) override {
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
         if (Registrar<MetaOperator_Op>::exists({name, type()})) {
             // A custom implementation exists for this meta operator
             mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index be7f8922e..62fb98973 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
 class Move_Op : public OperatorTensor,
     public Registrable<Move_Op, std::tuple<std::string, std::string>, std::unique_ptr<OperatorImpl>(const Move_Op&)> {
 public:
-    static constexpr const char* Type = "Move";
+    static const std::string Type;

     Move_Op() : OperatorTensor(Type, 1, 0, 1) {}

@@ -50,7 +50,7 @@ public:
         return std::make_shared<Move_Op>(*this);
     }

-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         if (mInputs[0]->getImpl() && Registrar<Move_Op>::exists({mInputs[0]->getImpl()->backend(), name})) {
             mImpl = Registrar<Move_Op>::create({mInputs[0]->getImpl()->backend(), name})(*this);
         }
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 33011ed1e..78b2fa5f9 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -56,7 +56,7 @@ public:

     void computeOutputDims() override final;

-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Mul_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index ffd627ee7..715b6a028 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -105,7 +105,7 @@ public:
//        IMPLEMENTATION
///////////////////////////////////////////////////////

-    virtual void setBackend(const std::string& name, int device = 0) = 0;
+    virtual void setBackend(const std::string& name, DeviceIdx_t device = 0) = 0;
     virtual void setDataType(const DataType& dataType) const = 0;

     /**
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 11cbc3409..56245dd2d 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -97,7 +97,7 @@ public:
         }
     }

-    void setBackend(const std::string &name, int device = 0) override {
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index d89776d21..d498cacc7 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -54,7 +54,7 @@ public:

     void computeOutputDims() override final;

-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Pow_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 51ce579f6..ee00ead69 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -76,7 +76,7 @@ public:
     inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }


-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Producer_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index d6a8c2b61..0bb7cdffe 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -51,7 +51,7 @@ public:
     }


-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<ReLU_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 077032906..54f1d98d2 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -66,7 +66,7 @@ public:
         return std::make_shared<Scaling_Op>(*this);
     }

-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Scaling_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 95e0c72eb..15a707b5f 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -90,7 +90,7 @@ public:
         mOutputs[0]->resize(outputDims);
     }

-    void setBackend(const std::string &name, int device = 0) override {
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Slice_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 913b58cb5..b04d1ebbd 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -51,7 +51,7 @@ public:
         return std::make_shared<Softmax_Op>(*this);
     }

-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Softmax_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index b95cdfe85..32adfdb93 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -56,7 +56,7 @@ public:
         return std::make_shared<Sqrt_Op>(*this);
     }

-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Sqrt_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index 9b84cf3d2..ee5efa24d 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -59,7 +59,7 @@ public:

     void computeOutputDims() override final;

-    void setBackend(const std::string& name, int device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Sub_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index d2b256582..66a07eb0c 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -54,26 +54,26 @@ struct Registrar {
     typedef typename C::registrar_key registrar_key;
     typedef typename C::registrar_type registrar_type;

-    Registrar(const typename C::registrar_key& key, typename C::registrar_type func) {
+    Registrar(const registrar_key& key, registrar_type func) {
         //printf("REGISTRAR: %s\n", key.c_str());
         bool newInsert;
         std::tie(std::ignore, newInsert) = C::registry().insert(std::make_pair(key, func));
         //assert(newInsert && "registrar already exists");
     }

-    static bool exists(const typename C::registrar_key& key) {
+    static bool exists(const registrar_key& key) {
         const auto it = C::registry().find(key);
         return (it != C::registry().end());
     }

-    static auto create(const typename C::registrar_key& key){
+    static auto create(const registrar_key& key){
         const auto it = C::registry().find(key);
         assert(it != C::registry().end() && "invalid registrar key");
         return (*it).second;
     }

-    static std::vector<typename C::registrar_key> getKeys(){
-        std::vector<typename C::registrar_key> keys;
+    static std::vector<registrar_key> getKeys(){
+        std::vector<registrar_key> keys;
         for(auto keyValue : C::registry())
             keys.push_back(keyValue.first);
         return keys;
diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp
index e4da1e12e..1bfe0929b 100644
--- a/include/aidge/utils/TensorUtils.hpp
+++ b/include/aidge/utils/TensorUtils.hpp
@@ -33,7 +33,7 @@ namespace Aidge {
 * @return true if both tensors are approximately equal and have the same datatype and shape, false otherwise
 */
template <typename T1, typename T2 = T1>
-bool approxEq(const Tensor& t1, const Tensor& t2, float relative, float absolute){
+bool approxEq(const Tensor& t1, const Tensor& t2, float relative = 1e-5f, float absolute = 1e-8f){
    assert(t1.dataType() == NativeType<T1>::type);
    assert(t2.dataType() == NativeType<T2>::type);
    assert(relative >= 0);
diff --git a/include/aidge/utils/Types.h b/include/aidge/utils/Types.h
index d65279f1f..b601df1cb 100644
--- a/include/aidge/utils/Types.h
+++ b/include/aidge/utils/Types.h
@@ -24,6 +24,10 @@ namespace Aidge
 /// Tensor
 //////////////////////////////////////

+/// @brief Device index in a given backend
+using DeviceIdx_t = std::uint8_t;
+constexpr DeviceIdx_t MaxDeviceIdx = std::numeric_limits<DeviceIdx_t>::max();
+
 /// @brief Number of elements used for scheduling
 using NbElts_t = std::size_t;
 constexpr NbElts_t MaxElts = std::numeric_limits<NbElts_t>::max();
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 8a950ba2f..da0c626d7 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -107,12 +107,12 @@ const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, c
     }
 }

-Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, const std::string &backend, int device) {
+Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, const std::string &backend, DeviceIdx_t device) {
     // Scott Meyers' solution to avoid code duplication
     return const_cast<Tensor&>(static_cast<const Tensor&>(*this).refFrom(fallback, backend, device));
 }

-const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, const std::string &backend, int device) const {
+const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, const std::string &backend, DeviceIdx_t device) const {
     AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot refFrom() it");

     if (std::make_pair(backend, device) == getImpl()->device()) {
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 9b788fc5e..dcfa8275a 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -247,7 +247,7 @@ Aidge::GraphView::inputs(std::string name) const {
   return mNodeRegistry.at(name)->inputs();
 }

-void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType datatype, int device) {
+void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType datatype, DeviceIdx_t device) {
   // Backend
   // TODO: add Backend attribute to Operator
   setBackend(backend, device);
@@ -319,7 +319,7 @@ void Aidge::GraphView::_forwardDims(std::set<std::shared_ptr<Node>> listNodes) {
   }
 }

-void Aidge::GraphView::setBackend(const std::string &backend, int device) {
+void Aidge::GraphView::setBackend(const std::string &backend, DeviceIdx_t device) {
   for (auto node : getNodes()) {
     node->getOperator()->setBackend(backend, device);
   }
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index 0ac6d5f53..f09d8eb83 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -12,6 +12,8 @@
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Cast.hpp"

+const std::string Aidge::Cast_Op::Type = "Cast";
+
 void Aidge::Cast_Op::forward() {
     if (mImpl) {
         mImpl->forward();
diff --git a/src/operator/Move.cpp b/src/operator/Move.cpp
index d828e994d..d8776e32f 100644
--- a/src/operator/Move.cpp
+++ b/src/operator/Move.cpp
@@ -12,6 +12,8 @@
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Move.hpp"

+const std::string Aidge::Move_Op::Type = "Move";
+
 void Aidge::Move_Op::forward() {
     if (mImpl) {
         mImpl->forward();
-- 
GitLab