Skip to content
Snippets Groups Projects
Commit ccb95742 authored by Vincent Templier's avatar Vincent Templier
Browse files

Change ref_cpp reference to cpu in Core library

parent d53be88f
No related branches found
No related tags found
No related merge requests found
...@@ -139,7 +139,7 @@ class Tensor : public Data, ...@@ -139,7 +139,7 @@ class Tensor : public Data,
: Data(Type), : Data(Type),
mDims({SIZE_0}), mDims({SIZE_0}),
mDataType(NativeType<T>::type), mDataType(NativeType<T>::type),
mImpl(Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this)), mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
mSize(SIZE_0), mSize(SIZE_0),
mSizeM1(SIZE_0) { mSizeM1(SIZE_0) {
mImpl->copy(&arr.data[0], SIZE_0); mImpl->copy(&arr.data[0], SIZE_0);
...@@ -149,7 +149,7 @@ class Tensor : public Data, ...@@ -149,7 +149,7 @@ class Tensor : public Data,
constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) { constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
resize({SIZE_0}); resize({SIZE_0});
if (!mImpl) { if (!mImpl) {
mImpl = Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this); mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
} }
mImpl->copy(&arr.data[0], SIZE_0); mImpl->copy(&arr.data[0], SIZE_0);
return *this; return *this;
...@@ -159,7 +159,7 @@ class Tensor : public Data, ...@@ -159,7 +159,7 @@ class Tensor : public Data,
: Data(Type), : Data(Type),
mDims({SIZE_0, SIZE_1}), mDims({SIZE_0, SIZE_1}),
mDataType(NativeType<T>::type), mDataType(NativeType<T>::type),
mImpl(Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this)), mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
mSize(SIZE_0 * SIZE_1), mSize(SIZE_0 * SIZE_1),
mSizeM1(SIZE_1) { mSizeM1(SIZE_1) {
mImpl->copy(&arr.data[0][0], SIZE_0 * SIZE_1); mImpl->copy(&arr.data[0][0], SIZE_0 * SIZE_1);
...@@ -169,7 +169,7 @@ class Tensor : public Data, ...@@ -169,7 +169,7 @@ class Tensor : public Data,
constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) { constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
resize({SIZE_0, SIZE_1}); resize({SIZE_0, SIZE_1});
if (!mImpl) { if (!mImpl) {
mImpl = Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this); mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
} }
mImpl->copy(&arr.data[0][0], SIZE_0 * SIZE_1); mImpl->copy(&arr.data[0][0], SIZE_0 * SIZE_1);
return *this; return *this;
...@@ -179,7 +179,7 @@ class Tensor : public Data, ...@@ -179,7 +179,7 @@ class Tensor : public Data,
: Data(Type), : Data(Type),
mDims({SIZE_0, SIZE_1, SIZE_2}), mDims({SIZE_0, SIZE_1, SIZE_2}),
mDataType(NativeType<T>::type), mDataType(NativeType<T>::type),
mImpl(Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this)), mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
mSize(SIZE_0 * SIZE_1 * SIZE_2), mSize(SIZE_0 * SIZE_1 * SIZE_2),
mSizeM1(SIZE_1 * SIZE_2) { mSizeM1(SIZE_1 * SIZE_2) {
mImpl->copy(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2); mImpl->copy(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
...@@ -189,7 +189,7 @@ class Tensor : public Data, ...@@ -189,7 +189,7 @@ class Tensor : public Data,
constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) { constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
resize({SIZE_0, SIZE_1, SIZE_2}); resize({SIZE_0, SIZE_1, SIZE_2});
if (!mImpl) { if (!mImpl) {
mImpl = Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this); mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
} }
mImpl->copy(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2); mImpl->copy(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
return *this; return *this;
...@@ -199,7 +199,7 @@ class Tensor : public Data, ...@@ -199,7 +199,7 @@ class Tensor : public Data,
: Data(Type), : Data(Type),
mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}), mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
mDataType(NativeType<T>::type), mDataType(NativeType<T>::type),
mImpl(Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this)), mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3), mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3),
mSizeM1(SIZE_1 * SIZE_2 * SIZE_3) { mSizeM1(SIZE_1 * SIZE_2 * SIZE_3) {
mImpl->copy(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3); mImpl->copy(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
...@@ -209,7 +209,7 @@ class Tensor : public Data, ...@@ -209,7 +209,7 @@ class Tensor : public Data,
constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) { constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3}); resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
if (!mImpl) { if (!mImpl) {
mImpl = Registrar<Tensor>::create({"ref_cpp", NativeType<T>::type})(*this); mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
} }
mImpl->copy(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3); mImpl->copy(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
return *this; return *this;
......
...@@ -39,8 +39,8 @@ void addCtor(py::class_<Tensor, ...@@ -39,8 +39,8 @@ void addCtor(py::class_<Tensor,
newTensor->resize(dims); newTensor->resize(dims);
// TODO : Find a better way to choose backend // TODO : Find a better way to choose backend
std::set<std::string> availableBackends = Tensor::getAvailableBackends(); std::set<std::string> availableBackends = Tensor::getAvailableBackends();
if (availableBackends.find("ref_cpp") != availableBackends.end()){ if (availableBackends.find("cpu") != availableBackends.end()){
newTensor->setBackend("ref_cpp"); newTensor->setBackend("cpu");
newTensor->getImpl()->setRawPtr(static_cast<T*>(info.ptr)); newTensor->getImpl()->setRawPtr(static_cast<T*>(info.ptr));
}else{ }else{
printf("Warning : Could not use aidge_ref_cpp backend, verify you have `import aidge_ref_cpp`\n"); printf("Warning : Could not use aidge_ref_cpp backend, verify you have `import aidge_ref_cpp`\n");
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment