Skip to content
Snippets Groups Projects
Commit 49f9606c authored by Maxence Naud's avatar Maxence Naud
Browse files

Change required dependencies includes

parent afb3f869
No related branches found
No related tags found
2 merge requests!318[Upd] release version 0.5.0,!306ENHANCE: split DataType, DataFormat, EnumString from other files
......@@ -12,13 +12,16 @@
#ifndef AIDGE_BACKEND_OPERATORIMPL_H_
#define AIDGE_BACKEND_OPERATORIMPL_H_
#include <functional> // std::function
#include <memory>
#include <string>
#include <vector>
#include <functional>
#include <utility> // std::pair
#include "aidge/utils/Types.h"
#include "aidge/utils/DynamicAttributes.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/DataFormat.hpp"
#include "aidge/data/DataType.hpp"
#include "aidge/data/Elts.hpp"
#include "aidge/scheduler/ProdConso.hpp"
......
......@@ -19,7 +19,7 @@
#include <vector>
#include <string>
#include "aidge/data/Data.hpp"
#include "aidge/data/DataType.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
......
......@@ -13,6 +13,7 @@
#define AIDGE_CPU_DATA_TENSORIMPL_H_
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/data/DataType.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
......
......@@ -14,6 +14,7 @@
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
#include "aidge/utils/logger/EnumString.hpp"
namespace Aidge {
/**
......
......@@ -27,6 +27,8 @@
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/DataType.hpp"
#include "aidge/data/DataFormat.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp"
......@@ -85,11 +87,11 @@ class Tensor : public Data,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
Tensor(T val)
: Data(Type),
mDataType(NativeType<VT>::type),
mDataType(NativeType_v<VT>),
mDataFormat(DataFormat::Default),
mDims({}),
mStrides({1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<VT>})(0, std::vector<std::size_t>())),
mSize(1)
{
*static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
......@@ -114,10 +116,10 @@ class Tensor : public Data,
template <typename T>
Tensor(Vector<T> &&arr)
: Data(Type),
mDataType(NativeType<T>::type),
mDataType(NativeType_v<T>),
mDims({arr.data.size()}),
mStrides({1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {arr.data.size()})),
mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {arr.data.size()})),
mSize(arr.data.size())
{
mImpl->copyFromHost(&arr.data[0], arr.data.size());
......@@ -131,11 +133,11 @@ class Tensor : public Data,
template <typename T, std::size_t SIZE_0>
constexpr Tensor(Array1D<T, SIZE_0> &&arr)
: Data(Type),
mDataType(NativeType<T>::type),
mDataType(NativeType_v<T>),
mDataFormat(DataFormat::Default),
mDims({SIZE_0}),
mStrides({1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0})),
mSize(SIZE_0)
{
mImpl->copyFromHost(&arr.data[0], SIZE_0);
......@@ -150,11 +152,11 @@ class Tensor : public Data,
/**
 * @brief Construct a Tensor from a 2-D compile-time array, copying its contents.
 * Shape becomes {SIZE_0, SIZE_1} with row-major strides {SIZE_1, 1}; the backing
 * implementation is looked up in the registrar under the "cpu" backend.
 * NOTE(review): the scraped diff carried both the pre- and post-commit initializer
 * lines; only the post-commit `NativeType_v<T>` form is kept here.
 * @tparam T element type; the tensor's DataType is derived via NativeType_v<T>.
 * @param arr 2-D array whose data is copied into the new implementation.
 */
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
constexpr Tensor(Array2D<T, SIZE_0, SIZE_1> &&arr)
    : Data(Type),
      mDataType(NativeType_v<T>),
      mDataFormat(DataFormat::Default),
      mDims({SIZE_0, SIZE_1}),
      mStrides({SIZE_1, 1}),
      // assumes a "cpu" Tensor implementation is registered — TODO confirm at call sites
      mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1})),
      mSize(SIZE_0 * SIZE_1) {
    mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
}
......@@ -169,11 +171,11 @@ class Tensor : public Data,
/**
 * @brief Construct a Tensor from a 3-D compile-time array, copying its contents.
 * Shape becomes {SIZE_0, SIZE_1, SIZE_2} with row-major strides
 * {SIZE_1*SIZE_2, SIZE_2, 1}; the backing implementation is looked up in the
 * registrar under the "cpu" backend.
 * NOTE(review): the scraped diff carried both the pre- and post-commit initializer
 * lines; only the post-commit `NativeType_v<T>` form is kept here.
 * @tparam T element type; the tensor's DataType is derived via NativeType_v<T>.
 * @param arr 3-D array whose data is copied into the new implementation.
 */
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
constexpr Tensor(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr)
    : Data(Type),
      mDataType(NativeType_v<T>),
      mDataFormat(DataFormat::Default),
      mDims({SIZE_0, SIZE_1, SIZE_2}),
      mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}),
      // assumes a "cpu" Tensor implementation is registered — TODO confirm at call sites
      mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1, SIZE_2})),
      mSize(SIZE_0 * SIZE_1 * SIZE_2) {
    mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
}
......@@ -189,11 +191,11 @@ class Tensor : public Data,
/**
 * @brief Construct a Tensor from a 4-D compile-time array, copying its contents.
 * Shape becomes {SIZE_0, SIZE_1, SIZE_2, SIZE_3} with row-major strides
 * {SIZE_1*SIZE_2*SIZE_3, SIZE_2*SIZE_3, SIZE_3, 1}; the backing implementation
 * is looked up in the registrar under the "cpu" backend.
 * NOTE(review): the scraped diff carried both the pre- and post-commit initializer
 * lines; only the post-commit `NativeType_v<T>` form is kept here.
 * @tparam T element type; the tensor's DataType is derived via NativeType_v<T>.
 * @param arr 4-D array whose data is copied into the new implementation.
 */
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
constexpr Tensor(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr)
    : Data(Type),
      mDataType(NativeType_v<T>),
      mDataFormat(DataFormat::Default),
      mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
      mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}),
      // assumes a "cpu" Tensor implementation is registered — TODO confirm at call sites
      mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
      mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3) {
    mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
}
......@@ -615,7 +617,7 @@ public:
template <typename expectedType>
const expectedType& get(std::size_t idx) const {
AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "Tensor::get<>({}): wrong data type, expected {}, got {}", idx, mDataType, NativeType<expectedType>::type);
AIDGE_ASSERT(NativeType_v<expectedType> == mDataType, "Tensor::get<>({}): wrong data type, expected {}, got {}", idx, mDataType, NativeType_v<expectedType>);
AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "Tensor::get<>({}): can only be used for backends providing a valid host pointer.", idx);
AIDGE_ASSERT(idx < mSize, "Tensor::get<>({}): idx {} out of range, tensor size {}", idx, mSize);
return *reinterpret_cast<expectedType *>(mImpl->hostPtr(mImplOffset + idx));
......@@ -628,7 +630,7 @@ public:
template <typename expectedType>
void set(std::size_t idx, expectedType value){
AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
AIDGE_ASSERT(NativeType_v<expectedType> == mDataType, "wrong data type");
AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "get() can only be used for backends providing a valid host pointer");
AIDGE_ASSERT(idx < mSize, "idx out of range");
expectedType* dataPtr = static_cast<expectedType*>(mImpl->hostPtr(mImplOffset + idx));
......
......@@ -19,9 +19,9 @@
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Attributes.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/logger/EnumString.hpp"
namespace Aidge {
......
......@@ -13,6 +13,8 @@
#include <pybind11/stl.h>
#include "aidge/data/Data.hpp"
#include "aidge/data/DataType.hpp"
#include "aidge/data/DataFormat.hpp"
namespace py = pybind11;
namespace Aidge {
......@@ -66,7 +68,7 @@ void init_Data(py::module& m){
m.def("format_as", (const char* (*)(DataType)) &format_as, py::arg("dt"));
m.def("format_as", (const char* (*)(DataFormat)) &format_as, py::arg("df"));
m.def("get_data_format_transpose", &getDataFormatTranspose, py::arg("src"), py::arg("dst"));
m.def("get_data_format_transpose", &getDataFormatTranspose, py::arg("src"), py::arg("dst"));
}
}
......@@ -20,7 +20,7 @@
#include <fmt/format.h>
#include <fmt/ranges.h>
#include "aidge/data/Data.hpp" // Aidge::isDataTypeFloatingPoint
#include "aidge/data/DataType.hpp" // Aidge::isFloatingPoint
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Node.hpp"
......@@ -38,7 +38,7 @@ Aidge::OperatorStats::~OperatorStats() = default;
std::size_t Aidge::OperatorStats::getNbArithmIntOps() const {
const auto opTensor = dynamic_cast<const OperatorTensor*>(&mOp);
if (opTensor) {
if (!isDataTypeFloatingPoint(opTensor->getOutput(0)->dataType())) {
if (!isFloatingPoint(opTensor->getOutput(0)->dataType())) {
return getNbArithmOps();
}
}
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment