Compare revisions
Commits on Source (23)
# Version 0.4.0 (December 2024)
# Version 0.2.1 (May 14, 2024)
* rework export mechanism
* replace `Operator::computeOutputDims()` with `Operator::forwardDims()` (migration sketch below)
* automatic docstring decorators for python
* add implementations of Operators that only perform data/format manipulation
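The `computeOutputDims()` → `forwardDims()` rename is source-breaking for downstream code that triggers shape inference. A minimal migration sketch; the helper name is illustrative, not part of the diff:

```cpp
#include <memory>

#include "aidge/operator/OperatorTensor.hpp"

// Hypothetical helper: propagate shapes through a single operator.
void inferShapes(const std::shared_ptr<Aidge::OperatorTensor>& op) {
    // pre-rename call site: op->computeOutputDims();
    op->forwardDims();
}
```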
......
......@@ -20,6 +20,7 @@
#include "aidge/data/half.hpp"
#include "aidge/utils/Attributes.hpp"
#include "aidge/utils/ErrorHandling.hpp"
namespace Aidge {
enum class DataType {
......@@ -91,7 +92,19 @@ DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataForm
class Data {
public:
Data() = delete;
Data(Data&& other) = default;
Data(const Data& other) = default;
Data(const std::string& type): mType(type) {};
Data& operator=(const Data& other) {
AIDGE_ASSERT(other.mType == mType, "Cannot copy a Data object of a different type.");
return *this;
};
Data& operator=(Data&& other) {
AIDGE_ASSERT(other.mType == mType, "Cannot move a Data object of a different type.");
return *this;
};
constexpr const std::string& type() const {
return mType;
}
......
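The new `Data` assignment operators only guard the base-class slice: they assert that both sides report the same `type()` and copy nothing else, leaving the payload to derived classes such as `Tensor`. A sketch of the contract, assuming the usual `Tensor` construction from the core tests:

```cpp
#include "aidge/data/Tensor.hpp"

void dataAssignDemo() {
    Aidge::Tensor a = Aidge::Array1D<int, 2>{{1, 2}};
    Aidge::Tensor b;
    b = a;  // fine: both sides report the same Data type()
    // Assigning between Data objects whose type() strings differ would
    // trip the AIDGE_ASSERT added above instead of silently slicing.
}
```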
......@@ -23,6 +23,8 @@
#include <type_traits> // std::is_arithmetic
#include <vector>
#include <fmt/core.h>
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
......@@ -212,14 +214,13 @@ class Tensor : public Data,
/**
* @brief Copy dimensions, datatype and data from another Tensor.
* If current Tensor already has an implementation, data is copied to the
* existing implementation. Tensor backend/device remain untouched.
* If current Tensor does not have an implementation, only a shallow copy
* is performed and the Tensor will share data with t.
* Tensor backend/device are also copied and only a shallow copy
* is performed for data. Implementation will be shared with original Tensor.
* @param other other Tensor object.
* @return Tensor&
*/
Tensor &operator=(const Tensor& other);
Tensor &operator=(const Tensor& other) = default;
Tensor &operator=(Tensor&& other) = default;
template <typename T>
constexpr Tensor &operator=(Vector<T> &&arr) {
......@@ -273,6 +274,17 @@ class Tensor : public Data,
* @return Tensor
*/
Tensor operator+(const Tensor& other) const;
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
Tensor operator+(T val) const { return *this + Tensor(val); }
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
friend Tensor operator+(T val, const Tensor& other) { return other + val; }
Tensor& operator+=(const Tensor& other);
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
Tensor& operator+=(T val) { return *this += Tensor(val); }
/**
* @brief Element-wise subtraction operation for two ``Tensor``s.
......@@ -284,6 +296,17 @@ class Tensor : public Data,
* @return Tensor
*/
Tensor operator-(const Tensor& other) const;
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
inline Tensor operator-(T val) const { return *this - Tensor(val); }
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
friend inline Tensor operator-(T val, const Tensor& other) { return Tensor(val) - other; }
Tensor& operator-=(const Tensor& other);
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
inline Tensor& operator-=(T val) { return *this -= Tensor(val); }
/**
* @brief Element-wise multiplication operation for two ``Tensor``s.
......@@ -295,6 +318,17 @@ class Tensor : public Data,
* @return Tensor
*/
Tensor operator*(const Tensor& other) const;
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
inline Tensor operator*(T val) const { return *this * Tensor(val); }
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
friend inline Tensor operator*(T val, const Tensor& other) { return other * val; }
Tensor& operator*=(const Tensor& other);
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
inline Tensor& operator*=(T val) { return *this *= Tensor(val); }
/**
* @brief Element-wise division operation for two ``Tensor``s.
......@@ -306,6 +340,14 @@ class Tensor : public Data,
* @return Tensor
*/
Tensor operator/(const Tensor& other) const;
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
inline Tensor operator/(T val) const { return *this / Tensor(val); }
Tensor& operator/=(const Tensor& other);
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
inline Tensor& operator/=(T val) { return *this /= Tensor(val); }
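These templates wrap the scalar in a one-element `Tensor` and forward to the tensor-tensor overloads, so an arithmetic scalar works on either side of `+`, `-` and `*` (division only takes the scalar on the right). A usage sketch, assuming a loaded backend such as aidge_backend_cpu and that the scalar-built temporary lands on a compatible backend and data type:

```cpp
void scalarOpsDemo() {
    Aidge::Tensor t = Aidge::Array1D<float, 3>{{1.f, 2.f, 3.f}};
    t.setBackend("cpu");  // assumes aidge_backend_cpu is loaded

    Aidge::Tensor a = t + 1.f;  // member overload: tensor op scalar
    Aidge::Tensor b = 2.f * t;  // friend overload: scalar op tensor
    t += 0.5f;                  // forwards to operator+=(const Tensor&)
    t /= 2.f;                   // in-place division by a scalar
}
```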
/**
* @brief Element-wise sqrt operation for Tensor.
......@@ -332,14 +374,17 @@ public:
* @brief Perform a deep copy of the tensor.
*/
Tensor clone() const {
Tensor newTensor(*this);
if (!newTensor.isContiguous()) {
newTensor.makeContiguous();
}
else {
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
newTensor.setImpl(newImpl);
Tensor newTensor(*this); // shallow copy
// handle deepcopy of implementation if any
if (newTensor.hasImpl()) {
if (!newTensor.isContiguous()) {
newTensor.makeContiguous();
}
else {
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
newTensor.setImpl(newImpl);
}
}
return newTensor;
}
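With copy construction now shallow, `clone()` is the way to get an independent tensor. The distinction in a sketch, mirroring the copy-semantics test added further below:

```cpp
void cloneDemo() {
    Aidge::Tensor t = Aidge::Array1D<int, 2>{{1, 2}};

    Aidge::Tensor shallow = t;       // shares the implementation with t
    Aidge::Tensor deep = t.clone();  // fresh implementation, data copied

    t.set<int>(0, 10);
    // shallow.get<int>(0) == 10, while deep.get<int>(0) == 1
}
```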
......@@ -925,4 +970,17 @@ private:
};
} // namespace Aidge
template<>
struct fmt::formatter<Aidge::Tensor> {
template<typename ParseContext>
inline constexpr auto parse(ParseContext& ctx) {
return ctx.begin();
}
template<typename FormatContext>
inline auto format(Aidge::Tensor const& t, FormatContext& ctx) const {
return fmt::format_to(ctx.out(), "{}", t.toString());
}
};
#endif /* AIDGE_CORE_DATA_TENSOR_H_ */
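With this specialization, a `Tensor` can be handed straight to fmt; the instrumented `approxEq` below relies on it. A minimal sketch:

```cpp
#include <string>

#include <fmt/core.h>

#include "aidge/data/Tensor.hpp"

void printDemo() {
    Aidge::Tensor t = Aidge::Array1D<int, 3>{{1, 2, 3}};
    fmt::print("t = {}\n", t);                   // renders via t.toString()
    const std::string s = fmt::format("{}", t);  // same path when formatting
}
```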
......@@ -23,6 +23,9 @@
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
// Caution: This operator is now deprecated and should no longer be used.
// It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
namespace Aidge {
enum class ScalingAttr {
ScalingFactor, QuantizedNbBits, IsOutputUnsigned
......
......@@ -44,6 +44,7 @@ bool approxEq(const Tensor& t1, const Tensor& t2, float relative = 1e-5f, float
}
for(size_t i = 0; i < t1.size(); ++i){
if (static_cast<float>(std::abs(t1.get<T1>(i) - t2.get<T2>(i))) > (absolute + (relative * static_cast<float>(std::abs(t2.get<T2>(i)))))){
fmt::print("t1:\n{}\nt2\n{}\nat index {} {} != {}", t1, t2, i, t1.get<T1>(i), t2.get<T1>(i));
return false;
}
}
......
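`approxEq` passes when every element satisfies `|t1_i - t2_i| <= absolute + relative * |t2_i|`; with the added print, the first failing element now dumps both tensors and the offending index. A sketch with the default tolerances:

```cpp
#include "aidge/utils/TensorUtils.hpp"

void approxEqDemo() {
    Aidge::Tensor a = Aidge::Array1D<float, 3>{{1.0f, 2.0f, 3.0f}};
    Aidge::Tensor b = Aidge::Array1D<float, 3>{{1.0f, 2.0f, 3.1f}};
    // Fails at index 2 (0.1 exceeds the tolerance) and prints both tensors.
    const bool ok = Aidge::approxEq<float>(a, b);
    (void)ok;
}
```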
......@@ -37,6 +37,7 @@ void init_Operator(py::module& m){
py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
.def("__repr__", &Operator::repr)
.def("backend", &Operator::backend)
.def("clone", &Operator::clone)
.def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput, py::const_), py::arg("outputIdx"), py::arg("data"))
.def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
.def("get_raw_output", &Operator::getRawOutput, py::arg("outputIdx"))
......
......@@ -44,7 +44,24 @@ Tensor Tensor::operator+(const Tensor& other) const {
add_.setBackend(mImpl->backend());
add_.forward();
// using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
return add_.getOutput(0)->clone();
return *add_.getOutput(0);
}
Tensor& Tensor::operator+=(const Tensor& other) {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
auto add_ = Add_Op();
const auto thisPtr = std::make_shared<Tensor>(*this);
add_.associateInput(0, thisPtr);
add_.associateInput(1, std::make_shared<Tensor>(other));
add_.setOutput(0, thisPtr);
add_.setDataType(dataType());
add_.setDataFormat(dataFormat());
add_.setBackend(mImpl->backend());
add_.forward();
return *this;
}
......@@ -61,7 +78,25 @@ Tensor Tensor::operator-(const Tensor& other) const {
sub_.setBackend(mImpl->backend());
sub_.forward();
// using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
return sub_.getOutput(0)->clone();
return *sub_.getOutput(0);
}
Tensor& Tensor::operator-=(const Tensor& other) {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
auto sub_ = Sub_Op();
const auto thisPtr = std::make_shared<Tensor>(*this);
sub_.associateInput(0, thisPtr);
sub_.associateInput(1, std::make_shared<Tensor>(other));
sub_.setOutput(0, thisPtr);
sub_.setDataType(dataType());
sub_.setDataFormat(dataFormat());
sub_.setBackend(mImpl->backend());
sub_.forward();
// using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
return *this;
}
......@@ -81,6 +116,24 @@ Tensor Tensor::operator*(const Tensor& other) const {
return mul_.getOutput(0)->clone();
}
Tensor& Tensor::operator*=(const Tensor& other) {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
auto mul_ = Mul_Op();
const auto thisPtr = std::make_shared<Tensor>(*this);
mul_.associateInput(0, thisPtr);
mul_.associateInput(1, std::make_shared<Tensor>(other));
mul_.setOutput(0, thisPtr);
mul_.setDataType(dataType());
mul_.setDataFormat(dataFormat());
mul_.setBackend(mImpl->backend());
mul_.forward();
// using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
return *this;
}
Tensor Tensor::operator/(const Tensor& other) const {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
......@@ -98,6 +151,24 @@ Tensor Tensor::operator/(const Tensor& other) const {
return div_.getOutput(0)->clone();
}
Tensor& Tensor::operator/=(const Tensor& other) {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
auto div_ = Div_Op();
const auto thisPtr = std::make_shared<Tensor>(*this);
div_.associateInput(0, thisPtr);
div_.associateInput(1, std::make_shared<Tensor>(other));
div_.setOutput(0, thisPtr);
div_.setDataType(dataType());
div_.setDataFormat(dataFormat());
div_.setBackend(mImpl->backend());
div_.forward();
// using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
return *this;
}
Tensor Tensor::sqrt() const {
AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
auto sqrt_ = Sqrt_Op();
......@@ -135,24 +206,24 @@ Tensor Tensor::mean() const {
return mean_.getOutput(0)->clone();
}
Tensor& Tensor::operator=(const Tensor& other) {
if (this == &other) {
return *this;
}
resize(other.dims(), other.strides());
setDataType(other.dataType(), false); // do not convert existing data
if (other.hasImpl()) {
if (hasImpl()) {
copyFrom(other);
} else {
// Perform a shallow copy only
setImpl(other.mImpl, other.mImplOffset);
}
} else {
setImpl(nullptr);
}
return *this;
}
// Tensor& Tensor::operator=(const Tensor& other) {
// if (this == &other) {
// return *this;
// }
// resize(other.dims(), other.strides());
// setDataType(other.dataType(), false); // do not convert existing data
// if (other.hasImpl()) {
// if (hasImpl()) {
// // copyFrom(other);
// // } else {
// // Perform a shallow copy only
// setImpl(other.mImpl, other.mImplOffset);
// }
// } else {
// setImpl(nullptr);
// }
// return *this;
// }
void Tensor::setBackend(const std::string &name, DeviceIdx_t device, bool copyFrom) {
......
......@@ -96,7 +96,9 @@ void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceId
for(auto i: mGraph->inputNodes()){
auto op_i = std::static_pointer_cast<OperatorTensor>(i->getOperator());
for(std::size_t in_idx=0; in_idx < op_i->nbInputs(); ++in_idx){
op_i->getInput(in_idx)->setBackend(name, device);
if (op_i->getInput(in_idx)) {
op_i->getInput(in_idx)->setBackend(name, device);
}
}
}
for(auto o: mGraph->outputNodes()){
......
......@@ -33,7 +33,7 @@ void Aidge::Pop_OpImpl::forward() {
const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
assert(op.getInput(0) && "missing input #0");
*op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()});
*op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()}).clone();
}
//////////////////////////////////////////////////////////
......
......@@ -44,7 +44,7 @@ Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, boo
attr<ProdAttr::Constant>(constant)))
{
mOutputs[0] = tensor; // copy the pointer of the Tensor
if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
if (mOutputs[0] && mOutputs[0]->hasImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
}
else {
......@@ -61,7 +61,7 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
: OperatorTensor(op),
mAttributes(op.mAttributes)
{
mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0)));
*mOutputs[0] = *(op.getOutput(0));
if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
}
......@@ -71,7 +71,12 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
}
std::shared_ptr<Aidge::Operator> Aidge::Producer_Op::clone() const {
return std::make_shared<Producer_Op>(*this);
// mOutput cannot be nullptr because of OperatorTensor constructor
std::shared_ptr<Tensor> newTensor = std::make_shared<Tensor>(mOutputs[0]->clone());
std::shared_ptr<Producer_Op> newOp = std::make_shared<Producer_Op>(newTensor, constant());
return newOp;
}
void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
......
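Because `Tensor` copies are now shallow, `Producer_Op::clone()` deep-copies its output tensor explicitly so the clone stops aliasing the original producer's data. A sketch, with constructor arguments as in this diff:

```cpp
#include <memory>

#include "aidge/operator/Producer.hpp"

void producerCloneDemo() {
    auto w = std::make_shared<Aidge::Tensor>(Aidge::Array1D<int, 2>{{1, 2}});
    Aidge::Producer_Op prod(w, /*constant=*/false);

    auto copy = std::static_pointer_cast<Aidge::Producer_Op>(prod.clone());
    w->set<int>(0, 42);
    // prod's output now reads 42; copy->getOutput(0) still holds 1.
}
```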
......@@ -18,6 +18,10 @@
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
// Caution: This operator is now deprecated and should no longer be used.
// It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
const std::string Aidge::Scaling_Op::Type = "Scaling";
Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
......@@ -26,12 +30,15 @@ Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOu
attr<ScalingAttr::ScalingFactor>(scalingFactor),
attr<ScalingAttr::QuantizedNbBits>(nbBits),
attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
{}
{
Log::warn("Caution: The [Scaling] operator is now deprecated and should no longer be used.\nIt has been replaced by the MetaOperator [Quantizer] (located directly in aidge_quantization).");
}
Aidge::Scaling_Op::Scaling_Op(const Aidge::Scaling_Op& op)
: OperatorTensor(op),
mAttributes(op.mAttributes)
{
Log::warn("Caution: The [Scaling] operator is now deprecated and should no longer be used. \nIt has been replaced by the MetaOperator [Quantizer] (located directly in aidge_quantization).");
if (op.mImpl){
SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
} else {
......
......@@ -120,7 +120,27 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
));
}
SECTION("copy constructor / copy assignment operator") {
Tensor t1 = Array1D<int, 2>{{1, 2}};
Tensor t2, t3;
REQUIRE_NOTHROW(t3 = t1);
REQUIRE(t1 == t3);
REQUIRE_NOTHROW(t2 = Tensor(t1));
REQUIRE(t1 == t2);
t1.set<int>(0, 10);
// check copies are shallow
REQUIRE(t2.get<int>(0) == 10);
REQUIRE(t3.get<int>(0) == 10);
// set already existing Tensor
Tensor t4 = Array1D<int, 1>{{11}};
REQUIRE_NOTHROW(t4 = t1);
REQUIRE(t4 == t1);
REQUIRE(t4.size() == 2);
}
SECTION("move constructor / move assignment operator") {
......
......@@ -816,7 +816,7 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
}
}
TEST_CASE("[GraphView] clone") {
TEST_CASE("[GraphView] clone", "[GraphView][Core][Clone]") {
auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
auto conv1 = Conv(3, 32, {3, 3}, "conv1");
auto conv2 = Conv(32, 64, {3, 3}, "conv2");
......
......@@ -16,21 +16,22 @@
#include "aidge/operator/Pop.hpp"
#include "aidge/utils/TensorUtils.hpp"
using Aidge::Tensor;
using Aidge::Pop;
using namespace Aidge;
TEST_CASE("[cpu/operator] Pop(forward)", "[Pop][CPU]") {
std::shared_ptr<Tensor> pop1 = std::make_shared<Tensor>(Aidge::Array1D<int,3>{{4,5,6}});
std::shared_ptr<Tensor> pop2 = std::make_shared<Tensor>(Aidge::Array1D<int,3>{{1,2,3}});
std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Aidge::Array2D<int,2,3>{{{1,2,3}, {4,5,6}}});
std::shared_ptr<Tensor> pop1 = std::make_shared<Tensor>(Array1D<int,3>{{4,5,6}});
std::shared_ptr<Tensor> pop2 = std::make_shared<Tensor>(Array1D<int,3>{{1,2,3}});
std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,2,3>{{{1,2,3}, {4,5,6}}});
auto pop = Aidge::Pop("pop");
pop->getOperator()->associateInput(0, input);
pop->getOperator()->setBackend("cpu");
pop->getOperator()->setDataType(Aidge::DataType::Int32);
auto pop = Pop("pop");
std::shared_ptr<Pop_Op> op = std::static_pointer_cast<Pop_Op>(pop->getOperator());
op->associateInput(0, input);
op->setBackend("cpu");
op->setDataType(DataType::Int32);
op->forwardDims();
REQUIRE_NOTHROW(pop->forward());
REQUIRE(*std::static_pointer_cast<Aidge::OperatorTensor>(pop->getOperator())->getOutput(0) == *pop2);
REQUIRE(*op->getOutput(0) == *pop2);
REQUIRE_NOTHROW(pop->forward());
REQUIRE(*std::static_pointer_cast<Aidge::OperatorTensor>(pop->getOperator())->getOutput(0) == *pop1);
REQUIRE(*op->getOutput(0) == *pop1);
}
0.3.0
0.4.0