Compare revisions: eclipse/aidge/aidge_core
Commits on Source (34), showing 616 additions and 67 deletions
# Version 0.4.0 (December 2024)
# Version 0.2.1 (May 14, 2024)
* rework export mechanism
* replace `Operator::computeOutputDims()` with `Operator::forwardDims()`
* automatic docstring decorators for Python
* add implementations for Operators that only perform data/format manipulation
@@ -20,6 +20,7 @@
#include "aidge/data/half.hpp"
#include "aidge/utils/Attributes.hpp"
#include "aidge/utils/ErrorHandling.hpp"
namespace Aidge {
enum class DataType {
@@ -91,7 +92,19 @@ DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataForm
class Data {
public:
Data() = delete;
Data(Data&& other) = default;
Data(const Data& other) = default;
Data(const std::string& type): mType(type) {};
Data& operator=(const Data& other) {
    AIDGE_ASSERT(other.mType == mType, "Cannot copy-assign a Data object of a different type.");
    return *this;
};
Data& operator=(Data&& other) {
    AIDGE_ASSERT(other.mType == mType, "Cannot move-assign a Data object of a different type.");
    return *this;
};
constexpr const std::string& type() const {
return mType;
}
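These guarded assignments make Data type-stable: an object can only be assigned from another Data of the same concrete type. A minimal sketch of the intent (Tensor being the Data subclass this MR exercises throughout):

// Both operands carry mType == "Tensor", so the assert passes; assigning
// across different Data subclasses would now abort with the message above.
Aidge::Tensor a(1.0f);
Aidge::Tensor b(2.0f);
Aidge::Data& ref = a;
ref = b; // Data-level assignment: type check only, nothing is copied here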
@@ -23,6 +23,8 @@
#include <type_traits> // std::is_arithmetic
#include <vector>
#include <fmt/core.h>
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
@@ -212,14 +214,13 @@ class Tensor : public Data,
/**
* @brief Copy dimensions, datatype and data from another Tensor.
* If current Tensor already has an implementation, data is copied to the
* existing implementation. Tensor backend/device remain untouched.
* If current Tensor does not have an implementation, only a shallow copy
* is performed and the Tensor will share data with t.
* Tensor backend/device are also copied, and only a shallow copy
* is performed for data. The implementation is shared with the original Tensor.
* @param other other Tensor object.
* @return Tensor&
*/
Tensor &operator=(const Tensor& other);
Tensor &operator=(const Tensor& other) = default;
Tensor &operator=(Tensor&& other) = default;
template <typename T>
constexpr Tensor &operator=(Vector<T> &&arr) {
@@ -273,6 +274,17 @@ class Tensor : public Data,
* @return Tensor
*/
Tensor operator+(const Tensor& other) const;
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
Tensor operator+(T val) const { return *this + Tensor(val); }
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
friend Tensor operator+(T val, const Tensor& other) { return other + val; }
Tensor& operator+=(const Tensor& other);
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
Tensor& operator+=(T val) {return *this += Tensor(val); }
/**
* @brief Element-wise subtraction operation for two ``Tensor``s.
@@ -284,6 +296,17 @@ class Tensor : public Data,
* @return Tensor
*/
Tensor operator-(const Tensor& other) const;
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
inline Tensor operator-(T val) const { return *this - Tensor(val); }
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
friend inline Tensor operator-(T val, const Tensor& other) { return other - val; }
Tensor& operator-=(const Tensor& other);
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
inline Tensor& operator-=(T val) {return *this -= Tensor(val); }
/**
* @brief Element-wise multiplication operation for two ``Tensor``s.
@@ -295,6 +318,17 @@ class Tensor : public Data,
* @return Tensor
*/
Tensor operator*(const Tensor& other) const;
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
inline Tensor operator*(T val) const { return *this * Tensor(val); }
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
friend inline Tensor operator*(T val, const Tensor& other) { return other * val; }
Tensor& operator*=(const Tensor& other);
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
inline Tensor& operator*=(T val) {return *this *= Tensor(val); }
/**
* @brief Element-wise division operation for two ``Tensor``s.
@@ -306,6 +340,14 @@ class Tensor : public Data,
* @return Tensor
*/
Tensor operator/(const Tensor& other) const;
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
inline Tensor operator/(T val) const { return *this / Tensor(val); }
Tensor& operator/=(const Tensor& other);
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
inline Tensor& operator/=(T val) {return *this /= Tensor(val); }
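Taken together, these templates let plain numbers appear on either side of an arithmetic expression by wrapping the scalar in a Tensor. A usage sketch (assumes the arithmetic kernels of a registered backend, e.g. aidge_backend_cpu, are available):

Tensor t = Array1D<float, 3>{{1.0f, 2.0f, 3.0f}};
t.setBackend("cpu");
Tensor u = t + 1.0f; // scalar wrapped as Tensor(val), then Tensor + Tensor
Tensor v = 2.0f * t; // left-hand scalar handled by the friend overload
t /= 2.0f;           // in-place form forwards to operator/=(const Tensor&)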
/**
* @brief Element-wise sqrt operation for Tensor.
@@ -332,14 +374,17 @@ public:
* @brief Perform a deep copy of the tensor.
*/
Tensor clone() const {
Tensor newTensor(*this);
if (!newTensor.isContiguous()) {
newTensor.makeContiguous();
}
else {
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
newTensor.setImpl(newImpl);
Tensor newTensor(*this); // shallow copy
// handle deepcopy of implementation if any
if (newTensor.hasImpl()) {
if (!newTensor.isContiguous()) {
newTensor.makeContiguous();
}
else {
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
newTensor.setImpl(newImpl);
}
}
return newTensor;
}
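The split between assignment and clone() is worth spelling out: with the defaulted operator=, copies now share the implementation, while clone() still guarantees an independent deep copy. A sketch, mirroring the new unit test below:

Tensor a = Array1D<int, 2>{{1, 2}};
Tensor shallow = a;         // defaulted copy: implementation shared with a
Tensor deep    = a.clone(); // fresh TensorImpl on the same backend/device
a.set<int>(0, 10);
// shallow.get<int>(0) == 10 (aliases a's storage)
// deep.get<int>(0)    == 1  (owns its own storage)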
@@ -925,4 +970,17 @@ private:
};
} // namespace Aidge
template<>
struct fmt::formatter<Aidge::Tensor> {
template<typename ParseContext>
inline constexpr auto parse(ParseContext& ctx) {
return ctx.begin();
}
template<typename FormatContext>
inline auto format(Aidge::Tensor const& t, FormatContext& ctx) const {
return fmt::format_to(ctx.out(), "{}", t.toString());
}
};
#endif /* AIDGE_CORE_DATA_TENSOR_H_ */
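With the formatter specialization above, a Tensor can be passed straight to fmt, which is what the updated approxEq diagnostic below relies on. A minimal sketch:

Aidge::Tensor t = Aidge::Array1D<int, 2>{{3, 4}};
fmt::print("{}\n", t); // delegates to t.toString()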
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_FLATTEN_H_
#define AIDGE_CORE_OPERATOR_FLATTEN_H_
#include <memory>
#include <vector>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
class Flatten_OpImpl : public OperatorImpl {
public:
Flatten_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
void forward() override;
};
enum class FlattenAttr { Axis };
class Flatten_Op : public OperatorTensor,
public Registrable<Flatten_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Flatten_Op&)>> {
public:
static const std::string Type;
private:
using Attributes_ = StaticAttributes<FlattenAttr,
std::int64_t>;
template <FlattenAttr e> using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
public:
Flatten_Op() = delete;
Flatten_Op(std::int64_t axis = 1);
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Flatten_Op(const Flatten_Op& op);
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Flatten_Op
*/
std::shared_ptr<Operator> clone() const override;
bool forwardDims(bool allowDataDependency = false) override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
std::set<std::string> getAvailableBackends() const override;
std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
inline std::int64_t& axis() const { return mAttributes->template getAttr<FlattenAttr::Axis>(); }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
std::shared_ptr<Node> Flatten(std::int64_t axis = 1,
const std::string &name = "");
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::FlattenAttr>::data[] = { "axis" };
}
#endif /* AIDGE_CORE_OPERATOR_FLATTEN_H_ */
@@ -23,6 +23,9 @@
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
//Caution: This operator is now deprecated and should no longer be used.
//It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
namespace Aidge {
enum class ScalingAttr {
ScalingFactor, QuantizedNbBits, IsOutputUnsigned
@@ -25,6 +25,12 @@
namespace Aidge {
class Slice_OpImpl : public OperatorImpl {
public:
Slice_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
void forward() override;
};
enum class SliceAttr { Starts, Ends, Axes, Steps };
class Slice_Op
@@ -32,13 +38,13 @@ class Slice_Op
public Registrable<Slice_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Slice_Op &)>> {
public:
static const std::string Type;
private:
using Attributes_ = StaticAttributes<SliceAttr,
std::vector<std::int64_t>,
std::vector<std::int64_t>,
std::vector<std::int8_t>,
std::vector<std::int64_t>>;
private:
template <SliceAttr e> using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
@@ -50,7 +56,6 @@ public:
const std::vector<std::int8_t>& axes,
const std::vector<std::int64_t>& steps);
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
* input tensors (the new operator has no input associated).
@@ -58,7 +63,6 @@
*/
Slice_Op(const Slice_Op &op);
public:
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Slice_Op
@@ -103,4 +107,4 @@ template <>
const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "starts", "ends", "axes", "steps" };
}
#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
#endif /* AIDGE_CORE_OPERATOR_SLICE_H_ */
@@ -44,6 +44,7 @@ bool approxEq(const Tensor& t1, const Tensor& t2, float relative = 1e-5f, float
}
for(size_t i = 0; i < t1.size(); ++i){
if (static_cast<float>(std::abs(t1.get<T1>(i) - t2.get<T2>(i))) > (absolute + (relative * static_cast<float>(std::abs(t2.get<T2>(i)))))){
fmt::print("t1:\n{}\nt2\n{}\nat index {} {} != {}", t1, t2, i, t1.get<T1>(i), t2.get<T1>(i));
return false;
}
}
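For reference, the predicate follows the usual allclose convention: element i passes when |t1[i] - t2[i]| <= absolute + relative * |t2[i]|, with t2 acting as the reference operand.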
@@ -37,6 +37,7 @@ void init_Operator(py::module& m){
py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
.def("__repr__", &Operator::repr)
.def("backend", &Operator::backend)
.def("clone", &Operator::clone)
.def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput, py::const_), py::arg("outputIdx"), py::arg("data"))
.def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
.def("get_raw_output", &Operator::getRawOutput, py::arg("outputIdx"))
@@ -44,7 +44,24 @@ Tensor Tensor::operator+(const Tensor& other) const {
add_.setBackend(mImpl->backend());
add_.forward();
return add_.getOutput(0)->clone();
return *add_.getOutput(0);
}
Tensor& Tensor::operator+=(const Tensor& other) {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
auto add_ = Add_Op();
const auto thisPtr = std::make_shared<Tensor>(*this);
add_.associateInput(0, thisPtr);
add_.associateInput(1, std::make_shared<Tensor>(other));
add_.setOutput(0, thisPtr);
add_.setDataType(dataType());
add_.setDataFormat(dataFormat());
add_.setBackend(mImpl->backend());
add_.forward();
return *this;
}
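Note how the in-place variants differ from the binary ones: the output is aliased to the left-hand side via setOutput(0, thisPtr), so the kernel writes its result directly into this Tensor's existing storage instead of materializing a new output. The operator-=, operator*= and operator/= definitions below follow the same pattern.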
@@ -61,7 +78,25 @@ Tensor Tensor::operator-(const Tensor& other) const {
sub_.setBackend(mImpl->backend());
sub_.forward();
return sub_.getOutput(0)->clone();
return *sub_.getOutput(0);
}
Tensor& Tensor::operator-=(const Tensor& other) {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
auto sub_ = Sub_Op();
const auto thisPtr = std::make_shared<Tensor>(*this);
sub_.associateInput(0, thisPtr);
sub_.associateInput(1, std::make_shared<Tensor>(other));
sub_.setOutput(0, thisPtr);
sub_.setDataType(dataType());
sub_.setDataFormat(dataFormat());
sub_.setBackend(mImpl->backend());
sub_.forward();
return *this;
}
@@ -81,6 +116,24 @@ Tensor Tensor::operator*(const Tensor& other) const {
return mul_.getOutput(0)->clone();
}
Tensor& Tensor::operator*=(const Tensor& other) {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
auto mul_ = Mul_Op();
const auto thisPtr = std::make_shared<Tensor>(*this);
mul_.associateInput(0, thisPtr);
mul_.associateInput(1, std::make_shared<Tensor>(other));
mul_.setOutput(0, thisPtr);
mul_.setDataType(dataType());
mul_.setDataFormat(dataFormat());
mul_.setBackend(mImpl->backend());
mul_.forward();
return *this;
}
Tensor Tensor::operator/(const Tensor& other) const {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
@@ -98,6 +151,24 @@ Tensor Tensor::operator/(const Tensor& other) const {
return div_.getOutput(0)->clone();
}
Tensor& Tensor::operator/=(const Tensor& other) {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
auto div_ = Div_Op();
const auto thisPtr = std::make_shared<Tensor>(*this);
div_.associateInput(0, thisPtr);
div_.associateInput(1, std::make_shared<Tensor>(other));
div_.setOutput(0, thisPtr);
div_.setDataType(dataType());
div_.setDataFormat(dataFormat());
div_.setBackend(mImpl->backend());
div_.forward();
return *this;
}
Tensor Tensor::sqrt() const {
AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
auto sqrt_ = Sqrt_Op();
@@ -135,24 +206,24 @@ Tensor Tensor::mean() const {
return mean_.getOutput(0)->clone();
}
Tensor& Tensor::operator=(const Tensor& other) {
if (this == &other) {
return *this;
}
resize(other.dims(), other.strides());
setDataType(other.dataType(), false); // do not convert existing data
if (other.hasImpl()) {
if (hasImpl()) {
copyFrom(other);
} else {
// Perform a shallow copy only
setImpl(other.mImpl, other.mImplOffset);
}
} else {
setImpl(nullptr);
}
return *this;
}
// Tensor& Tensor::operator=(const Tensor& other) {
// if (this == &other) {
// return *this;
// }
// resize(other.dims(), other.strides());
// setDataType(other.dataType(), false); // do not convert existing data
// if (other.hasImpl()) {
// if (hasImpl()) {
// // copyFrom(other);
// // } else {
// // Perform a shallow copy only
// setImpl(other.mImpl, other.mImplOffset);
// }
// } else {
// setImpl(nullptr);
// }
// return *this;
// }
void Tensor::setBackend(const std::string &name, DeviceIdx_t device, bool copyFrom) {
@@ -407,18 +407,18 @@ void Aidge::Node::resetConnections(bool includeLearnableParam) {
///////////////////////////////////////////////////////
Aidge::NodePtr Aidge::Node::cloneSharedOperators() const {
return std::make_shared<Node>(mOperator, mAttrs);
return std::make_shared<Node>(mOperator, std::make_shared<DynamicAttributes>(*mAttrs));
}
Aidge::NodePtr Aidge::Node::cloneSharedProducers() const {
std::shared_ptr<Operator> op =
(mOperator->type() == Producer_Op::Type) ? mOperator : mOperator->clone();
return std::make_shared<Node>(op, mAttrs);
return std::make_shared<Node>(op, std::make_shared<DynamicAttributes>(*mAttrs));
}
Aidge::NodePtr Aidge::Node::clone() const {
return std::make_shared<Node>(mOperator->clone(), mAttrs);
return std::make_shared<Node>(mOperator->clone(), std::make_shared<DynamicAttributes>(*mAttrs));
}
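All three clone variants now duplicate the node's DynamicAttributes instead of sharing the pointer, so mutating an attribute on a cloned node (its name, for instance) no longer leaks back into the original node.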
std::set<Aidge::NodePtr> Aidge::Node::getNodeDelta(int delta, std::set<Aidge::NodePtr> nodeSee) {
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/operator/Flatten.hpp"
#include <cstddef> // std::size_t
#include <cstdint> // std::int64_t
#include <memory>
#include <stdexcept> // std::runtime_error
#include <string>
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
void Aidge::Flatten_OpImpl::forward() {
const Flatten_Op& op = dynamic_cast<const Flatten_Op&>(mOp);
op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
}
//////////////////////////////////////////////////
const std::string Aidge::Flatten_Op::Type = "Flatten";
Aidge::Flatten_Op::Flatten_Op(const std::int64_t axis)
: OperatorTensor(Type, {InputCategory::Data}, 1),
mAttributes(std::make_shared<Attributes_>(
attr<FlattenAttr::Axis>(axis)))
{
mImpl = std::make_shared<Flatten_OpImpl>(*this);
}
Aidge::Flatten_Op::Flatten_Op(const Aidge::Flatten_Op& op)
: OperatorTensor(op),
mAttributes(op.mAttributes)
{
if (!op.backend().empty()) {
SET_IMPL_MACRO(Flatten_Op, *this, op.backend());
}
else {
mImpl = std::make_shared<Flatten_OpImpl>(*this);
}
}
std::shared_ptr<Aidge::Operator> Aidge::Flatten_Op::clone() const {
return std::make_shared<Flatten_Op>(*this);
}
bool Aidge::Flatten_Op::forwardDims(bool /*allowDataDependency*/) {
if (inputsAssociated()) {
const auto inDims(getInput(0)->dims());
const auto firstDim = std::accumulate(inDims.begin(), inDims.begin() + axis(), 1ULL, std::multiplies<DimSize_t>());
mOutputs[0]->resize({firstDim, getInput(0)->size() / firstDim});
return true;
}
return false;
}
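Concretely, for the {1, 2, 3, 5} input used in the new unit tests below: axis = 2 gives firstDim = 1 * 2 = 2 and an output of {2, 15}; axis = 0 gives {1, 30}; axis = 4 gives {30, 1}. This matches the ONNX Flatten convention of collapsing every dimension before the axis into the first output dimension and everything else into the second.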
void Aidge::Flatten_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
if (Registrar<Flatten_Op>::exists({name})){
SET_IMPL_MACRO(Flatten_Op, *this, name);
}
else {
mImpl = std::make_shared<Flatten_OpImpl>(*this);
}
mOutputs[0]->setBackend(name, device);
}
std::set<std::string> Aidge::Flatten_Op::getAvailableBackends() const {
return Registrar<Flatten_Op>::getKeys();
}
//////////////////////////////////////////////
std::shared_ptr<Aidge::Node> Aidge::Flatten(std::int64_t axis,
const std::string &name)
{
return std::make_shared<Node>(std::make_shared<Flatten_Op>(axis), name);
}
\ No newline at end of file
@@ -96,7 +96,9 @@ void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceId
for(auto i: mGraph->inputNodes()){
auto op_i = std::static_pointer_cast<OperatorTensor>(i->getOperator());
for(std::size_t in_idx=0; in_idx < op_i->nbInputs(); ++in_idx){
op_i->getInput(in_idx)->setBackend(name, device);
if (op_i->getInput(in_idx)) {
op_i->getInput(in_idx)->setBackend(name, device);
}
}
}
for(auto o: mGraph->outputNodes()){
@@ -33,7 +33,7 @@ void Aidge::Pop_OpImpl::forward() {
const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
assert(op.getInput(0) && "missing input #0");
*op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()});
*op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()}).clone();
}
//////////////////////////////////////////////////////////
@@ -44,7 +44,7 @@ Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, boo
attr<ProdAttr::Constant>(constant)))
{
mOutputs[0] = tensor; // copy the pointer of the Tensor
if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
if (mOutputs[0] && mOutputs[0]->hasImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
}
else {
@@ -61,7 +61,7 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
: OperatorTensor(op),
mAttributes(op.mAttributes)
{
mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0)));
*mOutputs[0] = *(op.getOutput(0));
if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
}
@@ -71,7 +71,12 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
}
std::shared_ptr<Aidge::Operator> Aidge::Producer_Op::clone() const {
return std::make_shared<Producer_Op>(*this);
// mOutputs[0] cannot be nullptr because of the OperatorTensor constructor
std::shared_ptr<Tensor> newTensor = std::make_shared<Tensor>(mOutputs[0]->clone());
std::shared_ptr<Producer_Op> newOp = std::make_shared<Producer_Op>(newTensor, constant());
return newOp;
}
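Cloning a Producer now deep-copies the wrapped tensor through Tensor::clone() before building the new operator, so a cloned graph owns its own copy of the weights instead of aliasing the original Producer's storage; this is consistent with the shallow-by-default Tensor assignment introduced above.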
void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
@@ -18,6 +18,10 @@
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
//Caution: This operator is now deprecated and should no longer be used.
//It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
const std::string Aidge::Scaling_Op::Type = "Scaling";
Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
@@ -26,12 +30,15 @@ Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOu
attr<ScalingAttr::ScalingFactor>(scalingFactor),
attr<ScalingAttr::QuantizedNbBits>(nbBits),
attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
{}
{
Log::warn("Caution: The [Scaling] operator is now deprecated and should no longer be used.\nIt has been replaced by the MetaOperator [Quantizer] (located directly in aidge_quantization).");
}
Aidge::Scaling_Op::Scaling_Op(const Aidge::Scaling_Op& op)
: OperatorTensor(op),
mAttributes(op.mAttributes)
{
Log::warn("Caution: The [Scaling] operator is now deprecated and should no longer be used. \nIt has been replaced by the MetaOperator [Quantizer] (located directly in aidge_quantization).");
if (op.mImpl){
SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
} else {
@@ -24,6 +24,9 @@
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
#include "aidge/data/Data.hpp"
#include "aidge/utils/Registrar.hpp"
const std::string Aidge::Slice_Op::Type = "Slice";
@@ -43,17 +46,18 @@ Aidge::Slice_Op::Slice_Op(const std::vector<std::int64_t>& starts,
attr<SliceAttr::Ends>(ends),
attr<SliceAttr::Axes>(axes),
attr<SliceAttr::Steps>(steps)))
{}
{
mImpl = std::make_shared<Slice_OpImpl>(*this);
}
Aidge::Slice_Op::Slice_Op(const Aidge::Slice_Op &op)
: OperatorTensor(op),
mAttributes(op.mAttributes)
Aidge::Slice_Op::Slice_Op(const Aidge::Slice_Op& op)
: OperatorTensor(op), mAttributes(op.mAttributes)
{
if (!op.backend().empty()) {
SET_IMPL_MACRO(Slice_Op, *this, op.backend());
}
else {
mImpl = nullptr;
mImpl = std::make_shared<Slice_OpImpl>(*this);
}
}
@@ -61,6 +65,82 @@ std::shared_ptr<Aidge::Operator> Aidge::Slice_Op::clone() const {
return std::make_shared<Slice_Op>(*this);
}
// Helper function to calculate the linear index for multi-dimensional data
size_t getLinearIndex(const std::vector<size_t>& dims, const std::vector<size_t>& indices) {
size_t linearIndex = 0;
size_t stride = 1;
for (int i = dims.size() - 1; i >= 0; --i) {
linearIndex += indices[i] * stride;
stride *= dims[i];
}
return linearIndex;
}
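A quick check of the row-major arithmetic: for dims = {2, 3} and indices = {1, 2}, the walk from the last dimension gives 2 * 1 + 1 * 3 = 5, i.e. the element at row 1, column 2 of a 2x3 tensor.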
void Aidge::Slice_OpImpl::forward() {
const Slice_Op& op = dynamic_cast<const Slice_Op&>(mOp);
if (!op.getInput(0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", op.Type);
}
AIDGE_ASSERT((op.axes().size() == op.ends().size()) &&
(op.axes().size() == op.starts().size()),
"Starts, Ends and Axes arguments should be the same size.");
const std::vector<size_t> inputDims = op.getInput(0)->dims();
std::vector<size_t> indices(inputDims.size(), 0); // Initialize indices for each dimension
// Create an array of ranges for each axis
std::vector<std::vector<int>> ranges(inputDims.size());
// Generate ranges dynamically for each dimension
for (size_t axisIdx = 0; axisIdx < inputDims.size(); ++axisIdx) {
const auto axesIt = std::find(op.axes().begin(), op.axes().end(), static_cast<std::int8_t>(axisIdx));
if (axesIt != op.axes().end()) {
// This axis is being sliced; Starts/Ends/Steps are indexed by the position
// of the axis inside the Axes attribute, not by the axis value itself
const size_t attrIdx = static_cast<size_t>(std::distance(op.axes().begin(), axesIt));
int start = op.starts()[attrIdx];
int end = op.ends()[attrIdx];
int step = op.steps()[attrIdx];
start = start >= 0 ? start: start + inputDims[axisIdx];
end = end >= 0 ? end: end + inputDims[axisIdx];
// Generate the range of indices for this axis
for (int idx = start; (step > 0) ? (idx < end) : (idx > end); idx += step) {
ranges[axisIdx].push_back(idx);
}
} else {
// This axis is not being sliced: keep its full range so every index is visited
for (int idx = 0; idx < static_cast<int>(inputDims[axisIdx]); ++idx) {
ranges[axisIdx].push_back(idx);
}
}
}
// Use iterative stack to handle all dimensions dynamically
std::vector<size_t> currentIndex(inputDims.size(), 0); // Track current index in each dimension
std::vector<size_t> stackPointer(inputDims.size(), 0); // Pointers to ranges for each dimension
size_t dim = 0; // Start at the first dimension
size_t offset = 0; // Offset in the output tensor
while (dim < inputDims.size()) {
if (stackPointer[dim] < ranges[dim].size()) {
// Set the current index for this dimension
currentIndex[dim] = ranges[dim][stackPointer[dim]];
stackPointer[dim]++;
if (dim == inputDims.size() - 1) {
// We've reached the last dimension, process this index combination
size_t linearIndex = getLinearIndex(inputDims, currentIndex);
op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(linearIndex), 1, offset);
offset++;
} else {
// Move to the next dimension
dim++;
}
} else {
// Reset this dimension and move back to the previous one
stackPointer[dim] = 0;
dim--;
}
}
}
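To trace the loop on a small case: slicing a 2x3 input with starts = {1}, ends = {3}, axes = {1}, steps = {1} builds ranges = {{0, 1}, {1, 2}} (axis 0 is not sliced and keeps its full range; axis 1 keeps columns 1 and 2). The odometer then visits (0,1), (0,2), (1,1), (1,2), copying linear indices 1, 2, 4 and 5 into output offsets 0 through 3, i.e. the last two columns of the input.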
bool Aidge::Slice_Op::dimsForwarded() const {
if ((getInput(1) && !getInput(1)->undefined())
@@ -191,7 +271,7 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
}
}
const std::size_t sliceLength = static_cast<std::size_t>(std::ceil((static_cast<float>(end) - static_cast<float>(start)) / static_cast<float>(step)));
const std::size_t sliceLength = static_cast<std::size_t>(std::ceil((static_cast<float>(end) - static_cast<float>(start)) / static_cast<float>((step))));
// Check if slice length is valid
if (sliceLength > getInput(0)->dims()[axis])
{
@@ -208,7 +288,12 @@
}
void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
SET_IMPL_MACRO(Slice_Op, *this, name);
if (Registrar<Slice_Op>::exists({name})){
SET_IMPL_MACRO(Slice_Op, *this, name);
}
else {
mImpl = std::make_shared<Slice_OpImpl>(*this);
}
mOutputs[0]->setBackend(name, device);
}
@@ -120,7 +120,27 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
));
}
SECTION("copy constructor / copy assignment operator") {
Tensor t1 = Array1D<int, 2>{{1, 2}};
Tensor t2, t3;
REQUIRE_NOTHROW(t3 = t1);
REQUIRE(t1 == t3);
REQUIRE_NOTHROW(t2 = Tensor(t1));
REQUIRE(t1 == t2);
t1.set<int>(0, 10);
// check copies are shallow
REQUIRE(t2.get<int>(0) == 10);
REQUIRE(t3.get<int>(0) == 10);
// set already existing Tensor
Tensor t4 = Array1D<int, 1>{{11}};
REQUIRE_NOTHROW(t4 = t1);
REQUIRE(t4 == t1);
REQUIRE(t4.size() == 2);
}
SECTION("move constructor / move assignment operator") {
@@ -816,7 +816,7 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
}
}
TEST_CASE("[GraphView] clone") {
TEST_CASE("[GraphView] clone", "[GraphView][Core][Clone]") {
auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
auto conv1 = Conv(3, 32, {3, 3}, "conv1");
auto conv2 = Conv(32, 64, {3, 3}, "conv2");
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Flatten.hpp"
#include <memory>
using namespace Aidge;
TEST_CASE("[cpu/operator] Flatten(forward)") {
std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int32_t,1,2,3,5> {
{
{
{
{ 1, 2, 3, 4, 5},
{ 6, 7, 8, 9, 10},
{11, 12, 13, 14, 15}
},
{
{16, 17, 18, 19, 20},
{21, 22, 23, 24, 25},
{26, 27, 28, 29, 30}
}
}
}
});
SECTION("Default (axis = 1)") {
std::shared_ptr<Node> myFlatten = Flatten();
auto op = std::static_pointer_cast<OperatorTensor>(myFlatten -> getOperator());
op->associateInput(0, input);
op->setDataType(DataType::Int32);
op->setBackend("cpu");
myFlatten->forward();
auto expectedOutput = input->clone();
expectedOutput.resize({1, input->size()});
REQUIRE(op->getOutput(0)->dims() == expectedOutput.dims());
REQUIRE(*(op->getOutput(0)) == expectedOutput);
}
SECTION("Axis = 0") {
std::shared_ptr<Node> myFlatten = Flatten(0);
auto op = std::static_pointer_cast<OperatorTensor>(myFlatten -> getOperator());
op->associateInput(0, input);
op->setDataType(DataType::Int32);
op->setBackend("cpu");
myFlatten->forward();
auto expectedOutput = input->clone();
expectedOutput.resize({1, input->size()});
REQUIRE(op->getOutput(0)->dims() == expectedOutput.dims());
REQUIRE(*(op->getOutput(0)) == expectedOutput);
}
SECTION("Axis = 2") {
std::shared_ptr<Node> myFlatten = Flatten(2);
auto op = std::static_pointer_cast<OperatorTensor>(myFlatten -> getOperator());
op->associateInput(0, input);
op->setDataType(DataType::Int32);
op->setBackend("cpu");
myFlatten->forward();
auto expectedOutput = input->clone();
expectedOutput.resize({2, input->size() / 2});
REQUIRE(op->getOutput(0)->dims() == expectedOutput.dims());
REQUIRE(*(op->getOutput(0)) == expectedOutput);
}
SECTION("Axis = 4") {
std::shared_ptr<Node> myFlatten = Flatten(4);
auto op = std::static_pointer_cast<OperatorTensor>(myFlatten -> getOperator());
op->associateInput(0, input);
op->setDataType(DataType::Int32);
op->setBackend("cpu");
myFlatten->forward();
auto expectedOutput = input->clone();
expectedOutput.resize({input->size(), 1});
REQUIRE(op->getOutput(0)->dims() == expectedOutput.dims());
REQUIRE(*(op->getOutput(0)) == expectedOutput);
}
}
\ No newline at end of file
@@ -16,21 +16,22 @@
#include "aidge/operator/Pop.hpp"
#include "aidge/utils/TensorUtils.hpp"
using Aidge::Tensor;
using Aidge::Pop;
using namespace Aidge;
TEST_CASE("[cpu/operator] Pop(forward)", "[Pop][CPU]") {
std::shared_ptr<Tensor> pop1 = std::make_shared<Tensor>(Aidge::Array1D<int,3>{{4,5,6}});
std::shared_ptr<Tensor> pop2 = std::make_shared<Tensor>(Aidge::Array1D<int,3>{{1,2,3}});
std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Aidge::Array2D<int,2,3>{{{1,2,3}, {4,5,6}}});
std::shared_ptr<Tensor> pop1 = std::make_shared<Tensor>(Array1D<int,3>{{4,5,6}});
std::shared_ptr<Tensor> pop2 = std::make_shared<Tensor>(Array1D<int,3>{{1,2,3}});
std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,2,3>{{{1,2,3}, {4,5,6}}});
auto pop = Aidge::Pop("pop");
pop->getOperator()->associateInput(0, input);
pop->getOperator()->setBackend("cpu");
pop->getOperator()->setDataType(Aidge::DataType::Int32);
auto pop = Pop("pop");
std::shared_ptr<Pop_Op> op = std::static_pointer_cast<Pop_Op>(pop->getOperator());
op->associateInput(0, input);
op->setBackend("cpu");
op->setDataType(DataType::Int32);
op->forwardDims();
REQUIRE_NOTHROW(pop->forward());
REQUIRE(*std::static_pointer_cast<Aidge::OperatorTensor>(pop->getOperator())->getOutput(0) == *pop2);
REQUIRE(*op->getOutput(0) == *pop2);
REQUIRE_NOTHROW(pop->forward());
REQUIRE(*std::static_pointer_cast<Aidge::OperatorTensor>(pop->getOperator())->getOutput(0) == *pop1);
REQUIRE(*op->getOutput(0) == *pop1);
}