Commit bf014a73 authored by Maxence Naud

Merge branch 'ui_parameters' into 'dev'

Improve UI for Operator/Node/GraphView/Tensor

See merge request !145
parents 1eccdb62 a09e120a
Related merge requests: !152 Update Aidge export to take a graph view has an argument instead of a..., !145 Improve UI for Operator/Node/GraphView/Tensor
Pipeline #49604 passed
Showing 329 additions and 171 deletions
from aidge_core.aidge_export_aidge.utils import operator_register
from aidge_core.aidge_export_aidge import ROOT_EXPORT
from aidge_core import DataType, ExportNode, generate_file, generate_str
from aidge_core import dtype, ExportNode, generate_file, generate_str
import numpy as np
from pathlib import Path
# Convert aidge datatype to C++ type
datatype_converter = {
DataType.Float64 : "double",
DataType.Float32 : "float",
DataType.Float16 : "half_float::half",
DataType.Int8 : "int8_t",
DataType.Int16 : "int16_t",
DataType.Int32 : "int32_t",
DataType.Int64 : "int64_t",
DataType.UInt8 : "uint8_t",
DataType.UInt16 : "uint16_t",
DataType.UInt32 : "uint32_t",
DataType.UInt64 : "uint64_t"
dtype.float64 : "double",
dtype.float32 : "float",
dtype.float16 : "half_float::half",
dtype.int8 : "int8_t",
dtype.int16 : "int16_t",
dtype.int32 : "int32_t",
dtype.int64 : "int64_t",
dtype.uint8 : "uint8_t",
dtype.uint16 : "uint16_t",
dtype.uint32 : "uint32_t",
dtype.uint64 : "uint64_t"
}
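For reference, a minimal sketch of how this mapping might be consumed when emitting C++ source for a tensor. The helper below is hypothetical; only the datatype_converter dict and the aidge_core dtype enum come from this commit.

    # Hypothetical helper built on top of datatype_converter (defined above).
    def cpp_type_for(aidge_dtype) -> str:
        try:
            return datatype_converter[aidge_dtype]
        except KeyError as err:
            raise ValueError(f"No C++ equivalent registered for {aidge_dtype}") from err

    # cpp_type_for(dtype.float32) -> "float"
    # cpp_type_for(dtype.int8)    -> "int8_t"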
......
......@@ -20,10 +20,7 @@ class ExportNode(ABC):
self.node = aidge_node
self.operator = aidge_node.get_operator()
self.name = self.node.name()
self.attributes = {} # Attributes are auto fetched from aidge operators
if isinstance(self.operator, aidge_core.Attributes):
for attr_name in self.operator.get_attrs_name():
self.attributes[attr_name] = self.operator.get_attr(attr_name)
self.attributes = self.operator.attr.dict() if self.operator.attr is not None else {} # Attributes are auto fetched from aidge operators
# rename is_leaf ?
self.is_last = len(self.node.get_children()) == 0
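With the new interface, an export node can pull every operator attribute in a single call instead of looping over get_attrs_name(). A rough usage sketch, assuming `node` is an existing aidge_core Node from a built graph:

    op = node.get_operator()
    # Attributes now live behind the `attr` namespace; dict() returns a plain
    # Python dict, and `attr` is None for operators without attributes.
    attributes = op.attr.dict() if op.attr is not None else {}
    for attr_name, attr_value in attributes.items():
        print(f"{attr_name} = {attr_value}")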
......
......@@ -30,42 +30,39 @@ class test_operator_binding(unittest.TestCase):
self.assertNotEqual(gop.name(), "")
def test_param_bool(self):
self.generic_operator.add_attr("bool", True)
self.assertEqual(self.generic_operator.has_attr("bool"), True)
self.assertEqual(self.generic_operator.get_attr("bool"), True)
self.assertEqual(self.generic_operator.get_attr_type("bool"), "bool")
self.assertEqual(self.generic_operator.get_attrs_name(), {"bool"})
self.generic_operator.del_attr("bool")
self.assertEqual(self.generic_operator.has_attr("bool"), False)
self.assertEqual(len(self.generic_operator.get_attrs_name()), 0)
self.generic_operator.attr.add_attr("bool", True)
self.assertEqual(self.generic_operator.attr.has_attr("bool"), True)
self.assertEqual(self.generic_operator.attr.get_attr("bool"), True)
self.generic_operator.attr.del_attr("bool")
self.assertEqual(self.generic_operator.attr.has_attr("bool"), False)
def test_param_int(self):
self.generic_operator.add_attr("int", 1)
self.assertEqual(self.generic_operator.get_attr("int"), 1)
self.generic_operator.attr.add_attr("int", 1)
self.assertEqual(self.generic_operator.attr.get_attr("int"), 1)
def test_param_float(self):
self.generic_operator.add_attr("float", 2.0)
self.assertEqual(self.generic_operator.get_attr("float"), 2.0)
self.generic_operator.attr.add_attr("float", 2.0)
self.assertEqual(self.generic_operator.attr.get_attr("float"), 2.0)
def test_param_str(self):
self.generic_operator.add_attr("str", "value")
self.assertEqual(self.generic_operator.get_attr("str"), "value")
self.generic_operator.attr.add_attr("str", "value")
self.assertEqual(self.generic_operator.attr.get_attr("str"), "value")
def test_param_l_int(self):
self.generic_operator.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
self.assertEqual(self.generic_operator.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
self.generic_operator.attr.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
self.assertEqual(self.generic_operator.attr.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
def test_param_l_bool(self):
self.generic_operator.add_attr("l_bool", [True, False, False, True])
self.assertEqual(self.generic_operator.get_attr("l_bool"), [True, False, False, True])
self.generic_operator.attr.add_attr("l_bool", [True, False, False, True])
self.assertEqual(self.generic_operator.attr.get_attr("l_bool"), [True, False, False, True])
def test_param_l_float(self):
self.generic_operator.add_attr("l_float", [2.0, 1.0])
self.assertEqual(self.generic_operator.get_attr("l_float"), [2.0, 1.0])
self.generic_operator.attr.add_attr("l_float", [2.0, 1.0])
self.assertEqual(self.generic_operator.attr.get_attr("l_float"), [2.0, 1.0])
def test_param_l_str(self):
self.generic_operator.add_attr("l_str", ["ok"])
self.assertEqual(self.generic_operator.get_attr("l_str"), ["ok"])
self.generic_operator.attr.add_attr("l_str", ["ok"])
self.assertEqual(self.generic_operator.attr.get_attr("l_str"), ["ok"])
def test_dynamicattribute_binding(self):
# Check original C++ attributes are bound
......@@ -76,20 +73,20 @@ class test_operator_binding(unittest.TestCase):
self.assertEqual(attrs.get_attr("b"), "test")
self.assertEqual(attrs.has_attr("c"), True)
self.assertEqual(attrs.get_attr("c"), [True, False, True])
self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c"})
self.assertEqual(attrs.dict().keys(), {"a", "b", "c"})
self.assertEqual(attrs.has_attr("d"), False)
# Add Python attributes
attrs.add_attr("d", 18.56)
self.assertEqual(attrs.get_attr("d"), 18.56)
self.assertEqual(attrs.has_attr("d"), True)
self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c", "d"})
self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "d"})
self.assertEqual(attrs.has_attr("e"), False)
# Check that added Python attribute is accessible in C++
# Return the value of an attribute named "d" of type float64 (double in C++)
self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 18.56)
attrs.set_attr("d", 23.89)
attrs.d = 23.89
self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
def test_forward_dims(self):
......@@ -129,18 +126,18 @@ class test_operator_binding(unittest.TestCase):
myVar = 2
myBool = True
# Test dynamic attribute set
gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", myVar=myVar).get_operator()
gop.myBool = myBool
gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", my_var=myVar).get_operator()
gop.attr.my_bool = myBool
# Test variable set by kwargs
self.assertEqual(gop.myVar, myVar)
self.assertEqual(gop.attr.my_var, myVar)
# Test set attr
self.assertEqual(gop.myBool, myBool)
self.assertEqual(gop.attr.my_bool, myBool)
# Test static attribute set !
prod = aidge_core.Producer([1]).get_operator()
self.assertEqual(prod.Constant, False)
prod.Constant = True # By default Constant is False
self.assertEqual(prod.Constant, True)
self.assertEqual(prod.attr.constant, False)
prod.attr.constant = True # By default Constant is False
self.assertEqual(prod.attr.constant, True)
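In practice the new attribute namespace reads like this; a short sketch mirroring the updated tests above:

    import aidge_core

    # Dynamic attributes sit under `.attr`, and kwargs are now snake_case.
    gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", my_var=2).get_operator()
    gop.attr.my_bool = True               # set an attribute directly
    assert gop.attr.my_var == 2
    assert gop.attr.has_attr("my_bool")

    # Static attributes of built-in operators follow the same pattern:
    prod = aidge_core.Producer([1]).get_operator()
    prod.attr.constant = True             # was `prod.Constant = True` before this change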
......
......@@ -29,7 +29,7 @@ class test_attributes(unittest.TestCase):
conv_op = aidge_core.Conv2D(in_channels , out_channels, k_dims).get_operator()
self.assertEqual(conv_op.in_channels(), in_channels)
self.assertEqual(conv_op.out_channels(), out_channels)
self.assertEqual(conv_op.get_attr("KernelDims"), k_dims)
self.assertEqual(conv_op.attr.get_attr("kernel_dims"), k_dims)
def test_fc(self):
in_channels = 4
......@@ -65,7 +65,7 @@ class test_attributes(unittest.TestCase):
def test_leaky_relu(self):
negative_slope = 0.25
leakyrelu_op = aidge_core.LeakyReLU(negative_slope).get_operator()
self.assertEqual(leakyrelu_op.get_attr("NegativeSlope"), negative_slope)
self.assertEqual(leakyrelu_op.attr.get_attr("negative_slope"), negative_slope)
if __name__ == '__main__':
unittest.main()
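Static operator attributes are now addressed with snake_case names through the same `.attr` namespace; a brief sketch based on the tests above (the channel and kernel values are arbitrary):

    import aidge_core

    conv_op = aidge_core.Conv2D(3, 4, [3, 3]).get_operator()
    print(conv_op.attr.get_attr("kernel_dims"))          # was get_attr("KernelDims")

    leakyrelu_op = aidge_core.LeakyReLU(0.25).get_operator()
    print(leakyrelu_op.attr.get_attr("negative_slope"))  # was get_attr("NegativeSlope")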
......@@ -42,7 +42,7 @@ class test_tensor(unittest.TestCase):
np_array = np.arange(9).reshape(1,1,3,3).astype(np.int32)
# Numpy -> Tensor
t = aidge_core.Tensor(np_array)
self.assertEqual(t.dtype(), aidge_core.DataType.Int32)
self.assertEqual(t.dtype(), aidge_core.dtype.int32)
for i_t, i_n in zip(t, np_array.flatten()):
self.assertTrue(i_t == i_n)
for i,j in zip(t.dims(), np_array.shape):
......@@ -62,7 +62,7 @@ class test_tensor(unittest.TestCase):
np_array = np.arange(9).reshape(1,1,3,3).astype(np.int64)
# Numpy -> Tensor
t = aidge_core.Tensor(np_array)
self.assertEqual(t.dtype(), aidge_core.DataType.Int64)
self.assertEqual(t.dtype(), aidge_core.dtype.int64)
for i_t, i_n in zip(t, np_array.flatten()):
self.assertTrue(i_t == i_n)
for i,j in zip(t.dims(), np_array.shape):
......@@ -73,7 +73,7 @@ class test_tensor(unittest.TestCase):
np_array = np.random.rand(1, 1, 3, 3).astype(np.float32)
# Numpy -> Tensor
t = aidge_core.Tensor(np_array)
self.assertEqual(t.dtype(), aidge_core.DataType.Float32)
self.assertEqual(t.dtype(), aidge_core.dtype.float32)
for i_t, i_n in zip(t, np_array.flatten()):
self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference
for i,j in zip(t.dims(), np_array.shape):
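The enum rename also shows up when round-tripping NumPy arrays; a sketch following the updated tensor tests:

    import numpy as np
    import aidge_core

    np_array = np.arange(9).reshape(1, 1, 3, 3).astype(np.float32)
    t = aidge_core.Tensor(np_array)

    # aidge_core.DataType.Float32 is now spelled aidge_core.dtype.float32
    assert t.dtype() == aidge_core.dtype.float32
    assert tuple(t.dims()) == np_array.shape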
......
......@@ -79,7 +79,7 @@ constexpr std::array<DataFormatTranspose, 7> DataFormatTransposeDict = {{
* Get the DataFormatTranspose array to transpose data from src to dst DataFormat.
* @param src Source DataFormat
* @param dst Destination DataFormat
* @return DataFormatTranspose Permutation array to achieve a transposition
* from src to dst DataFormat.
*/
constexpr inline DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataFormat& dst) {
......
......@@ -20,9 +20,18 @@
#include <utility>
#include <vector>
#ifdef PYBIND
#include <pybind11/pybind11.h>
#include <fmt/format.h>
#endif
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"
#ifdef PYBIND
namespace py = pybind11;
#endif
namespace Aidge {
enum class DataType;
......@@ -218,7 +227,7 @@ public:
* GraphView object's Nodes, by calling Node::forwardDims().
* This function verifies the following conditions:
* - Every node will forwardDims() regardless of if dims were previously forwarded or not;
* - forwardDims() calls are made in node dependencies order, because if dims have changed
at any point in the graph, it must be propagated correctly to all succeeding nodes;
* - It handles cyclic dependencies correctly (currently only induced by the Memorize_Op).
*/
......@@ -266,7 +275,7 @@ public:
* @brief Get the Nodes pointed to by the GraphView object.
* @return std::set<NodePtr>
*/
inline const std::set<NodePtr>& getNodes() const { return mNodes; }
inline const std::set<NodePtr>& getNodes() const noexcept { return mNodes; }
/**
* @brief Get the operator with the corresponding name if it is in the
......@@ -460,8 +469,8 @@ public:
* @return true replacement has been performed
* @return false no replacement has been performed
*/
static bool replace(const std::shared_ptr<GraphView>& oldG, const std::shared_ptr<GraphView>& newG);
static bool replace(const std::set<NodePtr>& oldNodes, const std::set<NodePtr>& newNodes);
static bool replace(const std::shared_ptr<GraphView>& oldG, const std::shared_ptr<GraphView>& newG);
/**
* @brief Clone the GraphView with shared Operators. It is a new GraphView, with cloned Nodes, but the new Nodes refer to the same Operators as the original ones.
......@@ -509,6 +518,11 @@ public:
*/
void updateInputsOutputs();
#ifdef PYBIND
std::string repr() const {
return fmt::format("GraphView(name='{}', Nodes: {} (inputs: {}, outputs: {}))", name(), mNodes.size(), mInputNodes.size(), mOutputNodes.size());
}
#endif
private:
///////////////////////////////////////////////////////
// TENSOR MANAGEMENT
......
......@@ -19,10 +19,19 @@
#include <vector>
#include <utility>
#ifdef PYBIND
#include <pybind11/pybind11.h>
#include <fmt/format.h>
#endif
#include "aidge/graph/Connector.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/utils/Types.h"
#ifdef PYBIND
namespace py = pybind11;
#endif
namespace Aidge {
using NodePtr = std::shared_ptr<Node>;
......@@ -423,6 +432,27 @@ public:
std::set<NodePtr> getNodeDelta(int delta,std::set<Aidge::NodePtr> nodeSee);
#ifdef PYBIND
std::string repr() const {
std::string nodeString{fmt::format("Node(name='{}', optype='{}'", name(), type())};
if (mParents.size() > 0) {
std::vector<std::int8_t> connectedParents(mParents.size(), 0);
for (std::size_t i = 0; i < nbInputs(); ++i) {
if (mParents[i])
connectedParents[i] = std::int8_t(1);
}
nodeString = fmt::format("{}, parents: {}", nodeString, connectedParents);
}
if (mChildren.size() > 0) {
std::vector<std::vector<std::int8_t>> connectedChildren{};
for (std::size_t i = 0; i < nbOutputs(); ++i) {
connectedChildren.push_back(std::vector<std::int8_t>(mChildren[i].size(), std::int8_t(1)));
}
nodeString = fmt::format("{}, children: {}", nodeString, connectedChildren);
}
return fmt::format("{})", nodeString);
}
#endif
private:
///////////////////////////////////////////////////////
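Assuming these repr() helpers back the Python bindings' __repr__ (the PYBIND guards suggest they are meant for that), printing a Node or a GraphView should now give a readable summary instead of a bare object address. The example below is purely illustrative; the graph name, node name and topology are made up:

    print(my_graph_view)
    # GraphView(name='my_graph', Nodes: 3 (inputs: 1, outputs: 1))
    print(my_node)
    # Node(name='conv1', optype='Conv', parents: [1, 0, 0], children: [[1]])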
......
......@@ -28,27 +28,31 @@ enum class AvgPoolingAttr { StrideDims, KernelDims };
template <DimIdx_t DIM>
class AvgPooling_Op : public OperatorTensor,
public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
public StaticAttributes<AvgPoolingAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>> {
public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)> {
public:
static const std::string Type;
AvgPooling_Op() = delete;
private:
using Attributes_ = StaticAttributes<AvgPoolingAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>>;
template <AvgPoolingAttr e>
using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
public:
AvgPooling_Op() = delete;
constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
: OperatorTensor(Type, {InputCategory::Data}, 1),
Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {}
mAttributes(std::make_shared<Attributes_>(
attr<AvgPoolingAttr::StrideDims>(stride_dims),
attr<AvgPoolingAttr::KernelDims>(kernel_dims)))
{}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
......@@ -76,6 +80,10 @@ public:
void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<AvgPoolingAttr::StrideDims>(); }
inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<AvgPoolingAttr::KernelDims>(); }
static const std::vector<std::string> getInputsName() {
return {"data_input"};
}
......@@ -101,8 +109,6 @@ inline std::shared_ptr<Node> AvgPooling(
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
return AvgPooling(to_array(kernel_dims), name, stride_dims);
}
} // namespace Aidge
extern template class Aidge::AvgPooling_Op<1>;
......@@ -112,8 +118,10 @@ extern template class Aidge::AvgPooling_Op<4>;
namespace {
template <>
const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
"KernelDims"};
const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
"StrideDims",
"KernelDims"
};
}
#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
......@@ -28,21 +28,31 @@ enum class BatchNormAttr { Epsilon, Momentum };
template <DimIdx_t DIM>
class BatchNorm_Op : public OperatorTensor,
public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
public StaticAttributes<BatchNormAttr, float, float> {
public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)> {
public:
static const std::string Type;
BatchNorm_Op() = delete;
private:
using Attributes_ = StaticAttributes<BatchNormAttr, float, float>;
template <BatchNormAttr e>
using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
public:
BatchNorm_Op() = delete;
constexpr BatchNorm_Op(float epsilon, float momentum)
: OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::Param, InputCategory::Param, InputCategory::Param}, 1),
Attributes_(attr<BatchNormAttr::Epsilon>(epsilon),
attr<BatchNormAttr::Momentum>(momentum)) {}
: OperatorTensor(Type,
{InputCategory::Data,
InputCategory::Param,
InputCategory::Param,
InputCategory::Param,
InputCategory::Param},
1),
mAttributes(std::make_shared<Attributes_>(
attr<BatchNormAttr::Epsilon>(epsilon),
attr<BatchNormAttr::Momentum>(momentum))) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
......@@ -72,6 +82,10 @@ public:
void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
inline float& epsilon() const { return mAttributes->template getAttr<BatchNormAttr::Epsilon>(); }
inline float& momentum() const { return mAttributes->template getAttr<BatchNormAttr::Momentum>(); }
static const std::vector<std::string> getInputsName() {
return {"data_input", "scale", "shift", "mean", "variance"};
}
......
......@@ -19,8 +19,8 @@
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
......@@ -30,21 +30,31 @@ public:
void forward() override;
};
enum class CastAttr { TargetType };
class Cast_Op : public OperatorTensor,
public Registrable<Cast_Op, std::string, std::unique_ptr<OperatorImpl>(const Cast_Op&)> {
public:
static const std::string Type;
Cast_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {
mImpl = std::make_shared<Cast_OpImpl>(*this);
}
private:
using Attributes_ = StaticAttributes<CastAttr, DataType>;
template <CastAttr e>
using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
public:
Cast_Op() = delete;
Cast_Op(const DataType targetType);
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Cast_Op(const Cast_Op& op)
: OperatorTensor(op)
: OperatorTensor(op),
mAttributes(op.mAttributes)
{
if (!op.backend().empty()) {
SET_IMPL_MACRO(Cast_Op, *this, op.backend());
......@@ -64,6 +74,9 @@ public:
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
inline DataType& targetType() const { return mAttributes->template getAttr<CastAttr::TargetType>(); }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
......@@ -72,9 +85,15 @@ public:
}
};
inline std::shared_ptr<Node> Cast(const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Cast_Op>(), name);
inline std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
}
} // namespace Aidge
namespace {
template <>
const char* const EnumStrings<Aidge::CastAttr>::data[] = { "TargetType" };
}
#endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
\ No newline at end of file
#endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
......@@ -28,25 +28,32 @@
namespace Aidge {
class Concat_OpImpl : public OperatorImpl {
public:
Concat_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
Concat_OpImpl(const Operator& op, const std::string& backend = "")
: OperatorImpl(op, backend)
{}
void forward() override;
};
enum class ConcatAttr { Axis };
class Concat_Op : public OperatorTensor,
public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)>,
public StaticAttributes<ConcatAttr, DimSize_t> {
public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)> {
public:
static const std::string Type;
using Attributes_ = StaticAttributes<ConcatAttr, DimSize_t>;
private:
using Attributes_ = StaticAttributes<ConcatAttr, std::int32_t>;
template <ConcatAttr e>
using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
Concat_Op(const IOIndex_t nbIn, const DimSize_t axis)
public:
Concat_Op() = delete;
Concat_Op(const IOIndex_t nbIn, const std::int32_t axis)
: OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
Attributes_(attr<ConcatAttr::Axis>(axis))
mAttributes(std::make_shared<Attributes_>(
attr<ConcatAttr::Axis>(axis)))
{
if (nbIn == 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
......@@ -60,7 +67,7 @@ public:
*/
Concat_Op(const Concat_Op& op)
: OperatorTensor(op),
Attributes_(op)
mAttributes(op.mAttributes)
{
if (!op.backend().empty()) {
SET_IMPL_MACRO(Concat_Op, *this, op.backend());
......@@ -82,6 +89,9 @@ public:
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
inline std::int32_t& axis() const { return mAttributes->template getAttr<ConcatAttr::Axis>(); }
static const std::vector<std::string> getInputsName(){
return {"data_input_0", "data_input_n"};
}
......@@ -90,7 +100,7 @@ public:
}
};
inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const DimIdx_t axis = 0, const std::string& name = "") {
inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
}
}
......
......@@ -34,33 +34,32 @@ enum class ConvAttr { StrideDims, DilationDims, KernelDims };
template <DimIdx_t DIM>
class Conv_Op : public OperatorTensor,
public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
public StaticAttributes<ConvAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>> {
public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)> {
public:
static const std::string Type;
Conv_Op() = delete;
private:
using Attributes_ = StaticAttributes<ConvAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>>;
template <ConvAttr e>
using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
public:
Conv_Op() = delete;
constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
: OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
Attributes_(attr<ConvAttr::StrideDims>(strideDims),
attr<ConvAttr::DilationDims>(dilationDims),
// attr<ConvAttr::InChannels>(inChannels),
// attr<ConvAttr::OutChannels>(outChannels),
attr<ConvAttr::KernelDims>(kernelDims)) {}
mAttributes(std::make_shared<Attributes_>(
attr<ConvAttr::StrideDims>(strideDims),
attr<ConvAttr::DilationDims>(dilationDims),
attr<ConvAttr::KernelDims>(kernelDims)))
{}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
......@@ -113,6 +112,12 @@ public:
return getInput(1)->template dims<DIM+2>()[0];
}
inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvAttr::StrideDims>(); }
inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvAttr::DilationDims>(); }
inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvAttr::KernelDims>(); }
static const std::vector<std::string> getInputsName(){
return {"data_input", "weight", "bias"};
}
......
......@@ -33,30 +33,32 @@ enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims };
template <DimIdx_t DIM>
class ConvDepthWise_Op : public OperatorTensor,
public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
public StaticAttributes<ConvDepthWiseAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>> {
public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)> {
public:
static const std::string Type;
ConvDepthWise_Op() = delete;
private:
using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>>;
template <ConvDepthWiseAttr e>
using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
public:
ConvDepthWise_Op() = delete;
constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
: OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {}
mAttributes(std::make_shared<Attributes_>(
attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)))
{}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
......@@ -89,6 +91,11 @@ public:
return getInput(1)->template dims<DIM+2>()[0];
}
inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>(); }
inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>(); }
inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>(); }
static const std::vector<std::string> getInputsName(){
return {"data_input", "weight", "bias"};
}
......
......@@ -42,9 +42,9 @@ public:
FC_Op(const FC_Op& op)
: OperatorTensor(op)
{
if (op.mImpl){
if (op.mImpl) {
SET_IMPL_MACRO(FC_Op, *this, op.backend());
}else{
} else {
mImpl = nullptr;
}
}
......
......@@ -12,7 +12,7 @@
#ifndef AIDGE_CORE_OPERATOR_GATHER_H_
#define AIDGE_CORE_OPERATOR_GATHER_H_
#include <cstdint> // std::int64_t
#include <cstdint> // std::int8_t, std::int64_t
#include <memory>
#include <string>
#include <vector>
......@@ -36,21 +36,31 @@ enum class GatherAttr { Axis, Indices, GatheredShape };
class Gather_Op : public OperatorTensor,
public Registrable<Gather_Op,
std::string,
std::shared_ptr<OperatorImpl>(const Gather_Op&)>,
public StaticAttributes<GatherAttr, std::int8_t, std::vector<int64_t>, std::vector<DimSize_t>> {
std::shared_ptr<OperatorImpl>(const Gather_Op&)> {
public:
static const std::string Type;
using Attributes_ = StaticAttributes<GatherAttr,
std::int8_t,
std::vector<int64_t>,
std::vector<DimSize_t>>;
private:
template <GatherAttr e>
using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
public:
Gather_Op() = delete;
using Attributes_ = StaticAttributes<GatherAttr, std::int8_t, std::vector<int64_t>, std::vector<DimSize_t>>;
template <GatherAttr e> using attr = typename Attributes_::template attr<e>;
Gather_Op(std::int8_t axis, const std::vector<int64_t>& indices, const std::vector<DimSize_t>& gatheredShape)
: OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
Attributes_(attr<GatherAttr::Axis>(axis),
attr<GatherAttr::Indices>(indices),
attr<GatherAttr::GatheredShape>(gatheredShape))
Gather_Op(std::int8_t axis,
const std::vector<int64_t>& indices,
const std::vector<DimSize_t>& gatheredShape)
: OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
mAttributes(std::make_shared<Attributes_>(
attr<GatherAttr::Axis>(axis),
attr<GatherAttr::Indices>(indices),
attr<GatherAttr::GatheredShape>(gatheredShape)))
{
mImpl = std::make_shared<Gather_OpImpl>(*this);
}
......@@ -61,7 +71,7 @@ public:
*/
Gather_Op(const Gather_Op& op)
: OperatorTensor(op),
Attributes_(op)
mAttributes(op.mAttributes)
{
if (!op.backend().empty()) {
SET_IMPL_MACRO(Gather_Op, *this, op.backend());
......@@ -84,6 +94,11 @@ public:
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
inline std::int8_t& axis() const { return mAttributes -> getAttr<GatherAttr::Axis>(); }
inline std::vector<int64_t>& indices() const { return mAttributes -> getAttr<GatherAttr::Indices>(); }
inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes -> getAttr<GatherAttr::GatheredShape>(); }
static const std::vector<std::string> getInputsName(){
return {"data_input", "indices"};
}
......
......@@ -26,13 +26,14 @@
namespace Aidge {
class GenericOperator_Op
: public OperatorTensor,
public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>,
public DynamicAttributes {
public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)> {
private:
using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
ComputeDimsFunc mForwardDims;
const std::shared_ptr<DynamicAttributes> mAttributes;
public:
GenericOperator_Op(const std::string& type, const std::vector<InputCategory>& inputsCategory, IOIndex_t nbOut)
: OperatorTensor(type, inputsCategory, nbOut)
......@@ -42,10 +43,11 @@ public:
GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
: OperatorTensor(type, [nbData, nbParam]() {
std::vector<InputCategory> inputsCategory(nbData, InputCategory::Data);
inputsCategory.resize(nbData + nbParam, InputCategory::Param);
return inputsCategory;
}(), nbOut)
std::vector<InputCategory> inputsCategory(nbData, InputCategory::Data);
inputsCategory.resize(nbData + nbParam, InputCategory::Param);
return inputsCategory;
}(), nbOut),
mAttributes(std::make_shared<DynamicAttributes>())
{
mImpl = std::make_shared<OperatorImpl>(*this);
}
......@@ -55,7 +57,8 @@ public:
* @param op Operator to copy.
*/
GenericOperator_Op(const GenericOperator_Op& op)
: OperatorTensor(op)
: OperatorTensor(op),
mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>())
{
mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
}
......@@ -74,6 +77,22 @@ public:
bool forwardDims(bool allowDataDependency = false) override final;
void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
template <class T>
inline T& getAttr(const std::string& name)
{ return mAttributes -> template getAttr<T>(name); }
template <class T>
inline const T& getAttr(const std::string& name) const
{ return mAttributes -> template getAttr<T>(name); }
///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
///\tparam T expected Attribute type
///\param name Attribute name
///\param value Attribute value
template <class T>
inline void addAttr(const std::string& name, const T& value) const
{ mAttributes -> template addAttr<T>(name, value); }
// Helper functions that can be used with setForwardDims():
static const ComputeDimsFunc Identity;
......
......@@ -12,16 +12,16 @@
#ifndef AIDGE_CORE_OPERATOR_LEAKYRELU_H_
#define AIDGE_CORE_OPERATOR_LEAKYRELU_H_
#include <vector>
#include <memory>
#include <vector>
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
......@@ -30,20 +30,24 @@ enum class LeakyReLUAttr {
};
class LeakyReLU_Op : public OperatorTensor,
public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
public StaticAttributes<LeakyReLUAttr, float> {
public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)> {
public:
static const std::string Type;
LeakyReLU_Op() = delete;
private:
using Attributes_ = StaticAttributes<LeakyReLUAttr, float>;
template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
public:
LeakyReLU_Op() = delete;
LeakyReLU_Op(float negativeSlope)
: OperatorTensor(Type, {InputCategory::Data}, 1),
Attributes_(
attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
mAttributes(
std::make_shared<Attributes_>(
attr<LeakyReLUAttr::NegativeSlope>(negativeSlope)))
{}
/**
......@@ -52,7 +56,7 @@ public:
*/
LeakyReLU_Op(const LeakyReLU_Op& op)
: OperatorTensor(op),
Attributes_(op)
mAttributes(op.mAttributes)
{
if (op.mImpl){
SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
......@@ -76,6 +80,9 @@ public:
mOutputs[0]->setBackend(name, device);
}
inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<LeakyReLUAttr::NegativeSlope>(); }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
......
......@@ -34,30 +34,31 @@ enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
template <DimIdx_t DIM>
class MaxPooling_Op : public OperatorTensor,
public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
public StaticAttributes<MaxPoolingAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
bool> {
public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)> {
public:
static const std::string Type;
MaxPooling_Op() = delete;
using Attributes_ = StaticAttributes<MaxPoolingAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
bool>;
private:
template <MaxPoolingAttr e>
using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
public:
MaxPooling_Op() = delete;
constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
bool ceil_mode = false)
: OperatorTensor(Type, {InputCategory::Data}, 1),
Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
attr<MaxPoolingAttr::KernelDims>(kernel_dims),
attr<MaxPoolingAttr::CeilMode>(ceil_mode))
mAttributes(std::make_shared<Attributes_>(
attr<MaxPoolingAttr::StrideDims>(stride_dims),
attr<MaxPoolingAttr::KernelDims>(kernel_dims),
attr<MaxPoolingAttr::CeilMode>(ceil_mode)))
{}
/**
......@@ -66,7 +67,7 @@ public:
*/
MaxPooling_Op(const MaxPooling_Op<DIM>& op)
: OperatorTensor(op),
Attributes_(op)
mAttributes(op.mAttributes)
{
if (op.mImpl) {
SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
......@@ -90,17 +91,17 @@ public:
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
std::function<float(float)> roundingFunction;
if (this->template getAttr<MaxPoolingAttr::CeilMode>()) {
if (mAttributes->template getAttr<MaxPoolingAttr::CeilMode>()) {
roundingFunction = [](float x) { return std::ceil(x); };
} else {
roundingFunction = [](float x) { return std::floor(x); };
}
for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
roundingFunction(static_cast<float>(inputDims[dim+2] -
this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
static_cast<float>(mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
}
outputDims[1] = inputDims[1];
outputDims[0] = inputDims[0];
......@@ -116,6 +117,11 @@ public:
mOutputs[0]->setBackend(name, device);
}
inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); }
inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<MaxPoolingAttr::KernelDims>(); }
inline bool& ceilMode() const { return mAttributes->template getAttr<MaxPoolingAttr::CeilMode>(); }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
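The output-size computation above boils down to a one-line formula per spatial dimension; a standalone sketch of the same arithmetic in plain Python (not the aidge API):

    import math

    def pooled_size(in_size: int, kernel: int, stride: int, ceil_mode: bool) -> int:
        # Mirrors the loop above: 1 + round((in - kernel) / stride),
        # where round is ceil or floor depending on ceil_mode.
        rounding = math.ceil if ceil_mode else math.floor
        return 1 + int(rounding((in_size - kernel) / stride))

    # 8-wide input, kernel 3, stride 2:
    #   floor mode -> 1 + floor(2.5) = 3
    #   ceil  mode -> 1 + ceil(2.5)  = 4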
......
......@@ -37,20 +37,25 @@ public:
enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep };
class Memorize_Op : public OperatorTensor,
public Registrable<Memorize_Op, std::string, std::unique_ptr<OperatorImpl>(const Memorize_Op&)>,
public StaticAttributes<MemorizeAttr, unsigned int, unsigned int, unsigned int> {
public Registrable<Memorize_Op, std::string, std::unique_ptr<OperatorImpl>(const Memorize_Op&)> {
public:
static const std::string Type;
using Attributes_ = StaticAttributes<MemorizeAttr, unsigned int, unsigned int, unsigned int>;
private:
using Attributes_ = StaticAttributes<MemorizeAttr, std::uint32_t, std::uint32_t, std::uint32_t>;
template <MemorizeAttr e>
using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
Memorize_Op(const unsigned int endStep)
public:
Memorize_Op() = delete;
Memorize_Op(const std::uint32_t endStep)
: OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 2),
Attributes_(attr<MemorizeAttr::ScheduleStep>(0),
attr<MemorizeAttr::ForwardStep>(0),
attr<MemorizeAttr::EndStep>(endStep))
mAttributes(std::make_shared<Attributes_>(
attr<MemorizeAttr::ScheduleStep>(0),
attr<MemorizeAttr::ForwardStep>(0),
attr<MemorizeAttr::EndStep>(endStep)))
{
mOutputs[1] = mOutputs[0];
}
......@@ -62,7 +67,7 @@ public:
*/
Memorize_Op(const Memorize_Op& op)
: OperatorTensor(op),
Attributes_(op)
mAttributes(op.mAttributes)
{
if (op.mImpl) {
SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
......@@ -87,6 +92,11 @@ public:
void updateConsummerProducer() override;
void forward() override;
inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
inline std::uint32_t& scheduleStep() const { return mAttributes->template getAttr<MemorizeAttr::ScheduleStep>(); }
inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<MemorizeAttr::ForwardStep>(); }
inline std::uint32_t& endStep() const { return mAttributes->template getAttr<MemorizeAttr::EndStep>(); }
static const std::vector<std::string> getInputsName(){
return {"data_input", "data_input_init"};
}
......@@ -95,7 +105,7 @@ public:
}
};
inline std::shared_ptr<Node> Memorize(const unsigned int endStep, const std::string& name = "") {
inline std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
}
} // namespace Aidge
......