Skip to content
Snippets Groups Projects
Commit 97f10abb authored by Grégoire Kubler's avatar Grégoire Kubler
Browse files

Merge branch 'dev' into feat/release_pip

parents 9e2c710f 6a2d6585
No related branches found
No related tags found
2 merge requests!212Version 0.3.0,!116feat/release_pip
Pipeline #51003 failed
Showing
with 88 additions and 39 deletions
......@@ -73,15 +73,26 @@ class test_operator_binding(unittest.TestCase):
self.assertEqual(attrs.get_attr("b"), "test")
self.assertEqual(attrs.has_attr("c"), True)
self.assertEqual(attrs.get_attr("c"), [True, False, True])
self.assertEqual(attrs.dict().keys(), {"a", "b", "c"})
self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "mem", "impl"})
self.assertEqual(attrs.has_attr("d"), False)
self.assertEqual(attrs.has_attr("mem.a"), True)
self.assertEqual(attrs.get_attr("mem.a"), 1)
self.assertEqual(attrs.has_attr("mem.data.b"), True)
self.assertEqual(attrs.get_attr("mem.data.b"), 1.0)
self.assertEqual(attrs.get_attr("mem").get_attr("data").get_attr("b"), 1.0)
self.assertEqual(attrs.has_attr("impl.c"), True)
self.assertEqual(attrs.get_attr("impl.c"), "test")
# Add Python attributes
attrs.add_attr("d", 18.56)
self.assertEqual(attrs.get_attr("d"), 18.56)
self.assertEqual(attrs.has_attr("d"), True)
self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "d"})
self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "d", "mem", "impl"})
self.assertEqual(attrs.has_attr("e"), False)
attrs.add_attr("mem.data.c", 19.36)
self.assertEqual(attrs.get_attr("mem.data.c"), 19.36)
self.assertEqual(attrs.has_attr("mem.data.c"), True)
self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "d", "mem", "impl"})
# Check that added Python attribute is accessible in C++
# Return the value of an attribute named "d" of type float64 (double in C++)
......@@ -89,6 +100,23 @@ class test_operator_binding(unittest.TestCase):
attrs.d = 23.89
self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
op = aidge_core.GenericOperatorOp("any_type", 1,0,1)
with self.assertRaises(RuntimeError):
op.attr.something
op.attr.something = aidge_core.DynamicAttributes()
try:
self.assertEqual(str(op.attr), "AttrDict({'something': AttrDict({})})")
except Exception:
self.fail("op.attr.something raised Exception unexpectedly!")
op.attr.something.arg1 = 4
self.assertEqual(op.attr.something.arg1, 4)
# auto create the namespace another_thing (not enabled)
#op.attr.another_thing.arg = 44
#self.assertEqual(op.attr.another_thing.arg, 44)
def test_forward_dims(self):
in_dims=[25, 25]
input = aidge_core.Producer(in_dims, name="In")
......
......@@ -132,10 +132,14 @@ static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
{"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int32_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int16(
{"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
{"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int8(
{"cpu", DataType::Int8}, Aidge::TensorImpl_cpu<int8_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt64(
{"cpu", DataType::UInt64}, Aidge::TensorImpl_cpu<uint64_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt32(
{"cpu", DataType::UInt32}, Aidge::TensorImpl_cpu<uint32_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
{"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt8(
{"cpu", DataType::UInt8}, Aidge::TensorImpl_cpu<uint8_t>::create);
} // namespace
......
......@@ -57,7 +57,8 @@ class Tensor : public Data,
/**
* @brief Construct a new empty Tensor object.
* It has the features of an undefined scalar.
* It is considered undefined, i.e. dims can't be forwarded from such a Tensor.
* @ref undefined() method for details
*/
Tensor(DataType dtype = DataType::Float32, DataFormat dformat = DataFormat::Default)
: Data(Type),
......@@ -65,7 +66,7 @@ class Tensor : public Data,
mDataFormat(dformat),
mDims(std::vector<DimSize_t>({})),
mStrides({1}),
mSize(1)
mSize(0)
{
// ctor
}
......@@ -523,14 +524,30 @@ public:
void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>());
/**
 * @brief Return if the Tensor object has at least one element.
* @return true
* @return false
 * @brief Return whether the Tensor object has a rank of 0, i.e. dimensions == {}.
* For defined Tensors, this implies that the Tensor is scalar.
* For backward compatibility reasons, it is valid to call this predicate
* even on undefined Tensors, in which case it returns true.
 * Hence, before testing the rank with this method, always check that the
* Tensor is not undefined().
* In particular for operations such as forwardDims(), one should always
* use undefined() to test whether the Tensor dimensions have been defined.
* In this case empty() can be used to distinguish scalars from N-D Tensors.
* @return true if rank is 0 or the tensor is undefined
*/
bool empty() const { return mDims.empty(); }
// bool newempty() const noexcept {
// return mSize == 0;
// }
/**
* @brief Returns whether the Tensor object is undefined.
* An undefined Tensor is equivalent to a tensor for which dimensions have not
 * been defined yet. Hence, dimension forwarding can't be done from undefined tensors.
 * The only case where a tensor is undefined is after the default constructor
* and before any call to resize().
* Also, as soon as the resize() method has been called, the Tensor is irreversibly defined.
* @ref empty() method for distinguishing an undefined from a scalar
* @return true if undefined
*/
bool undefined() const { return mSize == 0; }
/**
* @brief Set each element of the tensor to zero.
......
......@@ -119,8 +119,8 @@ extern template class Aidge::AvgPooling_Op<4>;
namespace {
template <>
const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
"StrideDims",
"KernelDims"
"stride_dims",
"kernel_dims"
};
}
......
......@@ -111,7 +111,7 @@ extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t
namespace {
template <>
const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum" };
}
#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
......@@ -93,7 +93,7 @@ inline std::shared_ptr<Node> Cast(const DataType targetType, const std::string&
namespace {
template <>
const char* const EnumStrings<Aidge::CastAttr>::data[] = { "TargetType" };
const char* const EnumStrings<Aidge::CastAttr>::data[] = { "target_type" };
}
#endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
......@@ -108,7 +108,7 @@ inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axi
namespace {
template <>
const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
"Axis"
"axis"
};
}
......
......@@ -178,9 +178,9 @@ extern template class Aidge::Conv_Op<2>;
namespace {
template <>
const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
"StrideDims",
"DilationDims",
"KernelDims"
"stride_dims",
"dilation_dims",
"kernel_dims"
};
}
......
......@@ -140,8 +140,8 @@ extern template class Aidge::ConvDepthWise_Op<2>;
namespace {
template <>
const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims",
"KernelDims"};
const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"stride_dims", "dilation_dims",
"kernel_dims"};
}
#endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
......@@ -133,10 +133,10 @@ extern template class Aidge::Fold_Op<2>;
namespace {
template <>
const char *const EnumStrings<Aidge::FoldAttr>::data[] = {
"OutputDims",
"StrideDims",
"DilationDims",
"KernelDims"
"output_dims",
"stride_dims",
"dilation_dims",
"kernel_dims"
};
}
......
......@@ -114,7 +114,7 @@ inline std::shared_ptr<Node> Gather(std::int8_t axis = 0, const std::vector<int6
namespace {
template <>
const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"Axis", "Indices", "GatheredShape"};
const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"axis", "indices", "gathered_shape"};
}
#endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
......@@ -76,7 +76,7 @@ public:
* @return false Input has no dimensions or is a nullptr.
*/
bool dimsForwarded() const override final {
return mInputs[0] ? (mInputs[0]->empty() ? false : mInputs[0]->dims() == mOutputs[0]->dims()) : false;
return mInputs[0] ? (mInputs[0]->undefined() ? false : mInputs[0]->dims() == mOutputs[0]->dims()) : false;
}
......
......@@ -99,7 +99,7 @@ inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::st
namespace {
template <>
const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
= {"NegativeSlope"};
= {"negative_slope"};
}
#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
......@@ -156,7 +156,7 @@ inline std::shared_ptr<Node> MaxPooling(
namespace {
template <>
const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"StrideDims", "KernelDims", "CeilMode"};
const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "kernel_dims", "ceil_mode"};
}
#endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
......@@ -113,9 +113,9 @@ inline std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::st
namespace {
template <>
const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = {
"ScheduleStep",
"ForwardStep",
"EndStep"
"schedule_step",
"forward_step",
"end_step"
};
}
......
......@@ -109,7 +109,7 @@ public:
* The pointer itself is not changed, thus keeping the current connections.
 * @param outputIdx Index of the output to set.
*/
virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) = 0;
virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const = 0;
virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0;
std::shared_ptr<Hook> getHook(const std::string& hookName) {
......
......@@ -62,7 +62,7 @@ public:
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final;
// output management
void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override;
void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const override;
virtual const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const;
std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final;
///////////////////////////////////////////////////
......
......@@ -139,7 +139,7 @@ extern template class Aidge::Pad_Op<2>;
namespace {
template <>
const char *const EnumStrings<Aidge::PadAttr>::data[] = {"BeginEndBorders", "BorderType", "BorderValue"};
const char *const EnumStrings<Aidge::PadAttr>::data[] = {"begin_end_borders", "border_type", "border_value"};
template <>
const char *const EnumStrings<Aidge::PadBorderType>::data[] = {"Constant", "Edge", "Reflect", "Wrap"};
......
......@@ -100,7 +100,7 @@ inline std::shared_ptr<Node> Pop(const std::string& name = "") {
namespace {
template <>
const char *const EnumStrings<Aidge::PopAttr>::data[] = {
"ForwardStep"
"forward_step"
};
}
......
......@@ -115,7 +115,7 @@ public:
// fmt::print("Basic Producer backward() function.\n");
}
void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) override {
void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) const override {
if (mAttributes->template getAttr<ProdAttr::Constant>()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
}
......@@ -160,7 +160,7 @@ std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOInde
namespace {
template <>
const char *const EnumStrings<Aidge::ProdAttr>::data[] = {
"Constant"
"constant"
};
}
#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment