Commit f27194c2 authored by Maxence Naud

Merge remote-tracking branch 'origin/dev' into ui_parameters

parents cb41ace7 1eccdb62
Part of 2 merge requests: !152 Update Aidge export to take a graph view as an argument instead of a..., !145 Improve UI for Operator/Node/GraphView/Tensor
Pipeline #49377 failed
Showing changes with 103 additions and 88 deletions
@@ -41,6 +41,7 @@ class test_OperatorImpl(unittest.TestCase):
         generic_matmul_op = matmul.get_operator()
         generic_matmul_op.set_forward_dims(lambda x: x)
         generic_matmul_op.set_impl(testImpl(generic_matmul_op))
+        generic_matmul_op.set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
         generic_matmul_op.forward()
         self.assertEqual(GLOBAL_CPT, 1)
......
@@ -34,10 +34,8 @@ class test_attributes(unittest.TestCase):
     def test_fc(self):
         in_channels = 4
         out_channels = 8
-        nb_bias = True
-        fc_op = aidge_core.FC(in_channels, out_channels, nb_bias).get_operator()
+        fc_op = aidge_core.FC(in_channels, out_channels).get_operator()
         self.assertEqual(fc_op.out_channels(), out_channels)
-        self.assertEqual(fc_op.attr.get_attr("no_bias"), nb_bias)

     def test_producer_1D(self):
         dims = [5]
......
@@ -59,6 +59,7 @@
 #include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Reshape.hpp"
+#include "aidge/operator/Resize.hpp"
 #include "aidge/operator/Shape.hpp"
 #include "aidge/operator/Scaling.hpp"
 #include "aidge/operator/Slice.hpp"
......
@@ -283,6 +283,11 @@ class Tensor : public Data,
      */
     Tensor operator/(const Tensor& other) const;

+    /**
+     * @brief Element-wise sqrt operation for Tensor.
+     * @return Tensor
+     */
+    Tensor sqrt() const;

     ~Tensor() noexcept;
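The new sqrt() declared above joins the element-wise operators such as operator/. A minimal usage sketch; the Array1D-based construction is an assumption about the surrounding Tensor API, not something this diff shows:

    #include "aidge/data/Tensor.hpp"

    // Hypothetical call site: element-wise square root of a small tensor.
    Aidge::Tensor a = Aidge::Array1D<float, 3>{{1.0f, 4.0f, 9.0f}};  // assumed ctor
    Aidge::Tensor b = a.sqrt();                                      // expected {1.0f, 2.0f, 3.0f}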
@@ -545,23 +550,16 @@ public:
     inline void print() const { fmt::print("{}\n", toString()); }

-    std::shared_ptr<Tensor> grad() {
-        return mGrad;
-    }
-    void setGrad(std::shared_ptr<Tensor> newGrad) {
-        mGrad = newGrad;
-    }
     /**
-     * @brief Associate the gradient with a Tensor instance and set its implementation
-     * if none was previously set.
+     * @brief Get the gradient Tensor. If not initialized, set a Tensor instance
+     * and set its implementation if none was previously set.
      * @note Dimensions for the Tensor instance are copied from the original current Tensor.
      * @note If a Tensor instance was already associated, only the implementation is created
      * with values set to 0.
      * @note If Tensor instance and implementation already existed for the gradient
      * nothing is done.
      */
-    void initGrad() {
+    std::shared_ptr<Tensor> grad() {
         if (!mGrad) {
             mGrad = std::make_shared<Tensor>(mDims);
         }
@@ -571,6 +569,11 @@ public:
             mGrad->setBackend(hasImpl() ? mImpl->backend() : "cpu");
             mGrad->zeros();
         }
+        return mGrad;
+    }
+    void setGrad(std::shared_ptr<Tensor> newGrad) {
+        mGrad = newGrad;
+    }

     /**
......
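The rewritten grad() above folds the removed initGrad() into the accessor: on first call it allocates the gradient with the owner's dims, creates an implementation on the same backend (falling back to "cpu"), and zero-fills it; later calls return the existing Tensor untouched. A sketch of the simplified call site; the function name is illustrative:

    #include <memory>
    #include "aidge/data/Tensor.hpp"

    void prepareBackward(Aidge::Tensor& t) {
        // Before this commit: t.initGrad(); then t.grad().
        // Now one call lazily allocates, sets the backend and zeroes the gradient.
        std::shared_ptr<Aidge::Tensor> g = t.grad();
    }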
@@ -193,9 +193,14 @@ public:
      */
     inline IOIndex_t getFirstFreeDataInput() const {
         IOIndex_t i = 0;
-        for (; (i < nbData()) && (input(i).second != gk_IODefaultIndex); ++i) {}
-        // assert((i<nbData()) && "No free data input for Node");
-        return (i < nbData()) ? i : gk_IODefaultIndex;
+        for (; i < nbInputs(); ++i) {
+            if ((inputCategory(i) == InputCategory::Data || inputCategory(i) == InputCategory::OptionalData)
+                && input(i).second == gk_IODefaultIndex)
+            {
+                break;
+            }
+        }
+        return (i < nbInputs()) ? i : gk_IODefaultIndex;
     }
@@ -227,13 +232,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept { return getOperator()->nbInputs(); }

     /**
-     * @brief Number of input specifically for data.
-     * Data inputs exclude inputs expecting parameters (weights or bias).
-     * @details [data, data, weight, bias] => 2
-     * @return IOIndex_t
+     * @brief Category of a specific input (Data or Param, optional or not).
+     * @return InputCategory
      */
-    inline IOIndex_t nbData() const noexcept {
-        return getOperator()->nbData();
+    inline InputCategory inputCategory(IOIndex_t idx) const {
+        return getOperator()->inputCategory(idx);
     }

     /**
......
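With nbData() gone, callers query each index's category through the new inputCategory(), exactly as the rewritten getFirstFreeDataInput() does above. A sketch of the same pattern in user code; the helper itself is hypothetical, but the Node methods it calls are the ones from this diff:

    #include "aidge/graph/Node.hpp"

    // Count the data-carrying inputs (mandatory or optional) of a node.
    Aidge::IOIndex_t countDataInputs(const Aidge::Node& node) {
        Aidge::IOIndex_t n = 0;
        for (Aidge::IOIndex_t i = 0; i < node.nbInputs(); ++i) {
            const auto cat = node.inputCategory(i);
            if (cat == Aidge::InputCategory::Data
                || cat == Aidge::InputCategory::OptionalData) {
                ++n;
            }
        }
        return n;
    }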
@@ -29,7 +29,7 @@ public:
     static const std::string Type;

     Add_Op(const IOIndex_t nbIn)
-        : OperatorTensor(Type, nbIn, 0, 1)
+        : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1)
     {
         if (nbIn == 0) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
......
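Operators with a runtime-chosen input count now expand it into a homogeneous category vector, as the Add_Op constructor above shows. For instance, assuming the usual Add factory helper:

    // Equivalent to the old OperatorTensor(Type, /*nbData=*/3, /*nbParam=*/0, /*nbOut=*/1):
    // three Data inputs, one output.
    auto add = Aidge::Add(3, "sum3");  // factory name and signature assumed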
@@ -48,7 +48,7 @@ public:
     constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
-        : OperatorTensor(Type, 1, 0, 1),
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<AvgPoolingAttr::StrideDims>(stride_dims),
             attr<AvgPoolingAttr::KernelDims>(kernel_dims)))
......
@@ -43,7 +43,13 @@ public:
     BatchNorm_Op() = delete;

     constexpr BatchNorm_Op(float epsilon, float momentum)
-        : OperatorTensor(Type, 1, 4, 1),
+        : OperatorTensor(Type,
+                         {InputCategory::Data,
+                          InputCategory::Param,
+                          InputCategory::Param,
+                          InputCategory::Param,
+                          InputCategory::Param},
+                         1),
           mAttributes(std::make_shared<Attributes_>(
             attr<BatchNormAttr::Epsilon>(epsilon),
             attr<BatchNormAttr::Momentum>(momentum))) {}
......
@@ -51,7 +51,7 @@ public:
     Concat_Op() = delete;

     Concat_Op(const IOIndex_t nbIn, const std::int32_t axis)
-        : OperatorTensor(Type, nbIn, 0, 1),
+        : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ConcatAttr::Axis>(axis)))
     {
......
@@ -30,7 +30,7 @@
 #include "aidge/utils/Types.h"

 namespace Aidge {
-enum class ConvAttr { StrideDims, DilationDims, KernelDims, NoBias };
+enum class ConvAttr { StrideDims, DilationDims, KernelDims };

 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
@@ -41,10 +41,9 @@ public:
 private:
     using Attributes_ = StaticAttributes<ConvAttr,
-                                         std::array<DimSize_t, DIM>,
-                                         std::array<DimSize_t, DIM>,
-                                         std::array<DimSize_t, DIM>,
-                                         bool>;
+                                         std::array<DimSize_t, DIM>,
+                                         std::array<DimSize_t, DIM>,
+                                         std::array<DimSize_t, DIM>>;
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
@@ -54,14 +53,12 @@ public:
     constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                      bool noBias = false)
-        : OperatorTensor(Type, 1, 2, 1),
+                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ConvAttr::StrideDims>(strideDims),
             attr<ConvAttr::DilationDims>(dilationDims),
-            attr<ConvAttr::KernelDims>(kernelDims),
-            attr<ConvAttr::NoBias>(noBias)))
+            attr<ConvAttr::KernelDims>(kernelDims)))
     {}

     /**
@@ -119,7 +116,6 @@ public:
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvAttr::StrideDims>(); }
     inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvAttr::DilationDims>(); }
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvAttr::KernelDims>(); }
-    inline bool& noBias() const { return mAttributes->template getAttr<ConvAttr::NoBias>(); }

     static const std::vector<std::string> getInputsName(){
@@ -153,10 +149,11 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
                                   bool noBias = false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
     addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
-    addProducer(conv, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
+    if (!noBias) {
+        addProducer(conv, 2, {outChannels}, "b"); // already sets bias dims
+    }
     return conv;
 }
@@ -175,6 +172,7 @@ inline std::shared_ptr<Node> Conv(
 }
 } // namespace Aidge

+extern template class Aidge::Conv_Op<1>;
 extern template class Aidge::Conv_Op<2>;

 namespace {
@@ -182,8 +180,7 @@ template <>
 const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
     "StrideDims",
     "DilationDims",
-    "KernelDims",
-    "NoBias"
+    "KernelDims"
 };
 }
......
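The net effect above: NoBias leaves ConvAttr, the bias becomes the OptionalParam input 2, and the factory attaches a bias producer only when noBias is false. A usage sketch, assuming the raw-array Conv overload declared above deduces DIM from the braced kernel:

    // conv1 gets producers for {weight, bias}; conv2 only for {weight}:
    // its OptionalParam bias input is simply left unconnected.
    auto conv1 = Aidge::Conv(3, 16, {3, 3}, "conv1");
    auto conv2 = Aidge::Conv(3, 16, {3, 3}, "conv2", {1, 1}, {1, 1}, /*noBias=*/true);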
@@ -29,7 +29,7 @@
 #include "aidge/utils/Types.h"

 namespace Aidge {
-enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims, NoBias };
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims };

 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
@@ -39,10 +39,9 @@ public:
 private:
     using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
-                                         std::array<DimSize_t, DIM>,
-                                         std::array<DimSize_t, DIM>,
-                                         std::array<DimSize_t, DIM>,
-                                         bool>;
+                                         std::array<DimSize_t, DIM>,
+                                         std::array<DimSize_t, DIM>,
+                                         std::array<DimSize_t, DIM>>;
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
@@ -53,14 +52,12 @@ public:
     constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                               bool no_bias=false)
-        : OperatorTensor(Type, 1, 2, 1),
+                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
             attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-            attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
-            attr<ConvDepthWiseAttr::NoBias>(no_bias)))
+            attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)))
     {}

     /**
@@ -98,7 +95,6 @@ public:
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>(); }
     inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>(); }
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>(); }
-    inline bool& noBias() const { return mAttributes->template getAttr<ConvDepthWiseAttr::NoBias>(); }

     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
@@ -117,9 +113,11 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            bool noBias=false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
     addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
-    addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b");
+    if (!noBias) {
+        addProducer(convDW, 2, {nbChannels}, "b");
+    }
     return convDW;
 }
@@ -137,12 +135,13 @@ inline std::shared_ptr<Node> ConvDepthWise(
 }
 } // namespace Aidge

+extern template class Aidge::ConvDepthWise_Op<1>;
 extern template class Aidge::ConvDepthWise_Op<2>;

 namespace {
 template <>
 const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims",
-                                                                   "KernelDims", "NoBias"};
+                                                                   "KernelDims"};
 }

 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
@@ -30,7 +30,7 @@ class Div_Op : public OperatorTensor,
 public:
     static const std::string Type;

-    Div_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Div_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}

     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
......
@@ -29,7 +29,7 @@ class Erf_Op : public OperatorTensor,
 public:
     static const std::string Type;

-    Erf_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Erf_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}

     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
......
@@ -24,8 +24,6 @@
 #include "aidge/utils/Registrar.hpp"

 namespace Aidge {
-enum class FCAttr { NoBias };

 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
@@ -33,18 +31,8 @@ class FC_Op : public OperatorTensor,
 public:
     static const std::string Type;

-private:
-    template <FCAttr e>
-    using attr = typename StaticAttributes<FCAttr, bool>::template attr<e>;
-    const std::shared_ptr<StaticAttributes<FCAttr, bool>> mAttributes;
-
-public:
-    FC_Op() = delete;
-
-    FC_Op(bool noBias)
-    : OperatorTensor(Type, 1, 2, 1),
-      mAttributes(std::make_shared<StaticAttributes<FCAttr, bool>>(
-        attr<FCAttr::NoBias>(noBias)))
+    FC_Op()
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1)
     {}

     /**
@@ -52,8 +40,7 @@ public:
      * @param op Operator to copy.
      */
     FC_Op(const FC_Op& op)
-    : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+    : OperatorTensor(op)
     {
         if (op.mImpl) {
             SET_IMPL_MACRO(FC_Op, *this, op.backend());
@@ -82,8 +69,6 @@ public:
         }
         return getInput(1)->template dims<2>()[0];
     }
-    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline bool& noBias() const { return mAttributes -> getAttr<FCAttr::NoBias>(); }

     static const std::vector<std::string> getInputsName() {
         return {"data_input", "weight", "bias"};
@@ -95,16 +80,13 @@ public:
 inline std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), name);
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(), name);
     addProducer(fc, 1, {outChannels, inChannels}, "w");
-    addProducer(fc, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
+    if (!noBias) {
+        addProducer(fc, 2, {outChannels}, "b"); // already sets bias dims
+    }
     return fc;
 }
 } // namespace Aidge

-namespace {
-template <>
-const char *const EnumStrings<Aidge::FCAttr>::data[] = {"NoBias"};
-}
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
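FC follows the same convention: the noBias flag survives only on the factory, and the removed Python assertion on the "no_bias" attribute has no replacement because the absence of a bias producer is now the single source of truth. A sketch using the factory signature shown above:

    auto fcBias   = Aidge::FC(4, 8);                          // inputs: data, weight, bias
    auto fcNoBias = Aidge::FC(4, 8, /*noBias=*/true, "fc1");  // bias input left unconnected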
@@ -56,7 +56,7 @@ public:
     Gather_Op(std::int8_t axis,
               const std::vector<int64_t>& indices,
               const std::vector<DimSize_t>& gatheredShape)
-        : OperatorTensor(Type, 2, 0, 1),
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<GatherAttr::Axis>(axis),
             attr<GatherAttr::Indices>(indices),
......
@@ -35,8 +35,18 @@ private:
     const std::shared_ptr<DynamicAttributes> mAttributes;

 public:
+    GenericOperator_Op(const std::string& type, const std::vector<InputCategory>& inputsCategory, IOIndex_t nbOut)
+        : OperatorTensor(type, inputsCategory, nbOut)
+    {
+        mImpl = std::make_shared<OperatorImpl>(*this);
+    }
+
     GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
-        : OperatorTensor(type, nbData, nbParam, nbOut),
+        : OperatorTensor(type, [nbData, nbParam]() {
+                             std::vector<InputCategory> inputsCategory(nbData, InputCategory::Data);
+                             inputsCategory.resize(nbData + nbParam, InputCategory::Param);
+                             return inputsCategory;
+                         }(), nbOut),
           mAttributes(std::make_shared<DynamicAttributes>())
     {
         mImpl = std::make_shared<OperatorImpl>(*this);
@@ -92,6 +102,20 @@ public:
     }
 };

+/**
+ * @brief Fictive custom operator not associated with any implementation.
+ * Allows to import unknown operators and simulate new ones.
+ * @param type Type of the fictive operator.
+ * @param inputCategory List inputs with their category
+ * @param nbOut Number of output data.
+ * @param name (optional) name of the Operator.
+ * @return std::shared_ptr<Node> Node associated with the Generic Operator.
+ */
+inline std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector<InputCategory>& inputCategory, IOIndex_t nbOut,
+                                             const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, inputCategory, nbOut), name);
+}
+
 /**
  * @brief Fictive custom operator not associated with any implementation.
  * Allows to import unknown operators and simulate new ones.
......
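The added overload above lets callers state each input's category directly, while the legacy (nbData, nbParam) constructor now builds the same vector through a lambda. A sketch with an illustrative operator type string:

    // One mandatory data input, one optional data input, two outputs.
    auto node = Aidge::GenericOperator("MyCustomOp",
            {Aidge::InputCategory::Data, Aidge::InputCategory::OptionalData},
            2, "custom0");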
@@ -37,7 +37,7 @@ class GlobalAveragePooling_Op
 public:
     static const std::string Type;

-    GlobalAveragePooling_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    GlobalAveragePooling_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}

     GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op)
         : OperatorTensor(op) {
......
@@ -42,7 +42,7 @@ public:
     static const std::string Type;

     Identity_Op()
-        : OperatorTensor(Type, 1, 0, 1)
+        : OperatorTensor(Type, {InputCategory::Data}, 1)
     {
         mImpl = std::make_shared<OperatorImpl>(*this);
     }
......
@@ -44,7 +44,7 @@ public:
     LeakyReLU_Op() = delete;

     LeakyReLU_Op(float negativeSlope)
-        : OperatorTensor(Type, 1, 0, 1),
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(
             std::make_shared<Attributes_>(
               attr<LeakyReLUAttr::NegativeSlope>(negativeSlope)))
......
@@ -30,7 +30,7 @@ class Ln_Op : public OperatorTensor,
 public:
     static const std::string Type;

-    Ln_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Ln_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}

     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
......