Commit d3eb5091 authored by Cyril Moineau, committed by Maxence Naud

Update clone function with SET_IMPL_MACRO.

parent 108727b7
Part of 2 merge requests: !105 "version 0.2.0" and !85 "Initial working python registrar"
Showing changed files with 102 additions and 26 deletions
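The diff below swaps each copy constructor's explicit ternary dispatch through Registrar for a call to SET_IMPL_MACRO. The macro's definition is not part of this diff; as a rough sketch, and assuming it merely wraps a guarded Registrar lookup (per the linked merge request, the real macro additionally resolves implementations registered from Python), it could look like:

// Hypothetical sketch only; the actual SET_IMPL_MACRO is defined elsewhere
// in aidge_core and also dispatches to Python-registered implementations.
#define SET_IMPL_MACRO(T_Op, op, backend_name)                          \
    do {                                                                \
        /* Only attach an implementation if one is registered. */      \
        if (Registrar<T_Op>::exists(backend_name)) {                    \
            (op).mImpl = Registrar<T_Op>::create(backend_name)(op);     \
        }                                                               \
    } while (false)

Compared with the removed one-liners, this keeps every call site uniform and concentrates the lookup logic in a single place.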
@@ -47,7 +47,11 @@ public:
     Add_Op(const Add_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Add_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Add_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -60,7 +60,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -54,7 +54,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -55,7 +55,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Concat_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Concat_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -65,7 +65,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -67,7 +67,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -40,7 +40,11 @@ public:
     Div_Op(const Div_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Div_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Div_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -40,7 +40,11 @@ public:
     Erf_Op(const Erf_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Erf_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Erf_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -57,7 +57,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<FC_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(FC_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -58,7 +58,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Gather_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Gather_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -54,7 +54,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(LeakyReLU_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -68,7 +72,6 @@ public:
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
+        SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
@@ -64,7 +64,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -39,7 +39,11 @@ public:
     Move_Op(const Move_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Move_Op>::create({mInputs[0]->getImpl()->backend(), mOutputs[0]->getImpl()->backend()})(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Move_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -72,4 +76,4 @@ inline std::shared_ptr<Node> Move(const std::string& name = "") {
 }
 }
-#endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */
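Note that the dispatch key for Move_Op changes here: the removed line looked up an implementation for the (input backend, output backend) pair, while the SET_IMPL_MACRO call keys on the output backend alone.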
@@ -40,7 +40,11 @@ public:
     Pow_Op(const Pow_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Pow_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Pow_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -67,9 +67,11 @@ public:
         for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
             mOutputs[i] = std::make_shared<Tensor>(*(op.getOutput(i)));
         }
-        mImpl = (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}))
-            ? Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this)
-            : std::make_shared<OperatorImpl>(*this);
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Producer_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
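Unlike the other operators, Producer_Op previously fell back to a generic OperatorImpl when no backend implementation was registered; after this change the clone's mImpl is left null unless the source operator carries an implementation.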
@@ -39,7 +39,11 @@ public:
     ReLU_Op(const ReLU_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(ReLU_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -57,7 +57,11 @@ class ReduceMean_Op : public OperatorTensor,
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<ReduceMean_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -53,7 +53,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Reshape_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Reshape_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -55,7 +55,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Scaling_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Scaling_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
     /**
@@ -95,4 +99,4 @@ const char* const EnumStrings<Aidge::ScalingAttr>::data[]
     = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"};
 }
-#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
\ No newline at end of file
+#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
@@ -55,8 +55,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Slice_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this)
-                         : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Slice_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 public:
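Outside Aidge, the copy-constructor pattern the commit standardizes can be illustrated with a small self-contained mock; every name below (MyOp, Impl, setImpl, registry) is hypothetical and stands in for Aidge's Registrar machinery, not its real API:

// Standalone mock of the clone-dispatch idiom; hypothetical names only.
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Impl { std::string backend; };

struct MyOp { std::shared_ptr<Impl> mImpl; };

// Minimal registrar: backend name -> factory producing an implementation.
static std::map<std::string, std::function<std::shared_ptr<Impl>(MyOp&)>> registry = {
    {"cpu", [](MyOp&) { return std::make_shared<Impl>(Impl{"cpu"}); }}
};

// Guarded dispatch, mirroring what SET_IMPL_MACRO is assumed to wrap:
// attach an implementation only when one is registered for the backend.
void setImpl(MyOp& op, const std::string& backend) {
    auto it = registry.find(backend);
    if (it != registry.end()) {
        op.mImpl = it->second(op);
    }
}

int main() {
    MyOp original;
    setImpl(original, "cpu");

    // Clone logic in the new style: dispatch only when the source
    // operator actually has an implementation, else leave mImpl null.
    MyOp copy;
    if (original.mImpl) {
        setImpl(copy, original.mImpl->backend);
    } else {
        copy.mImpl = nullptr;
    }
    std::cout << (copy.mImpl ? copy.mImpl->backend : std::string("none")) << "\n";
    return 0;
}

The guarded lookup in setImpl reflects the assumed behaviour of SET_IMPL_MACRO: the clone inherits an implementation for the source's backend when available, and its mImpl otherwise stays null.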