Commit 764074f5 authored by Olivier BICHLER

Merge branch 'simpl_op_impl' into 'master'

Update with default operator impl

See merge request !17
parents b4c8c60e 61b0402b
Pipeline #33223 passed
Showing with 61 additions and 622 deletions
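In short: the 622 deleted lines below are almost entirely per-operator copies of the same producer-consumer bookkeeping (getNbRequiredData, getNbRequiredProtected, getRequiredMemory, getNbConsumedData, getNbProducedData, updateConsummerProducer), which this merge delegates to a single default implementation in aidge_core. A minimal sketch of the base class shape, with member names inferred from the deleted code rather than from the authoritative aidge_core declaration:

// Hedged sketch of the default OperatorImpl base class in aidge_core.
class OperatorImpl {
public:
    OperatorImpl(const Operator& op) : mOp(op) {}

    // Default producer-consumer model: each input is required in full and
    // the output buffer is sized from the output tensor dimensions.
    virtual NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const;
    virtual NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
    virtual NbElts_t getRequiredMemory(const IOIndex_t outputIdx,
                                       const std::vector<DimSize_t>& inputsSize) const;
    virtual NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const;
    virtual NbElts_t getNbProducedData(const IOIndex_t outputIdx) const;
    virtual void updateConsummerProducer();

    virtual void forward();   // backends still override the compute entry points
    virtual void backward();

protected:
    const Operator& mOp;                    // generic operator reference
    std::vector<NbElts_t> mNbConsumedData;  // one counter per input
    std::vector<NbElts_t> mNbProducedData;  // one counter per output
};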
@@ -55,6 +55,8 @@ class test_scheduler(unittest.TestCase):
graph_view.set_datatype(aidge_core.DataType.Float32)
graph_view.set_backend("cpu")
graph_view.forward_dims()
scheduler = aidge_core.SequentialScheduler(graph_view)
scheduler.generate_scheduling()
@@ -80,6 +82,8 @@ class test_scheduler(unittest.TestCase):
graph_view.set_datatype(aidge_core.DataType.Float32)
graph_view.set_backend("cpu")
graph_view.forward_dims()
scheduler = aidge_core.SequentialScheduler(graph_view)
scheduler.generate_scheduling()
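The only functional change to these scheduler tests is the added graph_view.forward_dims() call before the scheduler is built, presumably because the default implementations now size their data and memory requests from tensor dimensions, so dims must be propagated first. A minimal C++ sketch of the same flow; the camelCase names and include paths are assumptions mirroring the snake_case Python bindings above:

#include <memory>
#include "aidge/graph/GraphView.hpp"      // assumed include paths
#include "aidge/scheduler/Scheduler.hpp"

// `graphView` is an already-connected graph, as in the test fixture.
void scheduleOnCpu(std::shared_ptr<Aidge::GraphView> graphView) {
    graphView->setDatatype(Aidge::DataType::Float32);
    graphView->setBackend("cpu");
    graphView->forwardDims();  // new step: propagate dims before scheduling
    Aidge::SequentialScheduler scheduler(graphView);
    scheduler.generateScheduling();
}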
@@ -53,152 +53,51 @@ class AddImplBackward_cpu<3>
template <DimIdx_t NUM>
class AddImpl_cpu : public OperatorImpl {
private:
const Add_Op<NUM>& mOp;
std::array<NbElts_t, NUM> mNbConsumedData = {};
std::array<NbElts_t, 1> mNbProducedData = {};
public:
AddImpl_cpu(const Add_Op<NUM>& op) : mOp(op) {}
public:
AddImpl_cpu(const Add_Op<NUM>& op) : OperatorImpl(op) {}
static std::unique_ptr<AddImpl_cpu<NUM>> create(const Add_Op<NUM>& op) {
return std::make_unique<AddImpl_cpu<NUM>>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Requires the whole tensors
const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
return std::accumulate(inputDims.begin(), inputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
}
NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final {
// for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
return 0;
}
NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& /*inputsSize*/) const override final {
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
(void) outputIdx;
const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
}
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final {
assert(inputIdx < mNbConsumedData.size());
return mNbConsumedData[inputIdx];
}
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final {
assert(outputIdx < mNbProducedData.size());
return mNbProducedData[outputIdx];
}
void updateConsummerProducer() override final;
void forward() override {
// nothing
}
void backward() override { printf("Not implemented yet.\n"); }
};
template <>
class AddImpl_cpu<1> : public OperatorImpl {
private:
const Add_Op<1>& mOp;
std::array<NbElts_t, 1> mNbConsumedData;
std::array<NbElts_t, 1> mNbProducedData;
public:
AddImpl_cpu(const Add_Op<1>& op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
public:
AddImpl_cpu(const Add_Op<1>& op) : OperatorImpl(op) {}
static std::unique_ptr<AddImpl_cpu<1>> create(const Add_Op<1>& op) {
return std::make_unique<AddImpl_cpu<1>>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t /*inputIdx*/) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/,
const std::vector<DimSize_t> &/*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override final;
NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
template <>
class AddImpl_cpu<2> : public OperatorImpl {
private:
const Add_Op<2>& mOp;
std::array<NbElts_t, 2> mNbConsumedData;
std::array<NbElts_t, 1> mNbProducedData;
public:
AddImpl_cpu(const Add_Op<2>& op) : mOp(op), mNbConsumedData({0, 0}), mNbProducedData({0}) {}
public:
AddImpl_cpu(const Add_Op<2>& op) : OperatorImpl(op) {}
static std::unique_ptr<AddImpl_cpu<2>> create(const Add_Op<2>& op) {
return std::make_unique<AddImpl_cpu<2>>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/,
const std::vector<DimSize_t>& /*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
template <>
class AddImpl_cpu<3> : public OperatorImpl {
private:
const Add_Op<3>& mOp;
std::array<NbElts_t, 3> mNbConsumedData;
std::array<NbElts_t, 1> mNbProducedData;
public:
AddImpl_cpu(const Add_Op<3>& op) : mOp(op), mNbConsumedData({0, 0, 0}), mNbProducedData({0}) {}
public:
AddImpl_cpu(const Add_Op<3>& op) : OperatorImpl(op) {}
static std::unique_ptr<AddImpl_cpu<3>> create(const Add_Op<3>& op) {
return std::make_unique<AddImpl_cpu<3>>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
namespace {
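Taken together, each CPU impl header in this merge reduces to roughly the following shape (reassembled from the Add pieces above; exactly which overrides each class retains is not fully visible in this diff view):

template <DimIdx_t NUM>
class AddImpl_cpu : public OperatorImpl {
public:
    // All scheduling bookkeeping is delegated to the OperatorImpl base.
    AddImpl_cpu(const Add_Op<NUM>& op) : OperatorImpl(op) {}

    static std::unique_ptr<AddImpl_cpu<NUM>> create(const Add_Op<NUM>& op) {
        return std::make_unique<AddImpl_cpu<NUM>>(op);
    }

    void forward() override;   // kernel dispatch stays backend-specific
    void backward() override;  // still the "Not implemented yet." stub
};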
@@ -36,29 +36,15 @@ class AvgPoolingImpl2DBackward_cpu
void(const AvgPooling_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, void *)> {};
class AvgPoolingImpl2D_cpu : public OperatorImpl {
private:
const AvgPooling_Op<2> &mOp;
std::array<NbElts_t, 1> mNbConsumedData;
std::array<NbElts_t, 1> mNbProducedData;
public:
AvgPoolingImpl2D_cpu(const AvgPooling_Op<2> &op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
public:
AvgPoolingImpl2D_cpu(const AvgPooling_Op<2> &op) : OperatorImpl(op) {}
static std::unique_ptr<AvgPoolingImpl2D_cpu> create(const AvgPooling_Op<2> &op) {
return std::make_unique<AvgPoolingImpl2D_cpu>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
namespace {
@@ -51,29 +51,15 @@ class BatchNormImpl2DBackward_cpu
void *)> {};
class BatchNormImpl2D_cpu : public OperatorImpl {
private:
const BatchNorm_Op<2> &mOp;
std::array<NbElts_t, 5> mNbConsumedData;
std::array<NbElts_t, 1> mNbProducedData;
public:
BatchNormImpl2D_cpu(const BatchNorm_Op<2> &op) : mOp(op), mNbConsumedData({0, 0, 0, 0, 0}), mNbProducedData({0}) {}
public:
BatchNormImpl2D_cpu(const BatchNorm_Op<2> &op) : OperatorImpl(op) {}
static std::unique_ptr<BatchNormImpl2D_cpu> create(const BatchNorm_Op<2> &op) {
return std::make_unique<BatchNormImpl2D_cpu>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
namespace {
@@ -38,29 +38,15 @@ class ConvDepthWiseImpl2DBackward_cpu
const void *, const void *, void *)> {};
class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
private:
const ConvDepthWise_Op<2> &mOp;
std::array<NbElts_t, 3> mNbConsumedData;
std::array<NbElts_t, 1> mNbProducedData;
public:
ConvDepthWiseImpl2D_cpu(const ConvDepthWise_Op<2> &op) : mOp(op), mNbConsumedData({0, 0, 0}), mNbProducedData({0}) {}
public:
ConvDepthWiseImpl2D_cpu(const ConvDepthWise_Op<2> &op) : OperatorImpl(op) {}
static std::unique_ptr<ConvDepthWiseImpl2D_cpu> create(const ConvDepthWise_Op<2> &op) {
return std::make_unique<ConvDepthWiseImpl2D_cpu>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
namespace {
@@ -38,29 +38,16 @@ class ConvImpl2DBackward_cpu
const void *, const void *, void *)> {};
class ConvImpl2D_cpu : public OperatorImpl {
private:
const Conv_Op<2> &mOp;
std::array<NbElts_t, 3> mNbConsumedData;
std::array<NbElts_t, 1> mNbProducedData;
public:
ConvImpl2D_cpu(const Conv_Op<2> &op) : mOp(op), mNbConsumedData({0, 0, 0}), mNbProducedData({0}) {}
ConvImpl2D_cpu(const Conv_Op<2>& op) : OperatorImpl(op) {}
static std::unique_ptr<ConvImpl2D_cpu> create(const Conv_Op<2> &op) {
return std::make_unique<ConvImpl2D_cpu>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
namespace {
@@ -34,26 +34,14 @@ class FCImplBackward_cpu : public Registrable<FCImplBackward_cpu,
const void *, const void *, const void *, void *)> {};
class FCImpl_cpu : public OperatorImpl {
private:
const FC_Op &mOp;
std::array<NbElts_t, 3> mNbConsumedData;
std::array<NbElts_t, 1> mNbProducedData;
public:
FCImpl_cpu(const FC_Op &op) : OperatorImpl(op) {}
public:
FCImpl_cpu(const FC_Op &op) : mOp(op), mNbConsumedData({0, 0, 0}), mNbProducedData({0}) {}
static std::unique_ptr<FCImpl_cpu> create(const FC_Op &op) {
return std::make_unique<FCImpl_cpu>(op);
}
static std::unique_ptr<FCImpl_cpu> create(const FC_Op &op) { return std::make_unique<FCImpl_cpu>(op); }
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
namespace {
@@ -31,28 +31,15 @@ class LeakyReLUImplBackward_cpu
};
class LeakyReLUImpl_cpu : public OperatorImpl {
private:
const LeakyReLU_Op& mOp;
std::array<NbElts_t, 1> mNbConsumedData;
std::array<NbElts_t, 1> mNbProducedData;
public:
LeakyReLUImpl_cpu(const LeakyReLU_Op& op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
public:
LeakyReLUImpl_cpu(const LeakyReLU_Op& op) : OperatorImpl(op) {}
static std::unique_ptr<LeakyReLUImpl_cpu> create(const LeakyReLU_Op& op) {
return std::make_unique<LeakyReLUImpl_cpu>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
namespace {
@@ -35,37 +35,14 @@ class MatMulImplBackward_cpu
const void *, const void *, void *)> {};
class MatMulImpl_cpu : public OperatorImpl {
private:
const MatMul_Op &mOp;
std::array<NbElts_t, 2> mNbConsumedData;
std::array<NbElts_t, 1> mNbProducedData;
public:
MatMulImpl_cpu(const MatMul_Op &op)
: mOp(op),
mNbConsumedData({0, 0}),
mNbProducedData({0})
{
// ctor
}
MatMulImpl_cpu(const MatMul_Op &op): OperatorImpl(op) {}
static std::unique_ptr<MatMulImpl_cpu> create(const MatMul_Op &op)
{
static std::unique_ptr<MatMulImpl_cpu> create(const MatMul_Op &op) {
return std::make_unique<MatMulImpl_cpu>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/,
const std::vector<DimSize_t> & /*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
namespace {
@@ -36,29 +36,15 @@ class MaxPoolingImpl2DBackward_cpu
void(const MaxPooling_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, void *)> {};
class MaxPoolingImpl2D_cpu : public OperatorImpl {
private:
const MaxPooling_Op<2> &mOp;
std::array<NbElts_t, 1> mNbConsumedData;
std::array<NbElts_t, 1> mNbProducedData;
public:
MaxPoolingImpl2D_cpu(const MaxPooling_Op<2> &op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
public:
MaxPoolingImpl2D_cpu(const MaxPooling_Op<2> &op) : OperatorImpl(op) {}
static std::unique_ptr<MaxPoolingImpl2D_cpu> create(const MaxPooling_Op<2> &op) {
return std::make_unique<MaxPoolingImpl2D_cpu>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
namespace {
@@ -38,29 +38,15 @@ class PadImpl2DBackward_cpu
void *)> {};
class PadImpl2D_cpu : public OperatorImpl {
private:
const Pad_Op<2> &mOp;
std::array<NbElts_t, 1> mNbConsumedData = {0};
std::array<NbElts_t, 1> mNbProducedData = {0};
public:
PadImpl2D_cpu(const Pad_Op<2> &op) : mOp(op) {}
public:
PadImpl2D_cpu(const Pad_Op<2> &op) : OperatorImpl(op) {}
static std::unique_ptr<PadImpl2D_cpu> create(const Pad_Op<2> &op) {
return std::make_unique<PadImpl2D_cpu>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
namespace {
@@ -21,31 +21,19 @@
namespace Aidge {
class ProducerImpl_cpu : public OperatorImpl {
private:
const Producer_Op &mOp;
public:
ProducerImpl_cpu(const Producer_Op &op) : mOp(op) {}
public:
ProducerImpl_cpu(const Producer_Op &op) : OperatorImpl(op) {}
static std::unique_ptr<ProducerImpl_cpu> create(const Producer_Op &op) {
return std::make_unique<ProducerImpl_cpu>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
namespace {
static Registrar<Producer_Op> registrarProducer1DImpl_cpu("cpu", Aidge::ProducerImpl_cpu::create);
static Registrar<Producer_Op> registrarProducerImpl_cpu("cpu", Aidge::ProducerImpl_cpu::create);
} // namespace
} // namespace Aidge
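The registrar line above is how the backend plugs into aidge_core: it maps the "cpu" key to the impl's create() factory (this hunk also drops the stale registrarProducer1DImpl_cpu name in favor of registrarProducerImpl_cpu). A sketch of the lookup side, assuming the Registrar API matches the create() signatures visible in this diff:

#include <memory>
#include "aidge/utils/Registrar.hpp"  // assumed include path

std::unique_ptr<Aidge::OperatorImpl> makeCpuImpl(const Aidge::Producer_Op& op) {
    // Fetch the factory registered under "cpu" and invoke it on the operator.
    return Aidge::Registrar<Aidge::Producer_Op>::create("cpu")(op);
}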
@@ -31,28 +31,15 @@ class ReLUImplBackward_cpu
};
class ReLUImpl_cpu : public OperatorImpl {
protected:
const ReLU_Op& mOp;
std::array<NbElts_t, 1> mNbConsumedData;
std::array<NbElts_t, 1> mNbProducedData;
public:
ReLUImpl_cpu(const ReLU_Op& op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
public:
ReLUImpl_cpu(const ReLU_Op& op) : OperatorImpl(op) {}
static std::unique_ptr<ReLUImpl_cpu> create(const ReLU_Op& op) {
return std::make_unique<ReLUImpl_cpu>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
namespace {
@@ -32,31 +32,15 @@ class ScalingImplBackward_cpu
};
class ScalingImpl_cpu : public OperatorImpl {
private:
const Scaling_Op& mOp;
std::array<NbElts_t, 1> mNbConsumedData;
std::array<NbElts_t, 1> mNbProducedData;
public:
ScalingImpl_cpu(const Scaling_Op& op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
public:
ScalingImpl_cpu(const Scaling_Op& op) : OperatorImpl(op) {}
static std::unique_ptr<ScalingImpl_cpu> create(const Scaling_Op& op) {
//std::cout << "ScalingImpl_cpu create" << std::endl;
return std::make_unique<ScalingImpl_cpu>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
namespace {
@@ -31,28 +31,15 @@ class SoftmaxImplBackward_cpu
};
class SoftmaxImpl_cpu : public OperatorImpl {
private:
const Softmax_Op& mOp;
std::array<NbElts_t, 1> mNbConsumedData;
std::array<NbElts_t, 1> mNbProducedData;
public:
SoftmaxImpl_cpu(const Softmax_Op& op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
public:
SoftmaxImpl_cpu(const Softmax_Op& op) : OperatorImpl(op) {}
static std::unique_ptr<SoftmaxImpl_cpu> create(const Softmax_Op& op) {
return std::make_unique<SoftmaxImpl_cpu>(op);
}
public:
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward() override;
void backward() override;
};
namespace {
@@ -25,38 +25,12 @@
// AddImpl_cpu<1>
//////////////////////////////////
Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbRequiredData(Aidge::IOIndex_t /*inputIdx*/) const {
assert(mOp.getInput(0) && "requires valid input");
// Requires the whole tensors
return static_cast<int>(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size());
}
Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
// for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
// this implementation can be in-place
return 0;
}
Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getRequiredMemory(const Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
// Requires the whole tensors, regardless of available data on inputs
return std::static_pointer_cast<Tensor>(mOp.getOutput(0))->size();
}
Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbConsumedData(Aidge::IOIndex_t /*inputIdx*/) const {
return mNbConsumedData[0];
}
Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
return mNbProducedData[0];
}
void Aidge::AddImpl_cpu<1>::updateConsummerProducer(){
// Update producer-consumer data
for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::AddImpl_cpu<1>::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
// Find the correct kernel type
@@ -68,11 +42,6 @@ void Aidge::AddImpl_cpu<1>::forward() {
kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
mOp.getInput(0)->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
}
void Aidge::AddImpl_cpu<1>::backward() {
printf("Not implemented yet.\n");
}
@@ -80,68 +49,26 @@ void Aidge::AddImpl_cpu<1>::backward() {
// AddImpl_cpu<2>
//////////////////////////////////
Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Requires the whole tensors
const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
return std::accumulate(inputDims.begin(), inputDims.end(),
NbElts_t(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
// for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
// this implementation of add can be in-place
return 0;
}
Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
(void) outputIdx; // avoid unused warning
const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
return std::accumulate(outputDims.begin(), outputDims.end(),
NbElts_t(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
}
Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
return mNbProducedData[0];
}
void Aidge::AddImpl_cpu<2>::updateConsummerProducer(){
// Update producer-consumer data
for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::AddImpl_cpu<2>::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
assert(mOp.mInputs[1] && "missing input #1");
assert(mOp.getInput(1) && "missing input #1");
// Find the correct kernel type
auto kernelFunc = Registrar<AddImplForward_cpu<2>>::create({
mOp.getInput(0)->dataType(),
mOp.mInputs[1]->dataType(),
mOp.getInput(1)->dataType(),
mOp.getOutput(0)->dataType()});
// Call kernel
kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
mOp.getInput(0)->getImpl()->rawPtr(),
mOp.mInputs[1]->getImpl()->rawPtr(),
mOp.getInput(1)->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
}
void Aidge::AddImpl_cpu<2>::backward() {
printf("Not implemented yet.\n");
}
@@ -149,70 +76,27 @@ void Aidge::AddImpl_cpu<2>::backward() {
// AddImpl_cpu<3>
//////////////////////////////////
Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Requires the whole tensors
const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
return std::accumulate(inputDims.begin(), inputDims.end(),
Aidge::NbElts_t(1), std::multiplies<Aidge::NbElts_t>());
}
Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
// for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
// this implementation of add can be in-place
return 0;
}
Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
(void) outputIdx; // avoid unused warning
const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
return std::accumulate(outputDims.begin(), outputDims.end(),
NbElts_t(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
return mNbConsumedData[inputIdx];
}
Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
assert(static_cast<std::size_t>(outputIdx) < mNbProducedData.size());
return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::AddImpl_cpu<3>::updateConsummerProducer(){
// Update producer-consumer data
for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::AddImpl_cpu<3>::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
assert(mOp.mInputs[1] && "missing input #1");
assert(mOp.mInputs[2] && "missing input #2");
assert(mOp.getInput(1) && "missing input #1");
assert(mOp.getInput(2) && "missing input #2");
// Find the correct kernel type
auto kernelFunc = Registrar<AddImplForward_cpu<3>>::create({
mOp.getInput(0)->dataType(),
mOp.mInputs[1]->dataType(),
mOp.mInputs[2]->dataType(),
mOp.getInput(1)->dataType(),
mOp.getInput(2)->dataType(),
mOp.getOutput(0)->dataType()});
// Call kernel
kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
mOp.getInput(0)->getImpl()->rawPtr(),
mOp.mInputs[1]->getImpl()->rawPtr(),
mOp.mInputs[2]->getImpl()->rawPtr(),
mOp.getInput(1)->getImpl()->rawPtr(),
mOp.getInput(2)->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
}
void Aidge::AddImpl_cpu<3>::backward() {
printf("Not implemented yet.\n");
}
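Each forward() above resolves its kernel through a second registrar, keyed on the tuple of input/output data types it reads at run time. A hedged sketch of the kernel side for the two-input case; the kernel name and registrar variable name are hypothetical, and only the float32 key is shown:

#include <cstddef>

template <class I1, class I2, class O>
void AddImpl2D_cpu_forward_kernel(const std::size_t nbElts,
                                  const void* input1_, const void* input2_,
                                  void* output_) {
    // Kernels receive type-erased pointers and cast to the registered types.
    const I1* input1 = static_cast<const I1*>(input1_);
    const I2* input2 = static_cast<const I2*>(input2_);
    O* output = static_cast<O*>(output_);
    for (std::size_t i = 0; i < nbElts; ++i)
        output[i] = static_cast<O>(input1[i] + input2[i]);
}

namespace {
// The key must match what forward() builds: {input dtypes..., output dtype}.
static Aidge::Registrar<Aidge::AddImplForward_cpu<2>> registrarAddForwardFloat32(
    {Aidge::DataType::Float32, Aidge::DataType::Float32, Aidge::DataType::Float32},
    AddImpl2D_cpu_forward_kernel<float, float, float>);
} // namespace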
@@ -20,49 +20,12 @@
#include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp"
#include "aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp"
Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Requires the whole tensors
const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
return std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
// for the direct convolution algorithm, convolutions can be in-place, if
// there is no padding!
// this implementation can be in-place
return 0;
}
Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
(void) outputIdx;
const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
}
Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::AvgPoolingImpl2D_cpu::updateConsummerProducer(){
// Update producer-consumer data
for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
// amount for a forward pass
mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::AvgPoolingImpl2D_cpu::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
// Find the correct kernel type
@@ -70,11 +33,8 @@ void Aidge::AvgPoolingImpl2D_cpu::forward() {
Registrar<AvgPoolingImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()});
// Call kernel
kernelFunc(mOp.getStaticAttributes(),
kernelFunc(dynamic_cast<const AvgPooling_Op<2>&>(mOp).getStaticAttributes(),
mOp.getInput(0)->dims<4>(),
mOp.getInput(0)->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
}
void Aidge::AvgPoolingImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
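The dynamic_cast added to each forward() in this merge follows directly from the constructor change: mOp is now the generic operator reference stored by OperatorImpl, so the static attributes must be recovered from the concrete operator type. The pattern, restated as a sketch of the call above:

// mOp has the base Operator type now; downcast once, then read attributes.
const auto& avgPoolOp = dynamic_cast<const Aidge::AvgPooling_Op<2>&>(mOp);
kernelFunc(avgPoolOp.getStaticAttributes(),  // attrs live on the concrete op
           avgPoolOp.getInput(0)->dims<4>(),
           avgPoolOp.getInput(0)->getImpl()->rawPtr(),
           avgPoolOp.getOutput(0)->getImpl()->rawPtr());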
@@ -19,50 +19,12 @@
#include "aidge/backend/cpu/operator/BatchNormImpl.hpp"
#include "aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp"
Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Requires the whole tensors
const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
return std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
// for the direct convolution algorithm, convolutions can be in-place, if
// there is no padding!
// this implementation can be in-place
return 0;
}
Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
(void) outputIdx;
const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
}
Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::BatchNormImpl2D_cpu::updateConsummerProducer(){
// Update producer-consumer data
for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
// amount for a forward pass
mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::BatchNormImpl2D_cpu::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
assert(mOp.getInput(1) && "missing input #1");
assert(mOp.getInput(2) && "missing input #2");
@@ -76,7 +38,7 @@ void Aidge::BatchNormImpl2D_cpu::forward() {
mOp.getOutput(0)->dataType()});
// Call kernel
kernelFunc(mOp.getStaticAttributes(),
kernelFunc(dynamic_cast<const BatchNorm_Op<2>&>(mOp).getStaticAttributes(),
mOp.getInput(0)->dims<4>(),
mOp.getInput(0)->getImpl()->rawPtr(),
mOp.getInput(1)->getImpl()->rawPtr(),
@@ -85,8 +47,4 @@
mOp.getInput(4)->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr(),
true);
}
void Aidge::BatchNormImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
@@ -21,50 +21,12 @@
#include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
#include "aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp"
Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Requires the whole tensors
const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
return std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
// for the direct convolution algorithm, convolutions can be in-place, if
// there is no padding!
// this implementation can be in-place
return 0;
}
Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
(void) outputIdx;
const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
}
Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::ConvDepthWiseImpl2D_cpu::updateConsummerProducer(){
// Update producer-consumer data
for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
// amount for a forward pass
mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
assert(mOp.getInput(1) && "missing input #1");
assert(mOp.getInput(2) && "missing input #2");
@@ -77,9 +39,7 @@ void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
mOp.getInput(2)->dataType(), mOp.getOutput(0)->dataType()});
// Call kernel
kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
kernelFunc(dynamic_cast<const ConvDepthWise_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
}
void Aidge::ConvDepthWiseImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
@@ -21,48 +21,11 @@
#include "aidge/backend/cpu/operator/ConvImpl.hpp"
#include "aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp"
Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Requires the whole tensors
const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
return std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
// for the direct convolution algorithm, convolutions can be in-place, if
// there is no padding!
// this implementation can be in-place
return 0;
}
Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
(void) outputIdx;
const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
}
Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::ConvImpl2D_cpu::updateConsummerProducer(){
// Update producer-consumer data
for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
// amount for a forward pass
mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::ConvImpl2D_cpu::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
@@ -75,11 +38,7 @@ void Aidge::ConvImpl2D_cpu::forward() {
mOp.getInput(2)->dataType(), mOp.getOutput(0)->dataType()});
// Call kernel
kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
kernelFunc(dynamic_cast<const Conv_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
}
void Aidge::ConvImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }