Skip to content
Snippets Groups Projects
Commit cbcd268c authored by Maxence Naud's avatar Maxence Naud
Browse files

Uniformize operators and apply new class OperatorTensor induced changes in every operator

- Change parent class from Operator to OperatorTensor
- Remove shared and non-customized functions from operators
- Uniformize operators' behaviour:
    - inputs are set to nullptr at initialization by default
    - parameters whose size can be computed at initialization are computed then (FC, ConvDepthWise)
    - Add many more checks in functions, using AIDGE_THROW_OR_ABORT()
parent 46e15f55
No related branches found
No related tags found
No related merge requests found
Showing
with 421 additions and 1499 deletions
...@@ -19,29 +19,25 @@ ...@@ -19,29 +19,25 @@
#include <vector> #include <vector>
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
namespace Aidge { namespace Aidge {
class Add_Op : public Operator, class Add_Op : public OperatorTensor,
public Registrable<Add_Op, std::string, std::unique_ptr<OperatorImpl>(const Add_Op&)> { public Registrable<Add_Op, std::string, std::unique_ptr<OperatorImpl>(const Add_Op&)> {
private:
// FIXME: change accessibility
std::vector<std::shared_ptr<Tensor>> mInputs;
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public: public:
static constexpr const char* Type = "Add"; static constexpr const char* Type = "Add";
Add_Op(const IOIndex_t nbIn) Add_Op(const IOIndex_t nbIn)
: Operator(Type), : OperatorTensor(Type, nbIn, 0, 1)
mInputs(std::vector<std::shared_ptr<Tensor>>(nbIn, std::make_shared<Tensor>()))
{ {
assert(nbIn > 0 && "Add should have at least one input"); if (nbIn == 0) {
setDatatype(DataType::Float32); AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
}
} }
/** /**
...@@ -49,14 +45,9 @@ public: ...@@ -49,14 +45,9 @@ public:
* @param op Operator to copy. * @param op Operator to copy.
*/ */
Add_Op(const Add_Op& op) Add_Op(const Add_Op& op)
: Operator(Type), : OperatorTensor(op)
mInputs(std::vector<std::shared_ptr<Tensor>>(op.nbInputs())),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<Add_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
assert(op.nbInputs() > 0 && "Add should have at least one input");
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Add_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -76,88 +67,25 @@ public: ...@@ -76,88 +67,25 @@ public:
// return *in; // return *in;
// } // }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Add operator.");
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInputs[0]->empty()) {
const auto expectedDims = mInputs[0]->dims();
std::size_t nonEmptyInputTensor = 1;
for (; nonEmptyInputTensor < nbInputs() && (!mInputs[nonEmptyInputTensor]->empty()); ++nonEmptyInputTensor) {
assert(expectedDims == mInputs[nonEmptyInputTensor]->dims());
}
if (nonEmptyInputTensor == nbInputs()) {
mOutput->resize(expectedDims);
}
}
}
bool outputDimsForwarded() const override final {
std::size_t forwarded = 0;
for (; forwarded < nbInputs() && (!mInputs[forwarded]->empty()); ++forwarded) {}
return ((forwarded==nbInputs()) && !(mOutput->empty()));
}
// void checkDims() const override final { // void checkDims() const override final {
// assert(outputDimsForwarded()); // assert(outputDimsForwarded());
// for (const auto& in : mInputs) { // for (const auto& in : mInputs) {
// assert(in->dims() == mOutput->dims()); // assert(in->dims() == mOutputs[0]->dims());
// } // }
// } // }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Add operator.");
return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Add operator.");
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "Add Operators has only 1 outputs");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Add operator.");
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string& name) override { void setBackend(const std::string& name) override {
mImpl = Registrar<Add_Op>::create(name)(*this); mImpl = Registrar<Add_Op>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround // FIXME: temporary workaround
for (std::size_t i = 0; i < nbInputs(); ++i) { for (std::size_t i = 0; i < nbInputs(); ++i) {
mInputs[i]->setBackend(name); getInput(i)->setBackend(name);
} }
} }
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
for (std::size_t i = 0; i < nbInputs(); ++i) {
mInputs[i]->setDatatype(datatype);
}
}
inline IOIndex_t nbInputs() const noexcept override final { return mInputs.size(); }
inline IOIndex_t nbDataInputs() const noexcept override final { return mInputs.size(); }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
return {"data_input_0", "data_input_n"}; return {"data_input_0", "data_input_n"};
} }
......
...@@ -50,22 +50,17 @@ public: ...@@ -50,22 +50,17 @@ public:
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
: OperatorTensor(Type, 1, 0, 1), : OperatorTensor(Type, 1, 0, 1),
Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims), Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
attr<AvgPoolingAttr::KernelDims>(kernel_dims)) { attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {}
setDataType(DataType::Float32);
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
AvgPooling_Op(const AvgPooling_Op<DIM>& op) AvgPooling_Op(const AvgPooling_Op<DIM>& op)
: OperatorTensor(Type, 1, 0, 1), : OperatorTensor(op),
Attributes_(op), Attributes_(op)
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
setDataType(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -78,18 +73,23 @@ public: ...@@ -78,18 +73,23 @@ public:
void computeOutputDims() override final { void computeOutputDims() override final {
if (!*mInputs[0]->empty()) { // check inputs have been associated
std::array<DimSize_t, DIM + 2> outputDims = {}; if (!getInput(0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
}
if (!(getInput(0)->empty())) {
std::array<DimSize_t, DIM + 2> outputDims;
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->dims<DIM+2>());
outputDims[0] = inputDims[0];
outputDims[1] = inputDims[1];
for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) { for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
outputDims[dim+2] = 1 + static_cast<DimSize_t>( outputDims[dim+2] = 1 + static_cast<DimSize_t>(
std::floor(static_cast<float>(*mInputs[0]->dims()[dim+2] - std::floor(static_cast<float>(inputDims[dim+2] -
this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) / this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim]))); static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
} }
outputDims[1] = *mInputs[0]->dims()[1]; getOutput(0)->resize(outputDims);
outputDims[0] = *mInputs[0]->dims()[0];
mOutputs[0]->resize(outputDims);
} }
} }
...@@ -132,10 +132,10 @@ public: ...@@ -132,10 +132,10 @@ public:
void setBackend(const std::string &name) override { void setBackend(const std::string &name) override {
mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this); mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround // FIXME: temporary workaround
mInput->setBackend(name); getInput(0)->setBackend(name);
} }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
......
...@@ -19,27 +19,20 @@ ...@@ -19,27 +19,20 @@
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp" #include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
namespace Aidge { namespace Aidge {
enum class BatchNormAttr { Epsilon, Momentum };
enum class BatchNormAttr { Epsilon, Momentum };
template <DimIdx_t DIM> template <DimIdx_t DIM>
class BatchNorm_Op : public Operator, class BatchNorm_Op : public OperatorTensor,
public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>, public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
public StaticAttributes<BatchNormAttr, float, float> { public StaticAttributes<BatchNormAttr, float, float> {
public: public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 5> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
std::make_shared<Tensor>(), std::make_shared<Tensor>(),
std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char *Type = "BatchNorm"; static constexpr const char *Type = "BatchNorm";
BatchNorm_Op() = delete; BatchNorm_Op() = delete;
...@@ -49,25 +42,19 @@ public: ...@@ -49,25 +42,19 @@ public:
using attr = typename Attributes_::template attr<e>; using attr = typename Attributes_::template attr<e>;
constexpr BatchNorm_Op(float epsilon, float momentum) constexpr BatchNorm_Op(float epsilon, float momentum)
: Operator(Type), : OperatorTensor(Type, 1, 4, 1),
Attributes_(attr<BatchNormAttr::Epsilon>(epsilon), Attributes_(attr<BatchNormAttr::Epsilon>(epsilon),
attr<BatchNormAttr::Momentum>(momentum)), attr<BatchNormAttr::Momentum>(momentum)) {}
mOutput(std::make_shared<Tensor>()) {
setDatatype(DataType::Float32);
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
BatchNorm_Op(const BatchNorm_Op<DIM>& op) BatchNorm_Op(const BatchNorm_Op<DIM>& op)
: Operator(Type), : OperatorTensor(op),
Attributes_(op), Attributes_(op)
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -87,83 +74,41 @@ public: ...@@ -87,83 +74,41 @@ public:
// return *in; // return *in;
// } // }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 5 && "operators supports only 5 inputs");
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final { void computeOutputDims() override final {
if (!mInputs[0]->empty()) { // check inputs have been associated
for (std::size_t i = nbDataInputs(); i < nbInputs(); ++i) { bool associated = true;
if(mInputs[i]->size() != mInputs[0]->dims()[1]) { for (IOIndex_t i = 0; i < nbInputs(); ++i) {
mInputs[i]->resize(std::array<DimSize_t, 1>({mInputs[0]->dims()[1]})); associated &= !(getInput(i)->empty());
}
if (associated) {
const DimSize_t nbChannels = getInput(0)->dims()[1];
for (std::size_t i = nbData(); i < nbInputs(); ++i) {
if(getInput(i)->size() != nbChannels) {
// /!\ Input size should be handled BEFORE calling this function
// This should raise an error
getInput(i)->resize(std::array<DimSize_t, 1>({getInput(0)->dims()[1]}));
} }
} }
mOutput->resize(mInputs[0]->dims()); mOutputs[0]->resize(getInput(0)->dims());
} }
} }
bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 5 && "operators supports only 5 inputs");
return *(mInputs[inputIdx].get()); }
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 5 && "BatchNorm Operators supports only 5 inputs");
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "BatchNorm Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 5 && "operators supports only 5 inputs");
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string &name) override { void setBackend(const std::string &name) override {
mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this); mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
mInputs[1]->setBackend(name);
mInputs[2]->setBackend(name);
mInputs[3]->setBackend(name);
mInputs[4]->setBackend(name);
}
void setDatatype(const DataType &datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround // FIXME: temporary workaround
mInputs[1]->setDatatype(datatype); getInput(1)->setBackend(name);
mInputs[2]->setDatatype(datatype); getInput(2)->setBackend(name);
mInputs[3]->setDatatype(datatype); getInput(3)->setBackend(name);
mInputs[4]->setDatatype(datatype); getInput(4)->setBackend(name);
} }
inline IOIndex_t nbInputs() const noexcept override final { return 5; } static const std::vector<std::string> getInputsName() {
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
return {"data_input", "scale", "shift", "mean", "variance"}; return {"data_input", "scale", "shift", "mean", "variance"};
} }
static const std::vector<std::string> getOutputsName(){ static const std::vector<std::string> getOutputsName() {
return {"data_output"}; return {"data_output"};
} }
}; };
...@@ -187,4 +132,4 @@ template <> ...@@ -187,4 +132,4 @@ template <>
const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" }; const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
} }
#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_ #endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#include <vector> #include <vector>
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/StaticAttributes.hpp"
...@@ -28,14 +28,9 @@ ...@@ -28,14 +28,9 @@
namespace Aidge { namespace Aidge {
enum class ConcatAttr { Axis }; enum class ConcatAttr { Axis };
class Concat_Op : public Operator, class Concat_Op : public OperatorTensor,
public Registrable<Concat_Op, std::string, std::unique_ptr<OperatorImpl>(const Concat_Op&)>, public Registrable<Concat_Op, std::string, std::unique_ptr<OperatorImpl>(const Concat_Op&)>,
public StaticAttributes<ConcatAttr, DimSize_t> { public StaticAttributes<ConcatAttr, DimSize_t> {
private:
// FIXME: change accessibility
std::vector<std::shared_ptr<Tensor>> mInputs;
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public: public:
static constexpr const char* Type = "Concat"; static constexpr const char* Type = "Concat";
...@@ -44,12 +39,12 @@ public: ...@@ -44,12 +39,12 @@ public:
using attr = typename Attributes_::template attr<e>; using attr = typename Attributes_::template attr<e>;
Concat_Op(const IOIndex_t nbIn, const DimSize_t axis) Concat_Op(const IOIndex_t nbIn, const DimSize_t axis)
: Operator(Type), : OperatorTensor(Type, nbIn, 0, 1),
mInputs(std::vector<std::shared_ptr<Tensor>>(nbIn, std::make_shared<Tensor>())),
Attributes_(attr<ConcatAttr::Axis>(axis)) Attributes_(attr<ConcatAttr::Axis>(axis))
{ {
assert(nbIn > 0 && "Concat should have at least one input"); if (nbIn == 0) {
setDatatype(DataType::Float32); AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
}
} }
/** /**
...@@ -57,15 +52,10 @@ public: ...@@ -57,15 +52,10 @@ public:
* @param op Operator to copy. * @param op Operator to copy.
*/ */
Concat_Op(const Concat_Op& op) Concat_Op(const Concat_Op& op)
: Operator(Type), : OperatorTensor(op),
Attributes_(op), Attributes_(op)
mInputs(std::vector<std::shared_ptr<Tensor>>(op.nbInputs(), std::make_shared<Tensor>())),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<Concat_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
assert(op.nbInputs() > 0 && "Concat should have at least one input");
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Concat_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -85,90 +75,32 @@ public: ...@@ -85,90 +75,32 @@ public:
// return *in; // return *in;
// } // }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Concat operator.");
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final { void computeOutputDims() override final {
bool computable = !(mInputs[0]->empty()) && (getAttr<ConcatAttr::Axis>() < mInputs[0]->nbDims());
for (const auto& input : mInputs) {
computable &= !(input->empty());
computable &= (input->nbDims() == mInputs[0]->nbDims());
}
// Every input is non-empty with the same number of dimensions // Every input is non-empty with the same number of dimensions
if (computable) { bool associated = (getInput(0) != nullptr);
auto outputDims = mInputs[0]->dims(); associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input
auto outputDims = getInput(0)->dims();
for (std::size_t i = 1; i < nbInputs(); ++i) { const auto firstInputNbDims = getInput(0) -> nbDims();
outputDims[getAttr<ConcatAttr::Axis>()] += mInputs[i]->dims()[getAttr<ConcatAttr::Axis>()]; for (IOIndex_t i = 1; i < nbInputs(); ++i) {
if (!getInput(i)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
}
associated &= (getInput(i)->nbDims() == firstInputNbDims);
for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) {
if (dim == getAttr<ConcatAttr::Axis>()) {
outputDims[dim] += getInput(i)->dims()[dim];
}
else {
associated &= (getInput(i)->dims()[dim] == outputDims[dim]);
}
} }
mOutput->resize(outputDims);
}
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
// void checkDims() const override final {
// assert(outputDimsForwarded());
// for (const auto& in : mInputs) {
// assert(in->dims() == mOutput->dims());
// }
// }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Concat operator.");
return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Concat operator.");
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "Concat Operators has only 1 outputs");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Concat operator.");
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string& name) override {
mImpl = Registrar<Concat_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
for (std::size_t i = 0; i < nbInputs(); ++i) {
mInputs[i]->setBackend(name);
} }
} if (associated) {
getOutput(0)->resize(outputDims);
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
for (std::size_t i = 0; i < nbInputs(); ++i) {
mInputs[i]->setDatatype(datatype);
} }
} }
inline IOIndex_t nbInputs() const noexcept override final { return mInputs.size(); }
inline IOIndex_t nbDataInputs() const noexcept override final { return mInputs.size(); }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
return {"data_input_0", "data_input_n"}; return {"data_input_0", "data_input_n"};
} }
......
...@@ -45,32 +45,27 @@ public: ...@@ -45,32 +45,27 @@ public:
template <ConvAttr e> template <ConvAttr e>
using attr = typename Attributes_::template attr<e>; using attr = typename Attributes_::template attr<e>;
constexpr Conv_Op(DimSize_t in_channels, constexpr Conv_Op(DimSize_t inChannels,
DimSize_t out_channels, DimSize_t outChannels,
const std::array<DimSize_t, DIM> &kernel_dims, const std::array<DimSize_t, DIM> &kernelDims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
: OperatorTensor(Type, 1, 2, 1), : OperatorTensor(Type, 1, 2, 1),
Attributes_(attr<ConvAttr::StrideDims>(stride_dims), Attributes_(attr<ConvAttr::StrideDims>(strideDims),
attr<ConvAttr::DilationDims>(dilation_dims), attr<ConvAttr::DilationDims>(dilationDims),
attr<ConvAttr::InChannels>(in_channels), attr<ConvAttr::InChannels>(inChannels),
attr<ConvAttr::OutChannels>(out_channels), attr<ConvAttr::OutChannels>(outChannels),
attr<ConvAttr::KernelDims>(kernel_dims)) { attr<ConvAttr::KernelDims>(kernelDims)) {}
setDataType(DataType::Float32);
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
Conv_Op(const Conv_Op<DIM>& op) Conv_Op(const Conv_Op<DIM>& op)
: OperatorTensor(Type, 1, 2, 1), : OperatorTensor(op),
Attributes_(op), Attributes_(op)
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
setDataType(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -95,8 +90,17 @@ public: ...@@ -95,8 +90,17 @@ public:
// } // }
void computeOutputDims() override final { void computeOutputDims() override final {
if (!mInputs[0]->empty()) { // check inputs have been associated
std::array<DimSize_t, DIM + 2> outputDims = {}; bool associated = true;
for (IOIndex_t i = 0; i < 3; ++i) {
if (!getInput(i)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
}
associated &= !(getInput(i)->empty());
}
if (associated) {
std::array<DimSize_t, DIM + 2> outputDims{};
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->dims<DIM+2>());
for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) { for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] * const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
...@@ -104,13 +108,13 @@ public: ...@@ -104,13 +108,13 @@ public:
1; 1;
outputDims[dim+2] = 1 + static_cast<DimSize_t>( outputDims[dim+2] = 1 + static_cast<DimSize_t>(
floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) / floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim]))); static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
} }
outputDims[1] = this->template getAttr<ConvAttr::OutChannels>(); outputDims[1] = this->template getAttr<ConvAttr::OutChannels>();
outputDims[0] = mInputs[0]->dims()[0]; outputDims[0] = inputDims[0];
mOutput->resize(outputDims); mOutputs[0]->resize(outputDims);
} }
} }
...@@ -167,11 +171,11 @@ public: ...@@ -167,11 +171,11 @@ public:
void setBackend(const std::string &name) override { void setBackend(const std::string &name) override {
mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this); mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround // FIXME: temporary workaround
mInputs[1]->setBackend(name); getInput(1)->setBackend(name);
mInputs[2]->setBackend(name); getInput(2)->setBackend(name);
} }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
...@@ -183,32 +187,32 @@ public: ...@@ -183,32 +187,32 @@ public:
}; };
template <std::array<DimSize_t, 1>::size_type DIM> template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Conv(DimSize_t in_channels, inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
DimSize_t out_channels, DimSize_t outChannels,
const std::array<DimSize_t, DIM> &kernel_dims, const std::array<DimSize_t, DIM> &kernelDims,
const std::string& name = "", const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) { const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in every cases // FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported"); static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), name); auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims), name);
// addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w"); // addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w");
addProducer(conv, 1, append(out_channels, append(in_channels, kernel_dims)), "w"); addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
addProducer(conv, 2, std::array<DimSize_t, 1>({out_channels}), "b"); addProducer(conv, 2, std::array<DimSize_t, 1>({outChannels}), "b");
return conv; return conv;
} }
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM> template <DimSize_t DIM>
inline std::shared_ptr<Node> Conv( inline std::shared_ptr<Node> Conv(
DimSize_t in_channels, DimSize_t inChannels,
DimSize_t out_channels, DimSize_t outChannels,
DimSize_t const (&kernel_dims)[DIM], DimSize_t const (&kernelDims)[DIM],
const std::string& name = "", const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) { const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported"); static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
return Conv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, dilation_dims); return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims);
} }
} // namespace Aidge } // namespace Aidge
...@@ -223,4 +227,4 @@ const char *const EnumStrings<Aidge::ConvAttr>::data[] = { ...@@ -223,4 +227,4 @@ const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
}; };
} }
#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */ #endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
\ No newline at end of file
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp" #include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
...@@ -29,20 +29,14 @@ namespace Aidge { ...@@ -29,20 +29,14 @@ namespace Aidge {
enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims }; enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
template <DimIdx_t DIM> template <DimIdx_t DIM>
class ConvDepthWise_Op : public Operator, class ConvDepthWise_Op : public OperatorTensor,
public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>, public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
public StaticAttributes<ConvDepthWiseAttr, public StaticAttributes<ConvDepthWiseAttr,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
DimSize_t, DimSize_t,
std::array<DimSize_t, DIM>> { std::array<DimSize_t, DIM>> {
public: public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char *Type = "ConvDepthWise"; static constexpr const char *Type = "ConvDepthWise";
ConvDepthWise_Op() = delete; ConvDepthWise_Op() = delete;
...@@ -58,26 +52,21 @@ class ConvDepthWise_Op : public Operator, ...@@ -58,26 +52,21 @@ class ConvDepthWise_Op : public Operator,
constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims, constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
: Operator(Type), : OperatorTensor(Type, 1, 2, 1),
Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims), Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
attr<ConvDepthWiseAttr::DilationDims>(dilation_dims), attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
attr<ConvDepthWiseAttr::Channels>(0), attr<ConvDepthWiseAttr::Channels>(0),
attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) { attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {}
setDatatype(DataType::Float32);
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op) ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op)
: Operator(Type), : OperatorTensor(op),
Attributes_(op), Attributes_(op)
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -88,16 +77,20 @@ class ConvDepthWise_Op : public Operator, ...@@ -88,16 +77,20 @@ class ConvDepthWise_Op : public Operator,
return std::make_shared<ConvDepthWise_Op<DIM>>(*this); return std::make_shared<ConvDepthWise_Op<DIM>>(*this);
} }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final { void computeOutputDims() override final {
if (!mInputs[0]->empty()) { // check inputs have been associated
// TODO : add a check of inputs dimensions ?
bool associated = true;
for (IOIndex_t i = 0; i < 3; ++i) {
if (!getInput(i)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
}
associated &= !(getInput(i)->empty());
}
if (associated) {
std::array<DimSize_t, DIM + 2> outputDims = {}; std::array<DimSize_t, DIM + 2> outputDims = {};
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->dims<DIM+2>());
for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) { for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] * const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
...@@ -105,10 +98,10 @@ class ConvDepthWise_Op : public Operator, ...@@ -105,10 +98,10 @@ class ConvDepthWise_Op : public Operator,
1; 1;
outputDims[dim+2] = 1 + static_cast<DimSize_t>( outputDims[dim+2] = 1 + static_cast<DimSize_t>(
floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) / floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim]))); static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
} }
this->template getAttr<ConvDepthWiseAttr::Channels>() = mInputs[0]->dims()[1]; this->template getAttr<ConvDepthWiseAttr::Channels>() = inputDims[1];
// std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>())); // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>()));
// if (mInputs[1]->empty()) { // if (mInputs[1]->empty()) {
// mInputs[1]->resize(weightDims); // mInputs[1]->resize(weightDims);
...@@ -116,14 +109,12 @@ class ConvDepthWise_Op : public Operator, ...@@ -116,14 +109,12 @@ class ConvDepthWise_Op : public Operator,
// if (mInputs[2]->empty()) { // if (mInputs[2]->empty()) {
// mInputs[2]->resize({mInputs[0]->dims()[1]}); // mInputs[2]->resize({mInputs[0]->dims()[1]});
// } // }
outputDims[1] = mInputs[0]->dims()[1]; outputDims[1] = inputDims[1];
outputDims[0] = mInputs[0]->dims()[0]; outputDims[0] = inputDims[0];
mOutput->resize(outputDims); mOutputs[0]->resize(outputDims);
} }
} }
bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override { // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
// if (outputIdx != 0) { // if (outputIdx != 0) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor."); // AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
...@@ -160,57 +151,15 @@ class ConvDepthWise_Op : public Operator, ...@@ -160,57 +151,15 @@ class ConvDepthWise_Op : public Operator,
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); // AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
// } // }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 3 && "ConvDepthWise Operators supports only 3 inputs");
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "ConvDepthWise Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string &name) override { void setBackend(const std::string &name) override {
mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this); mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
mInputs[1]->setBackend(name);
mInputs[2]->setBackend(name);
}
void setDatatype(const DataType &datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround // FIXME: temporary workaround
mInputs[0]->setDatatype(datatype); getInput(1)->setBackend(name);
mInputs[1]->setDatatype(datatype); getInput(2)->setBackend(name);
mInputs[2]->setDatatype(datatype);
} }
inline IOIndex_t nbInputs() const noexcept override final { return 3; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
return {"data_input", "weight", "bias"}; return {"data_input", "weight", "bias"};
} }
...@@ -220,27 +169,29 @@ class ConvDepthWise_Op : public Operator, ...@@ -220,27 +169,29 @@ class ConvDepthWise_Op : public Operator,
}; };
template <std::array<DimSize_t, 1>::size_type DIM> template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims, inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
const std::array<DimSize_t, DIM> &kernelDims,
const std::string& name = "", const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) { const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in every cases // FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported"); static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), name); auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims), name);
addProducer(convDW, 1, std::array<DimSize_t,0>({}), "w"); addProducer(convDW, 1, append(nbChannels, append(1, kernelDims)), "w");
addProducer(convDW, 2, std::array<DimSize_t,0>({}), "b"); addProducer(convDW, 2, std::array<DimSize_t, 1>({nbChannels}), "b");
return convDW; return convDW;
} }
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM> template <DimSize_t DIM>
inline std::shared_ptr<Node> ConvDepthWise( inline std::shared_ptr<Node> ConvDepthWise(
DimSize_t const (&kernel_dims)[DIM], const DimSize_t nbChannels,
DimSize_t const (&kernelDims)[DIM],
const std::string& name = "", const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) { const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported"); static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
return ConvDepthWise(to_array(kernel_dims), name, stride_dims, dilation_dims); return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims);
} }
} // namespace Aidge } // namespace Aidge
......
...@@ -17,42 +17,31 @@ ...@@ -17,42 +17,31 @@
#include <vector> #include <vector>
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
namespace Aidge { namespace Aidge {
class Div_Op : public Operator, class Div_Op : public OperatorTensor,
public Registrable<Div_Op, std::string, std::unique_ptr<OperatorImpl>(const Div_Op&)> { public Registrable<Div_Op, std::string, std::unique_ptr<OperatorImpl>(const Div_Op&)> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public: public:
static constexpr const char* Type = "Div"; static constexpr const char* Type = "Div";
Div_Op() Div_Op() : OperatorTensor(Type, 2, 0, 1) {}
: Operator(Type)
{
setDatatype(DataType::Float32);
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
Div_Op(const Div_Op& op) Div_Op(const Div_Op& op)
: Operator(Type), : OperatorTensor(op)
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -63,73 +52,15 @@ public: ...@@ -63,73 +52,15 @@ public:
return std::make_shared<Div_Op>(*this); return std::make_shared<Div_Op>(*this);
} }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInputs[0]->empty())
mOutput->resize(mInputs[0]->dims());
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx < 2) && "Div Operator has 2 inputs");
(void) inputIdx; // avoid unused warning
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Div Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string& name) override { void setBackend(const std::string& name) override {
mImpl = Registrar<Div_Op>::create(name)(*this); mImpl = Registrar<Div_Op>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
mInputs[0]->setBackend(name);
mInputs[1]->setBackend(name);
}
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround // FIXME: temporary workaround
mInputs[0]->setDatatype(datatype); getInput(0)->setBackend(name);
mInputs[1]->setDatatype(datatype); getInput(1)->setBackend(name);
} }
inline IOIndex_t nbInputs() const noexcept override final { return 2; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
return {"data_input"}; return {"data_input"};
} }
......
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp" #include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
...@@ -29,16 +29,11 @@ ...@@ -29,16 +29,11 @@
namespace Aidge { namespace Aidge {
enum class FCAttr { OutChannels, NoBias }; enum class FCAttr { OutChannels, NoBias };
class FC_Op : public Operator, class FC_Op : public OperatorTensor,
public Registrable<FC_Op, public Registrable<FC_Op,
std::string, std::string,
std::unique_ptr<OperatorImpl>(const FC_Op &)>, std::unique_ptr<OperatorImpl>(const FC_Op &)>,
public StaticAttributes<FCAttr, DimSize_t, bool> { public StaticAttributes<FCAttr, DimSize_t, bool> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(), std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public: public:
static constexpr const char* Type = "FC"; static constexpr const char* Type = "FC";
...@@ -48,26 +43,21 @@ public: ...@@ -48,26 +43,21 @@ public:
template <FCAttr e> using attr = typename Attributes_::template attr<e>; template <FCAttr e> using attr = typename Attributes_::template attr<e>;
FC_Op(DimSize_t out_channels, bool noBias) FC_Op(DimSize_t out_channels, bool noBias)
: Operator(Type), : OperatorTensor(Type, 1, 2, 1),
Attributes_( Attributes_(
attr<FCAttr::OutChannels>(out_channels), attr<FCAttr::OutChannels>(out_channels),
attr<FCAttr::NoBias>(noBias)) attr<FCAttr::NoBias>(noBias))
{ {}
setDatatype(DataType::Float32);
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
FC_Op(const FC_Op& op) FC_Op(const FC_Op& op)
: Operator(Type), : OperatorTensor(op),
Attributes_(op), Attributes_(op)
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -78,7 +68,7 @@ public: ...@@ -78,7 +68,7 @@ public:
return std::make_shared<FC_Op>(*this); return std::make_shared<FC_Op>(*this);
} }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
assert(inputIdx < 3 && "operators supports only 3 inputs"); assert(inputIdx < 3 && "operators supports only 3 inputs");
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type"); assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
if (inputIdx == 2) { if (inputIdx == 2) {
...@@ -86,78 +76,35 @@ public: ...@@ -86,78 +76,35 @@ public:
assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1); assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
} }
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data); mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
if (inputIdx == 0 && mInputs[0]->nbDims() == 1) if (inputIdx == 0 && getInput(0)->nbDims() == 1)
mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, mInputs[inputIdx]->size()})); mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, getInput(inputIdx)->size()}));
} }
void computeOutputDims() override final { void computeOutputDims() override final {
if (!mInputs[0]->empty()) { bool associated = true;
// <in_features**, out_channels> for (IOIndex_t i = 0; i < nbInputs(); ++i) {
std::array<DimSize_t, 2> weightDims = {this->template getAttr<FCAttr::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())}; if (!getInput(i)) {
// <out_channels, batch> AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template getAttr<FCAttr::OutChannels>()}; }
associated &= !(getInput(i)->empty());
mInputs[1]->resize(weightDims); }
mOutput->resize(outputDims); if (associated) {
// <batch, OutChannels>
mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()});
} }
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
return *(mInputs[inputIdx].get()); }
inline Tensor& output(const IOIndex_t /*inputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 3 && "FC Operators supports only 3 inputs");
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "FC Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
} }
void setBackend(const std::string& name) override { void setBackend(const std::string& name) override {
mImpl = Registrar<FC_Op>::create(name)(*this); mImpl = Registrar<FC_Op>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
mInputs[0]->setBackend(name);
mInputs[1]->setBackend(name);
mInputs[2]->setBackend(name);
}
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround // FIXME: temporary workaround
mInputs[0]->setDatatype(datatype); getInput(0)->setBackend(name);
mInputs[1]->setDatatype(datatype); getInput(1)->setBackend(name);
mInputs[2]->setDatatype(datatype); getInput(2)->setBackend(name);
} }
inline IOIndex_t nbInputs() const noexcept override final { return 3; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
return {"data_input", "weight", "bias"}; return {"data_input", "weight", "bias"};
} }
...@@ -166,11 +113,11 @@ public: ...@@ -166,11 +113,11 @@ public:
} }
}; };
inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "") { inline std::shared_ptr<Node> FC(DimSize_t inChannels, DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
// FIXME: properly handle default w&b initialization in every cases // FIXME: properly handle default w&b initialization in every cases
auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name); auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(outChannels, noBias), name);
addProducer(fc, 1, std::array<DimSize_t, 2>({out_channels, 1}), "w"); addProducer(fc, 1, std::array<DimSize_t, 2>({outChannels, inChannels}), "w");
addProducer(fc, 2, (noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({out_channels})), "b"); // already sets bias dims addProducer(fc, 2, (noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({outChannels})), "b"); // already sets bias dims
return fc; return fc;
} }
} // namespace Aidge } // namespace Aidge
...@@ -181,4 +128,4 @@ const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels", ...@@ -181,4 +128,4 @@ const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
"NoBias"}; "NoBias"};
} }
#endif /* AIDGE_CORE_OPERATOR_FC_H_ */ #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
\ No newline at end of file
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#include <cstring> #include <cstring>
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/DynamicAttributes.hpp" #include "aidge/utils/DynamicAttributes.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
...@@ -27,50 +27,26 @@ ...@@ -27,50 +27,26 @@
namespace Aidge { namespace Aidge {
class GenericOperator_Op class GenericOperator_Op
: public Operator, : public OperatorTensor,
public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>, public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>,
public DynamicAttributes { public DynamicAttributes {
private: private:
using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>; using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
IOIndex_t mNbDataIn;
IOIndex_t mNbIn;
IOIndex_t mNbOut;
std::vector<std::shared_ptr<Tensor>> mInputs;
std::vector<std::shared_ptr<Tensor>> mOutputs;
ComputeDimsFunc mComputeOutputDims; ComputeDimsFunc mComputeOutputDims;
public: public:
GenericOperator_Op(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut) GenericOperator_Op(const char *type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
: Operator(type), mNbDataIn(nbDataIn), mNbIn(nbIn), mNbOut(nbOut) : OperatorTensor(type, nbData, nbParam, nbOut)
{ {}
mInputs = std::vector<std::shared_ptr<Tensor>>(nbIn);
for (std::size_t i = 0; i < nbIn; ++i) {
mInputs[i] = std::make_shared<Tensor>();
}
mOutputs = std::vector<std::shared_ptr<Tensor>>(nbOut);
for (std::size_t i = 0; i < nbOut; ++i) {
mOutputs[i] = std::make_shared<Tensor>();
}
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
GenericOperator_Op(const GenericOperator_Op& op) GenericOperator_Op(const GenericOperator_Op& op)
: Operator(op.type().c_str()), mNbDataIn(op.mNbDataIn), mNbIn(op.mNbIn), mNbOut(op.mNbOut) : OperatorTensor(op)
{ {}
// cpy-ctor
mInputs = std::vector<std::shared_ptr<Tensor>>(mNbIn);
for (std::size_t i = 0; i < mNbIn; ++i) {
mInputs[i] = std::make_shared<Tensor>();
}
mOutputs = std::vector<std::shared_ptr<Tensor>>(mNbOut);
for (std::size_t i = 0; i < mNbOut; ++i) {
mOutputs[i] = std::make_shared<Tensor>(*op.mOutputs[i]);
}
}
/** /**
* @brief Clone the operator using its copy-constructor. * @brief Clone the operator using its copy-constructor.
...@@ -87,28 +63,19 @@ class GenericOperator_Op ...@@ -87,28 +63,19 @@ class GenericOperator_Op
mComputeOutputDims = func; mComputeOutputDims = func;
} }
// Override Virtual Opertor methods
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < mNbIn && "operators supports only x inputs");
if (strcmp(data->type(), Tensor::Type) == 0) {
// TODO: associate input only if of type Tensor, otherwise do nothing
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
}
void computeOutputDims() override final { void computeOutputDims() override final {
if (mComputeOutputDims) { if (mComputeOutputDims) {
std::vector<std::vector<size_t>> inputsDims(mNbIn, std::vector<size_t>()); std::vector<std::vector<size_t>> inputsDims(nbInputs(), std::vector<size_t>());
for (std::size_t i = 0; i < mNbIn; ++i) { for (std::size_t i = 0; i < nbInputs(); ++i) {
if (mInputs[i]) { if (getInput(i)) {
inputsDims[i] = mInputs[i]->dims(); inputsDims[i] = getInput(i)->dims();
} }
} }
const auto& outputsDims = mComputeOutputDims(inputsDims); const auto& outputsDims = mComputeOutputDims(inputsDims);
assert(outputsDims.size() == mNbOut && "The provided ComputeDimsFunc function returns the wrong number of outputs"); assert(outputsDims.size() == nbOutputs() && "The provided ComputeDimsFunc function returns the wrong number of outputs");
for (std::size_t i = 0; i < mNbOut; ++i) { for (std::size_t i = 0; i < nbOutputs(); ++i) {
mOutputs[i]->resize(outputsDims[i]); mOutputs[i]->resize(outputsDims[i]);
} }
} }
...@@ -127,47 +94,11 @@ class GenericOperator_Op ...@@ -127,47 +94,11 @@ class GenericOperator_Op
} }
} }
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx < mNbIn) && "input index out of range for this instance of GenericOperator");
printf("Info: using getRawInput() on a GenericOperator.\n");
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert((inputIdx < mNbIn) && "input index out of range for this instance of GenericOperator");
printf("Info: using input() on a GenericOperator.\n");
return *mInputs[inputIdx];
}
std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx < mNbIn) && "input index out of range for this instance of GenericOperator");
printf("Info: using getInput() on a GenericOperator.\n");
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx < mNbOut) && "output index out of range for this instance of GenericOperator");
printf("Info: using getOutput() on a GenericOperator.\n");
return mOutputs[outputIdx];
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx < mNbOut) && "output index out of range for this instance of GenericOperator");
printf("Info: using getRawOutput() on a GenericOperator.\n");
return std::static_pointer_cast<Data>(mOutputs[outputIdx]);
}
Tensor& output(const IOIndex_t outputIdx) const override final {
assert((outputIdx < mNbOut) && "output index out of range for this instance of GenericOperator");
printf("Info: using output() on a GenericOperator.\n");
return *mOutputs[outputIdx];
}
~GenericOperator_Op() = default; ~GenericOperator_Op() = default;
void setBackend(const std::string & /*name*/) override { printf("setBackend: not available yet.\n"); } void setBackend(const std::string & /*name*/) override { printf("setBackend: not available yet.\n"); }
void setDatatype(const DataType & /*datatype*/) override { printf("setDatatype: not available yet.\n"); } void setDataType(const DataType& /*datatype*/) const override { printf("setDataType: not available yet.\n"); }
void forward() override final { void forward() override final {
if(mImpl){ if(mImpl){
mImpl->forward(); mImpl->forward();
...@@ -182,9 +113,6 @@ class GenericOperator_Op ...@@ -182,9 +113,6 @@ class GenericOperator_Op
printf("backward: No implementation is linked.\n"); printf("backward: No implementation is linked.\n");
} }
} }
inline IOIndex_t nbInputs() const noexcept override final { return mNbIn; };
inline IOIndex_t nbDataInputs() const noexcept override final { return mNbDataIn; };
inline IOIndex_t nbOutputs() const noexcept override final { return mNbOut; };
}; };
/** /**
...@@ -197,9 +125,9 @@ class GenericOperator_Op ...@@ -197,9 +125,9 @@ class GenericOperator_Op
* @param name (optional) name of the Operator. * @param name (optional) name of the Operator.
* @return std::shared_ptr<Node> Node associated with the Generic Operator. * @return std::shared_ptr<Node> Node associated with the Generic Operator.
*/ */
inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut, inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
const std::string& name = "") { const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbDataIn, nbIn, nbOut), name); return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
} }
} // namespace Aidge } // namespace Aidge
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp" #include "aidge/data/Data.hpp"
...@@ -29,14 +29,9 @@ enum class LeakyReLUAttr { ...@@ -29,14 +29,9 @@ enum class LeakyReLUAttr {
NegativeSlope NegativeSlope
}; };
class LeakyReLU_Op : public Operator, class LeakyReLU_Op : public OperatorTensor,
public Registrable<LeakyReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const LeakyReLU_Op&)>, public Registrable<LeakyReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
public StaticAttributes<LeakyReLUAttr, float> { public StaticAttributes<LeakyReLUAttr, float> {
public:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public: public:
static constexpr const char* Type = "LeakyReLU"; static constexpr const char* Type = "LeakyReLU";
...@@ -46,25 +41,20 @@ public: ...@@ -46,25 +41,20 @@ public:
template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>; template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>;
LeakyReLU_Op(float negativeSlope) LeakyReLU_Op(float negativeSlope)
: Operator(Type), : OperatorTensor(Type, 1, 0, 1),
Attributes_( Attributes_(
attr<LeakyReLUAttr::NegativeSlope>(negativeSlope)) attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
{ {}
setDatatype(DataType::Float32);
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
LeakyReLU_Op(const LeakyReLU_Op& op) LeakyReLU_Op(const LeakyReLU_Op& op)
: Operator(Type), : OperatorTensor(op),
Attributes_(op), Attributes_(op)
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -75,69 +65,17 @@ public: ...@@ -75,69 +65,17 @@ public:
return std::make_shared<LeakyReLU_Op>(*this); return std::make_shared<LeakyReLU_Op>(*this);
} }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx == 0 && "operator supports only 1 input");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInput->empty())
mOutput->resize(mInput->dims());
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx == 0) && "LeakyReLU Operator has only 1 input");
(void) inputIdx; // avoid unused warning
return mInput;
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "LeakyReLU Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operator supports only 1 input");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInput);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
void setBackend(const std::string& name) override { void setBackend(const std::string& name) override {
mImpl = Registrar<LeakyReLU_Op>::create(name)(*this); mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround // FIXME: temporary workaround
mInput->setDatatype(datatype); getInput(0)->setBackend(name);
} }
inline IOIndex_t nbInputs() const noexcept override final { return 1; } static const std::vector<std::string> getInputsName(){
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
return {"data_input"}; return {"data_input"};
} }
static const std::vector<std::string> getOutputsName(){ static const std::vector<std::string> getOutputsName(){
......
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp" #include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
...@@ -29,15 +29,11 @@ ...@@ -29,15 +29,11 @@
namespace Aidge { namespace Aidge {
enum class MatMulAttr { OutChannels }; enum class MatMulAttr { OutChannels };
class MatMul_Op : public Operator, class MatMul_Op : public OperatorTensor,
public Registrable<MatMul_Op, public Registrable<MatMul_Op,
std::string, std::string,
std::unique_ptr<OperatorImpl>(const MatMul_Op &)>, std::unique_ptr<OperatorImpl>(const MatMul_Op &)>,
public StaticAttributes<MatMulAttr, DimSize_t> { public StaticAttributes<MatMulAttr, DimSize_t> {
public:
std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public: public:
static constexpr const char* Type = "MatMul"; static constexpr const char* Type = "MatMul";
...@@ -47,25 +43,20 @@ public: ...@@ -47,25 +43,20 @@ public:
template <MatMulAttr e> using attr = typename Attributes_::template attr<e>; template <MatMulAttr e> using attr = typename Attributes_::template attr<e>;
MatMul_Op(DimSize_t out_channels) MatMul_Op(DimSize_t out_channels)
: Operator(Type), : OperatorTensor(Type, 1, 1, 1),
Attributes_( Attributes_(
attr<MatMulAttr::OutChannels>(out_channels)) attr<MatMulAttr::OutChannels>(out_channels))
{ {}
setDatatype(DataType::Float32);
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
MatMul_Op(const MatMul_Op& op) MatMul_Op(const MatMul_Op& op)
: Operator(Type), : OperatorTensor(op),
Attributes_(op), Attributes_(op)
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<MatMul_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<MatMul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -76,78 +67,31 @@ public: ...@@ -76,78 +67,31 @@ public:
return std::make_shared<MatMul_Op>(*this); return std::make_shared<MatMul_Op>(*this);
} }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 2 && "operators supports only 2 inputs");
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final { void computeOutputDims() override final {
if (!mInputs[0]->empty()) { bool associated = true;
// <in_features**, out_channels> for (IOIndex_t i = 0; i < nbInputs(); ++i) {
std::array<DimSize_t, 2> weightDims = {this->template getAttr<MatMulAttr::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())}; if (!getInput(i)) {
// <out_channels, batch> AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template getAttr<MatMulAttr::OutChannels>()}; }
associated &= !(getInput(i)->empty());
mInputs[1]->resize(weightDims); }
mOutput->resize(outputDims); if (associated) {
// <batch, OutChannels>
mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<MatMulAttr::OutChannels>()});
} }
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 2 && "operators supports only 2 inputs");
return *(mInputs[inputIdx].get()); }
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 2 && "MatMul Operators has 2 inputs");
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "MatMul Operators has 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 2 && "operators supports only 2 inputs");
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
} }
void setBackend(const std::string& name) override { void setBackend(const std::string& name) override {
mImpl = Registrar<MatMul_Op>::create(name)(*this); mImpl = Registrar<MatMul_Op>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround // FIXME: temporary workaround
mInputs[0]->setBackend(name); getInput(0)->setBackend(name);
mInputs[1]->setBackend(name); getInput(1)->setBackend(name);
} }
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
mInputs[0]->setDatatype(datatype);
mInputs[1]->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return 2; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
return {"data_input", "weight"}; return {"data_input", "weight"};
} }
...@@ -156,10 +100,10 @@ public: ...@@ -156,10 +100,10 @@ public:
} }
}; };
inline std::shared_ptr<Node> MatMul(DimSize_t out_channels, const std::string& name = "") { inline std::shared_ptr<Node> MatMul(DimSize_t inChannels, DimSize_t outChannels, const std::string& name = "") {
// FIXME: properly handle default w initialization in every cases // FIXME: properly handle default w initialization in every cases
auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(out_channels), name); auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(outChannels), name);
addProducer(matmul, 1, std::array<DimSize_t, 2>({out_channels, 1}), "w"); addProducer(matmul, 1, std::array<DimSize_t, 2>({outChannels, inChannels}), "w");
return matmul; return matmul;
} }
} // namespace Aidge } // namespace Aidge
......
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp" #include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
...@@ -29,17 +29,12 @@ namespace Aidge { ...@@ -29,17 +29,12 @@ namespace Aidge {
enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode }; enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
template <DimIdx_t DIM> template <DimIdx_t DIM>
class MaxPooling_Op : public Operator, class MaxPooling_Op : public OperatorTensor,
public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>, public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
public StaticAttributes<MaxPoolingAttr, public StaticAttributes<MaxPoolingAttr,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
bool> { bool> {
private:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public: public:
static constexpr const char *Type = "MaxPooling"; static constexpr const char *Type = "MaxPooling";
...@@ -55,26 +50,21 @@ public: ...@@ -55,26 +50,21 @@ public:
constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims, constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
bool ceil_mode = false) bool ceil_mode = false)
: Operator(Type), : OperatorTensor(Type, 1, 0, 1),
Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims), Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
attr<MaxPoolingAttr::KernelDims>(kernel_dims), attr<MaxPoolingAttr::KernelDims>(kernel_dims),
attr<MaxPoolingAttr::CeilMode>(ceil_mode)), attr<MaxPoolingAttr::CeilMode>(ceil_mode))
mOutput(std::make_shared<Tensor>()) { {}
setDatatype(DataType::Float32);
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
MaxPooling_Op(const MaxPooling_Op<DIM>& op) MaxPooling_Op(const MaxPooling_Op<DIM>& op)
: Operator(Type), : OperatorTensor(op),
Attributes_(op), Attributes_(op)
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -85,17 +75,14 @@ public: ...@@ -85,17 +75,14 @@ public:
return std::make_shared<MaxPooling_Op<DIM>>(*this); return std::make_shared<MaxPooling_Op<DIM>>(*this);
} }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 1 && "operators supports only 3 inputs");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final { void computeOutputDims() override final {
if (!mInput->empty()) { if (!getInput(0)) {
std::array<DimSize_t, DIM + 2> outputDims = {}; AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
}
if (!(getInput(0)->empty())) {
std::array<DimSize_t, DIM + 2> outputDims{};
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->dims<DIM+2>());
std::function<float(float)> roundingFunction; std::function<float(float)> roundingFunction;
if (this->template getAttr<MaxPoolingAttr::CeilMode>()) { if (this->template getAttr<MaxPoolingAttr::CeilMode>()) {
...@@ -106,69 +93,25 @@ public: ...@@ -106,69 +93,25 @@ public:
for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) { for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
outputDims[dim+2] = 1 + static_cast<DimSize_t>( outputDims[dim+2] = 1 + static_cast<DimSize_t>(
roundingFunction(static_cast<float>(mInput->dims()[dim+2] - roundingFunction(static_cast<float>(inputDims[dim+2] -
this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) / this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim]))); static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
} }
outputDims[1] = mInput->dims()[1]; outputDims[1] = inputDims[1];
outputDims[0] = mInput->dims()[0]; outputDims[0] = inputDims[0];
mOutput->resize(outputDims); mOutputs[0]->resize(outputDims);
} }
} }
bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operators supports only 1 inputs");
(void) inputIdx; // avoid unused warning
return *(mInput.get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "MaxPooling Operators supports only 1 inputs");
(void) inputIdx; // avoid unused warning
return mInput;
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "MaxPooling Operators has only 1 outputs");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operators supports only 1 inputs");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInput);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string &name) override { void setBackend(const std::string &name) override {
mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this); mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType &datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround // FIXME: temporary workaround
mInput->setDatatype(datatype); getInput(0)->setBackend(name);
} }
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
return {"data_input"}; return {"data_input"};
} }
......
...@@ -12,26 +12,24 @@ ...@@ -12,26 +12,24 @@
#ifndef AIDGE_CORE_OPERATOR_METAOPERATOR_H_ #ifndef AIDGE_CORE_OPERATOR_METAOPERATOR_H_
#define AIDGE_CORE_OPERATOR_METAOPERATOR_H_ #define AIDGE_CORE_OPERATOR_METAOPERATOR_H_
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/graph/GraphView.hpp" #include "aidge/graph/GraphView.hpp"
#include "aidge/graph/OpArgs.hpp" #include "aidge/graph/OpArgs.hpp"
#include "aidge/scheduler/Scheduler.hpp" #include "aidge/scheduler/Scheduler.hpp"
namespace Aidge { namespace Aidge {
class MetaOperator_Op : public Operator, class MetaOperator_Op : public OperatorTensor,
public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)> { public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)> {
public: public:
std::vector<std::shared_ptr<Tensor>> mInputs; // outputs shared with micro-graph output Tensors
std::vector<std::shared_ptr<Tensor>> mOutputs; // These are shared with micro-graph outputs tensors
// Micro-graph handling: // Micro-graph handling:
std::shared_ptr<GraphView> mGraph; // Meta operator micro-graph std::shared_ptr<GraphView> mGraph; // Meta operator micro-graph
std::shared_ptr<SequentialScheduler> mScheduler; std::shared_ptr<SequentialScheduler> mScheduler;
// Need to store an ordored list of input/output operators for the micro-graph, // Need to store an ordored list of input/output operators for the micro-graph,
// because input/output nodes in a GraphView are unordered. // because input/output nodes in a GraphView are unordered.
// TODO: refactor GraphView to handle ordered input/output? // TODO: refactor GraphView to handle ordered input/output?
std::vector<std::pair<std::shared_ptr<Operator>, IOIndex_t>> mInputOps; std::vector<std::pair<std::shared_ptr<OperatorTensor>, IOIndex_t>> mInputOps;
std::vector<std::pair<std::shared_ptr<Operator>, IOIndex_t>> mOutputOps; std::vector<std::pair<std::shared_ptr<OperatorTensor>, IOIndex_t>> mOutputOps;
public: public:
MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph, MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph,
...@@ -43,11 +41,9 @@ public: ...@@ -43,11 +41,9 @@ public:
* @param op Operator to copy. * @param op Operator to copy.
*/ */
MetaOperator_Op(const MetaOperator_Op& op) MetaOperator_Op(const MetaOperator_Op& op)
: Operator(op.type().c_str()), : OperatorTensor(op),
mGraph(op.mGraph->clone()) mGraph(op.mGraph->clone())
{ {}
// cpy-ctor
}
/** /**
* @brief Clone the operator using its copy-constructor. * @brief Clone the operator using its copy-constructor.
...@@ -65,7 +61,7 @@ public: ...@@ -65,7 +61,7 @@ public:
return mScheduler; return mScheduler;
} }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type"); assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
const auto& inputOp = mInputOps[inputIdx]; const auto& inputOp = mInputOps[inputIdx];
...@@ -86,38 +82,6 @@ public: ...@@ -86,38 +82,6 @@ public:
} }
} }
bool outputDimsForwarded() const override final { return !(mOutputs[0]->empty()); }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx < mInputs.size() && "inputIdx out of range");
return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t outputIdx) const override final {
assert(outputIdx < mOutputs.size() && "outputIdx out of range");
return *(mOutputs[outputIdx].get());
}
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < mInputs.size() && "inputIdx out of range");
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx < mOutputs.size() && "outputIdx out of range");
return mOutputs[outputIdx];
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < mInputs.size() && "inputIdx out of range");
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx < mOutputs.size() && "outputIdx out of range");
return std::static_pointer_cast<Data>(mOutputs[outputIdx]);
}
void setBackend(const std::string &name) override { void setBackend(const std::string &name) override {
if (Registrar<MetaOperator_Op>::exists({name, type()})) { if (Registrar<MetaOperator_Op>::exists({name, type()})) {
...@@ -131,17 +95,13 @@ public: ...@@ -131,17 +95,13 @@ public:
mGraph->setBackend(name); mGraph->setBackend(name);
} }
void setDatatype(const DataType &datatype) override { void setDataType(const DataType &datatype) const override {
// The micro-graph should always be set to the right data type, since it // The micro-graph should always be set to the right data type, since it
// shares input/output tensors. // shares input/output tensors.
// Input/output tensors data type are updated here. // Input/output tensors data type are updated here.
mGraph->setDatatype(datatype); mGraph->setDataType(datatype);
} }
inline IOIndex_t nbInputs() const noexcept override final { return mGraph->inputs().size(); }
inline IOIndex_t nbDataInputs() const noexcept override final { return mGraph->dataInputs().size(); }
inline IOIndex_t nbOutputs() const noexcept override final { return mGraph->outputs().size(); }
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override; NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override; NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override;
NbElts_t getNbProducedData(IOIndex_t outputIdx) const override; NbElts_t getNbProducedData(IOIndex_t outputIdx) const override;
......
...@@ -12,47 +12,38 @@ ...@@ -12,47 +12,38 @@
#ifndef AIDGE_CORE_OPERATOR_MUL_H_ #ifndef AIDGE_CORE_OPERATOR_MUL_H_
#define AIDGE_CORE_OPERATOR_MUL_H_ #define AIDGE_CORE_OPERATOR_MUL_H_
#include <cassert>
#include <memory> #include <memory>
#include <string>
#include <vector> #include <vector>
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
namespace Aidge { namespace Aidge {
class Mul_Op : public Operator, /**
* @brief Tensor element-wise multiplication.
*/
class Mul_Op : public OperatorTensor,
public Registrable<Mul_Op, std::string, std::unique_ptr<OperatorImpl>(const Mul_Op&)> { public Registrable<Mul_Op, std::string, std::unique_ptr<OperatorImpl>(const Mul_Op&)> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public: public:
static constexpr const char* Type = "Mul"; static constexpr const char* Type = "Mul";
Mul_Op() Mul_Op() : OperatorTensor(Type, 2, 0, 1) {}
: Operator(Type)
{
setDatatype(DataType::Float32);
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
* but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
Mul_Op(const Mul_Op& op) Mul_Op(const Mul_Op& op)
: Operator(Type), : OperatorTensor(op)
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<Mul_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Mul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -63,73 +54,16 @@ public: ...@@ -63,73 +54,16 @@ public:
return std::make_shared<Mul_Op>(*this); return std::make_shared<Mul_Op>(*this);
} }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInputs[0]->empty())
mOutput->resize(mInputs[0]->dims());
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx < 2) && "Mul Operator has 2 inputs");
(void) inputIdx; // avoid unused warning
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Mul Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string& name) override { void setBackend(const std::string& name) override {
mImpl = Registrar<Mul_Op>::create(name)(*this); mImpl = Registrar<Mul_Op>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
mInputs[0]->setBackend(name);
mInputs[1]->setBackend(name);
}
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround // FIXME: temporary workaround
mInputs[0]->setDatatype(datatype); getInput(0)->setBackend(name);
mInputs[1]->setDatatype(datatype); getInput(1)->setBackend(name);
} }
inline IOIndex_t nbInputs() const noexcept override final { return 2; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
return {"data_input"}; return {"data_input"};
} }
...@@ -141,6 +75,6 @@ public: ...@@ -141,6 +75,6 @@ public:
inline std::shared_ptr<Node> Mul(const std::string& name = "") { inline std::shared_ptr<Node> Mul(const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Mul_Op>(), name); return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
} }
} } // namespace Aidge
#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */ #endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
\ No newline at end of file
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp" #include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
...@@ -30,17 +30,12 @@ enum class PadAttr { BeginEndBorders, BorderType, BorderValue }; ...@@ -30,17 +30,12 @@ enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
enum class PadBorderType { Constant, Edge, Reflect, Wrap }; enum class PadBorderType { Constant, Edge, Reflect, Wrap };
template <DimIdx_t DIM> template <DimIdx_t DIM>
class Pad_Op : public Operator, class Pad_Op : public OperatorTensor,
public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>, public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
public StaticAttributes<PadAttr, public StaticAttributes<PadAttr,
std::array<DimSize_t, 2*DIM>, std::array<DimSize_t, 2*DIM>,
PadBorderType, PadBorderType,
double> { double> {
private:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public: public:
static constexpr const char *Type = "Pad"; static constexpr const char *Type = "Pad";
...@@ -56,25 +51,19 @@ public: ...@@ -56,25 +51,19 @@ public:
constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples, constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
const PadBorderType &borderType = PadBorderType::Constant, const PadBorderType &borderType = PadBorderType::Constant,
double borderValue = 0.0) double borderValue = 0.0)
: Operator(Type), : OperatorTensor(Type, 1, 0, 1),
Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples), Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples),
attr<PadAttr::BorderType>(borderType), attr<PadAttr::BorderType>(borderType),
attr<PadAttr::BorderValue>(borderValue)) { attr<PadAttr::BorderValue>(borderValue)) {}
setDatatype(DataType::Float32);
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
Pad_Op(const Pad_Op& op) Pad_Op(const Pad_Op& op)
: Operator(Type), : OperatorTensor(op),
Attributes_(op), Attributes_(op)
mOutput(std::make_shared<Tensor>(*op.mOutput)) {}
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
}
/** /**
* @brief Clone the operator using its copy-constructor. * @brief Clone the operator using its copy-constructor.
...@@ -84,82 +73,38 @@ public: ...@@ -84,82 +73,38 @@ public:
return std::make_shared<Pad_Op<DIM>>(*this); return std::make_shared<Pad_Op<DIM>>(*this);
} }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 1 && "operators supports only 3 inputs");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final { void computeOutputDims() override final {
if (!mInput->empty()) { bool associated = true;
std::array<DimSize_t, DIM + 2> outputDims = {}; for (IOIndex_t i = 0; i < nbInputs(); ++i) {
if (!getInput(i)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
}
associated &= !(getInput(i)->empty());
}
if (associated) {
std::array<DimSize_t, DIM + 2> outputDims{};
const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->dims<DIM+2>();
for (std::size_t dim = 0; dim < DIM; ++dim) { for (std::size_t dim = 0; dim < DIM; ++dim) {
outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[2*dim] outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
+ mInput->dims()[dim+2] + inputDims[dim+2]
+ this->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1]; + this->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
} }
outputDims[1] = mInput->dims()[1]; outputDims[1] = inputDims[1];
outputDims[0] = mInput->dims()[0]; outputDims[0] = inputDims[0];
mOutput->resize(outputDims); mOutputs[0]->resize(outputDims);
} }
} }
bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operators supports only 1 inputs");
(void) inputIdx; // avoid unused warning
return *(mInput.get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "Pad Operators supports only 1 inputs");
(void) inputIdx; // avoid unused warning
return mInput;
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "Pad Operators has only 1 outputs");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operators supports only 1 inputs");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInput);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string &name) override { void setBackend(const std::string &name) override {
mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this); mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType &datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround // FIXME: temporary workaround
mInput->setDatatype(datatype); getInput(0)->setBackend(name);
} }
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
return {"data_input"}; return {"data_input"};
} }
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
#include <vector> #include <vector>
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp" #include "aidge/data/Data.hpp"
...@@ -26,33 +26,21 @@ ...@@ -26,33 +26,21 @@
namespace Aidge { namespace Aidge {
class Pow_Op : public Operator, class Pow_Op : public OperatorTensor,
public Registrable<Pow_Op, std::string, std::unique_ptr<OperatorImpl>(const Pow_Op&)> { public Registrable<Pow_Op, std::string, std::unique_ptr<OperatorImpl>(const Pow_Op&)> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public: public:
static constexpr const char* Type = "Pow"; static constexpr const char* Type = "Pow";
Pow_Op() Pow_Op() : OperatorTensor(Type, 2, 0, 1) {}
: Operator(Type)
{
setDatatype(DataType::Float32);
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
Pow_Op(const Pow_Op& op) Pow_Op(const Pow_Op& op)
: Operator(Type), : OperatorTensor(op)
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<Pow_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Pow_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -63,73 +51,16 @@ public: ...@@ -63,73 +51,16 @@ public:
return std::make_shared<Pow_Op>(*this); return std::make_shared<Pow_Op>(*this);
} }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInputs[0]->empty())
mOutput->resize(mInputs[0]->dims());
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx < 2) && "Pow Operator has 2 inputs");
(void) inputIdx; // avoid unused warning
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Pow Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string& name) override { void setBackend(const std::string& name) override {
mImpl = Registrar<Pow_Op>::create(name)(*this); mImpl = Registrar<Pow_Op>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
mInputs[0]->setBackend(name);
mInputs[1]->setBackend(name);
}
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround // FIXME: temporary workaround
mInputs[0]->setDatatype(datatype); getInput(0)->setBackend(name);
mInputs[1]->setDatatype(datatype); getInput(1)->setBackend(name);
} }
inline IOIndex_t nbInputs() const noexcept override final { return 2; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
return {"data_input"}; return {"data_input"};
} }
...@@ -141,6 +72,6 @@ public: ...@@ -141,6 +72,6 @@ public:
inline std::shared_ptr<Node> Pow(const std::string& name = "") { inline std::shared_ptr<Node> Pow(const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Pow_Op>(), name); return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
} }
} } // namespace Aidge
#endif /* AIDGE_CORE_OPERATOR_POW_H_ */ #endif /* AIDGE_CORE_OPERATOR_POW_H_ */
\ No newline at end of file
...@@ -18,49 +18,40 @@ ...@@ -18,49 +18,40 @@
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
namespace Aidge { namespace Aidge {
class Producer_Op class Producer_Op
: public Operator, : public OperatorTensor,
public Registrable<Producer_Op, std::string, std::unique_ptr<OperatorImpl>( public Registrable<Producer_Op, std::string, std::unique_ptr<OperatorImpl>(
const Producer_Op &)> { const Producer_Op &)> {
private:
std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public: public:
static constexpr const char* Type = "Producer"; static constexpr const char* Type = "Producer";
template <std::size_t DIM> template <std::size_t DIM>
Producer_Op(const std::array<DimSize_t, DIM>& dims) Producer_Op(const std::array<DimSize_t, DIM>& dims)
: Operator(Type) : OperatorTensor(Type, 0, 0, 1)
{ {
//ctor mOutputs[0]->resize(dims);
setDatatype(DataType::Float32);
mOutput->resize(dims);
} }
Producer_Op(const std::shared_ptr<Tensor> tensor) Producer_Op(const std::shared_ptr<Tensor> tensor)
: Operator(Type), : OperatorTensor(Type, 0, 0, 1)
mOutput(tensor)
{ {
setDatatype(tensor->dataType()); mOutputs[0] = tensor; // copy the pointer of the Tensor
} }
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op OperatorTensor to copy.
*/ */
Producer_Op(const Producer_Op& op) Producer_Op(const Producer_Op& op)
: Operator(Type), : OperatorTensor(op)
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Producer_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -71,8 +62,8 @@ public: ...@@ -71,8 +62,8 @@ public:
return std::make_shared<Producer_Op>(*this); return std::make_shared<Producer_Op>(*this);
} }
void associateInput(const IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final { void associateInput(const IOIndex_t /*inputIdx*/, const std::shared_ptr<Data>& /*data*/) override final {
assert(false && "Producer operator takes no input"); AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input.");
} }
/** /**
...@@ -81,8 +72,8 @@ public: ...@@ -81,8 +72,8 @@ public:
* *
* @param newOutput Tensor containing the values to copy * @param newOutput Tensor containing the values to copy
*/ */
void setOutputTensor(const Tensor& newOutput) { void setOutput(const std::shared_ptr<Tensor>& newOutput) {
*mOutput = newOutput; mOutputs[0] = newOutput;
} }
void computeOutputDims() override final {} void computeOutputDims() override final {}
...@@ -90,48 +81,13 @@ public: ...@@ -90,48 +81,13 @@ public:
bool outputDimsForwarded() const override final {return true;} bool outputDimsForwarded() const override final {return true;}
[[noreturn]] inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
assert(false);
exit(-1);
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t /*inputIdx*/) const override final {
assert(false && "Producer Operator has no input");
return nullptr;
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Producer Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t /*inputIdx*/) const override final {
assert(false && "Producer operator takes no input");
return nullptr;
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
inline const std::vector<DimSize_t> dims() const noexcept { return mOutput->dims(); }
void setBackend(const std::string& name) override { void setBackend(const std::string& name) override {
mImpl = Registrar<Producer_Op>::create(name)(*this); mImpl = Registrar<Producer_Op>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
}
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
} }
inline IOIndex_t nbInputs() const noexcept override final { return 0; };
inline IOIndex_t nbDataInputs() const noexcept override final { return 0; };
inline IOIndex_t nbOutputs() const noexcept override final { return 1; };
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
return {}; return {};
} }
...@@ -181,4 +137,4 @@ void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, Dim ...@@ -181,4 +137,4 @@ void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, Dim
} }
} // namespace Aidge } // namespace Aidge
#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */ #endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
\ No newline at end of file
...@@ -17,42 +17,29 @@ ...@@ -17,42 +17,29 @@
#include <vector> #include <vector>
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
namespace Aidge { namespace Aidge {
class ReLU_Op : public Operator, class ReLU_Op : public OperatorTensor,
public Registrable<ReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const ReLU_Op&)> { public Registrable<ReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const ReLU_Op&)> {
public:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public: public:
static constexpr const char* Type = "ReLU"; static constexpr const char* Type = "ReLU";
ReLU_Op() ReLU_Op() : OperatorTensor(Type, 1, 0, 1) {}
: Operator(Type)
{
setDatatype(DataType::Float32);
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
ReLU_Op(const ReLU_Op& op) ReLU_Op(const ReLU_Op& op)
: Operator(Type), : OperatorTensor(op)
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<ReLU_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<ReLU_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -63,68 +50,15 @@ public: ...@@ -63,68 +50,15 @@ public:
return std::make_shared<ReLU_Op>(*this); return std::make_shared<ReLU_Op>(*this);
} }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx == 0 && "operator supports only 1 input");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInput->empty())
mOutput->resize(mInput->dims());
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx == 0) && "ReLU Operator has only 1 input");
(void) inputIdx; // avoid unused warning
return mInput;
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "ReLU Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operator supports only 1 input");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInput);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string& name) override { void setBackend(const std::string& name) override {
mImpl = Registrar<ReLU_Op>::create(name)(*this); mImpl = Registrar<ReLU_Op>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround // FIXME: temporary workaround
mInput->setDatatype(datatype); getInput(0)->setBackend(name);
} }
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
return {"data_input"}; return {"data_input"};
} }
...@@ -138,4 +72,4 @@ inline std::shared_ptr<Node> ReLU(const std::string& name = "") { ...@@ -138,4 +72,4 @@ inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
} }
} }
#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */ #endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
\ No newline at end of file
...@@ -15,14 +15,11 @@ ...@@ -15,14 +15,11 @@
#include <vector> #include <vector>
#include <memory> #include <memory>
#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
...@@ -31,14 +28,9 @@ enum class ScalingAttr { ...@@ -31,14 +28,9 @@ enum class ScalingAttr {
scalingFactor, quantizedNbBits, isOutputUnsigned scalingFactor, quantizedNbBits, isOutputUnsigned
}; };
class Scaling_Op : public Operator, class Scaling_Op : public OperatorTensor,
public Registrable<Scaling_Op, std::string, std::unique_ptr<OperatorImpl>(const Scaling_Op&)>, public Registrable<Scaling_Op, std::string, std::unique_ptr<OperatorImpl>(const Scaling_Op&)>,
public StaticAttributes<ScalingAttr, float, size_t, bool> { public StaticAttributes<ScalingAttr, float, size_t, bool> {
public:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public: public:
static constexpr const char* Type = "Scaling"; static constexpr const char* Type = "Scaling";
...@@ -48,27 +40,22 @@ public: ...@@ -48,27 +40,22 @@ public:
template <ScalingAttr e> using attr = typename Attributes_::template attr<e>; template <ScalingAttr e> using attr = typename Attributes_::template attr<e>;
Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned) Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
: Operator(Type), : OperatorTensor(Type, 1, 0, 1),
Attributes_( Attributes_(
attr<ScalingAttr::scalingFactor>(scalingFactor), attr<ScalingAttr::scalingFactor>(scalingFactor),
attr<ScalingAttr::quantizedNbBits>(nbBits), attr<ScalingAttr::quantizedNbBits>(nbBits),
attr<ScalingAttr::isOutputUnsigned>(isOutputUnsigned)) { attr<ScalingAttr::isOutputUnsigned>(isOutputUnsigned))
{}
setDatatype(DataType::Float32);
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
Scaling_Op(const Scaling_Op& op) Scaling_Op(const Scaling_Op& op)
: Operator(Type), : OperatorTensor(op),
Attributes_(op), Attributes_(op)
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<Scaling_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Scaling_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
} }
/** /**
...@@ -79,79 +66,17 @@ public: ...@@ -79,79 +66,17 @@ public:
return std::make_shared<Scaling_Op>(*this); return std::make_shared<Scaling_Op>(*this);
} }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx == 0 && "operator supports only 1 input");
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
(void) inputIdx; //avoid unused warning
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInput->empty())
mOutput->resize(mInput->dims());
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert((inputIdx == 0) && "Scaling Operator has only 1 input");
(void) inputIdx; // avoid unused warning
return *(mInput.get());
}
inline Tensor& output(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Scaling Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return *(mOutput.get());
}
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx == 0) && "Scaling Operator has only 1 input");
(void) inputIdx; // avoid unused warning
return mInput;
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Scaling Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operator supports only 1 input");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInput);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning;
return mOutput;
}
void setBackend(const std::string& name) override { void setBackend(const std::string& name) override {
mImpl = Registrar<Scaling_Op>::create(name)(*this); mImpl = Registrar<Scaling_Op>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround // FIXME: temporary workaround
mInput->setBackend(name); mInputs[0]->setBackend(name);
} }
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround static const std::vector<std::string> getInputsName() {
mInput->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
return {"data_input"}; return {"data_input"};
} }
static const std::vector<std::string> getOutputsName(){ static const std::vector<std::string> getOutputsName() {
return {"data_output"}; return {"data_output"};
} }
}; };
...@@ -164,8 +89,7 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri ...@@ -164,8 +89,7 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri
inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, std::size_t quantizedNbBits=8, bool isOutputUnsigned=true, const std::string& name = "") { inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, std::size_t quantizedNbBits=8, bool isOutputUnsigned=true, const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name); return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name);
} }
} // namespace Aidge
}
namespace { namespace {
template <> template <>
...@@ -173,4 +97,4 @@ const char* const EnumStrings<Aidge::ScalingAttr>::data[] ...@@ -173,4 +97,4 @@ const char* const EnumStrings<Aidge::ScalingAttr>::data[]
= {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"}; = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"};
} }
#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */ #endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
\ No newline at end of file
...@@ -16,10 +16,9 @@ ...@@ -16,10 +16,9 @@
#include <vector> #include <vector>
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
...@@ -29,14 +28,9 @@ enum class SliceAttr { Beginning, SliceDims }; ...@@ -29,14 +28,9 @@ enum class SliceAttr { Beginning, SliceDims };
template <DimIdx_t DIM> template <DimIdx_t DIM>
class Slice_Op class Slice_Op
: public Operator, : public OperatorTensor,
public Registrable<Slice_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op<DIM> &)>, public Registrable<Slice_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op<DIM> &)>,
public StaticAttributes<SliceAttr, std::size_t, std::array<DimSize_t, DIM>> { public StaticAttributes<SliceAttr, std::size_t, std::array<DimSize_t, DIM>> {
public:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public: public:
static constexpr const char *Type = "Slice"; static constexpr const char *Type = "Slice";
...@@ -47,12 +41,10 @@ public: ...@@ -47,12 +41,10 @@ public:
using attr = typename Attributes_::template attr<e>; using attr = typename Attributes_::template attr<e>;
Slice_Op(std::size_t beginningPos, std::array<DimSize_t, DIM> sliceDims) Slice_Op(std::size_t beginningPos, std::array<DimSize_t, DIM> sliceDims)
: Operator(Type), : OperatorTensor(Type, 1, 0, 1),
Attributes_(attr<SliceAttr::Beginning>(beginningPos), Attributes_(attr<SliceAttr::Beginning>(beginningPos),
attr<SliceAttr::SliceDims>(sliceDims)) attr<SliceAttr::SliceDims>(sliceDims))
{ {}
setDatatype(DataType::Float32);
}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
...@@ -60,13 +52,10 @@ public: ...@@ -60,13 +52,10 @@ public:
* @param op Operator to copy. * @param op Operator to copy.
*/ */
Slice_Op(const Slice_Op &op) Slice_Op(const Slice_Op &op)
: Operator(Type), : OperatorTensor(op),
Attributes_(op), Attributes_(op)
mOutput(std::make_shared<Tensor>(*op.mOutput))
{ {
// cpy-ctor mImpl = op.mImpl ? Registrar<Slice_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this)
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Slice_Op<DIM>>::create(mOutput->getImpl()->backend())(*this)
: nullptr; : nullptr;
} }
...@@ -77,91 +66,49 @@ public: ...@@ -77,91 +66,49 @@ public:
*/ */
std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); } std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx == 0 && "operator supports only 1 input");
(void)inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final { void computeOutputDims() override final {
if (!mInput->empty()) { if (!getInput(0) || (getInput(0)->empty())) {
// Check input dimensions is compatible with slice dimensions AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
if (mInput->nbDims() != DIM) { }
printf("Error: input and slice dimensions are not the same size.\n"); // Check input dimensions is compatible with slice dimensions
exit(-1); if (getInput(0)->nbDims() != DIM) {
} AIDGE_THROW_OR_ABORT(std::runtime_error, "Error: input and slice dimensions are not the same size.");
std::array<DimSize_t, DIM> outputDims; }
std::array<DimSize_t, DIM> outputDims;
// Check that the sliced Tensor is actually part of the input Tensor const std::array<DimSize_t, DIM> inputDims = getInput(0)->dims<DIM>();
// For a 5*5 tensor ('x') and a 3*3 slice kernel ('o'):
// xxxxx xxxxx // Check that the sliced Tensor is actually part of the input Tensor
// xxxxx xxxxx // For a 5*5 tensor ('x') and a 3*3 slice kernel ('o'):
// xxooo --> ok xxxoo --> out of bound // xxxxx xxxxx
// xxooo xxxoo // xxxxx xxxxx
// xxooo xxxoo // xxooo --> ok xxxoo --> out of bound
std::vector<std::size_t> beginningCoords = mInput->getCoord(this->template getAttr<SliceAttr::Beginning>()); // xxooo xxxoo
for (std::size_t i = 0; i < DIM; ++i) { // xxooo xxxoo
if (beginningCoords[i] + this->template getAttr<SliceAttr::SliceDims>()[i] > mInput->dims()[i]) { std::vector<std::size_t> beginningCoords = mInputs[0]->getCoord(this->template getAttr<SliceAttr::Beginning>());
printf("ROI of Slice operator out of bounds"); for (std::size_t i = 0; i < DIM; ++i) {
exit(-1); if (beginningCoords[i] + this->template getAttr<SliceAttr::SliceDims>()[i] > inputDims[i]) {
} else { AIDGE_THROW_OR_ABORT(std::runtime_error, "ROI of Slice operator out of bounds");
outputDims[i] = this->template getAttr<SliceAttr::SliceDims>()[i]; } else {
} outputDims[i] = this->template getAttr<SliceAttr::SliceDims>()[i];
} }
mOutput->resize(outputDims);
} }
} mOutputs[0]->resize(outputDims);
bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
inline Tensor &input(const IOIndex_t /*inputIdx*/) const override final {
return *(mInput.get());
}
inline Tensor &output(const IOIndex_t /*outputIdx*/) const override final {
return *(mOutput.get());
}
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx == 0) && "Slice Operator has only 1 input");
(void)inputIdx; // avoid unused warning
return mInput;
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Slice Operator has only 1 output");
(void)outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operator supports only 1 input");
(void)inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInput);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void)outputIdx; // avoid unused warning
return mOutput;
} }
void setBackend(const std::string &name) { void setBackend(const std::string &name) {
mImpl = Registrar<Slice_Op>::create(name)(*this); mImpl = Registrar<Slice_Op>::create(name)(*this);
mOutput->setBackend(name); mOutputs[0]->setBackend(name);
// FIXME: temporary workaround // FIXME: temporary workaround
mInput->setBackend(name); getInput(0)->setBackend(name);
} }
void setDatatype(const DataType &datatype) {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround static const std::vector<std::string> getInputsName(){
mInput->setDatatype(datatype); return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
} }
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
}; };
template <std::size_t DIM> template <std::size_t DIM>
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment