Skip to content
Snippets Groups Projects
Commit db15f04f authored by Maxence Naud's avatar Maxence Naud
Browse files

Merge branch 'OperatorTensor' into 'main'

Remove Operator reference to Tensor

See merge request !46
parents 3cd94d47 a986a64d
No related branches found
No related tags found
2 merge requests!46Remove Operator reference to Tensor,!20Draft: Introduction of Tiling
Pipeline #34825 passed
Showing
with 522 additions and 654 deletions
......@@ -37,15 +37,15 @@ class ExportNode(ABC):
for idx, parent_node in enumerate(self.node.get_parents()):
self.inputs.append(parent_node)
if parent_node is not None:
self.inputs_dims.append(self.operator.input(idx).dims())
self.inputs_dims.append(self.operator.get_input(idx).dims())
else:
self.inputs_dims.append(None)
for idx, child_node in enumerate(self.node.get_children()):
self.outputs.append(child_node)
# Dirty hot fix, change it quickly
self.outputs_dims.append(self.operator.output(0).dims())
self.outputs_dims.append(self.operator.get_output(0).dims())
@abstractmethod
def export(self, export_folder:str, list_configs:list):
......
......@@ -16,14 +16,14 @@ class test_operator_binding(unittest.TestCase):
Can be removed in a later stage of the development.
"""
def setUp(self):
self.generic_operator = aidge_core.GenericOperator("FakeConv", 1, 1, 1).get_operator()
self.generic_operator = aidge_core.GenericOperator("FakeConv", 1, 0, 1).get_operator()
def tearDown(self):
pass
def test_default_name(self):
op_type = "Conv"
gop = aidge_core.GenericOperator(op_type, 1, 1, 1, "FictiveName")
gop = aidge_core.GenericOperator(op_type, 1, 0, 1, "FictiveName")
# check node name is not operator type
self.assertNotEqual(gop.name(), "Conv")
# check node name is not default
......@@ -95,12 +95,12 @@ class test_operator_binding(unittest.TestCase):
def test_compute_output_dims(self):
in_dims=[25, 25]
input = aidge_core.Producer(in_dims, name="In")
genOp = aidge_core.GenericOperator("genOp", 1, 1, 1, name="genOp")
genOp = aidge_core.GenericOperator("genOp", 1, 0, 1, name="genOp")
_ = aidge_core.sequential([input, genOp])
self.assertListEqual(genOp.get_operator().output(0).dims(), [])
self.assertListEqual(genOp.get_operator().get_output(0).dims(), [])
genOp.get_operator().set_compute_output_dims(lambda x:x)
genOp.get_operator().compute_output_dims()
self.assertListEqual(genOp.get_operator().output(0).dims(), in_dims)
self.assertListEqual(genOp.get_operator().get_output(0).dims(), in_dims)
def test_set_impl(self):
......@@ -116,7 +116,7 @@ class test_operator_binding(unittest.TestCase):
"""
self.idx += 1
generic_node = aidge_core.GenericOperator("Relu", 1, 1, 1, name="myReLu")
generic_node = aidge_core.GenericOperator("Relu", 1, 0, 1, name="myReLu")
generic_op = generic_node.get_operator()
customImpl = PythonCustomImpl(generic_op)
......
......@@ -32,15 +32,17 @@ class test_attributes(unittest.TestCase):
self.assertEqual(conv_op.get_attr("KernelDims"), k_dims)
def test_fc(self):
in_channels = 4
out_channels = 8
nb_bias = True
fc_op = aidge_core.FC(out_channels, nb_bias).get_operator()
fc_op = aidge_core.FC(in_channels, out_channels, nb_bias).get_operator()
self.assertEqual(fc_op.get_attr("OutChannels"), out_channels)
self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
def test_matmul(self):
in_channels = 4
out_channels = 8
matmul_op = aidge_core.MatMul(out_channels).get_operator()
matmul_op = aidge_core.MatMul(in_channels, out_channels).get_operator()
self.assertEqual(matmul_op.get_attr("OutChannels"), out_channels)
def test_producer_1D(self):
......
......@@ -22,8 +22,8 @@ class test_recipies(unittest.TestCase):
def test_remove_flatten(self):
graph_view = aidge_core.sequential([
aidge_core.GenericOperator("Flatten", 1, 1, 1, name="Flatten0"),
aidge_core.FC(50, name='0')
aidge_core.GenericOperator("Flatten", 1, 0, 1, name="Flatten0"),
aidge_core.FC(10, 50, name='0')
])
old_nodes = graph_view.get_nodes()
aidge_core.remove_flatten(graph_view)
......@@ -33,10 +33,10 @@ class test_recipies(unittest.TestCase):
self.assertTrue(all([i in old_nodes for i in graph_view.get_nodes()]))
def test_fuse_matmul_add(self):
matmul0 = aidge_core.GenericOperator("MatMul", 1, 2, 1, name="MatMul0")
add0 = aidge_core.Add(name="Add0")
matmul1 = aidge_core.GenericOperator("MatMul", 1, 2, 1, name="MatMul1")
add1 = aidge_core.Add(name="Add1")
matmul0 = aidge_core.MatMul(1, 1, name="MatMul0")
add0 = aidge_core.Add(2, name="Add0")
matmul1 = aidge_core.MatMul(1, 1, name="MatMul1")
add1 = aidge_core.Add(2, name="Add1")
graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
......
......@@ -14,8 +14,10 @@
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Connector.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Node.hpp"
......@@ -25,9 +27,11 @@
#include "aidge/graphmatching/SeqStm.hpp"
#include "aidge/graphmatching/StmFactory.hpp"
#include "aidge/graphmatching/Utile.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/operator/AvgPooling.hpp"
#include "aidge/operator/BatchNorm.hpp"
#include "aidge/operator/Concat.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/ConvDepthWise.hpp"
#include "aidge/operator/Div.hpp"
......@@ -44,14 +48,18 @@
#include "aidge/operator/Pow.hpp"
#include "aidge/operator/ReLU.hpp"
#include "aidge/operator/Scaling.hpp"
#include "aidge/operator/Slice.hpp"
#include "aidge/operator/Softmax.hpp"
#include "aidge/operator/Sqrt.hpp"
#include "aidge/operator/Sub.hpp"
#include "aidge/scheduler/Scheduler.hpp"
#include "aidge/recipies/Recipies.hpp"
#include "aidge/utils/Attributes.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/DynamicAttributes.hpp"
#include "aidge/utils/Recipies.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
//#include "aidge/utilsParsing/AstNode.hpp"
......
......@@ -299,7 +299,7 @@ class Tensor : public Data,
*/
Tensor &operator=(const Tensor &t) {
resize(t.dims());
setDatatype(t.dataType());
setDataType(t.dataType());
if (t.hasImpl()) {
setBackend(t.mImpl->backend());
mImpl->copy(t.mImpl->rawPtr(), size());
......@@ -362,7 +362,7 @@ class Tensor : public Data,
* if the Tensor has already been initialized.
* @param dt DataType.
*/
void setDatatype(const DataType dt) {
void setDataType(const DataType dt) {
if (mImpl && (dataType() != dt)) {
// get ptr before changing Tensor backend or the type difference will trigger a warning
const void *data = mImpl->rawPtr();
......
......@@ -83,4 +83,4 @@ class Connector {
std::shared_ptr<GraphView> generateGraph(std::vector<Connector> ctors);
} // namespace Aidge
#endif /* AIDGE_CORE_GRAPH_CONNECTOR_H_ */
#endif /* AIDGE_CORE_GRAPH_CONNECTOR_H_ */
\ No newline at end of file
......@@ -162,6 +162,21 @@ public:
std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> outputs(
std::string nodeName) const;
/**
* @brief Assert Datatype, Backend, data format and dimensions along the GraphView are coherent.
* If not, apply the required transformations.
* @details Sets the GraphView ready for computation in four steps:
* 1 - Assert input Tensors' datatype is compatible with each Operator's datatype.
* If not, a conversion Operator is inserted.
* 2 - Assert input Tensors' backend is compatible with each Operator's backend.
* If not, add a Transmitter Operator.
* 3 - Assert data format (NCHW, NHWC, ...) of each Operator's input Tensor is
* compatible with the selected kernel.
* If not, add a Transpose Operator.
* 4 - Propagate Tensor dimensions through the consecutive Operators.
*/
void compile(const std::string& backend, const Aidge::DataType datatype);
/**
* @brief Compute dimensions of input/output Tensors for each Operator of the
* GraphView object's Nodes.
......@@ -171,7 +186,7 @@ public:
/** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
void setBackend(const std::string &backend);
/** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
void setDatatype(const DataType &datatype);
void setDataType(const DataType &datatype);
///////////////////////////////////////////////////////
// TOPOLOGY
......
......@@ -163,13 +163,6 @@ public:
return std::pair<NodePtr, IOIndex_t>(mParents[inID], mIdOutParents[inID]);
}
/**
* @brief Set fix value for the specified input by creating a Producer wrapping the given Tensor.
*
* @param idx Input index.
* @param tensor Constant Tensor to add as parent for specified index.
*/
void setInput(const IOIndex_t idx, const std::shared_ptr<Tensor> tensor);
/**
* @brief Get the lowest index in the InputData Parent list equal to the
......@@ -178,9 +171,9 @@ public:
*/
inline IOIndex_t getFirstFreeDataInput() const {
IOIndex_t i = 0;
for (; (i < nbDataInputs()) && (input(i).second != gk_IODefaultIndex); ++i) {}
// assert((i<nbDataInputs()) && "No free data input for Node");
return (i < nbDataInputs()) ? i : gk_IODefaultIndex;
for (; (i < nbData()) && (input(i).second != gk_IODefaultIndex); ++i) {}
// assert((i<nbData()) && "No free data input for Node");
return (i < nbData()) ? i : gk_IODefaultIndex;
}
......@@ -214,8 +207,8 @@ public:
* @details [data, data, weight, bias] => 2
* @return IOIndex_t
*/
inline IOIndex_t nbDataInputs() const noexcept {
return getOperator()->nbDataInputs();
inline IOIndex_t nbData() const noexcept {
return getOperator()->nbData();
}
/**
......
......@@ -21,8 +21,6 @@ class GraphParser{
*/
GraphParser(const std::string gRegexExpressions);
virtual ~GraphParser() = default;
/**
* @brief AST graph creation function
* @return The AST tree
......@@ -31,7 +29,7 @@ class GraphParser{
/**
* @brief get the query that is used in the parsing
* @brief get the query that is used in the parsing
* @return query
*/
const std::string getQuery();
......
#ifndef AIDGE_CORE_MATCH_RESULT_H_
#define AIDGE_CORE_MATCH_RESULT_H_
#include <cstddef>
#include <map>
#include <memory>
#include <string>
#include <set>
#include <vector>
#include <map>
#include "aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp"
#include "aidge/graph/Node.hpp"
......@@ -12,23 +14,25 @@
namespace Aidge{
/**
* @brief Contains the result of one match and the associated key, the query and the start node
* @brief Contains the result of one match and the associated key, the query and the start node
*/
class MatchSolution{
private:
std::map<std::string,std::set<NodePtr>> mSolution;
std::map<std::string, std::set<NodePtr>> mSolution;
const std::string mQueryFrom;
const std::vector<NodePtr> mStartNode;
public:
MatchSolution(std::vector<std::shared_ptr<FsmRunTimeContext>>& precedence,const std::string query,const std::vector<NodePtr> startNode);
const std::set<NodePtr> & at(const std::string key);
const std::set<NodePtr> getAll();
inline const std::set<NodePtr>& at(const std::string key) {
return mSolution[key];
}
const std::set<NodePtr> getAll();
bool areCompatible(std::shared_ptr<MatchSolution> solution);
const std::string& getQuery(){ return mQueryFrom ;}
const std::vector<NodePtr>& getStartNode(){ return mStartNode ;}
inline const std::string& getQuery() const noexcept { return mQueryFrom; }
inline const std::vector<NodePtr>& getStartNode() const noexcept { return mStartNode; }
};
......@@ -59,15 +63,15 @@ public:
MatchResult(std::vector<std::shared_ptr<FsmRunTimeContext>> allValid, std::size_t nbSubStm,
const std::string& query,const std::vector<NodePtr>& startNodes);
virtual ~MatchResult() = default;
/**
* @brief get the set of nodes matching an expression
* @return the set of nodes of the graph corresponding to an expression
*/
std::shared_ptr<MatchSolution> getBiggerSolution(void);
std::vector<std::shared_ptr<MatchSolution>> getSolutions(void);
inline std::vector<std::shared_ptr<MatchSolution>> getSolutions(void) const noexcept {
return mSolve;
}
private:
......@@ -75,7 +79,6 @@ private:
* @brief recursive function used to initialize mSolve in the constructor
*
**/
void _generateCombination( std::size_t idxSubStm, std::vector<std::shared_ptr<FsmRunTimeContext>>& precedence,const std::string& query,const std::vector<NodePtr>& startNodes);
};
......
......@@ -38,7 +38,6 @@ class ConditionalParser{
*/
ConditionalParser(const std::string ConditionalExpressions);
virtual ~ConditionalParser() = default;
/**
* @brief AST graph creation function
* @return The AST tree
......
......@@ -16,52 +16,38 @@
#include <vector>
#include <cmath>
#include <memory>
#include <array>
#include <vector>
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
namespace Aidge {
template <std::size_t NUM>
class Add_Op : public Operator,
public Registrable<Add_Op<NUM>, std::string, std::unique_ptr<OperatorImpl>(const Add_Op<NUM>&)> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, NUM> mInputs;
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
class Add_Op : public OperatorTensor,
public Registrable<Add_Op, std::string, std::unique_ptr<OperatorImpl>(const Add_Op&)> {
public:
static constexpr const char* Type = "Add";
constexpr Add_Op()
: Operator(Type)
Add_Op(const IOIndex_t nbIn)
: OperatorTensor(Type, nbIn, 0, 1)
{
assert(NUM > 0 && "Add should have at least one input");
for (std::size_t i = 0; i<NUM; ++i) {
mInputs[i] = std::make_shared<Tensor>();
if (nbIn == 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
}
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Add_Op(const Add_Op<NUM>& op)
: Operator(Type),
mOutput(std::make_shared<Tensor>(*op.mOutput))
Add_Op(const Add_Op& op)
: OperatorTensor(op)
{
// cpy-ctor
assert(NUM > 0 && "Add should have at least one input");
for (std::size_t i = 0; i<NUM; ++i) {
mInputs[i] = std::make_shared<Tensor>();
}
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Add_Op<NUM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
mImpl = op.mImpl ? Registrar<Add_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
}
/**
......@@ -81,88 +67,26 @@ public:
// return *in;
// }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInputs[0]->empty()) {
const auto expectedDims = mInputs[0]->dims();
std::size_t nonEmptyInputTensor = 1;
for (; nonEmptyInputTensor<NUM && (!mInputs[nonEmptyInputTensor]->empty()); ++nonEmptyInputTensor) {
assert(expectedDims == mInputs[nonEmptyInputTensor]->dims());
}
if (nonEmptyInputTensor == NUM) {
mOutput->resize(expectedDims);
}
}
}
bool outputDimsForwarded() const override final {
std::size_t forwarded = 0;
for (; forwarded < NUM && (!mInputs[forwarded]->empty()); ++forwarded) {}
return ((forwarded==NUM) && !(mOutput->empty()));
}
// void checkDims() const override final {
// assert(outputDimsForwarded());
// for (const auto& in : mInputs) {
// assert(in->dims() == mOutput->dims());
// assert(in->dims() == mOutputs[0]->dims());
// }
// }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "Add Operators has only 1 outputs");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string& name) override {
mImpl = Registrar<Add_Op<NUM>>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
for (std::size_t i = 0; i < NUM; ++i) {
mInputs[i]->setBackend(name);
}
}
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
mImpl = Registrar<Add_Op>::create(name)(*this);
mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
for (std::size_t i = 0; i < NUM; ++i) {
mInputs[i]->setDatatype(datatype);
for (std::size_t i = 0; i < nbInputs(); ++i) {
getInput(i)->setBackend(name);
}
}
inline IOIndex_t nbInputs() const noexcept override final { return NUM; }
inline IOIndex_t nbDataInputs() const noexcept override final { return NUM; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
static const std::vector<std::string> getInputsName(){
return {"data_input_0", "data_input_n"};
}
static const std::vector<std::string> getOutputsName(){
......@@ -170,9 +94,8 @@ public:
}
};
template <std::size_t NUM>
inline std::shared_ptr<Node> Add(const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Add_Op<NUM>>(), name);
inline std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
}
}
......
......@@ -19,7 +19,7 @@
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
......@@ -29,15 +29,11 @@ namespace Aidge {
enum class AvgPoolingAttr { StrideDims, KernelDims };
template <DimIdx_t DIM>
class AvgPooling_Op : public Operator,
class AvgPooling_Op : public OperatorTensor,
public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
public StaticAttributes<AvgPoolingAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>> {
private:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char *Type = "AvgPooling";
......@@ -52,24 +48,19 @@ public:
constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
: Operator(Type),
: OperatorTensor(Type, 1, 0, 1),
Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {
setDatatype(DataType::Float32);
}
attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
AvgPooling_Op(const AvgPooling_Op<DIM>& op)
: Operator(Type),
Attributes_(op),
mOutput(std::make_shared<Tensor>(*op.mOutput))
: OperatorTensor(op),
Attributes_(op)
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
}
/**
......@@ -80,83 +71,73 @@ public:
return std::make_shared<AvgPooling_Op<DIM>>(*this);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 1 && "operators supports only 3 inputs");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInput->empty()) {
std::array<DimSize_t, DIM + 2> outputDims = {};
// check inputs have been associated
if (!getInput(0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
}
if (!(getInput(0)->empty())) {
std::array<DimSize_t, DIM + 2> outputDims;
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
outputDims[0] = inputDims[0];
outputDims[1] = inputDims[1];
for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
std::floor(static_cast<float>(mInput->dims()[dim+2] -
std::floor(static_cast<float>(inputDims[dim+2] -
this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
}
outputDims[1] = mInput->dims()[1];
outputDims[0] = mInput->dims()[0];
mOutput->resize(outputDims);
getOutput(0)->resize(outputDims);
}
}
bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operators supports only 1 inputs");
(void) inputIdx; // avoid unused warning
return *(mInput.get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "AvgPooling Operators supports only 1 inputs");
(void) inputIdx; // avoid unused warning
return mInput;
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "AvgPooling Operators has only 1 outputs");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operators supports only 1 inputs");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInput);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
// if (outputIdx != 0) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
// }
// if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
// // Offset
// const auto outputIdxDims = mOutput->getCoord(firstIdx);
// std::vector<DimSize_t> inputIdxDims = outputIdxDims;
// for (DimIdx_t i = 0; i < (DIM+2); ++i) {
// if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
// }
// }
// // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
// // Width
// std::vector<DimSize_t> inputDims;
// inputDims.push_back(outputDims[0]); // same batch value
// inputDims.push_back(outputDims[1]); // same channel value
// for (DimIdx_t i = 0; i < DIM; ++i) {
// inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
// * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
// + 1
// + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
// inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
// }
// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>();
// res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInput->getIdx(inputIdxDims), inputDims));
// return res;
// }
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
// }
void setBackend(const std::string &name) override {
mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType &datatype) override {
mOutput->setDatatype(datatype);
mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
mInput->setDatatype(datatype);
getInput(0)->setBackend(name);
}
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
......@@ -190,4 +171,4 @@ const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
"KernelDims"};
}
#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
\ No newline at end of file
......@@ -19,27 +19,20 @@
#include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
namespace Aidge {
enum class BatchNormAttr { Epsilon, Momentum };
enum class BatchNormAttr { Epsilon, Momentum };
template <DimIdx_t DIM>
class BatchNorm_Op : public Operator,
class BatchNorm_Op : public OperatorTensor,
public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
public StaticAttributes<BatchNormAttr, float, float> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 5> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
std::make_shared<Tensor>(), std::make_shared<Tensor>(),
std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char *Type = "BatchNorm";
BatchNorm_Op() = delete;
......@@ -49,25 +42,19 @@ public:
using attr = typename Attributes_::template attr<e>;
constexpr BatchNorm_Op(float epsilon, float momentum)
: Operator(Type),
: OperatorTensor(Type, 1, 4, 1),
Attributes_(attr<BatchNormAttr::Epsilon>(epsilon),
attr<BatchNormAttr::Momentum>(momentum)),
mOutput(std::make_shared<Tensor>()) {
setDatatype(DataType::Float32);
}
attr<BatchNormAttr::Momentum>(momentum)) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
BatchNorm_Op(const BatchNorm_Op<DIM>& op)
: Operator(Type),
Attributes_(op),
mOutput(std::make_shared<Tensor>(*op.mOutput))
: OperatorTensor(op),
Attributes_(op)
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
}
/**
......@@ -87,83 +74,41 @@ public:
// return *in;
// }
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 5 && "operators supports only 5 inputs");
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInputs[0]->empty()) {
for (std::size_t i = nbDataInputs(); i < nbInputs(); ++i) {
if(mInputs[i]->size() != mInputs[0]->dims()[1]) {
mInputs[i]->resize(std::array<DimSize_t, 1>({mInputs[0]->dims()[1]}));
// check inputs have been associated
bool associated = true;
for (IOIndex_t i = 0; i < nbInputs(); ++i) {
associated &= !(getInput(i)->empty());
}
if (associated) {
const DimSize_t nbChannels = getInput(0)->dims()[1];
for (std::size_t i = nbData(); i < nbInputs(); ++i) {
if(getInput(i)->size() != nbChannels) {
// /!\ Input size should be handled BEFORE calling this function
// This should raise an error
getInput(i)->resize(std::array<DimSize_t, 1>({getInput(0)->dims()[1]}));
}
}
mOutput->resize(mInputs[0]->dims());
mOutputs[0]->resize(getInput(0)->dims());
}
}
bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 5 && "operators supports only 5 inputs");
return *(mInputs[inputIdx].get()); }
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 5 && "BatchNorm Operators supports only 5 inputs");
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "BatchNorm Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 5 && "operators supports only 5 inputs");
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string &name) override {
mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInputs[1]->setBackend(name);
mInputs[2]->setBackend(name);
mInputs[3]->setBackend(name);
mInputs[4]->setBackend(name);
}
void setDatatype(const DataType &datatype) override {
mOutput->setDatatype(datatype);
mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
mInputs[1]->setDatatype(datatype);
mInputs[2]->setDatatype(datatype);
mInputs[3]->setDatatype(datatype);
mInputs[4]->setDatatype(datatype);
getInput(1)->setBackend(name);
getInput(2)->setBackend(name);
getInput(3)->setBackend(name);
getInput(4)->setBackend(name);
}
inline IOIndex_t nbInputs() const noexcept override final { return 5; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
static const std::vector<std::string> getInputsName() {
return {"data_input", "scale", "shift", "mean", "variance"};
}
static const std::vector<std::string> getOutputsName(){
static const std::vector<std::string> getOutputsName() {
return {"data_output"};
}
};
......@@ -187,4 +132,4 @@ template <>
const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
}
#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_CONCAT_H_
#define AIDGE_CORE_OPERATOR_CONCAT_H_
#include <numeric>
#include <vector>
#include <cmath>
#include <memory>
#include <vector>
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
enum class ConcatAttr { Axis };
class Concat_Op : public OperatorTensor,
    public Registrable<Concat_Op, std::string, std::unique_ptr<OperatorImpl>(const Concat_Op&)>,
    public StaticAttributes<ConcatAttr, DimSize_t> {
public:
    static constexpr const char* Type = "Concat";

    using Attributes_ = StaticAttributes<ConcatAttr, DimSize_t>;
    template <ConcatAttr e>
    using attr = typename Attributes_::template attr<e>;

    /**
     * @brief Construct a Concat operator.
     * @param nbIn Number of input Tensors to concatenate (must be >= 1).
     * @param axis Dimension along which the inputs are concatenated.
     * @throw std::runtime_error (or abort) if nbIn is 0.
     */
    Concat_Op(const IOIndex_t nbIn, const DimSize_t axis)
        : OperatorTensor(Type, nbIn, 0, 1),
          Attributes_(attr<ConcatAttr::Axis>(axis))
    {
        if (nbIn == 0) {
            // Fixed message: it previously referred to the "Add" operator.
            AIDGE_THROW_OR_ABORT(std::runtime_error, "Concat operator should have at least one input.");
        }
    }

    /**
     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
     * @param op Operator to copy.
     */
    Concat_Op(const Concat_Op& op)
        : OperatorTensor(op),
          Attributes_(op)
    {
        mImpl = op.mImpl ? Registrar<Concat_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
    }

    /**
     * @brief Clone the operator using its copy-constructor.
     * @see Operator::Concat_Op
     */
    std::shared_ptr<Operator> clone() const override {
        return std::make_shared<Concat_Op>(*this);
    }

    /**
     * @brief Compute the output dimensions: same as input 0 everywhere except
     * along Axis, where input sizes are summed. Does nothing (leaves the output
     * unresized) if any input is empty, ranks differ, non-axis dims mismatch,
     * or Axis is out of range.
     * @throw std::runtime_error (or abort) if any input has no associated Tensor.
     */
    void computeOutputDims() override final {
        // Every input must be associated with a Tensor before anything is read.
        // (Previously input 0 was dereferenced before its null-check took effect.)
        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
            if (!getInput(i)) {
                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
            }
        }
        // Do not compute anything if the first input is empty or the axis is out of range.
        bool associated = !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims());
        auto outputDims = getInput(0)->dims();
        const auto firstInputNbDims = getInput(0)->nbDims();
        for (IOIndex_t i = 1; i < nbInputs(); ++i) {
            if (getInput(i)->nbDims() != firstInputNbDims) {
                // Rank mismatch: skip indexing this input (avoids out-of-bounds reads
                // when this input has fewer dims than input 0).
                associated = false;
                continue;
            }
            for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) {
                if (dim == getAttr<ConcatAttr::Axis>()) {
                    outputDims[dim] += getInput(i)->dims()[dim];
                }
                else {
                    associated &= (getInput(i)->dims()[dim] == outputDims[dim]);
                }
            }
        }
        if (associated) {
            getOutput(0)->resize(outputDims);
        }
    }

    /// Select the backend implementation and propagate it to the output and inputs.
    void setBackend(const std::string& name) override {
        mImpl = Registrar<Concat_Op>::create(name)(*this);
        mOutputs[0]->setBackend(name);
        // FIXME: temporary workaround
        for (std::size_t i = 0; i < nbInputs(); ++i) {
            getInput(i)->setBackend(name);
        }
    }

    static const std::vector<std::string> getInputsName(){
        return {"data_input_0", "data_input_n"};
    }
    static const std::vector<std::string> getOutputsName(){
        return {"data_output"};
    }
};
/**
 * @brief Build a graph Node wrapping a Concat operator.
 * @param nbIn Number of Tensors to concatenate.
 * @param axis Dimension along which the concatenation is performed (default 0).
 * @param name Optional node name.
 */
inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const DimIdx_t axis = 0, const std::string& name = "") {
    auto op = std::make_shared<Concat_Op>(nbIn, axis);
    return std::make_shared<Node>(op, name);
}
}
namespace {
// String names of the ConcatAttr enum values, in declaration order
// (used by the attribute introspection machinery for printing/lookup).
template <>
const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
"Axis"
};
}
#endif /* AIDGE_CORE_OPERATOR_CONCAT_H_ */
......@@ -14,12 +14,13 @@
#include <array>
#include <cmath>
#include <cstddef>
#include <numeric>
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
......@@ -29,17 +30,12 @@ namespace Aidge {
enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims };
template <DimIdx_t DIM>
class Conv_Op : public Operator,
class Conv_Op : public OperatorTensor,
public Registrable<Conv_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
DimSize_t, std::array<DimSize_t, DIM>> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
public:
static constexpr const char *Type = "Conv";
Conv_Op() = delete;
......@@ -49,32 +45,27 @@ public:
template <ConvAttr e>
using attr = typename Attributes_::template attr<e>;
constexpr Conv_Op(DimSize_t in_channels,
DimSize_t out_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
: Operator(Type),
Attributes_(attr<ConvAttr::StrideDims>(stride_dims),
attr<ConvAttr::DilationDims>(dilation_dims),
attr<ConvAttr::InChannels>(in_channels),
attr<ConvAttr::OutChannels>(out_channels),
attr<ConvAttr::KernelDims>(kernel_dims)) {
setDatatype(DataType::Float32);
}
constexpr Conv_Op(DimSize_t inChannels,
DimSize_t outChannels,
const std::array<DimSize_t, DIM> &kernelDims,
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
: OperatorTensor(Type, 1, 2, 1),
Attributes_(attr<ConvAttr::StrideDims>(strideDims),
attr<ConvAttr::DilationDims>(dilationDims),
attr<ConvAttr::InChannels>(inChannels),
attr<ConvAttr::OutChannels>(outChannels),
attr<ConvAttr::KernelDims>(kernelDims)) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Conv_Op(const Conv_Op<DIM>& op)
: Operator(Type),
Attributes_(op),
mOutput(std::make_shared<Tensor>(*op.mOutput))
: OperatorTensor(op),
Attributes_(op)
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
}
/**
......@@ -98,16 +89,18 @@ public:
// }
// Associates `data` as input #inputIdx of this Conv operator
// (per getInputsName(): 0 = data_input, 1 = weight, 2 = bias).
// `data` must be a Tensor; asserts otherwise.
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInputs[0]->empty()) {
std::array<DimSize_t, DIM + 2> outputDims = {};
// check inputs have been associated
bool associated = true;
for (IOIndex_t i = 0; i < 3; ++i) {
if (!getInput(i)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
}
associated &= !(getInput(i)->empty());
}
if (associated) {
std::array<DimSize_t, DIM + 2> outputDims{};
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
......@@ -115,68 +108,76 @@ public:
1;
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
}
outputDims[1] = this->template getAttr<ConvAttr::OutChannels>();
outputDims[0] = mInputs[0]->dims()[0];
mOutput->resize(outputDims);
outputDims[0] = inputDims[0];
mOutputs[0]->resize(outputDims);
}
}
bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
return *(mInputs[inputIdx].get()); }
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 3 && "Conv Operators supports only 3 inputs");
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Conv Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
// if (outputIdx != 0) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
// }
// if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
// // Offset
// const auto outputIdxDims = mOutput->getCoord(firstIdx);
// auto inputIdxDims = outputIdxDims; // batch idx is the same
// inputIdxDims[1] = 0; // each channel is used so start with the first one
// for (DimIdx_t i = 0; i < (DIM+2); ++i) {
// if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
// }
// }
// // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
// // Input
// // same batch value, every input channel is used
// std::vector<DimSize_t> inputDims{outputDims[0], mInputs[0]->dims()[1]};
// for (DimIdx_t i = 0; i < DIM; ++i) {
// inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
// * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
// + 1
// + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
// * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
// inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
// }
// // Weight
// // same output value, every input channel is used
// std::vector<DimSize_t> weightDims{outputDims[0], mInputs[0]->dims()[1]};
// weightDims.insert(weightDims.end(), this->template getAttr<ConvAttr::KernelDims>()[0], this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(DIM)]);
// std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
// weightIdxDims[0] = outputIdxDims[1];
// // Bias
// const std::vector<DimSize_t> biasDims{outputDims[0]};
// const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
// // Result
// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
// res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
// res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[1]->getIdx(weightIdxDims), weightDims));
// res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[2]->getIdx(biasIdxDims), biasDims));
// return res;
// }
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
// }
void setBackend(const std::string &name) override {
mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInputs[1]->setBackend(name);
mInputs[2]->setBackend(name);
}
void setDatatype(const DataType &datatype) override {
mOutput->setDatatype(datatype);
mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
mInputs[0]->setDatatype(datatype);
mInputs[1]->setDatatype(datatype);
mInputs[2]->setDatatype(datatype);
getInput(1)->setBackend(name);
getInput(2)->setBackend(name);
}
inline IOIndex_t nbInputs() const noexcept override final { return 3; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
return {"data_input", "weight", "bias"};
}
......@@ -186,32 +187,32 @@ public:
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Conv(DimSize_t in_channels,
DimSize_t out_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
DimSize_t outChannels,
const std::array<DimSize_t, DIM> &kernelDims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), name);
auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims), name);
// addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w");
addProducer(conv, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
addProducer(conv, 2, std::array<DimSize_t, 1>({out_channels}), "b");
addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
addProducer(conv, 2, std::array<DimSize_t, 1>({outChannels}), "b");
return conv;
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> Conv(
DimSize_t in_channels,
DimSize_t out_channels,
DimSize_t const (&kernel_dims)[DIM],
DimSize_t inChannels,
DimSize_t outChannels,
DimSize_t const (&kernelDims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
return Conv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, dilation_dims);
return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims);
}
} // namespace Aidge
......@@ -226,4 +227,4 @@ const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
};
}
#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
\ No newline at end of file
......@@ -19,7 +19,7 @@
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
......@@ -29,20 +29,14 @@ namespace Aidge {
enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
template <DimIdx_t DIM>
class ConvDepthWise_Op : public Operator,
class ConvDepthWise_Op : public OperatorTensor,
public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
public StaticAttributes<ConvDepthWiseAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
DimSize_t,
std::array<DimSize_t, DIM>> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
public:
static constexpr const char *Type = "ConvDepthWise";
ConvDepthWise_Op() = delete;
......@@ -55,29 +49,25 @@ class ConvDepthWise_Op : public Operator,
template <ConvDepthWiseAttr e>
using attr = typename Attributes_::template attr<e>;
constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
constexpr ConvDepthWise_Op(const DimSize_t nbChannels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
: Operator(Type),
: OperatorTensor(Type, 1, 2, 1),
Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
attr<ConvDepthWiseAttr::Channels>(0),
attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {
setDatatype(DataType::Float32);
}
attr<ConvDepthWiseAttr::Channels>(nbChannels),
attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op)
: Operator(Type),
Attributes_(op),
mOutput(std::make_shared<Tensor>(*op.mOutput))
: OperatorTensor(op),
Attributes_(op)
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
}
/**
......@@ -88,16 +78,20 @@ class ConvDepthWise_Op : public Operator,
return std::make_shared<ConvDepthWise_Op<DIM>>(*this);
}
// Associates `data` as input #inputIdx of this ConvDepthWise operator
// (per getInputsName(): 0 = data_input, 1 = weight, 2 = bias).
// `data` must be a Tensor; asserts otherwise.
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInputs[0]->empty()) {
// check inputs have been associated
// TODO : add a check of inputs dimensions ?
bool associated = true;
for (IOIndex_t i = 0; i < 3; ++i) {
if (!getInput(i)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
}
associated &= !(getInput(i)->empty());
}
if (associated) {
std::array<DimSize_t, DIM + 2> outputDims = {};
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
......@@ -105,10 +99,9 @@ class ConvDepthWise_Op : public Operator,
1;
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
}
this->template getAttr<ConvDepthWiseAttr::Channels>() = mInputs[0]->dims()[1];
// std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>()));
// if (mInputs[1]->empty()) {
// mInputs[1]->resize(weightDims);
......@@ -116,66 +109,57 @@ class ConvDepthWise_Op : public Operator,
// if (mInputs[2]->empty()) {
// mInputs[2]->resize({mInputs[0]->dims()[1]});
// }
outputDims[1] = mInputs[0]->dims()[1];
outputDims[0] = mInputs[0]->dims()[0];
mOutput->resize(outputDims);
outputDims[1] = inputDims[1];
outputDims[0] = inputDims[0];
mOutputs[0]->resize(outputDims);
}
}
bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 3 && "ConvDepthWise Operators supports only 3 inputs");
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "ConvDepthWise Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
// if (outputIdx != 0) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
// }
// if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
// // Offset
// const auto outputIdxDims = mOutput->getCoord(firstIdx);
// auto inputIdxDims = outputIdxDims; // batch idx is the same
// for (DimIdx_t i = 0; i < (DIM+2); ++i) {
// if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
// }
// }
// // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
// // Width
// std::vector<DimSize_t> inputDims;
// inputDims.push_back(outputDims[0]); // same batch value
// inputDims.push_back(outputDims[1]); // same channel value
// for (DimIdx_t i = 0; i < DIM; ++i) {
// inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
// * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
// + 1
// + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
// * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
// inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
// }
// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>();
// res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
// return res;
// }
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
// }
void setBackend(const std::string &name) override {
mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInputs[1]->setBackend(name);
mInputs[2]->setBackend(name);
}
void setDatatype(const DataType &datatype) override {
mOutput->setDatatype(datatype);
mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
mInputs[0]->setDatatype(datatype);
mInputs[1]->setDatatype(datatype);
mInputs[2]->setDatatype(datatype);
getInput(1)->setBackend(name);
getInput(2)->setBackend(name);
}
inline IOIndex_t nbInputs() const noexcept override final { return 3; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
return {"data_input", "weight", "bias"};
}
......@@ -185,27 +169,29 @@ class ConvDepthWise_Op : public Operator,
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
const std::array<DimSize_t, DIM> &kernelDims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), name);
addProducer(convDW, 1, std::array<DimSize_t,0>({}), "w");
addProducer(convDW, 2, std::array<DimSize_t,0>({}), "b");
auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims), name);
addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
addProducer(convDW, 2, std::array<DimSize_t, 1>({nbChannels}), "b");
return convDW;
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> ConvDepthWise(
DimSize_t const (&kernel_dims)[DIM],
const DimSize_t nbChannels,
DimSize_t const (&kernelDims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
return ConvDepthWise(to_array(kernel_dims), name, stride_dims, dilation_dims);
return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims);
}
} // namespace Aidge
......
......@@ -17,42 +17,30 @@
#include <vector>
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
class Div_Op : public Operator,
class Div_Op : public OperatorTensor,
public Registrable<Div_Op, std::string, std::unique_ptr<OperatorImpl>(const Div_Op&)> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char* Type = "Div";
Div_Op()
: Operator(Type)
{
setDatatype(DataType::Float32);
}
Div_Op() : OperatorTensor(Type, 2, 0, 1) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Div_Op(const Div_Op& op)
: Operator(Type),
mOutput(std::make_shared<Tensor>(*op.mOutput))
: OperatorTensor(op)
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
}
/**
......@@ -63,73 +51,18 @@ public:
return std::make_shared<Div_Op>(*this);
}
// Associates `data` as one of the two Div operands (input #inputIdx).
// `data` must be a Tensor; asserts otherwise.
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
// NOTE(review): this void-cast is redundant — inputIdx is used below; it only
// matters in builds where assert() compiles away. Harmless, kept as-is.
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInputs[0]->empty())
mOutput->resize(mInputs[0]->dims());
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx < 2) && "Div Operator has 2 inputs");
(void) inputIdx; // avoid unused warning
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Div Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void computeOutputDims() override final;
void setBackend(const std::string& name) override {
mImpl = Registrar<Div_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInputs[0]->setBackend(name);
mInputs[1]->setBackend(name);
}
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
mOutputs[0]->setBackend(name);
// FIXME: temporary workaround
mInputs[0]->setDatatype(datatype);
mInputs[1]->setDatatype(datatype);
getInput(0)->setBackend(name);
getInput(1)->setBackend(name);
}
inline IOIndex_t nbInputs() const noexcept override final { return 2; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
......
......@@ -21,7 +21,7 @@
#include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
......@@ -29,16 +29,11 @@
namespace Aidge {
enum class FCAttr { OutChannels, NoBias };
class FC_Op : public Operator,
class FC_Op : public OperatorTensor,
public Registrable<FC_Op,
std::string,
std::unique_ptr<OperatorImpl>(const FC_Op &)>,
public StaticAttributes<FCAttr, DimSize_t, bool> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(), std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char* Type = "FC";
......@@ -48,26 +43,21 @@ public:
template <FCAttr e> using attr = typename Attributes_::template attr<e>;
/// Construct an FC operator.
/// NOTE(review): the original span was diff-merge residue with two member-init
/// lists (pre- and post-OperatorTensor); resolved to the post-merge version.
/// @param out_channels number of output features of the layer
/// @param noBias       true to disable the bias input
FC_Op(DimSize_t out_channels, bool noBias)
    : OperatorTensor(Type, 1, 2, 1), // 1 data input, 2 parameter inputs (weight, bias), 1 output
      Attributes_(
        attr<FCAttr::OutChannels>(out_channels),
        attr<FCAttr::NoBias>(noBias))
{}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
FC_Op(const FC_Op& op)
: Operator(Type),
Attributes_(op),
mOutput(std::make_shared<Tensor>(*op.mOutput))
: OperatorTensor(op),
Attributes_(op)
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
}
/**
......@@ -78,7 +68,7 @@ public:
return std::make_shared<FC_Op>(*this);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
if (inputIdx == 2) {
......@@ -86,78 +76,35 @@ public:
assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
}
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
if (inputIdx == 0 && mInputs[0]->nbDims() == 1)
mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, mInputs[inputIdx]->size()}));
if (inputIdx == 0 && getInput(0)->nbDims() == 1)
mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, getInput(inputIdx)->size()}));
}
/// Infer the output Tensor dimensions from the inputs.
/// Requires every input to be associated with a Tensor; the output is resized
/// only once all inputs have known (non-empty) dims.
/// NOTE(review): the original span was diff-merge residue containing both the
/// pre- and post-OperatorTensor bodies; resolved to the post-merge version.
void computeOutputDims() override final {
    bool associated = true;
    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
        if (!getInput(i)) {
            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
        }
        associated &= !(getInput(i)->empty());
    }
    if (associated) {
        // <batch, OutChannels>
        mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()});
    }
}
/// Returns true once the output shape has been computed (i.e. the output
/// Tensor is no longer empty).
bool outputDimsForwarded() const override final {
    return !(mOutput->empty());
}
// --- Tensor accessors (pre-OperatorTensor style; kept byte-identical as the
// file is mid-refactor and these rely on the mInputs/mOutput members) ---

/// Reference access to one of the 3 input Tensors (data, weight, bias).
inline Tensor& input(const IOIndex_t inputIdx) const override final {
    assert(inputIdx < 3 && "operators supports only 3 inputs");
    return *(mInputs[inputIdx].get()); }

/// Reference access to the single output Tensor; the index is ignored.
inline Tensor& output(const IOIndex_t /*inputIdx*/) const override final { return *(mOutput.get()); }

/// Shared-pointer access to one of the 3 input Tensors.
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
    assert(inputIdx < 3 && "FC Operators supports only 3 inputs");
    return mInputs[inputIdx];
}

/// Shared-pointer access to the single output Tensor.
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
    assert((outputIdx == 0) && "FC Operator has only 1 output");
    (void) outputIdx; // avoid unused warning
    return mOutput;
}

/// Type-erased (Data) access to one of the 3 input Tensors.
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
    assert(inputIdx < 3 && "operators supports only 3 inputs");
    return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}

/// Type-erased (Data) access to the single output Tensor.
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
    assert(outputIdx == 0 && "operator supports only 1 output");
    (void) outputIdx; // avoid unused warning
    return std::static_pointer_cast<Data>(mOutput);
}
/// Select the backend implementation (e.g. "cpu") for this operator and
/// propagate the backend choice to the output and all three input Tensors
/// (data, weight, bias).
/// @param name registered backend identifier
void setBackend(const std::string& name) override {
    mImpl = Registrar<FC_Op>::create(name)(*this);
    mOutput->setBackend(name);
    // FIXME: temporary workaround — input backends should not be forced here
    mInputs[0]->setBackend(name);
    mInputs[1]->setBackend(name);
    mInputs[2]->setBackend(name);
}
/// Set the data type of the output and all three input Tensors.
/// NOTE(review): the original span contained diff-merge residue — stray
/// `setBackend`-style lines referencing an undeclared `name` variable —
/// which made the method ill-formed; they are removed here.
/// @param datatype the DataType to apply to all connected Tensors
void setDatatype(const DataType& datatype) override {
    mOutput->setDatatype(datatype);
    // FIXME: temporary workaround — input datatypes should not be forced here
    mInputs[0]->setDatatype(datatype);
    mInputs[1]->setDatatype(datatype);
    mInputs[2]->setDatatype(datatype);
}
// FC takes 3 inputs (data, weight, bias); only the first carries data,
// the other two are parameters. It produces a single output.
inline IOIndex_t nbInputs() const noexcept override final { return 3; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
/// Names of the three operator inputs, in order: data, weight, bias.
static const std::vector<std::string> getInputsName(){
    std::vector<std::string> inputNames;
    inputNames.reserve(3);
    inputNames.emplace_back("data_input");
    inputNames.emplace_back("weight");
    inputNames.emplace_back("bias");
    return inputNames;
}
......@@ -166,11 +113,11 @@ public:
}
};
/// Build an FC (fully-connected) node together with its weight and bias Producers.
/// NOTE(review): the original span was diff-merge residue containing both the
/// old (out_channels-only) and new signatures/bodies; resolved to the
/// post-merge version, which requires the input channel count to size the
/// weight Producer.
/// @param inChannels  number of input features (second weight dimension)
/// @param outChannels number of output features (first weight dimension)
/// @param noBias      if true, the bias Producer is created with size 0
/// @param name        optional node name
inline std::shared_ptr<Node> FC(DimSize_t inChannels, DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
    // FIXME: properly handle default w&b initialization in every cases
    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(outChannels, noBias), name);
    addProducer(fc, 1, std::array<DimSize_t, 2>({outChannels, inChannels}), "w");
    addProducer(fc, 2, (noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({outChannels})), "b"); // already sets bias dims
    return fc;
}
} // namespace Aidge
......@@ -181,4 +128,4 @@ const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
"NoBias"};
}
#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment