Skip to content
Snippets Groups Projects
Commit f47faada authored by Grégoire Kubler's avatar Grégoire Kubler
Browse files

Merge branch 'dev' of https://gitlab.eclipse.org/eclipse/aidge/aidge_core into...

Merge branch 'dev' of https://gitlab.eclipse.org/eclipse/aidge/aidge_core into fix/scheduler_exec_time
parents 14b20fe1 a0b56cd9
No related branches found
No related tags found
2 merge requests!105version 0.2.0,!100fix/scheduler_exec_time
Showing
with 637 additions and 465 deletions
"""
Copyright (c) 2023 CEA-List
This program and the accompanying materials are made available under the
terms of the Eclipse Public License 2.0 which is available at
http://www.eclipse.org/legal/epl-2.0.
SPDX-License-Identifier: EPL-2.0
"""
import unittest
import aidge_core
from functools import reduce
import numpy as np
GLOBAL_CPT = 0  # Call counter incremented by testImpl.forward(); reset in setUp() of each test.
class testImpl(aidge_core.OperatorImpl):
    """Dummy Python-side OperatorImpl used to verify that C++ dispatches to Python.

    Each call to forward() increments the module-level GLOBAL_CPT counter so
    tests can assert how many times the implementation was actually run.
    """

    def __init__(self, op: aidge_core.Operator):
        # Explicit call to the bound base-class __init__ with a backend name.
        aidge_core.OperatorImpl.__init__(self, op, 'cpu') # Required to avoid type error !

    def forward(self):
        # Record one invocation of this implementation.
        global GLOBAL_CPT
        GLOBAL_CPT += 1
class test_OperatorImpl(unittest.TestCase):
    """Checks that Python-defined operator implementations are invoked by aidge_core."""

    def setUp(self):
        # Every test starts with a fresh invocation counter.
        global GLOBAL_CPT
        GLOBAL_CPT = 0

    def tearDown(self):
        pass

    def test_setImplementation(self):
        """Test setting an implementation manually
        """
        global GLOBAL_CPT
        node = aidge_core.GenericOperator("MatMul", 1, 0, 1, name="MatMul0")
        op = node.get_operator()
        op.set_compute_output_dims(lambda dims: dims)
        op.set_impl(testImpl(op))
        op.forward()
        self.assertEqual(GLOBAL_CPT, 1)

    def test_Registrar_setOp(self):
        """Test registering an implementation
        """
        global GLOBAL_CPT
        aidge_core.register_ConvOp2D("cpu", testImpl)
        self.assertIn("cpu", aidge_core.get_keys_ConvOp2D())
        node = aidge_core.Conv2D(2, 2, [1, 1], name="Conv0")
        node.get_operator().set_backend("cpu")
        node.get_operator().forward()
        self.assertEqual(GLOBAL_CPT, 1)

    def test_Registrar_setGraphView(self):
        """Test registering an implementation
        """
        global GLOBAL_CPT
        aidge_core.register_ConvOp2D("cpu", testImpl)
        aidge_core.register_ProducerOp("cpu", testImpl)
        self.assertIn("cpu", aidge_core.get_keys_ConvOp2D())
        node = aidge_core.Conv2D(2, 2, [1, 1], name="Conv0")
        model = aidge_core.sequential([node])
        model.set_backend("cpu")
        node.get_operator().forward()
        self.assertEqual(GLOBAL_CPT, 1)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
......@@ -108,7 +108,7 @@ class test_operator_binding(unittest.TestCase):
"""Dummy implementation to test that C++ call python code
"""
def __init__(self, op: aidge_core.Operator):
aidge_core.OperatorImpl.__init__(self, op) # Required to avoid type error !
aidge_core.OperatorImpl.__init__(self, op, 'test_impl') # Required to avoid type error !
self.idx = 0
def forward(self):
......
......@@ -23,15 +23,17 @@
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Database.hpp"
#include "aidge/data/DataProvider.hpp"
#include "aidge/graph/Connector.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/graph/OpArgs.hpp"
#include "aidge/graphmatching/Match.hpp"
#include "aidge/graphmatching/NodeRegex.hpp"
#include "aidge/graphmatching/SeqStm.hpp"
#include "aidge/graphmatching/StmFactory.hpp"
#include "aidge/graphmatching/Utile.hpp"
#include "aidge/graphRegex/GraphRegex.hpp"
#include "aidge/filler/Filler.hpp"
#include "aidge/nodeTester/ConditionalInterpreter.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/operator/AvgPooling.hpp"
......
......@@ -9,12 +9,12 @@
*
********************************************************************************/
#ifndef AIDGE_OPERATORIMPL_H_
#define AIDGE_OPERATORIMPL_H_
#ifndef AIDGE_BACKEND_OPERATORIMPL_H_
#define AIDGE_BACKEND_OPERATORIMPL_H_
#include <cstddef>
#include <string>
#include <vector>
#include <memory>
#include "aidge/utils/Types.h"
namespace Aidge {
......@@ -22,10 +22,13 @@ class Operator;
class OperatorImpl {
public:
OperatorImpl(const Operator& op);
OperatorImpl(const Operator& op, const std::string& backend);
virtual void forward();
virtual void backward();
const std::string& backend() const noexcept {
return mBackend;
}
/**
* @brief Minimum amount of data from a specific input required by the
* implementation to be run.
......@@ -73,9 +76,10 @@ public:
protected:
const Operator &mOp;
const std::string mBackend;
std::vector<NbElts_t> mNbConsumedData;
std::vector<NbElts_t> mNbProducedData;
};
} // namespace Aidge
#endif /* AIDGE_OPERATORIMPL_H_ */
#endif /* AIDGE_BACKEND_OPERATORIMPL_H_ */
......@@ -72,7 +72,7 @@ private:
class TensorImpl {
protected:
const char *mBackend;
const std::string mBackend;
/// @brief Device id.
const DeviceIdx_t mDevice;
/// Number of elements (to be) stored.
......@@ -81,7 +81,7 @@ protected:
public:
TensorImpl() = delete;
TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims)
TensorImpl(const std::string& backend, DeviceIdx_t device, std::vector<DimSize_t> dims)
: mBackend(backend),
mDevice(device)
{
......@@ -97,7 +97,7 @@ public:
* Return the (backend, device) pair for this implementation.
*/
std::pair<std::string, DeviceIdx_t> device() const noexcept {
return std::make_pair(std::string(mBackend), mDevice);
return std::make_pair(mBackend, mDevice);
}
/**
......@@ -171,22 +171,30 @@ public:
};
/**
* Set the size, in number of elements, that must be stored.
* @brief Set the size, in number of elements, that must be stored.
*/
virtual void resize(std::vector<DimSize_t> dims) {
mNbElts = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
}
/**
* Return the number of elements stored.
* @brief Return the number of elements stored.
*/
inline std::size_t size() const noexcept { return mNbElts; }
/**
* Return the size (in bytes) of one element (scalar).
* @brief Return the size (in bytes) of one element (scalar).
*/
virtual std::size_t scalarSize() const noexcept = 0;
constexpr const char *backend() const { return mBackend; }
/**
* @brief Set every element of the implementation to zero.
*/
virtual void zeros() {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Function not implented");
}
const std::string backend() const { return mBackend; }
/**
* @brief Copy from another backend.
......
......@@ -14,7 +14,6 @@
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/half.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
......@@ -31,21 +30,12 @@ private:
std::unique_ptr<T[]> mDataOwner;
public:
static constexpr const char *Backend = "cpu";
static const std::string Backend;
public:
TensorImpl_cpu(DeviceIdx_t device, std::vector<DimSize_t> dims) : TensorImpl(Backend, device, dims) {}
bool operator==(const TensorImpl &otherImpl) const override final {
const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts);
std::size_t i = 0;
for (; i < mNbElts &&
*static_cast<const T*>(rawPtr(i)) == *static_cast<const T*>(typedOtherImpl.rawPtr(i));
++i) {
}
return i == mNbElts;
}
bool operator==(const TensorImpl &other) const override final;
static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, std::vector<DimSize_t> dims) {
return std::make_shared<TensorImpl_cpu<T>>(device, dims);
......@@ -53,6 +43,8 @@ public:
inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
void zeros() override final;
void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
const T* srcT = static_cast<const T *>(src);
T* dstT = static_cast<T *>(rawPtr(offset));
......@@ -62,64 +54,7 @@ public:
std::copy(srcT, srcT + length, dstT);
}
void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final {
if (length == 0) {
return;
}
T* dstT = static_cast<T *>(rawPtr(offset));
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
switch (srcDt)
{
case DataType::Float64:
std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
dstT);
break;
case DataType::Float32:
std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
dstT);
break;
case DataType::Float16:
std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
dstT);
break;
case DataType::Int64:
std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
dstT);
break;
case DataType::UInt64:
std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
dstT);
break;
case DataType::Int32:
std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
dstT);
break;
case DataType::UInt32:
std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
dstT);
break;
case DataType::Int16:
std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
dstT);
break;
case DataType::UInt16:
std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
dstT);
break;
case DataType::Int8:
std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
dstT);
break;
case DataType::UInt8:
std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
dstT);
break;
default:
AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
break;
}
}
void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final;
void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
AIDGE_ASSERT(device.first == Backend, "backend must match");
......@@ -176,6 +111,10 @@ private:
}
};
template <typename T>
const std::string TensorImpl_cpu<T>::Backend = "cpu";
namespace {
static Registrar<Tensor> registrarTensorImpl_cpu_Float64(
{"cpu", DataType::Float64}, Aidge::TensorImpl_cpu<double>::create);
......
......@@ -52,6 +52,7 @@ public:
return mType;
}
virtual ~Data() = default;
virtual std::string toString() const = 0;
private:
const std::string mType;
......@@ -84,4 +85,4 @@ namespace Aidge {
inline auto format_as(DataType dt) { return EnumStrings<Aidge::DataType>::data[static_cast<int>(dt)]; }
}
#endif /* AIDGE_DATA_H_ */
\ No newline at end of file
#endif /* AIDGE_DATA_H_ */
This diff is collapsed.
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_FILLER_H_
#define AIDGE_CORE_FILLER_H_
#include <memory>
#include <random> // normal_distribution, uniform_real_distribution
#include "aidge/data/Tensor.hpp"
namespace Aidge {
inline void calculateFanInFanOut(std::shared_ptr<Tensor> tensor,
unsigned int& fanIn, unsigned int& fanOut) {
AIDGE_ASSERT(
tensor->nbDims() == 4,
"Tensor need to have 4 dimensions to compute FanIn and FanOut.");
// Warning: This function suppose NCXX data layout.
// Aidge currently only support NCHW but this maybe not be true in the
// future.
DimSize_t batchSize = tensor->dims()[0];
DimSize_t channelSize = tensor->dims()[1];
AIDGE_ASSERT(batchSize != 0,
"Cannot calculate FanIn if tensor batch size is 0.");
AIDGE_ASSERT(channelSize != 0,
"Cannot calculate FanOut if tensor channel size is 0.");
fanIn = static_cast<unsigned int>(tensor->size() / batchSize);
fanOut = static_cast<unsigned int>(tensor->size() / channelSize);
}
/// Normalization mode used to scale the variance of random fillers.
enum VarianceNorm { FanIn, Average, FanOut };

/// @brief Fill @p tensor with the single value @p constantValue.
template <typename T>
void constantFiller(std::shared_ptr<Tensor> tensor, T constantValue);

/// @brief Fill @p tensor with values drawn from a normal distribution
/// with the given @p mean and standard deviation @p stdDev.
template <typename T>
void normalFiller(std::shared_ptr<Tensor> tensor, double mean = 0.0,
                  double stdDev = 1.0);

/// @brief Fill @p tensor with values drawn uniformly between @p min and @p max.
template <typename T>
void uniformFiller(std::shared_ptr<Tensor> tensor, T min, T max);

/// @brief Xavier uniform initialization, scaled by @p scaling and
/// normalized according to @p varianceNorm.
template <typename T>
void xavierUniformFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0,
                         VarianceNorm varianceNorm = FanIn);

/// @brief Xavier normal initialization, scaled by @p scaling and
/// normalized according to @p varianceNorm.
template <typename T>
void xavierNormalFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0,
                        VarianceNorm varianceNorm = FanIn);

/// @brief He initialization, normalized according to @p varianceNorm.
template <typename T>
void heFiller(std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm = FanIn,
              T meanNorm = 0.0, T scaling = 1.0);
} // namespace Aidge
#endif /* AIDGE_CORE_FILLER_H_ */
......@@ -62,11 +62,7 @@ public:
return mNodes == gv.mNodes;
}
NodePtr operator[](const std::string& name)
{
assert(mNodeRegistry.find(name) != mNodeRegistry.end() && "Could not find Node in the GraphView.");
return mNodeRegistry.at(name);
}
const NodePtr operator[](const std::string& nodeName) const;
///////////////////////////////////////////////////////
// FUNCTIONAL DESCRIPTION
......@@ -82,14 +78,14 @@ public:
* @brief Name of the node.
* @return std::string
*/
std::string name() const;
inline std::string name() const noexcept { return mName; }
/**
* @brief Set the node name.
* @warning Undefined behaviour when several Nodes have the same name.
* @param name New name for the node.
*/
void setName(const std::string &name);
inline void setName(const std::string &name) { mName = name; }
/**
* @brief Save the GraphView as a Mermaid graph in a .md file at the
......@@ -98,16 +94,16 @@ public:
*/
void save(const std::string& path, bool verbose = false, bool showProducers = true) const;
void logOutputs(const std::string& dirName) const;
/**
* Check that a node is in the current GraphView.
* @param nodePtr Node to check
* @return bool True is nodePtr belongs to the GraphView.
*/
inline bool inView(NodePtr nodePtr) const {
return mNodes.find(nodePtr) != mNodes.end();
}
bool inView(const NodePtr& nodePtr) const;
NodePtr getRootNode() {
inline NodePtr rootNode() const noexcept {
return mRootNode;
}
......@@ -118,41 +114,32 @@ public:
///////////////////////////////////////////////////////
public:
/** @brief Get reference to the set of input Nodes. */
inline std::set<NodePtr> inputNodes() const noexcept {
std::set<NodePtr> nodes;
for (auto node : mInputNodes) {
if (node.first != nullptr) {
nodes.insert(node.first);
}
}
return nodes;
}
std::set<NodePtr> inputNodes() const;
/** @brief Get reference to the set of output Nodes. */
inline std::set<NodePtr> outputNodes() const noexcept {
std::set<NodePtr> nodes;
for (auto node : mOutputNodes) {
if (node.first != nullptr) {
nodes.insert(node.first);
}
}
return nodes;
}
std::set<NodePtr> outputNodes() const;
/** @brief Assess if the given Node is an input Node of the GraphView object. */
inline bool isInputNode(NodePtr nodePtr) const {
const auto nodes = inputNodes();
return (nodes.find(nodePtr) != nodes.end()) ? true : false;
}
bool isInputNode(const NodePtr& nodePtr) const;
/** @brief Assess if the given Node is an output Node of the GraphView object. */
inline bool isOutputNode(NodePtr nodePtr) const {
const auto nodes = outputNodes();
return (nodes.find(nodePtr) != nodes.end()) ? true : false;
}
bool isOutputNode(const NodePtr& nodePtr) const;
void setOrderedInputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& inputs);
void setOrderedOutputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& outputs);
inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedInputs() const { return mInputNodes; };
inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedOutputs() const { return mOutputNodes; };
/**
* @brief Get inputs of the current GraphView with their associated id.
* The rank of the nodes are their rank in the vector.
* @return const std::vector<std::pair<NodePtr, IOIndex_t>>&
*/
inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedInputs() const noexcept { return mInputNodes; };
/**
* @brief Get outputs of the current GraphView with their associated id.
* The rank of the nodes are their rank in the vector.
* @return const std::vector<std::pair<NodePtr, IOIndex_t>>&
*/
inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedOutputs() const noexcept { return mOutputNodes; };
/**
* @brief List outside data input connections of the GraphView.
......@@ -214,7 +201,7 @@ public:
* If not, add a Transpose Operator.
* 4 - Propagate Tensor dimensions through the consecutive Operators.
*/
void compile(const std::string& backend, const Aidge::DataType datatype, DeviceIdx_t device = 0);
void compile(const std::string& backend = "cpu", const Aidge::DataType datatype = DataType::Float32, DeviceIdx_t device = 0);
/**
* @brief Compute dimensions of input/output Tensors for each Operator of the
......@@ -223,9 +210,9 @@ public:
void forwardDims(const std::vector<std::vector<DimSize_t>> dims = {});
/** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
void setBackend(const std::string &backend, DeviceIdx_t device = 0);
void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const;
/** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
void setDataType(const DataType &datatype);
void setDataType(const DataType& datatype) const;
///////////////////////////////////////////////////////
// TOPOLOGY
......@@ -283,7 +270,7 @@ public:
* added to the list, and so on.
* - Any remaining nodes have no path to the root node and are added in
* arbitrary order. In this case, the ranking is not guaranteed to be unique.
*
*
* If the ranking cannot be guaranteed to be unique, the second item indicates
* the rank from which uniqueness cannot be guaranteed.
* @return std::pair<std::vector<NodePtr>, size_t> Pair with the list of ranked
......@@ -379,11 +366,10 @@ public:
* @param toTensor Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning
* first available data input for the Node.
*/
inline void addChild(NodePtr toOtherNode, std::string fromOutNodeName,
inline void addChild(NodePtr toOtherNode, const std::string& fromOutNodeName,
const IOIndex_t fromTensor = IOIndex_t(0),
IOIndex_t toTensor = gk_IODefaultIndex) {
assert(mNodeRegistry.find(fromOutNodeName) != mNodeRegistry.end() &&
"No Node with this name found in the GraphView.");
AIDGE_ASSERT(mNodeRegistry.find(fromOutNodeName) != mNodeRegistry.end(), "No node named {} in graph {}.", fromOutNodeName, name());
addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor);
}
......@@ -524,7 +510,6 @@ private:
// TOPOLOGY
///////////////////////////////////////////////////////
void _forwardDims(std::set<NodePtr> listNodes);
};
/**
......
......@@ -12,15 +12,11 @@
#ifndef AIDGE_CORE_OPERATOR_ADD_H_
#define AIDGE_CORE_OPERATOR_ADD_H_
#include <numeric>
#include <vector>
#include <cmath>
#include <memory>
#include <string>
#include <vector>
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
......@@ -28,7 +24,7 @@
namespace Aidge {
class Add_Op : public OperatorTensor,
public Registrable<Add_Op, std::string, std::unique_ptr<OperatorImpl>(const Add_Op&)> {
public Registrable<Add_Op, std::string, std::shared_ptr<OperatorImpl>(const Add_Op&)> {
public:
static const std::string Type;
......@@ -44,11 +40,7 @@ public:
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Add_Op(const Add_Op& op)
: OperatorTensor(op)
{
mImpl = op.mImpl ? Registrar<Add_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
}
Add_Op(const Add_Op& op);
/**
* @brief Clone the operator using its copy-constructor.
......@@ -70,10 +62,7 @@ public:
void computeOutputDims() override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
mImpl = Registrar<Add_Op>::create(name)(*this);
mOutputs[0]->setBackend(name, device);
}
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
static const std::vector<std::string> getInputsName() {
return {"data_input_0", "data_input_n"};
......
......@@ -13,14 +13,18 @@
#define AIDGE_CORE_OPERATOR_AVGPOOLING_H_
#include <array>
#include <numeric>
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <string>
#include <utility> // std::pair
#include <vector>
#include <cmath>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
......@@ -30,7 +34,7 @@ enum class AvgPoolingAttr { StrideDims, KernelDims };
template <DimIdx_t DIM>
class AvgPooling_Op : public OperatorTensor,
public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
public StaticAttributes<AvgPoolingAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>> {
......@@ -60,7 +64,11 @@ public:
: OperatorTensor(op),
Attributes_(op)
{
mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
if (op.mImpl) {
SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend());
} else {
mImpl = nullptr;
}
}
/**
......@@ -97,8 +105,7 @@ public:
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
const std::vector<DimSize_t>& outputDims,
const IOIndex_t outputIdx = 0) const override final
{
const IOIndex_t outputIdx = 0) const override final {
if (outputIdx != 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
}
......@@ -137,7 +144,7 @@ public:
void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, name);
mOutputs[0]->setBackend(name, device);
}
......@@ -149,8 +156,8 @@ public:
}
};
template <DimIdx_t DIM>
const std::string AvgPooling_Op<DIM>::Type = "AvgPooling";
template <Aidge::DimIdx_t DIM>
const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling";
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
......@@ -177,4 +184,4 @@ const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
"KernelDims"};
}
#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
\ No newline at end of file
#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
......@@ -30,7 +30,7 @@ enum class BatchNormAttr { Epsilon, Momentum };
template <DimIdx_t DIM>
class BatchNorm_Op : public OperatorTensor,
public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
public StaticAttributes<BatchNormAttr, float, float> {
public:
static const std::string Type;
......@@ -54,7 +54,11 @@ public:
: OperatorTensor(op),
Attributes_(op)
{
mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
if (op.mImpl){
SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend());
}else{
mImpl = nullptr;
}
}
/**
......@@ -95,7 +99,7 @@ public:
}
void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this);
SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, name);
mOutputs[0]->setBackend(name, device);
// By default, automatically set backend for scale, shift, mean and variance
......@@ -136,4 +140,4 @@ template <>
const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
}
#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
......@@ -39,7 +39,11 @@ public:
Cast_Op(const Cast_Op& op)
: OperatorTensor(op)
{
mImpl = op.mImpl ? Registrar<Cast_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
if (op.mImpl) {
SET_IMPL_MACRO(Cast_Op, *this, op.backend());
} else {
mImpl = nullptr;
}
}
/**
......@@ -50,12 +54,7 @@ public:
return std::make_shared<Cast_Op>(*this);
}
void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
if (Registrar<Cast_Op>::exists({name})) {
mImpl = Registrar<Cast_Op>::create({name})(*this);
}
mOutputs[0]->setBackend(name, device);
}
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
void forward() override;
......
......@@ -12,16 +12,16 @@
#ifndef AIDGE_CORE_OPERATOR_CONCAT_H_
#define AIDGE_CORE_OPERATOR_CONCAT_H_
#include <numeric>
#include <vector>
#include <cmath>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
......@@ -29,7 +29,7 @@ namespace Aidge {
enum class ConcatAttr { Axis };
class Concat_Op : public OperatorTensor,
public Registrable<Concat_Op, std::string, std::unique_ptr<OperatorImpl>(const Concat_Op&)>,
public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)>,
public StaticAttributes<ConcatAttr, DimSize_t> {
public:
static const std::string Type;
......@@ -55,7 +55,11 @@ public:
: OperatorTensor(op),
Attributes_(op)
{
mImpl = op.mImpl ? Registrar<Concat_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
if (op.mImpl){
SET_IMPL_MACRO(Concat_Op, *this, op.backend());
}else{
mImpl = nullptr;
}
}
/**
......@@ -66,51 +70,9 @@ public:
return std::make_shared<Concat_Op>(*this);
}
// Data operator[](const char* inputName) override final {
// std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
// (strcmp(inputName, "weight") ? mInputs[1] :
// (strcmp(inputName, "bias") ? mInputs[2] :
// nullptr));
// assert((in!=nullptr) && "No such parameter");
// return *in;
// }
void computeOutputDims() override final {
// Every input is non-empty with the same number of dimensions
bool associated = (getInput(0) != nullptr);
associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input
auto outputDims = getInput(0)->dims();
const auto firstInputNbDims = getInput(0) -> nbDims();
for (IOIndex_t i = 1; i < nbInputs(); ++i) {
if (!getInput(i)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
}
void computeOutputDims() override final;
if (getInput(i)->nbDims() == firstInputNbDims) {
for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) {
if (dim == getAttr<ConcatAttr::Axis>()) {
outputDims[dim] += getInput(i)->dims()[dim];
}
else {
associated &= (getInput(i)->dims()[dim] == outputDims[dim]);
}
}
}
else {
associated = false;
break;
}
}
if (associated) {
getOutput(0)->resize(outputDims);
}
}
void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
mImpl = Registrar<Concat_Op>::create(name)(*this);
mOutputs[0]->setBackend(name, device);
}
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
static const std::vector<std::string> getInputsName(){
return {"data_input_0", "data_input_n"};
......
......@@ -13,35 +13,48 @@
#define AIDGE_CORE_OPERATOR_CONV_H_
#include <array>
#include <cmath>
#include <cstddef>
#include <numeric>
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <string>
#include <utility> // std::pair
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims };
enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, NoBias };
template <DimIdx_t DIM>
class Conv_Op : public OperatorTensor,
public Registrable<Conv_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
DimSize_t, std::array<DimSize_t, DIM>> {
public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
public StaticAttributes<ConvAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
DimSize_t,
DimSize_t,
std::array<DimSize_t, DIM>,
bool> {
public:
static const std::string Type;
Conv_Op() = delete;
using Attributes_ = StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
DimSize_t, DimSize_t, std::array<DimSize_t, DIM>>;
using Attributes_ = StaticAttributes<ConvAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
DimSize_t,
DimSize_t,
std::array<DimSize_t, DIM>,
bool>;
template <ConvAttr e>
using attr = typename Attributes_::template attr<e>;
......@@ -49,13 +62,15 @@ public:
DimSize_t outChannels,
const std::array<DimSize_t, DIM> &kernelDims,
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
bool noBias = false)
: OperatorTensor(Type, 1, 2, 1),
Attributes_(attr<ConvAttr::StrideDims>(strideDims),
attr<ConvAttr::DilationDims>(dilationDims),
attr<ConvAttr::InChannels>(inChannels),
attr<ConvAttr::OutChannels>(outChannels),
attr<ConvAttr::KernelDims>(kernelDims)) {}
attr<ConvAttr::KernelDims>(kernelDims),
attr<ConvAttr::NoBias>(noBias)) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
......@@ -65,7 +80,11 @@ public:
: OperatorTensor(op),
Attributes_(op)
{
mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
if (op.mImpl) {
SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
} else {
mImpl = nullptr;
}
}
/**
......@@ -118,8 +137,10 @@ public:
}
}
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
const std::vector<DimSize_t>& outputDims,
const IOIndex_t outputIdx = 0) const override {
if (outputIdx != 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
}
......@@ -159,22 +180,25 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> co
std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
weightIdxDims[0] = firstEltDims[1];
// Bias
const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
// Result
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
// Bias
if (! this->template getAttr<ConvAttr::NoBias>()){
const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
}
return res;
}
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
}
void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
SET_IMPL_MACRO(Conv_Op<DIM>, *this, name);
mOutputs[0]->setBackend(name, device);
// By default, automatically set backend for weight and bias inputs
......@@ -211,12 +235,14 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
const std::array<DimSize_t, DIM> &kernelDims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
bool noBias = false) {
// FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims), name);
auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims, noBias), name);
addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
addProducer(conv, 2, {outChannels}, "b");
addProducer(conv, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
return conv;
}
......@@ -228,9 +254,10 @@ inline std::shared_ptr<Node> Conv(
DimSize_t const (&kernelDims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
bool noBias = false) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims);
return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias);
}
} // namespace Aidge
......@@ -241,8 +268,9 @@ const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
"DilationDims",
"InChannels",
"OutChannels",
"KernelDims"
"KernelDims",
"NoBias"
};
}
#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
\ No newline at end of file
#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
......@@ -13,29 +13,33 @@
#define AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_
#include <array>
#include <cmath>
#include <numeric>
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <string>
#include <utility> // std::pair
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims, NoBias };
template <DimIdx_t DIM>
class ConvDepthWise_Op : public OperatorTensor,
public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
public StaticAttributes<ConvDepthWiseAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
DimSize_t,
std::array<DimSize_t, DIM>> {
std::array<DimSize_t, DIM>,
bool> {
public:
static const std::string Type;
......@@ -45,19 +49,22 @@ public:
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
DimSize_t,
std::array<DimSize_t, DIM>>;
std::array<DimSize_t, DIM>,
bool>;
template <ConvDepthWiseAttr e>
using attr = typename Attributes_::template attr<e>;
constexpr ConvDepthWise_Op(const DimSize_t nbChannels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
bool no_bias=false)
: OperatorTensor(Type, 1, 2, 1),
Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
attr<ConvDepthWiseAttr::Channels>(nbChannels),
attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {}
attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
attr<ConvDepthWiseAttr::NoBias>(no_bias)) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
......@@ -67,7 +74,11 @@ public:
: OperatorTensor(op),
Attributes_(op)
{
mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
if (op.mImpl){
SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
}else{
mImpl = nullptr;
}
}
/**
......@@ -153,22 +164,24 @@ public:
std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
weightIdxDims[0] = firstEltDims[1];
// Bias
const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
// Result
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
// Bias
if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){
const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
}
return res;
}
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
}
void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
mOutputs[0]->setBackend(name, device);
// By default, automatically set backend for weight and bias inputs
......@@ -192,12 +205,13 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
const std::array<DimSize_t, DIM> &kernelDims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
bool noBias=false) {
// FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims), name);
auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims, noBias), name);
addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
addProducer(convDW, 2, {nbChannels}, "b");
addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b");
return convDW;
}
......@@ -208,16 +222,17 @@ inline std::shared_ptr<Node> ConvDepthWise(
DimSize_t const (&kernelDims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
bool noBias=false) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims);
return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias);
}
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims", "Channels",
"KernelDims"};
"KernelDims", "NoBias"};
}
#endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
......@@ -12,21 +12,20 @@
#ifndef AIDGE_CORE_OPERATOR_DIV_H_
#define AIDGE_CORE_OPERATOR_DIV_H_
#include <cassert>
#include <memory>
#include <string>
#include <vector>
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
class Div_Op : public OperatorTensor,
public Registrable<Div_Op, std::string, std::unique_ptr<OperatorImpl>(const Div_Op&)> {
public Registrable<Div_Op, std::string, std::shared_ptr<OperatorImpl>(const Div_Op&)> {
public:
static const std::string Type;
......@@ -40,7 +39,11 @@ public:
Div_Op(const Div_Op& op)
: OperatorTensor(op)
{
mImpl = op.mImpl ? Registrar<Div_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
if (op.mImpl) {
SET_IMPL_MACRO(Div_Op, *this, op.backend());
} else {
mImpl = nullptr;
}
}
/**
......@@ -53,11 +56,7 @@ public:
void computeOutputDims() override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
mImpl = Registrar<Div_Op>::create(name)(*this);
mOutputs[0]->setBackend(name, device);
}
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
static const std::vector<std::string> getInputsName(){
return {"data_input_1", "data_input_2"};
......
......@@ -12,22 +12,20 @@
#ifndef AIDGE_CORE_OPERATOR_ERF_H_
#define AIDGE_CORE_OPERATOR_ERF_H_
#include <cassert>
#include <memory>
#include <string>
#include <vector>
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
class Erf_Op : public OperatorTensor,
public Registrable<Erf_Op, std::string, std::unique_ptr<OperatorImpl>(const Erf_Op&)> {
public Registrable<Erf_Op, std::string, std::shared_ptr<OperatorImpl>(const Erf_Op&)> {
public:
static const std::string Type;
......@@ -40,7 +38,11 @@ public:
Erf_Op(const Erf_Op& op)
: OperatorTensor(op)
{
mImpl = op.mImpl ? Registrar<Erf_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
if (op.mImpl) {
SET_IMPL_MACRO(Erf_Op, *this, op.backend());
} else {
mImpl = nullptr;
}
}
/**
......@@ -51,10 +53,7 @@ public:
return std::make_shared<Erf_Op>(*this);
}
void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
mImpl = Registrar<Erf_Op>::create(name)(*this);
mOutputs[0]->setBackend(name, device);
}
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
static const std::vector<std::string> getInputsName(){
return {"data_input"};
......
......@@ -13,13 +13,10 @@
#define AIDGE_CORE_OPERATOR_FC_H_
#include <array>
#include <cmath>
#include <numeric>
#include <memory>
#include <vector>
#include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
......@@ -32,7 +29,7 @@ enum class FCAttr { OutChannels, NoBias };
class FC_Op : public OperatorTensor,
public Registrable<FC_Op,
std::string,
std::unique_ptr<OperatorImpl>(const FC_Op &)>,
std::shared_ptr<OperatorImpl>(const FC_Op &)>,
public StaticAttributes<FCAttr, DimSize_t, bool> {
public:
static const std::string Type;
......@@ -57,53 +54,26 @@ public:
: OperatorTensor(op),
Attributes_(op)
{
mImpl = op.mImpl ? Registrar<FC_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
if (op.mImpl){
SET_IMPL_MACRO(FC_Op, *this, op.backend());
}else{
mImpl = nullptr;
}
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::FC_Op
*/
std::shared_ptr<Operator> clone() const override {
std::shared_ptr<Operator> clone() const override final {
return std::make_shared<FC_Op>(*this);
}
void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
assert(data->type() == Tensor::Type && "input data must be of Tensor type");
// TODO: FIXME: check this, because data dims may not be initialized at this point...
//if (inputIdx == 2) {
// assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template getAttr<FCAttr::NoBias>()) == false ? static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>()) : 0));
// assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
//}
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
if (inputIdx == 0 && getInput(0)->nbDims() == 1)
mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()});
}
void computeOutputDims() override final {
bool associated = true;
for (IOIndex_t i = 0; i < nbInputs(); ++i) {
if (!getInput(i)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
}
associated &= !(getInput(i)->empty());
}
if (associated) {
// <batch, OutChannels>
mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()});
}
}
void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
mImpl = Registrar<FC_Op>::create(name)(*this);
mOutputs[0]->setBackend(name, device);
void computeOutputDims() override final;
// By default, automatically set backend for weight and bias inputs
getInput(1)->setBackend(name, device);
getInput(2)->setBackend(name, device);
}
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
static const std::vector<std::string> getInputsName(){
return {"data_input", "weight", "bias"};
......@@ -128,4 +98,4 @@ const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
"NoBias"};
}
#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
\ No newline at end of file
#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment