
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

23 commits on source; 510 additions and 24 deletions.
......@@ -14,11 +14,13 @@
#include <cstddef>
#include <vector>
#include <memory>
#include "aidge/utils/Types.h"
namespace Aidge {
class OperatorImpl {
public:
virtual void forward(){};
virtual void backward(){};
......
......@@ -348,6 +348,37 @@ public:
*/
void updateOutputNodes();
/**
* @brief Clone the GraphView with shared Operators: the result is a new GraphView with cloned Nodes, but the new Nodes refer to the same Operators as the original ones.
* @return std::shared_ptr<GraphView>
*/
inline std::shared_ptr<GraphView> cloneSharedOperators() const {
return cloneCallback(&Node::cloneSharedOperators);
}
/**
* @brief Clone the GraphView with shared Producers. All the other Operators are copied.
* @return std::shared_ptr<GraphView>
*/
inline std::shared_ptr<GraphView> cloneSharedProducers() const {
return cloneCallback(&Node::cloneSharedProducers);
}
/**
* @brief Clone the GraphView. Everything is cloned: Nodes and Operators.
* @return std::shared_ptr<GraphView>
*/
inline std::shared_ptr<GraphView> clone() const {
return cloneCallback(&Node::clone);
}
/**
* @brief Clone the current GraphView using a callback function for Node cloning, which allows specifying how each Node should be cloned, replaced by another Node type, or removed (i.e. replaced by identity). When a Node is removed, the clone() method automatically finds the next valid parent in line, going backward in the graph, and connects it when this can be done without ambiguity (effectively treating the removed Node as an identity operation).
* @param cloneNode Callback function to clone a node
* @return std::shared_ptr<GraphView>
*/
std::shared_ptr<GraphView> cloneCallback(NodePtr(*cloneNode)(NodePtr)) const;
private:
///////////////////////////////////////////////////////
// TENSOR MANAGEMENT
......
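The three inline wrappers above differ only in how each Node's Operator is handled during the copy. A minimal usage sketch (the GraphView g and the function name are illustrative, not part of the merge request):

    #include "aidge/graph/GraphView.hpp"

    void cloneFlavors(std::shared_ptr<Aidge::GraphView> g) {
        auto g1 = g->cloneSharedOperators(); // new Nodes, same Operator instances
        auto g2 = g->cloneSharedProducers(); // everything cloned, except Producer Operators, which stay shared
        auto g3 = g->clone();                // deep copy: new Nodes and new Operators
    }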
......@@ -350,6 +350,55 @@ public:
*/
void resetConnections(bool includeLearnableParam = false);
///////////////////////////////////////////////////////
// CLONE
///////////////////////////////////////////////////////
/**
* @brief Clone the current Node. The Operator of the new Node is not copied but shared with the current Node. The new Node has no connections.
* @return NodePtr
*/
NodePtr cloneSharedOperators() const;
/**
* @brief Clone the Node. Every attribute is copied and the Operator is cloned as well, except for Producers, whose Operator remains shared. The new Node has no connections.
* @return NodePtr
*/
NodePtr cloneSharedProducers() const;
/**
* @brief Clone the Node and its Operator. The new Node has no connections.
* @return NodePtr
*/
NodePtr clone() const;
/**
* @brief Callback function to clone the Node while keeping the same Operator instance. The new Node has no connections.
* @param node Node to clone.
* @return NodePtr
*/
static NodePtr cloneSharedOperators(NodePtr node) {
return node->cloneSharedOperators();
}
/**
* @brief Callback function to clone the Node. Every attribute is copied and the Operator is cloned as well, except for Producers, whose Operator remains shared. The new Node has no connections.
* @param node Node to clone.
* @return NodePtr
*/
static NodePtr cloneSharedProducers(NodePtr node) {
return node->cloneSharedProducers();
}
/**
* @brief Callback function to clone the Node and its Operator. The new Node has no connections.
* @param node Node to clone.
* @return NodePtr
*/
static NodePtr clone(NodePtr node) {
return node->clone();
}
private:
///////////////////////////////////////////////////////
// OPERATORS
......
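The static overloads above exist so they can be passed as the NodePtr(*)(NodePtr) callback expected by GraphView::cloneCallback(). A capture-less lambda with the same signature converts to that function-pointer type as well; the sketch below is an assumed, illustrative policy (using Node::type(), which elsewhere in Aidge returns the operator type string) that deep-clones Producer nodes and shares every other Operator:

    #include "aidge/graph/GraphView.hpp"
    #include "aidge/graph/Node.hpp"

    std::shared_ptr<Aidge::GraphView> mixedClone(std::shared_ptr<Aidge::GraphView> g) {
        return g->cloneCallback([](Aidge::NodePtr node) -> Aidge::NodePtr {
            // Illustrative choice: deep-clone Producers, share all other Operators.
            return (node->type() == "Producer") ? node->clone()
                                                : node->cloneSharedOperators();
        });
    }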
......@@ -32,14 +32,13 @@ class Add_Op : public Operator,
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, NUM> mInputs;
-const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>(shared_from_this());
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char* Type = "Add";
constexpr Add_Op()
-: Operator(Type),
-  mOutput(std::make_shared<Tensor>())
: Operator(Type)
{
assert(NUM > 0 && "Add should have at least one input");
for (std::size_t i = 0; i<NUM; ++i) {
......@@ -48,6 +47,31 @@ public:
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Add_Op(const Add_Op<NUM>& op)
: Operator(Type),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
assert(NUM > 0 && "Add should have at least one input");
for (std::size_t i = 0; i<NUM; ++i) {
mInputs[i] = std::make_shared<Tensor>();
}
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Add_Op<NUM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Add_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Add_Op>(*this);
}
// Data operator[](const char* inputName) override final {
// std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
// (strcmp(inputName, "weight") ? mInputs[1] :
......
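This copy-constructor/clone() pair is the pattern repeated for every concrete operator in this merge request: clone() dispatches virtually to the right copy-constructor, so an Operator can be duplicated through a base-class pointer without knowing its concrete type. A minimal sketch (the include path and function name are assumptions for illustration):

    #include "aidge/operator/Add.hpp"

    void cloneThroughBase() {
        std::shared_ptr<Aidge::Operator> op = std::make_shared<Aidge::Add_Op<2>>();
        std::shared_ptr<Aidge::Operator> copy = op->clone(); // calls Add_Op<2>'s copy-constructor
        // copy has the same parameters and a copied output tensor, but no inputs associated
    }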
......@@ -58,11 +58,32 @@ public:
: Operator(Type),
Parameterizable_(param<AvgPoolingParam::StrideDims>(stride_dims),
param<AvgPoolingParam::KernelDims>(kernel_dims),
-param<AvgPoolingParam::PaddingDims>(padding_dims)),
-  mOutput(std::make_shared<Tensor>()) {
param<AvgPoolingParam::PaddingDims>(padding_dims)) {
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
AvgPooling_Op(const AvgPooling_Op<DIM>& op)
: Operator(Type),
Parameterizable_(op),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::AvgPooling_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<AvgPooling_Op<DIM>>(*this);
}
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 1 && "operator supports only 1 input");
(void) inputIdx; // avoid unused warning
......
......@@ -51,11 +51,32 @@ public:
constexpr BatchNorm_Op(float epsilon, float momentum)
: Operator(Type),
Parameterizable_(param<BatchNormParam::Epsilon>(epsilon),
-param<BatchNormParam::Momentum>(momentum)),
-  mOutput(std::make_shared<Tensor>()) {
param<BatchNormParam::Momentum>(momentum)) {
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
BatchNorm_Op(const BatchNorm_Op<DIM>& op)
: Operator(Type),
Parameterizable_(op),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::BatchNorm_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<BatchNorm_Op<DIM>>(*this);
}
// Data operator[](const char* inputName) override final {
// std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
// (strcmp(inputName, "weight") ? mInputs[1] :
......
......@@ -61,11 +61,32 @@ public:
param<ConvParam::InChannels>(in_channels),
param<ConvParam::OutChannels>(out_channels),
param<ConvParam::KernelDims>(kernel_dims),
-param<ConvParam::PaddingDims>(padding_dims)),
-  mOutput(std::make_shared<Tensor>()) {
param<ConvParam::PaddingDims>(padding_dims)) {
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Conv_Op(const Conv_Op<DIM>& op)
: Operator(Type),
Parameterizable_(op),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Conv_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Conv_Op<DIM>>(*this);
}
// Data operator[](const char* inputName) override final {
// std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
// (strcmp(inputName, "weight") ? mInputs[1] :
......
......@@ -66,11 +66,32 @@ class ConvDepthWise_Op : public Operator,
param<ConvDepthWiseParam::DilationDims>(dilation_dims),
param<ConvDepthWiseParam::Channels>(0),
param<ConvDepthWiseParam::KernelDims>(kernel_dims),
-param<ConvDepthWiseParam::PaddingDims>(padding_dims)),
-  mOutput(std::make_shared<Tensor>()) {
param<ConvDepthWiseParam::PaddingDims>(padding_dims)) {
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op)
: Operator(Type),
Parameterizable_(op),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::ConvDepthWise_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<ConvDepthWise_Op<DIM>>(*this);
}
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 3 && "operator supports only 3 inputs");
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
......
......@@ -51,12 +51,33 @@ public:
: Operator(Type),
Parameterizable_(
param<FCParam::OutChannels>(out_channels),
-param<FCParam::NoBias>(noBias)),
-  mOutput(std::make_shared<Tensor>())
param<FCParam::NoBias>(noBias))
{
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
FC_Op(const FC_Op& op)
: Operator(Type),
Parameterizable_(op),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::FC_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<FC_Op>(*this);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 3 && "operator supports only 3 inputs");
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
......
......@@ -16,6 +16,7 @@
#include <vector>
#include <string>
#include <cassert>
#include <cstring>
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"
......@@ -28,12 +29,15 @@ class GenericOperator_Op
: public Operator,
public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)> {
private:
using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
CParameter mParams;
IOIndex_t mNbDataIn;
IOIndex_t mNbIn;
IOIndex_t mNbOut;
std::vector<std::shared_ptr<Tensor>> mInputs;
std::vector<std::shared_ptr<Tensor>> mOutputs;
ComputeDimsFunc mComputeOutputDims;
public:
GenericOperator_Op(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut)
......@@ -49,6 +53,32 @@ class GenericOperator_Op
}
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
GenericOperator_Op(const GenericOperator_Op& op)
: Operator(op.type().c_str()), mParams(op.mParams), mNbDataIn(op.mNbDataIn), mNbIn(op.mNbIn), mNbOut(op.mNbOut)
{
// cpy-ctor
mInputs = std::vector<std::shared_ptr<Tensor>>(mNbIn);
for (std::size_t i = 0; i < mNbIn; ++i) {
mInputs[i] = std::make_shared<Tensor>();
}
mOutputs = std::vector<std::shared_ptr<Tensor>>(mNbOut);
for (std::size_t i = 0; i < mNbOut; ++i) {
mOutputs[i] = std::make_shared<Tensor>(*op.mOutputs[i]);
}
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::GenericOperator_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<GenericOperator_Op>(*this);
}
/**
* @brief Get the Parameter object identified by its name.
* @tparam T expected parameter type.
......@@ -84,23 +114,55 @@ class GenericOperator_Op
mParams.Add<T>(key, std::forward<T>(value));
}
// Helper functions that can be used with setComputeOutputDims():
static const ComputeDimsFunc Identity;
void setComputeOutputDims(ComputeDimsFunc func) {
mComputeOutputDims = func;
}
std::string getParameterType(std::string const &key) { return mParams.getParamType(key); }
std::vector<std::string> getParametersName() { return mParams.getParametersName(); }
// Override Virtual Opertor methods
-void associateInput(const IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final {
-    printf("Info: using associateInput() on a GenericOperator.\n");
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < mNbIn && "input index out of range");
if (strcmp(data->type(), Tensor::Type) == 0) {
// TODO: associate input only if of type Tensor, otherwise do nothing
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
}
void computeOutputDims() override final {
-assert(false && "Cannot compute output dim of a GenericOperator");
if (mComputeOutputDims) {
std::vector<std::vector<size_t>> inputsDims(mNbIn, std::vector<size_t>());
for (std::size_t i = 0; i < mNbIn; ++i) {
if (mInputs[i]) {
inputsDims[i] = mInputs[i]->dims();
}
}
const auto& outputsDims = mComputeOutputDims(inputsDims);
assert(outputsDims.size() == mNbOut && "The provided ComputeDimsFunc function returns the wrong number of outputs");
for (std::size_t i = 0; i < mNbOut; ++i) {
mOutputs[i]->resize(outputsDims[i]);
}
}
else {
assert(false && "Cannot compute output dim of a GenericOperator");
}
}
bool outputDimsForwarded() const override final {
-assert(false && "GenericOperator cannot forward dims");
-return false;
if (mComputeOutputDims) {
return !(mOutputs[0]->empty());
}
else {
assert(false && "GenericOperator cannot forward dims");
return false;
}
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
......
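With setComputeOutputDims(), a GenericOperator_Op can now take part in dimension propagation instead of asserting. A minimal sketch, assuming a one-input, one-output generic operator whose output simply mirrors its input dimensions (the predeclared Identity helper presumably covers this same common case; the "MyOp" type name is illustrative):

    #include "aidge/operator/GenericOperator.hpp"

    void genericDims() {
        auto op = std::make_shared<Aidge::GenericOperator_Op>("MyOp", 1, 1, 1);
        op->setComputeOutputDims(
            [](const std::vector<std::vector<size_t>>& inputsDims) {
                return inputsDims; // one output with the same dims as the single input
            });
        // once an input Tensor is associated and sized, computeOutputDims()
        // calls the lambda and resizes the output Tensor accordingly
    }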
......@@ -53,6 +53,28 @@ public:
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
LeakyReLU_Op(const LeakyReLU_Op& op)
: Operator(Type),
Parameterizable_(op),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::LeakyReLU_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<LeakyReLU_Op>(*this);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx == 0 && "operator supports only 1 input");
(void) inputIdx; // avoid unused warning
......
......@@ -49,12 +49,33 @@ public:
Matmul_Op(DimSize_t out_channels)
: Operator(Type),
Parameterizable_(
-param<MatmulParam::OutChannels>(out_channels)),
-  mOutput(std::make_shared<Tensor>())
param<MatmulParam::OutChannels>(out_channels))
{
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Matmul_Op(const Matmul_Op& op)
: Operator(Type),
Parameterizable_(op),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Matmul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Matmul_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Matmul_Op>(*this);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
......
......@@ -63,6 +63,28 @@ public:
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
MaxPooling_Op(const MaxPooling_Op<DIM>& op)
: Operator(Type),
Parameterizable_(op),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::MaxPooling_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<MaxPooling_Op<DIM>>(*this);
}
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 1 && "operator supports only 1 input");
(void) inputIdx; // avoid unused warning
......
......@@ -21,6 +21,25 @@ public:
: Operator("MetaOp")
{
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
MetaOperator(const MetaOperator& op)
: Operator("MetaOp")
{
// cpy-ctor
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::MetaOperator
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<MetaOperator>(*this);
}
~MetaOperator() = default;
};
}
......
......@@ -35,8 +35,18 @@ private:
public:
Operator() = delete;
Operator(const char* type) : mType(type) {}
virtual std::shared_ptr<Operator> clone() const = 0;
virtual ~Operator();
Operator(const Operator& op):
std::enable_shared_from_this<Operator>()
{
mType = op.mType;
mImpl = nullptr;
// Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation.
// See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion.
// Hooks are not copied.
}
public:
......
......@@ -29,15 +29,14 @@ class Producer_Op
public Registrable<Producer_Op, std::string, std::unique_ptr<OperatorImpl>(
const Producer_Op &)> {
private:
-std::shared_ptr<Tensor> mOutput;
std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char* Type = "Producer";
template <std::size_t DIM>
Producer_Op(const std::array<DimSize_t, DIM>& dims)
-: Operator(Type),
-  mOutput(std::make_shared<Tensor>())
: Operator(Type)
{
//ctor
setDatatype(DataType::Float32);
......@@ -51,6 +50,27 @@ public:
setDatatype(tensor->dataType());
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Producer_Op(const Producer_Op& op)
: Operator(Type),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Producer_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Producer_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Producer_Op>(*this);
}
void associateInput(const IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final {
assert(false && "Producer operator takes no input");
}
......
......@@ -42,6 +42,27 @@ public:
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
ReLU_Op(const ReLU_Op& op)
: Operator(Type),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<ReLU_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::ReLU_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<ReLU_Op>(*this);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx == 0 && "operator supports only 1 input");
(void) inputIdx; // avoid unused warning
......
......@@ -55,6 +55,28 @@ public:
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Scaling_Op(const Scaling_Op& op)
: Operator(Type),
Parameterizable_(op),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Scaling_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Scaling_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Scaling_Op>(*this);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx == 0 && "operator supports only 1 input");
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
......@@ -84,7 +106,7 @@ public:
}
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx == 0) && "Scaling Operator has only 1 input");
(void) inputIdx; // avoid unused warning
return mInput;
......
......@@ -42,6 +42,27 @@ public:
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Softmax_Op(const Softmax_Op& op)
: Operator(Type),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Softmax_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Softmax_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Softmax_Op>(*this);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx == 0 && "operator supports only 1 input");
(void) inputIdx; // avoid unused warning
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_RECIPIES_LABELGRAPH_H_
#define AIDGE_RECIPIES_LABELGRAPH_H_
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Node.hpp"
namespace Aidge {
NodePtr nodeLabel(NodePtr node);
/**
* @brief Generate the graph for the pixel-wise labels corresponding to a data graph, taking into account the scaling changes (padding, stride, pooling...).
* @details Right now, the behavior is to replace the following operators:
* - Conv: MaxPooling
* - ConvDepthWise: MaxPooling
* - AvgPooling: MaxPooling
* - MaxPooling: MaxPooling
* - all others: identity (removed)
* @param graph Data graph
* @return Computing graph for the labels derived from the data graph
*/
std::shared_ptr<GraphView> labelGraph(std::shared_ptr<GraphView> graph);
} // namespace Aidge
#endif /* AIDGE_RECIPIES_LABELGRAPH_H_ */
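A minimal usage sketch (the include path is inferred from the header guard, and the data graph is assumed to be built elsewhere):

    #include "aidge/recipies/LabelGraph.hpp"

    std::shared_ptr<Aidge::GraphView> buildLabelGraph(std::shared_ptr<Aidge::GraphView> dataGraph) {
        // Conv/ConvDepthWise/AvgPooling/MaxPooling become MaxPooling nodes;
        // every other operator is treated as identity and removed.
        return Aidge::labelGraph(dataGraph);
    }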