Commit 61d71c0c authored by vincent lorrain

Merge remote-tracking branch 'origin/main' into Refactoring/GraphRegex

parents 09f5072d 50f86506
Merge request !29: GraphRegex interface class
Pipeline #34056 passed
Showing 1038 additions and 94 deletions
@@ -31,18 +31,23 @@
#include "aidge/operator/BatchNorm.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/ConvDepthWise.hpp"
#include "aidge/operator/Div.hpp"
#include "aidge/operator/FC.hpp"
#include "aidge/operator/GenericOperator.hpp"
#include "aidge/operator/MatMul.hpp"
#include "aidge/operator/MaxPooling.hpp"
#include "aidge/operator/MetaOperator.hpp"
#include "aidge/operator/MetaOperatorDefs.hpp"
#include "aidge/operator/Mul.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/Pad.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/operator/Pow.hpp"
#include "aidge/operator/ReLU.hpp"
#include "aidge/operator/Softmax.hpp"
#include "aidge/operator/Scaling.hpp"
#include "aidge/operator/Softmax.hpp"
#include "aidge/operator/Sqrt.hpp"
#include "aidge/operator/Sub.hpp"
#include "aidge/scheduler/Scheduler.hpp"
#include "aidge/utils/Attributes.hpp"
#include "aidge/utils/StaticAttributes.hpp"
@@ -322,26 +322,33 @@ public:
/**
* @brief Insert a node (newParentNode) as a parent of the passed node (childNode).
*
* @param childNode Node that gets a new parent.
* @param newParentNode Inserted Node.
* @param childInputTensorIdx Index of the input Tensor for the childNode linked to the inserted Node output.
* @param newParentInputTensorIdx Index of the input Tensor for the newParentNode linked to the former parent of childNode.
* @param newParentOutputTensorIdx Index of the output Tensor for the newParentNode linked to the childNode's input Tensor.
*/
void insertParent(NodePtr childNode,
                  NodePtr newParentNode,
                  IOIndex_t childInputTensorIdx,
                  IOIndex_t newParentInputTensorIdx,
                  IOIndex_t newParentOutputTensorIdx);
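// For orientation, a minimal usage sketch (hedged: `g` is an existing
// std::shared_ptr<GraphView>; `conv` and `relu` are Nodes of `g` with
// conv output #0 feeding relu input #0; the Scaling factory call is
// illustrative):
//
//   std::shared_ptr<Node> scaling = Scaling();
//   g->insertParent(relu,     // childNode: gets the new parent
//                   scaling,  // newParentNode: the Node being inserted
//                   0,        // childInputTensorIdx: relu input fed by conv
//                   0,        // newParentInputTensorIdx: now fed by conv
//                   0);       // newParentOutputTensorIdx: feeds relu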
/**
 * @brief Replace a set of Nodes in every available GraphView with a new set of Nodes if possible.
 * Both sets should include all the necessary Producers.
 * @details Replaced Nodes are removed from every GraphView pointing at them.
 * The oldNodes set should have exactly one input and one output
 * Tensor so that the newNodes set can be connected automatically.
 * @param oldNodes Current set of shared_ptr<Node> to replace.
 * @param newNodes New set of shared_ptr<Node>.
 * @return true if the replacement could be performed, false otherwise.
 */
static bool replace(const std::set<NodePtr>& oldNodes, const std::set<NodePtr>& newNodes);
void updateInputNodes();
/**
* @brief Recompute the set of output Nodes from scratch.
@@ -379,6 +386,12 @@ public:
*/
std::shared_ptr<GraphView> cloneCallback(NodePtr(*cloneNode)(NodePtr)) const;
/**
* @brief Get the total number of free data input connections over all input Nodes of the GraphView object.
* @return IOIndex_t
*/
IOIndex_t getNbFreeDataInputs() const;
private:
///////////////////////////////////////////////////////
// TENSOR MANAGEMENT
@@ -390,12 +403,6 @@
*/
IOIndex_t getNbDataInputs() const;
/**
* @brief Update the set of inputNodes with a new Node, checking if it can be
* added and removing any Node not part of mInputNode anymore.
@@ -258,9 +258,7 @@
}
inline void removeView(const std::shared_ptr<GraphView> &graphPtr) {
mViews.erase(graphPtr);
}
/**
@@ -402,7 +400,7 @@
/**
* @brief Get the set of pointers to connected nodes within a distance of delta.
* @details The recursion is cut at distance delta.
* Returns nullptr if nothing is found.
* @param delta Input delta.
* @return std::shared_ptr<Node>
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_DIV_H_
#define AIDGE_CORE_OPERATOR_DIV_H_
#include <array>
#include <cassert>
#include <cstring>
#include <memory>
#include <vector>
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
class Div_Op : public Operator,
public Registrable<Div_Op, std::string, std::unique_ptr<OperatorImpl>(const Div_Op&)> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char* Type = "Div";
Div_Op()
: Operator(Type)
{
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Div_Op(const Div_Op& op)
: Operator(Type),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Div_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Div_Op>(*this);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInputs[0]->empty())
mOutput->resize(mInputs[0]->dims());
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Div operator.");
return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx < 2) && "Div Operator has 2 inputs");
(void) inputIdx; // avoid unused warning
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Div Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string& name) override {
mImpl = Registrar<Div_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInputs[0]->setBackend(name);
mInputs[1]->setBackend(name);
}
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
mInputs[0]->setDatatype(datatype);
mInputs[1]->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return 2; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
inline std::shared_ptr<Node> Div(const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Div_Op>(), name);
}
}
#endif /* AIDGE_CORE_OPERATOR_DIV_H_ */
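For orientation, a hedged sketch of wiring this operator into a small graph; `producerA` and `producerB` are illustrative upstream Nodes, and a registered "cpu" implementation of Div_Op is assumed:

std::shared_ptr<Node> div = Div("div1");
producerA->addChild(div, 0, 0);  // numerator feeds data input #0
producerB->addChild(div, 0, 1);  // denominator feeds data input #1
div->getOperator()->setDatatype(DataType::Float32);
div->getOperator()->setBackend("cpu");  // assumes a backend registered an impl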
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_MUL_H_
#define AIDGE_CORE_OPERATOR_MUL_H_
#include <array>
#include <cassert>
#include <cstring>
#include <memory>
#include <vector>
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
class Mul_Op : public Operator,
public Registrable<Mul_Op, std::string, std::unique_ptr<OperatorImpl>(const Mul_Op&)> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char* Type = "Mul";
Mul_Op()
: Operator(Type)
{
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Mul_Op(const Mul_Op& op)
: Operator(Type),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Mul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Mul_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Mul_Op>(*this);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInputs[0]->empty())
mOutput->resize(mInputs[0]->dims());
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Mul operator.");
return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx < 2) && "Mul Operator has 2 inputs");
(void) inputIdx; // avoid unused warning
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Mul Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string& name) override {
mImpl = Registrar<Mul_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInputs[0]->setBackend(name);
mInputs[1]->setBackend(name);
}
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
mInputs[0]->setDatatype(datatype);
mInputs[1]->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return 2; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
inline std::shared_ptr<Node> Mul(const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
}
}
#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_POW_H_
#define AIDGE_CORE_OPERATOR_POW_H_
#include <array>
#include <cassert>
#include <cstring>
#include <memory>
#include <vector>
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
class Pow_Op : public Operator,
public Registrable<Pow_Op, std::string, std::unique_ptr<OperatorImpl>(const Pow_Op&)> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char* Type = "Pow";
Pow_Op()
: Operator(Type)
{
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Pow_Op(const Pow_Op& op)
: Operator(Type),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Pow_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Pow_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Pow_Op>(*this);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInputs[0]->empty())
mOutput->resize(mInputs[0]->dims());
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Pow operator.");
return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx < 2) && "Pow Operator has 2 inputs");
(void) inputIdx; // avoid unused warning
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Pow Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string& name) override {
mImpl = Registrar<Pow_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInputs[0]->setBackend(name);
mInputs[1]->setBackend(name);
}
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
mInputs[0]->setDatatype(datatype);
mInputs[1]->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return 2; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
inline std::shared_ptr<Node> Pow(const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
}
}
#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_SQRT_H_
#define AIDGE_CORE_OPERATOR_SQRT_H_
#include <cassert>
#include <cstring>
#include <memory>
#include <vector>
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
class Sqrt_Op : public Operator,
public Registrable<Sqrt_Op, std::string, std::unique_ptr<OperatorImpl>(const Sqrt_Op&)> {
public:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char* Type = "Sqrt";
Sqrt_Op()
: Operator(Type)
{
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Sqrt_Op(const Sqrt_Op& op)
: Operator(Type),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Sqrt_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Sqrt_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Sqrt_Op>(*this);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx == 0 && "operator supports only 1 input");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInput->empty())
mOutput->resize(mInput->dims());
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx == 0) && "Sqrt Operator has only 1 input");
(void) inputIdx; // avoid unused warning
return mInput;
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Sqrt Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "operator supports only 1 input");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInput);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string& name) override {
mImpl = Registrar<Sqrt_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
mInput->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
inline std::shared_ptr<Node> Sqrt(const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Sqrt_Op>(), name);
}
}
#endif /* AIDGE_CORE_OPERATOR_SQRT_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_SUB_H_
#define AIDGE_CORE_OPERATOR_SUB_H_
#include <array>
#include <cassert>
#include <cstring>
#include <memory>
#include <vector>
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
class Sub_Op : public Operator,
public Registrable<Sub_Op, std::string, std::unique_ptr<OperatorImpl>(const Sub_Op&)> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char* Type = "Sub";
Sub_Op()
: Operator(Type)
{
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Sub_Op(const Sub_Op& op)
: Operator(Type),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
mImpl = op.mImpl ? Registrar<Sub_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Sub_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Sub_Op>(*this);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
if (!mInputs[0]->empty())
mOutput->resize(mInputs[0]->dims());
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Sub operator.");
return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert((inputIdx < 2) && "Sub Operator has 2 inputs");
(void) inputIdx; // avoid unused warning
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "Sub Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 2 && "operator supports only 2 inputs");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string& name) override {
mImpl = Registrar<Sub_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInputs[0]->setBackend(name);
mInputs[1]->setBackend(name);
}
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
mInputs[0]->setDatatype(datatype);
mInputs[1]->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return 2; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
inline std::shared_ptr<Node> Sub(const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
}
}
#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
@@ -12,6 +12,9 @@
#ifndef AIDGE_CORE_UTILS_RECIPIES_H_
#define AIDGE_CORE_UTILS_RECIPIES_H_
#include <memory>
#include <set>
#include "aidge/graph/Node.hpp"
#include "aidge/graph/GraphView.hpp"
@@ -47,7 +50,7 @@ void removeFlatten(std::set<std::shared_ptr<Node>> nodes);
* @param graphView Graph view to use graph matching on, in order to apply transformations.
*/
void removeFlatten(std::shared_ptr<GraphView> graphView);
// FUSE BN + FC || CONV -> FC || CONV
/**
@@ -26,7 +26,7 @@ void init_GraphView(py::module& m) {
.def("save", &GraphView::save, py::arg("path"), py::arg("verbose") = false,
R"mydelimiter(
Save the GraphView as a Mermaid graph in a .md file at the specified location.
:param path: save location
:type path: str
)mydelimiter")
@@ -34,14 +34,14 @@ void init_GraphView(py::module& m) {
.def("get_output_nodes", &GraphView::outputNodes,
R"mydelimiter(
Get set of output Nodes.
:rtype: list[Node]
)mydelimiter")
.def("get_input_nodes", &GraphView::inputNodes,
R"mydelimiter(
Get set of input Nodes.
:rtype: list[Node]
)mydelimiter")
@@ -49,7 +49,7 @@
py::arg("other_node"), py::arg("include_learnable_parameters") = true,
R"mydelimiter(
Add a Node to the current GraphView object.
:param other_node: Node to add
:type other_node: Node
:param includeLearnableParameter: include non-data inputs, like weights and biases. Default True.
@@ -66,18 +66,20 @@
py::arg("fromTensor") = 0U, py::arg("toTensor") = gk_IODefaultIndex,
R"mydelimiter(
Add a Node to the current GraphView object.
:param other_node: Node to add
:type other_node: Node
:param includeLearnableParameter: include non-data inputs, like weights and biases. Default True.
:type include_learnable_parameters: bool
)mydelimiter")
.def("replace_with", &GraphView::replaceWith, py::arg("new_nodes"),
.def_static("replace", &GraphView::replace, py::arg("old_nodes"), py::arg("new_nodes"),
R"mydelimiter(
Replace the old set of Nodes with the new set of Nodes in every GraphView, if possible.
:param old_nodes: Nodes actually connected in the GraphViews.
:type old_nodes: set of Node
:param new_nodes: Nodes with inner connections already taken care of.
:type new_nodes: set of Node
:return: Whether any replacement has been made.
:rtype: bool
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/operator/Div.hpp"
#include "aidge/operator/Operator.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Div(py::module& m) {
py::class_<Div_Op, std::shared_ptr<Div_Op>, Operator>(m, "DivOp", py::multiple_inheritance())
.def("get_inputs_name", &Div_Op::getInputsName)
.def("get_outputs_name", &Div_Op::getOutputsName);
m.def("Div", &Div, py::arg("name") = "");
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/operator/Mul.hpp"
#include "aidge/operator/Operator.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Mul(py::module& m) {
py::class_<Mul_Op, std::shared_ptr<Mul_Op>, Operator>(m, "MulOp", py::multiple_inheritance())
.def("get_inputs_name", &Mul_Op::getInputsName)
.def("get_outputs_name", &Mul_Op::getOutputsName);
m.def("Mul", &Mul, py::arg("name") = "");
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/operator/Pow.hpp"
#include "aidge/operator/Operator.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Pow(py::module& m) {
py::class_<Pow_Op, std::shared_ptr<Pow_Op>, Operator>(m, "PowOp", py::multiple_inheritance())
.def("get_inputs_name", &Pow_Op::getInputsName)
.def("get_outputs_name", &Pow_Op::getOutputsName);
m.def("Pow", &Pow, py::arg("name") = "");
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/operator/Sqrt.hpp"
#include "aidge/operator/Operator.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Sqrt(py::module& m) {
py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, Operator>(m, "SqrtOp", py::multiple_inheritance())
.def("get_inputs_name", &Sqrt_Op::getInputsName)
.def("get_outputs_name", &Sqrt_Op::getOutputsName);
m.def("Sqrt", &Sqrt, py::arg("name") = "");
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/operator/Sub.hpp"
#include "aidge/operator/Operator.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Sub(py::module& m) {
py::class_<Sub_Op, std::shared_ptr<Sub_Op>, Operator>(m, "SubOp", py::multiple_inheritance())
.def("get_inputs_name", &Sub_Op::getInputsName)
.def("get_outputs_name", &Sub_Op::getOutputsName);
m.def("Sub", &Sub, py::arg("name") = "");
}
} // namespace Aidge
@@ -25,15 +25,20 @@ void init_AvgPooling(py::module&);
void init_BatchNorm(py::module&);
void init_Conv(py::module&);
void init_ConvDepthWise(py::module&);
void init_Div(py::module&);
void init_FC(py::module&);
void init_GenericOperator(py::module&);
void init_LeakyReLU(py::module&);
void init_MatMul(py::module&);
void init_MaxPooling(py::module&);
void init_MetaOperatorDefs(py::module&);
void init_Mul(py::module&);
void init_Producer(py::module&);
void init_Pow(py::module&);
void init_ReLU(py::module&);
void init_Softmax(py::module&);
void init_Sqrt(py::module&);
void init_Sub(py::module&);
void init_Node(py::module&);
void init_GraphView(py::module&);
@@ -67,14 +72,19 @@ void init_Aidge(py::module& m){
init_BatchNorm(m);
init_Conv(m);
init_ConvDepthWise(m);
init_Div(m);
init_FC(m);
init_GenericOperator(m);
init_LeakyReLU(m);
init_MatMul(m);
init_MaxPooling(m);
init_MetaOperatorDefs(m);
init_Mul(m);
init_Pow(m);
init_ReLU(m);
init_Softmax(m);
init_Sqrt(m);
init_Sub(m);
init_Producer(m);
init_Match(m);
@@ -17,6 +17,7 @@
#include "aidge/utils/Types.h"
#include "aidge/graph/GraphView.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
///////////////////////////////////////////////////////
// FUNCTIONAL DESCRIPTION
@@ -529,38 +530,72 @@ void Aidge::GraphView::insertParent(NodePtr childNode,
}
bool Aidge::GraphView::replace(const std::set<Aidge::NodePtr>& oldNodes, const std::set<Aidge::NodePtr>& newNodes) {
    // TODO: handle case where an oldNodes parameter does not come from a Producer but another Node (not included in oldNodes)
    // How to distinguish it from data input?
    // TODO: Parameter Tensors could be identified with their dimensions
    // TODO: Take GraphView as input parameters since new Nodes should be connected whatever.
    // It also avoids specifying each producer since they are automatically included

    auto oldG = std::make_shared<GraphView>("oldG");
    oldG->add(oldNodes, false);
    auto newG = std::make_shared<GraphView>("newG");
    newG->add(newNodes, false);

    if ((oldG->inputNodes().size() == 0) || (oldG->outputNodes().size() != 1)) {
        return false;
    }
    if (!(newNodes.empty()) && ((newG->inputNodes().size() == 0) ||
                                (newG->outputNodes().size() != 1))) {
        return false;
    }

    // there is at least one inputNode in the old/new GraphView
    std::shared_ptr<Node> firstPreviousInputNode = (*(oldG->inputNodes()).begin());
    std::shared_ptr<Node> firstPreviousOutputNode = (*(oldG->outputNodes()).begin());

    // find the Node to link to the new input Node:
    // compute the number of inputs of firstPreviousInputNode not in the oldNodes set
    std::size_t nbExternalInputs = 0;
    std::shared_ptr<Node> externalInput = nullptr;
    IOIndex_t externalInputId = gk_IODefaultIndex;
    for (const auto& input : firstPreviousInputNode->inputs()) {
        if (oldNodes.find(input.first) == oldNodes.end()) { // Node connected to another Node outside of oldG
            nbExternalInputs++;
            externalInput = input.first;
            externalInputId = input.second;
        }
    }
    if (nbExternalInputs > 1) {
        AIDGE_INTERNAL_ASSERT("Too many inputs to link for the oldNodes set");
    }

    if (oldG->inputNodes().size() > 1) {
        // one or no input has been identified; check that every input points to the same source
        for (const auto& previousInputNode : oldG->inputNodes()) {
            for (const auto& input : previousInputNode->inputs()) {
                if (oldNodes.find(input.first) == oldNodes.end()) {
                    if ((externalInput != input.first) || (externalInputId != input.second)) {
                        return false; // an inputNode points to an external Node different from the registered one
                    }
                }
            }
        }
    }

    if (firstPreviousOutputNode->nbOutputs() != 1) {
        return false;
    }

    // find Node to replicate output connections
    std::shared_ptr<Node> newOutputNode = newNodes.empty() ? externalInput : *(newG->outputNodes().begin());
    auto copyOutputs = firstPreviousOutputNode->outputs();

    // manage Views for newNodes
    // only keep common views to each node for the new set
    std::set<std::shared_ptr<GraphView>> commonGraphViews = (*oldNodes.begin())->views();
    for (const auto& nodePtr : oldNodes) {
        const auto nodeView = nodePtr->views();
        std::set<std::shared_ptr<GraphView>> intersection;
        std::set_intersection(commonGraphViews.begin(), commonGraphViews.end(),
@@ -568,32 +603,59 @@ bool Aidge::GraphView::replaceWith(std::set<std::shared_ptr<Node>> newNodes) {
                              std::inserter(intersection, intersection.begin()));
        commonGraphViews = intersection;
    }
    commonGraphViews.erase(oldG);
    commonGraphViews.erase(newG);

    // clean Nodes to replace
    // Do not include common Nodes to avoid cleaning Producers linked to newNodes
    std::set<std::shared_ptr<Node>> nodesToClean;
    std::set_difference(oldNodes.begin(), oldNodes.end(),
                        newNodes.begin(), newNodes.end(),
                        std::inserter(nodesToClean, nodesToClean.begin()));
    for (auto& nodePtr : nodesToClean) { nodePtr->resetConnections(true); }

    // copy output connections
    if (newOutputNode) {
        for (IOIndex_t o = 0; o < firstPreviousOutputNode->nbOutputs(); ++o) {
            auto outputPairs = copyOutputs[o];
            for (const auto& onePair : outputPairs) {
                newOutputNode->addChild(onePair.first, o, onePair.second);
            }
        }
    }

    // copy input connections
    if (!newNodes.empty() && externalInput) {
        for (const auto& newInputNode : newG->inputNodes()) {
            IOIndex_t inputId = 0;
            for (const auto& input : newInputNode->inputs()) {
                if (newNodes.find(input.first) == newNodes.end()) {
                    externalInput->addChild(newInputNode, externalInputId, inputId);
                }
                inputId++;
            }
        }
    }

    // insert new Nodes in the right GraphViews
    for (const auto& graphPtr : commonGraphViews) {
        graphPtr->add(newNodes, false);
        if (newNodes.empty()) {
            graphPtr->updateInputNodes();
            graphPtr->updateOutputNodes();
        }
    }

    for (const auto& node : oldNodes) {
        node->removeView(oldG);
    }
    for (const auto& node : newNodes) {
        node->removeView(newG);
    }
    return true;
}
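// A minimal usage sketch of replace() (hedged: `relu` is assumed to be a
// NodePtr already wired inside one or more GraphViews; the LeakyReLU factory
// comes from aidge/operator/LeakyReLU.hpp):
//
//   std::shared_ptr<Node> lrelu = LeakyReLU(0.01f);
//   const bool done = GraphView::replace({relu}, {lrelu});
//   // `done` is false when the old set has more than one distinct external
//   // input or more than one output Node; the graphs are left untouched then.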
void Aidge::GraphView::updateInputNodes() {
mInputNodes.clear();
for (const std::shared_ptr<Node>& go_ptr : mNodes) {
@@ -116,15 +116,14 @@ void Aidge::fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes){
bias->set<float>(output, biasValue);
}
    GraphView::replace(std::set<std::shared_ptr<Node>>({
        batchnorm,
        batchnorm->input(1).first,
        batchnorm->input(2).first,
        batchnorm->input(3).first,
        batchnorm->input(4).first
    }), {});
}
@@ -20,6 +20,8 @@
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/operator/GenericOperator.hpp"
#include "aidge/utils/ErrorHandling.hpp"
// Graph Regex
#include "aidge/graphmatching/GRegex.hpp"
#include "aidge/graphmatching/NodeRegex.hpp"
@@ -47,34 +49,32 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
    // Step 1 : Create FC
    // Fetch the output dimension through the weight size
    std::shared_ptr<Node> bias = (add->getParent(1)) ? add->getParent(1)->cloneSharedOperators() : nullptr;

    if (!(matmul->getParent(1))) {
        AIDGE_INTERNAL_ASSERT("No weight detected to produce the fuseMulAdd recipe.");
    }
    std::shared_ptr<Node> weight = matmul->getParent(1)->cloneSharedOperators();
    DimSize_t outSize = weight->getOperator()->output(0).dims<2>()[1];

    // Instantiate FC
    //std::shared_ptr<Node> fc = FC(dim[0], false, "Fc");
    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(outSize, bias ? false : true));

    // Step 2 : Branch existing producers & create the others
    // link weights & bias
    weight->addChild(fc, 0, 1);
    if (bias) {
        bias->addChild(fc, 0, 2);
    }

    // Step 3 : Update all graphviews that contain at least one node to replace
    // Case 1 : If all nodes are in a graph view : delete old nodes & branch input & output
    // Case 2 : If not all nodes are in a graph view : only delete the nodes from the graphview
    // Maybe create a central mechanism to update automatically all graph views rather than each node having graphview presence memory?
    auto newNodes = std::set<std::shared_ptr<Node>>({fc, weight, fc->getParent(2)});
    GraphView::replace({matmul, add, add->getParent(1), matmul->getParent(1)}, newNodes);
}
@@ -30,10 +30,8 @@ namespace Aidge {
flatten = element;
}
}
    // TODO : avoid using replace and use a remove method instead
    GraphView::replace({flatten}, {});
}
void removeFlatten(std::shared_ptr<GraphView> graphView){