Skip to content
Snippets Groups Projects
Commit 5b85ced9 authored by Olivier BICHLER's avatar Olivier BICHLER
Browse files
parents c89ffa58 6b5468bd
No related branches found
No related tags found
1 merge request!152Update Aidge export to take a graph view as an argument instead of a...
Pipeline #48715 passed
......@@ -49,23 +49,19 @@ class test_recipes(unittest.TestCase):
add0 = aidge_core.Add(2, name="Add0")
matmul1 = aidge_core.MatMul(name="MatMul1")
add1 = aidge_core.Add(2, name="Add1")
graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
w0 = aidge_core.Producer([1, 1], name="W0")
w0.add_child(matmul0, 0, 1)
graph_view.add(w0)
w0.add_child(matmul0, 0, 0)
b0 = aidge_core.Producer([1], name="B0")
b0.add_child(add0, 0, 1)
graph_view.add(b0)
w1 = aidge_core.Producer([1, 1], name="W1")
w1.add_child(matmul1, 0, 1)
graph_view.add(w1)
w1.add_child(matmul1, 0, 0)
b1 = aidge_core.Producer([1], name="B1")
b1.add_child(add1, 0, 1)
graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
graph_view.add(w0)
graph_view.add(b0)
graph_view.add(w1)
graph_view.add(b1)
old_nodes = graph_view.get_nodes()
......
......@@ -24,10 +24,7 @@
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/operator/Div.hpp"
#include "aidge/operator/Mul.hpp"
#include "aidge/operator/Sub.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/utils/ArrayHelpers.hpp"
......@@ -251,21 +248,7 @@ class Tensor : public Data,
* @param other
* @return Tensor
*/
Tensor operator+(const Tensor& other) const {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
auto add_ = Add_Op(2);
add_.associateInput(0, std::make_shared<Tensor>(*this));
add_.associateInput(1, std::make_shared<Tensor>(other));
add_.setDataType(dataType());
add_.setDataFormat(dataFormat());
add_.setBackend(mImpl->backend());
add_.forward();
// using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
return add_.getOutput(0)->clone();
}
Tensor operator+(const Tensor& other) const;
/**
* @brief Element-wise subtraction operation for two ``Tensor``s.
......@@ -276,21 +259,7 @@ class Tensor : public Data,
* @param other
* @return Tensor
*/
Tensor operator-(const Tensor& other) const {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
auto sub_ = Sub_Op();
sub_.associateInput(0, std::make_shared<Tensor>(*this));
sub_.associateInput(1, std::make_shared<Tensor>(other));
sub_.setDataType(dataType());
sub_.setDataFormat(dataFormat());
sub_.setBackend(mImpl->backend());
sub_.forward();
// using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
return sub_.getOutput(0)->clone();
}
Tensor operator-(const Tensor& other) const;
/**
* @brief Element-wise multiplication operation for two ``Tensor``s.
......@@ -301,21 +270,7 @@ class Tensor : public Data,
* @param other
* @return Tensor
*/
Tensor operator*(const Tensor& other) const {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
auto mul_ = Mul_Op();
mul_.associateInput(0, std::make_shared<Tensor>(*this));
mul_.associateInput(1, std::make_shared<Tensor>(other));
mul_.setDataType(dataType());
mul_.setDataFormat(dataFormat());
mul_.setBackend(mImpl->backend());
mul_.forward();
// using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
return mul_.getOutput(0)->clone();
}
Tensor operator*(const Tensor& other) const;
/**
* @brief Element-wise division operation for two ``Tensor``s.
......@@ -326,21 +281,8 @@ class Tensor : public Data,
* @param other
* @return Tensor
*/
Tensor operator/(const Tensor& other) const {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
auto div_ = Div_Op();
div_.associateInput(0, std::make_shared<Tensor>(*this));
div_.associateInput(1, std::make_shared<Tensor>(other));
div_.setDataType(dataType());
div_.setDataFormat(dataFormat());
div_.setBackend(mImpl->backend());
div_.forward();
// using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
return div_.getOutput(0)->clone();
}
Tensor operator/(const Tensor& other) const;
~Tensor() noexcept;
......@@ -432,7 +374,7 @@ public:
/**
* @brief Set the DataFormat of the Tensor and transpose data, only
* if the Tensor has already been initialized and copyTrans is true.
* In this case, a transposition occurs only if both previous format and
* In this case, a transposition occurs only if both previous format and
* new format are different from DataFormat::Default.
* @param df New DataFormat
* @param copyTrans If true (default), when both previous format and new
......@@ -512,6 +454,18 @@ public:
*/
constexpr std::size_t size() const noexcept { return mSize; }
/**
 * @brief Return the current capacity of the tensor, i.e. the actual memory
 * currently being allocated. It can be different from the size:
 * - Capacity can be 0 if the tensor memory was not yet initialized (because
 * of lazy initialization, memory is allocated only when it needs to be
 * accessed the first time).
 * - Capacity can be > size if the tensor was downsized but memory was not
 * reallocated.
 */
// Guard against a null implementation: with lazy initialization mImpl may
// not exist yet, which is exactly the documented "capacity == 0" case.
inline std::size_t capacity() const noexcept { return mImpl ? mImpl->capacity() : 0; }
/**
* @brief Change the dimensions of the Tensor object according to the given argument.
* If the overall size is not changed (meaning we actually only performed a
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_LN_H_
#define AIDGE_CORE_OPERATOR_LN_H_
#include <cassert>
#include <memory>
#include <vector>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
/**
 * @brief Natural logarithm operator: declares an element-wise ln operation
 * on a single input tensor. The actual kernel is provided by a registered
 * backend implementation (see Registrable / setBackend).
 */
class Ln_Op : public OperatorTensor,
public Registrable<Ln_Op, std::string, std::unique_ptr<OperatorImpl>(const Ln_Op&)> {
public:
// Operator type string, defined in the corresponding .cpp ("Ln").
static const std::string Type;
// 1 data input, 0 parameter inputs, 1 output.
Ln_Op() : OperatorTensor(Type, 1, 0, 1) {}
/**
 * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
 * @param op Operator to copy.
 */
Ln_Op(const Ln_Op& op)
: OperatorTensor(op)
{
// Re-create a backend implementation for the copy when the source has one;
// otherwise the copy is left without an implementation (mImpl == nullptr).
if (op.mImpl){
SET_IMPL_MACRO(Ln_Op, *this, op.backend());
} else {
mImpl = nullptr;
}
}
/**
 * @brief Clone the operator using its copy-constructor.
 * @see Operator::Ln_Op
 */
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Ln_Op>(*this);
}
// Select the backend implementation by name; defined in the .cpp.
void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
// Canonical input name(s), used by bindings and graph tooling.
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
// Canonical output name(s), used by bindings and graph tooling.
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
/**
 * @brief Build a graph Node wrapping a fresh Ln_Op (element-wise natural log).
 * @param name Optional node name (empty by default).
 * @return Newly created Node holding the operator.
 */
inline std::shared_ptr<Node> Ln(const std::string& name = "") {
    auto op = std::make_shared<Ln_Op>();
    return std::make_shared<Node>(op, name);
}
}
#endif /* AIDGE_CORE_OPERATOR_LN_H_ */
......@@ -17,7 +17,6 @@
#include <numeric>
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
......
......@@ -77,7 +77,7 @@ struct Registrar {
static auto create(const registrar_key& key){
const auto it = C::registry().find(key);
AIDGE_ASSERT(it != C::registry().cend(), "missing or invalid registrar key: {}\nDid you include/import the corresponding module?", key);
AIDGE_ASSERT(it != C::registry().cend(), "missing or invalid registrar key: {} for registrable object {}\nDid you include/import the corresponding module?\nIf so, it is possible that the object is not yet supported.", key, typeid(C).name());
return (*it).second;
}
......
......@@ -85,6 +85,7 @@ void init_Tensor(py::module& m){
.def("dtype", &Tensor::dataType)
.def("init_grad", &Tensor::initGrad)
.def("size", &Tensor::size)
.def("capacity", &Tensor::capacity)
.def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize)
.def("has_impl", &Tensor::hasImpl)
.def("get_coord", &Tensor::getCoord)
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Ln.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace py = pybind11;
namespace Aidge {
// Register the Ln operator with the Python module: exposes the LnOp class
// (with its static input/output name helpers) and the Ln() node factory.
void init_Ln(py::module& m) {
    auto lnClass = py::class_<Ln_Op, std::shared_ptr<Ln_Op>, OperatorTensor>(
        m, "LnOp", py::multiple_inheritance());
    lnClass.def(py::init<>());
    lnClass.def_static("get_inputs_name", &Ln_Op::getInputsName);
    lnClass.def_static("get_outputs_name", &Ln_Op::getOutputsName);

    m.def("Ln", &Ln, py::arg("name") = "");
}
} // namespace Aidge
......@@ -41,6 +41,7 @@ void init_Operator(py::module& m){
.def("forward", &Operator::forward)
// py::keep_alive forbide Python to garbage collect the implementation lambda as long as the Operator is not deleted !
.def("set_impl", &Operator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>())
.def("type", &Operator::type)
.def("get_impl", &Operator::getImpl)
.def("get_hook", &Operator::getHook)
.def("add_hook", &Operator::addHook)
......
......@@ -16,8 +16,105 @@
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/operator/Div.hpp"
#include "aidge/operator/Mul.hpp"
#include "aidge/operator/Sub.hpp"
#include "aidge/operator/Transpose.hpp"
#include "aidge/utils/Types.h"
/**
 * @brief Element-wise addition operation for two ``Tensor``s.
 * @note ``Tensor``s should be stored on the same backend.
 * @todo If input ``Tensor``s have a different dataType, the output should
 * have the dataType of the ``Tensor`` with the highest precision.
 *
 * @param other Right-hand operand.
 * @return Tensor Freshly allocated result tensor.
 */
Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
    // Both operands need an implementation with matching backend, data type
    // and data format before an Add kernel can be dispatched.
    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
    // Run a transient two-input Add operator on copies of the operands.
    auto addOp = Add_Op(2);
    const auto lhs = std::make_shared<Tensor>(*this);
    const auto rhs = std::make_shared<Tensor>(other);
    addOp.associateInput(0, lhs);
    addOp.associateInput(1, rhs);
    addOp.setDataType(dataType());
    addOp.setDataFormat(dataFormat());
    addOp.setBackend(mImpl->backend());
    addOp.forward();
    // Clone so the returned Tensor owns its data after addOp is destroyed.
    return addOp.getOutput(0)->clone();
}
/**
 * @brief Element-wise subtraction operation for two ``Tensor``s.
 * @note ``Tensor``s should be stored on the same backend.
 * @todo If input ``Tensor``s have a different dataType, the output should
 * have the dataType of the ``Tensor`` with the highest precision.
 *
 * @param other Right-hand operand (subtrahend).
 * @return Tensor Freshly allocated result tensor.
 */
Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
    // Both operands need an implementation with matching backend, data type
    // and data format before a Sub kernel can be dispatched.
    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
    // Run a transient Sub operator on copies of the operands.
    auto subOp = Sub_Op();
    const auto lhs = std::make_shared<Tensor>(*this);
    const auto rhs = std::make_shared<Tensor>(other);
    subOp.associateInput(0, lhs);
    subOp.associateInput(1, rhs);
    subOp.setDataType(dataType());
    subOp.setDataFormat(dataFormat());
    subOp.setBackend(mImpl->backend());
    subOp.forward();
    // Clone so the returned Tensor owns its data after subOp is destroyed.
    return subOp.getOutput(0)->clone();
}
/**
 * @brief Element-wise multiplication operation for two ``Tensor``s.
 * @note ``Tensor``s should be stored on the same backend.
 * @todo If input ``Tensor``s have a different dataType, the output should
 * have the dataType of the ``Tensor`` with the highest precision.
 *
 * @param other Right-hand operand.
 * @return Tensor Freshly allocated result tensor.
 */
Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
    // Both operands need an implementation with matching backend, data type
    // and data format before a Mul kernel can be dispatched.
    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
    // Run a transient Mul operator on copies of the operands.
    auto mulOp = Mul_Op();
    const auto lhs = std::make_shared<Tensor>(*this);
    const auto rhs = std::make_shared<Tensor>(other);
    mulOp.associateInput(0, lhs);
    mulOp.associateInput(1, rhs);
    mulOp.setDataType(dataType());
    mulOp.setDataFormat(dataFormat());
    mulOp.setBackend(mImpl->backend());
    mulOp.forward();
    // Clone so the returned Tensor owns its data after mulOp is destroyed.
    return mulOp.getOutput(0)->clone();
}
/**
 * @brief Element-wise division operation for two ``Tensor``s.
 * @note ``Tensor``s should be stored on the same backend.
 * @todo If input ``Tensor``s have a different dataType, the output should
 * have the dataType of the ``Tensor`` with the highest precision.
 *
 * @param other Right-hand operand (divisor).
 * @return Tensor Freshly allocated result tensor.
 */
Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
    // Both operands need an implementation with matching backend, data type
    // and data format before a Div kernel can be dispatched.
    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
    // Run a transient Div operator on copies of the operands.
    auto divOp = Div_Op();
    const auto lhs = std::make_shared<Tensor>(*this);
    const auto rhs = std::make_shared<Tensor>(other);
    divOp.associateInput(0, lhs);
    divOp.associateInput(1, rhs);
    divOp.setDataType(dataType());
    divOp.setDataFormat(dataFormat());
    divOp.setBackend(mImpl->backend());
    divOp.forward();
    // Clone so the returned Tensor owns its data after divOp is destroyed.
    return divOp.getOutput(0)->clone();
}
Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
if (this == &other) {
return *this;
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/operator/Ln.hpp"
#include <memory>
#include <string>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/Types.h"
const std::string Aidge::Ln_Op::Type = "Ln";
// Select the backend implementation for this Ln operator and propagate the
// backend/device choice to its output tensor.
void Aidge::Ln_Op::setBackend(const std::string& name, DeviceIdx_t device) {
    // Look up the factory registered under `name`, then build the kernel.
    auto implFactory = Registrar<Ln_Op>::create(name);
    mImpl = implFactory(*this);
    mOutputs[0]->setBackend(name, device);
}
......@@ -52,6 +52,12 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
&& matmulNode->getParent(0) && matmulNode->getParent(0)->getOperator()->type() != Producer_Op::Type))
{
weight = matmulNode->getParent(1);
// Transpose weights because weight Tensor is in first input
auto weightOpTensor = std::static_pointer_cast<OperatorTensor>(weight->getOperator());
const std::shared_ptr<Aidge::Tensor>& weightTensor = weightOpTensor->getOutput(0);
std::vector<DimSize_t> shape = weightTensor->dims();
std::reverse(shape.begin(), shape.end());
weightTensor->copyTranspose(*weightTensor, std::vector<Aidge::DimSize_t>({1ul, 0ul}));
}
else if ((matmulNode->getParent(0) && !matmulNode->getParent(1))
|| (matmulNode->getParent(0) && matmulNode->getParent(0)->getOperator()->type() == Producer_Op::Type
......@@ -82,7 +88,7 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
break;
}
}
AIDGE_ASSERT(outSize, "Couldnt get output number of channels for FC operator.");
AIDGE_ASSERT(outSize, "Could not get output number of channels for FC operator.");
// Instanciate FC
std::string fcName = matmulNode->name();
......@@ -138,4 +144,4 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::GraphView> graphView){
}
}
\ No newline at end of file
}
......@@ -17,6 +17,7 @@
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Testing.hpp"
#include "aidge/graph/OpArgs.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/operator/ReLU.hpp"
#include "aidge/operator/MetaOperatorDefs.hpp"
#include "aidge/operator/Producer.hpp"
......@@ -367,7 +368,7 @@ TEST_CASE("[core/graph] Matching") {
g1->save("graph_single_pass");
auto gm = SinglePassGraphMatching(g1);
const auto start = std::chrono::system_clock::now();
const auto results = gm.match("Conv->ReLU#;ReLU#->Dummy");
const auto end = std::chrono::system_clock::now();
......
......@@ -31,11 +31,11 @@ TEST_CASE("GraphRegexUser") {
g1->addChild(fc, "c");
g1->addChild(conv2, "c1");
g1->addChild(fc2, "c2");
///
std::shared_ptr<GraphRegex> sut = std::make_shared<GraphRegex>();
sut->setNodeKey("C",+[](NodePtr NodeOp){return NodeOp->type() == "FC";});
sut->setNodeKey("A","C($)==True");
sut->addQuery("A");
auto match = sut->match(g1);
......@@ -163,14 +163,14 @@ TEST_CASE("GraphRegexUser") {
auto w1 = Producer({5,5},"W1");
auto input = Producer({2,5}, "input");
input->addChild(matmul0, 0, 0);
w0->addChild(matmul0, 0, 1);
input->addChild(matmul0, 0, 1);
w0->addChild(matmul0, 0, 0);
matmul0->addChild(add0, 0, 0);
b0->addChild(add0, 0, 1);
add0->addChild(matmul1, 0, 0);
w1->addChild(matmul1, 0, 1);
add0->addChild(matmul1, 0, 1);
w1->addChild(matmul1, 0, 0);
matmul1->addChild(add1, 0, 0);
b1->addChild(add1, 0, 1);
......@@ -201,4 +201,4 @@ TEST_CASE("GraphRegexUser") {
}
}
\ No newline at end of file
}
......@@ -42,8 +42,8 @@ TEST_CASE("[cpu/recipes] FuseMulAdd", "[FuseMulAdd][recipes]") {
matmul0->addChild(add0, 0, 0);
b0->addChild(add0, 0, 1);
add0->addChild(matmul1, 0, 0);
w1->addChild(matmul1, 0, 1);
add0->addChild(matmul1, 0, 1);
w1->addChild(matmul1, 0, 0);
matmul1->addChild(add1, 0, 0);
b1->addChild(add1, 0, 1);
......@@ -56,14 +56,14 @@ TEST_CASE("[cpu/recipes] FuseMulAdd", "[FuseMulAdd][recipes]") {
std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
REQUIRE(((matmul0->getParent(0) == input) && (matmul0->getParent(1) == w0)));
REQUIRE(((add0->getParent(0) == matmul0) && (add0->getParent(1) == b0)));
REQUIRE(((matmul1->getParent(0) == add0) && (matmul1->getParent(1) == w1)));
REQUIRE(((matmul1->getParent(1) == add0) && (matmul1->getParent(0) == w1)));
REQUIRE(((add1->getParent(0) == matmul1) && (add1->getParent(1) == b1)));
// Transform GraphView inplace
fuseMulAdd(g);
// Check new GraphView
std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
REQUIRE(newNodes.size() == 6);
for (const auto& node : newNodes) {
......@@ -71,4 +71,4 @@ TEST_CASE("[cpu/recipes] FuseMulAdd", "[FuseMulAdd][recipes]") {
}
}
} // namespace Aidge
\ No newline at end of file
} // namespace Aidge
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment