Skip to content
Snippets Groups Projects
Commit b4db32e4 authored by Olivier BICHLER's avatar Olivier BICHLER Committed by Maxence Naud
Browse files

Multiple fixes

parent 8059dcbf
No related branches found
No related tags found
2 merge requests!318[Upd] release verision 0.5.0,!312Fixed adaptToBackend and added unit test
......@@ -328,8 +328,11 @@ public:
* Default to 0.
* @param otherInId ID of the other Node input to connect to the current Node.
* Default to the first available data input.
*
 * @note otherNode shared_ptr is passed by reference in order to be able to detect
 * possible dangling connection situations in debug using ref counting.
*/
void addChild(NodePtr otherNode,
void addChild(const NodePtr& otherNode,
const IOIndex_t outId = IOIndex_t(0),
IOIndex_t otherInId = gk_IODefaultIndex);
......@@ -517,8 +520,11 @@ private:
* @param otherNode
* @param outId
* @param otherInId
*
 * @note otherNode shared_ptr is passed by reference in order to be able to detect
 * possible dangling connection situations in debug using ref counting.
*/
void addChildOp(NodePtr otherNode, const IOIndex_t outId,
void addChildOp(const NodePtr& otherNode, const IOIndex_t outId,
const IOIndex_t otherInId);
/**
......
......@@ -242,12 +242,13 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec&
auto op = std::static_pointer_cast<OperatorTensor>(mOp.clone());
auto node = std::make_shared<Node>(op);
auto adaptedGraph = std::make_shared<GraphView>();
adaptedGraph->add(node);
// Adapt inputs
for (size_t i = 0; i < requiredSpecs.inputs.size(); ++i) {
const auto IOSpec = (i < spec.inputs.size()) ? spec.inputs[i] : spec.inputs.back();
const ImplSpec::IOSpec& requiredIOSpec = requiredSpecs.inputs[i];
std::shared_ptr<Node> parent = node;
// Input type
if (requiredIOSpec.type != DataType::Any
......@@ -255,8 +256,9 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec&
&& requiredIOSpec.type != IOSpec.type)
{
const auto cast = Cast(IOSpec.type);
cast->getOperator()->setBackend(node->getOperator()->backend());
cast->addChild(parent, 0, i);
cast->getOperator()->setBackend(op->backend());
cast->addChild(node, 0, i);
adaptedGraph->add(cast);
op->getInput(i)->setDataType(IOSpec.type);
}
......@@ -270,8 +272,9 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec&
auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
transposeOp->getOperator()->setDataFormat(IOSpec.format);
transposeOp->getOperator()->setDataType(requiredIOSpec.type);
transposeOp->getOperator()->setBackend(node->getOperator()->backend());
transposeOp->addChild(parent, 0, i);
transposeOp->getOperator()->setBackend(op->backend());
transposeOp->addChild(node, 0, i);
adaptedGraph->add(transposeOp);
op->getInput(i)->setDataFormat(IOSpec.format);
}
......@@ -300,7 +303,6 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec&
for (size_t i = 0; i < requiredSpecs.outputs.size(); ++i) {
const auto IOSpec = (i < spec.outputs.size()) ? spec.outputs[i] : spec.outputs.back();
const ImplSpec::IOSpec& requiredIOSpec = requiredSpecs.outputs[i];
std::shared_ptr<Node> parent = node;
// Output type
if (requiredIOSpec.type != DataType::Any
......@@ -308,8 +310,9 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec&
&& requiredIOSpec.type != IOSpec.type)
{
const auto cast = Cast(requiredIOSpec.type);
cast->getOperator()->setBackend(node->getOperator()->backend());
parent->addChild(cast, i, 0);
cast->getOperator()->setBackend(op->backend());
node->addChild(cast, i, 0);
adaptedGraph->add(cast);
op->getOutput(i)->setDataType(IOSpec.type);
}
......@@ -323,8 +326,9 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec&
auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
transposeOp->getOperator()->setDataFormat(requiredIOSpec.format);
transposeOp->getOperator()->setDataType(requiredIOSpec.type);
transposeOp->getOperator()->setBackend(node->getOperator()->backend());
parent->addChild(transposeOp, i, 0);
transposeOp->getOperator()->setBackend(op->backend());
node->addChild(transposeOp, i, 0);
adaptedGraph->add(transposeOp);
op->getOutput(i)->setDataFormat(IOSpec.format);
}
......@@ -349,7 +353,6 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec&
}
}
auto adaptedGraph = getConnectedGraphView(node);
if (adaptedGraph->getNodes().size() > 1) {
return MetaOperator(std::string("Adapted_" + op->type()).c_str(), adaptedGraph);
}
......
......@@ -34,7 +34,7 @@ Aidge::DataFormatTranspose Aidge::getDataFormatTranspose(const DataFormat& src,
srcToDst[i] = dstDefToFormat[srcFormatToDef[i]] - 1;
}
else {
srcToDst[i] = i;
srcToDst[i] = srcFormatToDef[i];
}
}
......
......@@ -593,6 +593,7 @@ void Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& tran
}
}
AIDGE_ASSERT(src.getImpl(), "Tensor::copyTranspose(): an implementation is required for src Tensor!");
AIDGE_ASSERT(mImpl, "Tensor::copyTranspose(): an implementation is required, use setBackend() first!");
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, newDims);
......
......@@ -105,6 +105,10 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
? "<em>" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + "</em>"
: "\"" + node_ptr->name() + "<br/><sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\"";
if (verbose) {
givenName += "<br/><span style='color:white; background-color: purple; float: right'>" + node_ptr->getOperator()->backend() + "</span>";
}
std::string nodeCls = "";
if (node_ptr->type() == "Producer") {
nodeCls = ":::producerCls";
......
......@@ -201,10 +201,15 @@ Aidge::Node::outputs() const {
/**
 * @brief Return the list of (child node, input index) pairs connected to output #outId.
 *
 * Only connections whose child Node is still alive are returned: each stored
 * weak_ptr is locked first, and an expired reference (the child went out of
 * scope) is reported as a dangling connection instead of yielding a null
 * shared_ptr to the caller.
 */
std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> Aidge::Node::output(
        Aidge::IOIndex_t outId) const {
    std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> listOutputs;
    // At most one entry per registered child; avoid reallocations.
    listOutputs.reserve(mIdInChildren[outId].size());
    for (std::size_t i = 0; i < mIdInChildren[outId].size(); ++i) {
        if (std::shared_ptr<Node> child = mChildren[outId][i].lock()) {
            listOutputs.emplace_back(child, mIdInChildren[outId][i]);
        }
        else {
            Log::warn("Node::output(): dangling connection at index #{} of output #{} for node {} (of type {})", i, outId, name(), type());
        }
    }
    return listOutputs;
}
......@@ -255,7 +260,7 @@ void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeoutId)
// TOPOLOGY
///////////////////////////////////////////////////////
void Aidge::Node::addChildOp(std::shared_ptr<Node> otherNode, const IOIndex_t outId,
void Aidge::Node::addChildOp(const std::shared_ptr<Node>& otherNode, const IOIndex_t outId,
const IOIndex_t otherInId) {
AIDGE_ASSERT(otherInId < otherNode->nbInputs(),
"Input index (#{}) of the node {} (of type {}) is out of bound (it has {} inputs), when trying to add it as a child of node {} (of type {})",
......@@ -263,6 +268,11 @@ void Aidge::Node::addChildOp(std::shared_ptr<Node> otherNode, const IOIndex_t ou
AIDGE_ASSERT(outId < nbOutputs(),
"Output index (#{}) of the node {} (of type {}) is out of bound (it has {} outputs), when trying to add the child node {} (of type {})",
outId, name(), type(), nbOutputs(), otherNode->name(), otherNode->type());
if (otherNode.use_count() == 1) {
Log::debug("Node::addChild(): the node {} (of type {}) only holds a weak reference to the added child node {} (of type {})."
"If the child node goes out of scope, it will be destructed, leading to a dangling connection."
"To avoid this message, consider adding the child node to a GraphView first.", name(), type(), otherNode->name(), otherNode->type());
}
if (otherNode->input(otherInId).second != gk_IODefaultIndex) {
Log::notice("the {}-th Parent of the child node {} (of type {}) already existed", otherInId, otherNode->name(), otherNode->type());
}
......@@ -284,7 +294,7 @@ void Aidge::Node::addChildView(std::shared_ptr<GraphView> otherGraph, const IOIn
addChildOp(otherInId.first, outId, otherInId.second);
}
void Aidge::Node::addChild(std::shared_ptr<Node> otherNode, const IOIndex_t outId,
void Aidge::Node::addChild(const std::shared_ptr<Node>& otherNode, const IOIndex_t outId,
IOIndex_t otherInId) {
if (otherNode) {
otherInId =
......@@ -342,10 +352,17 @@ bool Aidge::Node::removeParent(const IOIndex_t inId) {
/**
 * @brief Return the set of all live child nodes across every output.
 *
 * Each child is stored as a weak_ptr; expired references (dangling
 * connections, i.e. the child Node was destructed) are skipped with a
 * warning rather than inserting a null pointer into the returned set.
 */
std::set<std::shared_ptr<Aidge::Node>> Aidge::Node::getChildren() const {
    std::set<std::shared_ptr<Node>> children;
    size_t outId = 0;  // track the output index for diagnostics only
    for (const auto& childrenOfOneOutput : mChildren) {
        for (const auto& oneChild : childrenOfOneOutput) {
            if (std::shared_ptr<Node> child = oneChild.lock()) {
                children.insert(child);
            }
            else {
                Log::warn("Node::getChildren(): dangling connection at output #{} for node {} (of type {})", outId, name(), type());
            }
        }
        ++outId;
    }
    return children;
}
......@@ -363,7 +380,12 @@ std::vector<std::shared_ptr<Aidge::Node>> Aidge::Node::getChildren(const IOIndex
assert((outId < nbOutputs()) && "Output index out of bound.");
std::vector<std::shared_ptr<Node>> children;
for (std::size_t i = 0; i < mChildren[outId].size(); ++i) {
children.push_back(mChildren[outId][i].lock());
if (std::shared_ptr<Node> child = mChildren[outId][i].lock()) {
children.push_back(child);
}
else {
Log::warn("Node::getChildren(): dangling connection at index #{} of output #{} for node {} (of type {})", i, outId, name(), type());
}
}
return children;
}
......
......@@ -36,9 +36,19 @@ Aidge::OperatorTensor::OperatorTensor(const OperatorTensor& other)
mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)),
mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) {
for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
// The characteristics of the output tensors are copied for two reasons:
// - Consistency with the operator: the output tensors backend should
// match the operator backend, which is always copied.
// - The user would expect that the data type and format are copied as
// well.
// Data is never copied in clone().
mOutputs[i] = std::make_shared<Tensor>();
// mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i)));
// datatype already copied
mOutputs[i]->setDataType(other.getOutput(i)->dataType());
mOutputs[i]->setDataFormat(other.getOutput(i)->dataFormat());
if (!other.getOutput(i)->backend().empty()) {
mOutputs[i]->setBackend(other.getOutput(i)->backend());
}
}
}
......
......@@ -17,8 +17,10 @@
#include "aidge/graph/OpArgs.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/ReLU.hpp"
#include "aidge/operator/Transpose.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/recipes/Recipes.hpp"
#include "aidge/scheduler/SequentialScheduler.hpp"
namespace Aidge {
......@@ -44,6 +46,10 @@ public:
std::set<ImplSpec> implSpecsSet = Registrar<OperatorImpl_dummy>::getKeys();
return std::vector<ImplSpec>(implSpecsSet.begin(), implSpecsSet.end());
}
void forward() override {
fmt::println("forward: {}", mOp.type());
}
};
// Register it
......@@ -73,13 +79,14 @@ REGISTRAR(Tensor, {"dummy", DataType::Float32}, Registrar<Tensor>::create({"cpu"
TEST_CASE("[cpu/recipes] AdaptToBackend", "[AdaptToBackend][recipes]") {
auto g1 = Sequential({
Producer({16, 3, 224, 224}, "dataProvider"),
Conv(3, 32, {3, 3}, "conv1"),
Producer({1, 3, 22, 22}, "dataProvider"),
Conv(3, 4, {3, 3}, "conv1"),
ReLU("relu1"),
Conv(32, 64, {3, 3}, "conv2"),
Conv(4, 8, {3, 3}, "conv2"),
ReLU("relu2"),
Conv(64, 10, {1, 1}, "conv3")
Conv(8, 10, {1, 1}, "conv3")
});
g1->setBackend("dummy");
auto convOp = std::static_pointer_cast<Conv2D_Op>(g1->getNode("conv1")->getOperator());
REQUIRE(convOp->getInput(0)->dataFormat() == DataFormat::Default);
......@@ -90,13 +97,21 @@ TEST_CASE("[cpu/recipes] AdaptToBackend", "[AdaptToBackend][recipes]") {
adaptToBackend(g1);
g1->save("adapttobackend_after", true);
// FIXME: the last ~> should be ->, but no match in this case!
auto matches = SinglePassGraphMatching(g1).match("Conv2D#<-Transpose<-Producer;Conv2D#<1-Transpose<-Producer;Conv2D#<2-Producer;Conv2D#~>Transpose->ReLU");
auto matches = SinglePassGraphMatching(g1).match("Conv2D#<-Transpose<-Producer;Conv2D#<1-Transpose<-Producer;Conv2D#<2-Producer;Conv2D#->Transpose#->ReLU");
REQUIRE(matches.size() == 1);
convOp = std::static_pointer_cast<Conv2D_Op>(matches.begin()->graph->rootNode()->getOperator());
auto outTransOp = std::static_pointer_cast<Transpose_Op>(matches.begin()->anchors.at("Transpose").at("#")->getOperator());
REQUIRE(convOp->getInput(0)->dataFormat() == DataFormat::NHWC);
REQUIRE(convOp->getInput(1)->dataFormat() == DataFormat::NHWC);
REQUIRE(convOp->getOutput(0)->dataFormat() == DataFormat::NHWC);
REQUIRE(outTransOp->getOutput(0)->dataFormat() == DataFormat::Default);
// TODO: uncomment when support of NHWC will be implemented in Conv_Op::forwardDims()
//REQUIRE(g1->forwardDims());
//g1->save("adapttobackend_after_forwarddims", true);
//SequentialScheduler sched(g1);
//sched.forward();
}
} // namespace Aidge
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment