Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • eclipse/aidge/aidge_core
  • hrouis/aidge_core
  • mszczep/aidge_core
  • oantoni/aidge_core
  • cguillon/aidge_core
  • jeromeh/aidge_core
  • axelfarr/aidge_core
  • cmoineau/aidge_core
  • noamzerah/aidge_core
  • lrakotoarivony/aidge_core
  • silvanosky/aidge_core
  • maab05/aidge_core
  • mick94/aidge_core
  • lucaslopez/aidge_core_ll
  • wboussella/aidge_core
  • farnez/aidge_core
  • mnewson/aidge_core
17 results
Show changes
......@@ -67,7 +67,7 @@ Aidge::Connector Aidge::Node::operator()(const std::vector<Connector>& ctors) {
}
// Skip to next possible input idx
for (; idx < nbInputs() && (inputCategory(idx) != InputCategory::Data && inputCategory(idx) != InputCategory::OptionalData); ++idx) {}
for (; idx < nbInputs() && (inputCategory(idx) != InputCategory::Data); ++idx) {}
AIDGE_ASSERT(idx == nbInputs(), "Missing an input connector for Data input#{}", idx);
return Connector(shared_from_this());
......@@ -454,35 +454,6 @@ Aidge::NodePtr Aidge::Node::clone() const {
return std::make_shared<Node>(mOperator->clone(), std::make_shared<DynamicAttributes>(*mAttrs));
}
std::set<Aidge::NodePtr> Aidge::Node::getNodeDelta(int delta, std::set<Aidge::NodePtr> nodeSee) {
std::set<Aidge::NodePtr> out;
nodeSee.insert(shared_from_this());
if (delta == 0) {
out.insert(shared_from_this());
} else if (delta > 0) {
for (const NodePtr& node : getChildren()) {
if (nodeSee.find(node) == nodeSee.end()) { // loop avoidance
for (const NodePtr& ch : node->getNodeDelta(delta - 1, nodeSee)) {
out.insert(ch);
}
}
}
} else {
for (const NodePtr& node : getParents()) {
if (nodeSee.find(node) == nodeSee.end()) { // loop avoidance
for (const NodePtr& pr : node->getNodeDelta(delta + 1, nodeSee)) {
out.insert(pr);
}
}
}
}
return out;
}
Aidge::Node::~Node() = default;
// namespace Aidge {
......
......@@ -47,17 +47,28 @@ std::shared_ptr<Aidge::Operator> Aidge::AvgPooling_Op<DIM>::clone() const {
template <Aidge::DimIdx_t DIM>
bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
if (inputsAssociated()) {
std::array<DimSize_t, DIM + 2> outputDims;
std::array<DimSize_t, DIM + 2> outputDims{};
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
outputDims[0] = inputDims[0];
outputDims[1] = inputDims[1];
for (std::size_t dim = 0; dim < mAttributes->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
std::function<float(float)> roundingFunction;
if (mAttributes->template getAttr<AvgPoolingAttr::CeilMode>()) {
roundingFunction = [](float x) { return std::ceil(x); };
} else {
roundingFunction = [](float x) { return std::floor(x); };
}
for (std::size_t dim = 0; dim < mAttributes->template getAttr<AvgPoolingAttr::KernelDims>().size(); ++dim) {
const auto kernelDim = mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[dim];
const auto strideDim = mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[dim];
const auto dilationDim = mAttributes->template getAttr<AvgPoolingAttr::Dilations>()[dim];
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
std::floor(static_cast<float>(inputDims[dim+2] -
mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
static_cast<float>(mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
roundingFunction(static_cast<float>(inputDims[dim+2] -
(kernelDim - 1) * dilationDim - 1) /
static_cast<float>(strideDim)));
}
outputDims[1] = inputDims[1];
outputDims[0] = inputDims[0];
getOutput(0)->resize(outputDims);
return true;
}
......@@ -96,7 +107,8 @@ Aidge::AvgPooling_Op<DIM>::computeReceptiveField(const std::vector<Aidge::DimSiz
inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
* mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
+ 1
+ (mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
+ (mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
* mAttributes->template getAttr<AvgPoolingAttr::Dilations>()[static_cast<std::size_t>(i)]);
inputIdxDims[2+i] *= mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
}
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
......@@ -128,10 +140,12 @@ template class Aidge::AvgPooling_Op<4>;
template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
std::shared_ptr<Aidge::Node> Aidge::AvgPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
const std::string& name,
const std::array<Aidge::DimSize_t, DIM> &stride_dims) {
const std::array<Aidge::DimSize_t, DIM> &stride_dims,
const std::array<Aidge::DimSize_t, DIM> &dilations,
bool ceil_mode) {
AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", AvgPooling_Op<DIM>::Type);
return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilations, ceil_mode), name);
}
template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&);
template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&);
template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&);
template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, bool);
template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&, const std::array<Aidge::DimSize_t, 3>&, bool);
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cstddef> // std::size_t
#include <memory>
#include <stdexcept> // std::runtime_error
#include <string>
#include <vector>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Equal.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
const std::string Aidge::Equal_Op::Type = "Equal";
/**
 * @brief Compute the output shape of the Equal operator from its two inputs,
 * applying numpy-style broadcasting on the trailing dimensions.
 * @return true if the output was resized, false if inputs are not yet associated.
 * @throws std::runtime_error (via AIDGE_THROW_OR_ABORT) on incompatible shapes.
 */
bool Aidge::Equal_Op::forwardDims(bool /*allowDataDependency*/) {
    if (!inputsAssociated()) {
        return false;
    }

    const std::vector<std::size_t>& dims0 = getInput(0)->dims();
    const std::vector<std::size_t>& dims1 = getInput(1)->dims();

    // Start from the higher-rank shape and broadcast the lower-rank one into it.
    std::vector<std::size_t> outDims = (dims0.size() >= dims1.size()) ? dims0 : dims1;
    const std::vector<std::size_t>& lowDims = (dims0.size() < dims1.size()) ? dims0 : dims1;

    // Walk both shapes from their last (innermost) dimension towards the front.
    std::size_t outIdx = outDims.size();
    std::size_t lowIdx = lowDims.size();
    while (lowIdx > 0) {
        --outIdx;
        --lowIdx;
        if (outDims[outIdx] == 1) {
            // A size-1 dimension broadcasts to the other input's size.
            outDims[outIdx] = lowDims[lowIdx];
        }
        else if ((lowDims[lowIdx] != 1) && (lowDims[lowIdx] != outDims[outIdx])) {
            AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Equal Operation: {} for input#0 vs {} for input#1",
                dims0, dims1);
        }
    }
    mOutputs[0]->resize(outDims);
    return true;
}
/**
 * @brief Select the registered implementation for the requested backend and
 * propagate the backend/device choice to the output tensor.
 * @param name Backend name used as the registry key.
 * @param device Device index on that backend.
 */
void Aidge::Equal_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
SET_IMPL_MACRO(Equal_Op, *this, name);
mOutputs[0]->setBackend(name, device);
}
/**
 * @brief List the backend names for which an Equal_Op implementation is registered.
 * @return Set of available backend identifiers.
 */
std::set<std::string> Aidge::Equal_Op::getAvailableBackends() const {
return Registrar<Equal_Op>::getKeys();
}
......@@ -45,7 +45,7 @@ Aidge::GenericOperator_Op::GenericOperator_Op(const std::string& type,
Aidge::GenericOperator_Op::GenericOperator_Op(const Aidge::GenericOperator_Op& op)
: OperatorTensor(op),
mForwardDims(op.mForwardDims),
mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>())
mAttributes(std::make_shared<DynamicAttributes>(*op.mAttributes))
{
mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
}
......
......@@ -25,11 +25,13 @@ const std::string Aidge::MaxPooling_Op<DIM>::Type = "MaxPooling" + std::to_strin
template <Aidge::DimIdx_t DIM>
Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
const std::array<Aidge::DimSize_t, DIM> &stride_dims,
const std::array<Aidge::DimSize_t, DIM> &dilations,
bool ceil_mode)
: OperatorTensor(Type, {InputCategory::Data}, 1),
mAttributes(std::make_shared<Attributes_>(
attr<MaxPoolingAttr::StrideDims>(stride_dims),
attr<MaxPoolingAttr::KernelDims>(kernel_dims),
attr<MaxPoolingAttr::Dilations>(dilations),
attr<MaxPoolingAttr::CeilMode>(ceil_mode)))
{}
......@@ -63,11 +65,15 @@ bool Aidge::MaxPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
roundingFunction = [](float x) { return std::floor(x); };
}
for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size(); ++dim) {
const auto kernelDim = mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim];
const auto strideDim = mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim];
const auto dilationDim = mAttributes->template getAttr<MaxPoolingAttr::Dilations>()[dim];
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
roundingFunction(static_cast<float>(inputDims[dim+2] -
mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
static_cast<float>(mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
roundingFunction(static_cast<float>(inputDims[dim+2] -
(kernelDim - 1) * dilationDim - 1) /
static_cast<float>(strideDim)));
}
outputDims[1] = inputDims[1];
outputDims[0] = inputDims[0];
......@@ -98,12 +104,13 @@ template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
std::shared_ptr<Aidge::Node> Aidge::MaxPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
const std::string& name,
const std::array<Aidge::DimSize_t, DIM> &stride_dims,
const std::array<Aidge::DimSize_t, DIM> &dilations,
bool ceil_mode)
{
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, ceil_mode), name);
return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilations, ceil_mode), name);
}
template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, bool);
template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, bool);
template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&, bool);
template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, bool);
template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&, const std::array<Aidge::DimSize_t, 3>&, bool);
......@@ -54,8 +54,31 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shar
}
}
/**
 * @brief Copy constructor: deep-copies the micro-graph and attributes so the
 * new operator is fully isolated from the source, then rebinds the outputs
 * to the cloned micro-graph's ordered outputs.
 */
Aidge::MetaOperator_Op::MetaOperator_Op(const MetaOperator_Op& op)
: OperatorTensor(op),
mGraph(op.mGraph->clone()), // Clone the micro-graph for isolation
mAttributes(std::make_shared<DynamicAttributes>(*op.mAttributes)) // Clone attributes
{
// Associate outputs to micro-graph outputs for custom implementation
for (size_t outputIdx = 0; outputIdx < mOutputs.size(); ++outputIdx) {
const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
// An ordered output with a null node is left untouched (keeps the
// tensor inherited from OperatorTensor's copy).
if (outputOp.first) {
mOutputs[outputIdx] = std::dynamic_pointer_cast<Tensor>(outputOp.first->getOperator()->getRawOutput(outputOp.second));
}
}
// Attributes are already cloned.
}
std::shared_ptr<Aidge::Operator> Aidge::MetaOperator_Op::clone() const {
return std::make_shared<MetaOperator_Op>(type(), mGraph->clone());
auto metaOp = std::make_shared<MetaOperator_Op>(*this);
if (mImpl) {
// Only setBackend() if mImpl is not nullptr.
// The inner-graph backend is already set in MetaOperator_Op copy
// constructor, when the graph is cloned.
metaOp->setBackend(mImpl->backend());
}
return metaOp;
}
void Aidge::MetaOperator_Op::associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) {
......@@ -250,6 +273,21 @@ void Aidge::MetaOperator_Op::forward() {
}
}
/**
 * @brief Run the backward pass of the meta-operator.
 * Uses the custom implementation when one is set; otherwise lazily builds a
 * sequential scheduler over the micro-graph and runs the inner operators'
 * backward passes through it.
 */
void Aidge::MetaOperator_Op::backward() {
    if (!mImpl) {
        // No custom implementation: fall back to the micro-graph operators.
        if (!mScheduler) {
            // Build and cache the scheduler on first use.
            mScheduler = std::make_shared<SequentialScheduler>(mGraph, mUpperNode.lock());
            mScheduler->generateScheduling();
        }
        mScheduler->backward();
        return;
    }
    // A custom implementation exists for this meta operator.
    mImpl->backward();
}
/////////////////////////////////////////////////
std::shared_ptr<Aidge::Node> Aidge::MetaOperator(const char *type,
......
......@@ -23,11 +23,8 @@
namespace Aidge {
std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
const DimSize_t hiddenChannel,
const DimSize_t seqLength,
bool noBias,
const std::string& name)
std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
const std::string& name)
{
// Construct micro-graph
auto input = Identity((!name.empty()) ? name + "_input" : "");
......@@ -113,7 +110,18 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
{hiddenState, 1}, {cellState, 1}});
microGraph->setOrderedOutputs({{hiddenState, 0}, {cellState, 0}});
auto metaOp = MetaOperator("LSTM", microGraph, {}, name);
return std::make_shared<MetaOperator_Op>("LSTM", microGraph);
}
std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
const DimSize_t hiddenChannel,
const DimSize_t seqLength,
bool noBias,
const std::string& name)
{
auto op = LSTM_Op(seqLength, name);
auto metaOp = std::make_shared<Node>(op, name);
op->setUpperNode(metaOp);
addProducer(metaOp, 1, {hiddenChannel, inChannel}, "wi");
addProducer(metaOp, 2, {hiddenChannel, inChannel}, "wo");
addProducer(metaOp, 3, {hiddenChannel, inChannel}, "wf");
......@@ -135,93 +143,4 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
return metaOp;
}
/**
 * @brief Build the LSTM meta-operator from a micro-graph of elementary
 * operators (Identity, Memorize, FC, Add, Mul, Sigmoid, Tanh).
 * The wiring below (addChild input/output indices and the ordered
 * inputs/outputs lists) defines the operator's contract — do not reorder.
 * @param seqLength Number of time steps memorized by the hidden/cell states.
 * @return A MetaOperator_Op of type "LSTM" wrapping the micro-graph.
 */
std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength)
{
// Construct micro-graph
auto input = Identity("");
auto hiddenState = Memorize(seqLength, "");
auto cellState = Memorize(seqLength, "");
auto add = Add("");
// Forget gate
auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
input->addChild(forgetGateX, 0, 0);
auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
hiddenState->addChild(forgetGateH, 1, 0);
auto forgetGate = Add("");
forgetGateX->addChild(forgetGate, 0, 0);
forgetGateH->addChild(forgetGate, 0, 1);
auto forgetGateAct = Sigmoid("");
auto forgetGateMul = Mul("");
forgetGate->addChild(forgetGateAct, 0, 0);
forgetGateAct->addChild(forgetGateMul, 0, 0);
forgetGateMul->addChild(add, 0, 0);
cellState->addChild(forgetGateMul, 1, 1);
// Input gate
auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
input->addChild(inputGateX, 0, 0);
auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
hiddenState->addChild(inputGateH, 1, 0);
auto inputGate = Add("");
inputGateX->addChild(inputGate, 0, 0);
inputGateH->addChild(inputGate, 0, 1);
auto inputGateAct = Sigmoid("");
auto inputGateMul = Mul("");
inputGate->addChild(inputGateAct, 0, 0);
inputGateAct->addChild(inputGateMul, 0, 0);
inputGateMul->addChild(add, 0, 1);
// Candidate for cell update
auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
input->addChild(cellCandidateX, 0, 0);
auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
hiddenState->addChild(cellCandidateH, 1, 0);
auto cellCandidate = Add("");
cellCandidateX->addChild(cellCandidate, 0, 0);
cellCandidateH->addChild(cellCandidate, 0, 1);
auto cellCandidateAct = Tanh("");
cellCandidate->addChild(cellCandidateAct, 0, 0);
cellCandidateAct->addChild(inputGateMul, 0, 1);
// Output gate
auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
input->addChild(outputGateX, 0, 0);
auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
hiddenState->addChild(outputGateH, 1, 0);
auto outputGate = Add("");
outputGateX->addChild(outputGate, 0, 0);
outputGateH->addChild(outputGate, 0, 1);
auto outputGateAct = Sigmoid("");
auto outputGateMul = Mul("");
outputGate->addChild(outputGateAct, 0, 0);
outputGateAct->addChild(outputGateMul, 0, 0);
// Updated cell state to help determine new hidden state
auto cellUpdatedAct = Tanh("");
add->addChild(cellUpdatedAct, 0, 0);
cellUpdatedAct->addChild(outputGateMul, 0, 1);
outputGateMul->addChild(hiddenState, 0, 0);
add->addChild(cellState, 0, 0);
// Assemble the micro-graph; 'false' presumably disables automatic
// inclusion of the nodes' parents/children — TODO confirm against
// GraphView::add() documentation.
std::shared_ptr<GraphView> microGraph = std::make_shared<GraphView>();
microGraph->add(input);
microGraph->add({hiddenState, cellState, add,
forgetGateX, forgetGateH, forgetGate, forgetGateAct, forgetGateMul,
inputGateX, inputGateH, inputGate, inputGateAct, inputGateMul,
cellCandidateX, cellCandidateH, cellCandidate, cellCandidateAct,
outputGateX, outputGateH, outputGate, outputGateAct, outputGateMul,
cellUpdatedAct}, false);
// Fix the external interface: input #0 is the data, then the weights
// (input #1 of each FC), then the recurrent weights / biases (input #2),
// then the initial hidden and cell states.
microGraph->setOrderedInputs({{input, 0},
{inputGateX, 1}, {outputGateX, 1}, {forgetGateX, 1}, {cellCandidateX, 1},
{inputGateH, 1}, {outputGateH, 1}, {forgetGateH, 1}, {cellCandidateH, 1},
{inputGateX, 2}, {outputGateX, 2}, {forgetGateX, 2}, {cellCandidateX, 2},
{inputGateH, 2}, {outputGateH, 2}, {forgetGateH, 2}, {cellCandidateH, 2},
{hiddenState, 1}, {cellState, 1}});
// Outputs: hidden state first, then cell state.
microGraph->setOrderedOutputs({{hiddenState, 0}, {cellState, 0}});
return std::make_shared<MetaOperator_Op>("LSTM", microGraph);
}
} // namespace Aidge
......@@ -34,31 +34,35 @@ template <std::array<DimSize_t, 1>::size_type DIM>
std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name,
const std::array<DimSize_t, DIM> &stride_dims,
const std::array<DimSize_t, 2*DIM> &padding_dims)
const std::array<DimSize_t, DIM> &dilations,
const std::array<DimSize_t, 2*DIM> &padding_dims,
bool ceil_mode)
{
auto graph = Sequential({
Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims, dilations, ceil_mode)
});
return MetaOperator(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), graph, {}, name);
}
template std::shared_ptr<Node> PaddedAvgPooling<1>(const std::array<DimSize_t,1>&, const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
template std::shared_ptr<Node> PaddedAvgPooling<2>(const std::array<DimSize_t,2>&, const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
template std::shared_ptr<Node> PaddedAvgPooling<1>(const std::array<DimSize_t,1>&, const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&, bool);
template std::shared_ptr<Node> PaddedAvgPooling<2>(const std::array<DimSize_t,2>&, const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&, bool);
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
std::shared_ptr<Node> PaddedAvgPooling(const DimSize_t (&kernel_dims)[DIM],
const std::string& name,
const std::array<DimSize_t, DIM> &stride_dims,
const std::array<DimSize_t, 2*DIM> &padding_dims)
const std::array<DimSize_t, DIM> &dilations,
const std::array<DimSize_t, 2*DIM> &padding_dims,
bool ceil_mode)
{
return PaddedAvgPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
return PaddedAvgPooling(to_array(kernel_dims), name, stride_dims, dilations, padding_dims, ceil_mode);
}
template std::shared_ptr<Node> PaddedAvgPooling<1>(const DimSize_t (&kernel_dims)[1], const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
template std::shared_ptr<Node> PaddedAvgPooling<2>(const DimSize_t (&kernel_dims)[2], const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
template std::shared_ptr<Node> PaddedAvgPooling<1>(const DimSize_t (&kernel_dims)[1], const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&, bool);
template std::shared_ptr<Node> PaddedAvgPooling<2>(const DimSize_t (&kernel_dims)[2], const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&, bool);
//////////////////////////////////
......@@ -68,17 +72,19 @@ template std::shared_ptr<Node> PaddedAvgPooling<2>(const DimSize_t (&kernel_dims
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims,
const std::array<DimSize_t, 2*DIM> &padding_dims)
const std::array<DimSize_t, DIM> &dilations,
const std::array<DimSize_t, 2*DIM> &padding_dims,
bool ceil_mode)
{
auto graph = Sequential({
Pad<DIM>(padding_dims, ""),
AvgPooling(kernel_dims, "", stride_dims)
AvgPooling(kernel_dims, "", stride_dims, dilations, ceil_mode)
});
return std::make_shared<MetaOperator_Op>(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), graph);
}
template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<1>(const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<2>(const std::array<DimSize_t,2>&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<1>(const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&, bool);
template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<2>(const std::array<DimSize_t,2>&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&, bool);
} // namespace Aidge
......@@ -24,6 +24,8 @@ namespace Aidge {
* @brief Initialize console log level from environment. If compile mode is
* DEBUG, then the default level is Log::Level::Debug, else it is
* Log::Level::Notice.
*
* WARNING: Do not use this variable directly, use getConsoleLevel() instead.
*/
Log::Level Log::mConsoleLevel = []() {
#ifndef NDEBUG
......@@ -58,7 +60,7 @@ bool Log::mConsoleColor = []() {
*/
Log::Level Log::mFileLevel = []() {
#ifndef NDEBUG
constexpr Level defaultLevel = Level::Debug;
constexpr Log::Level defaultLevel = Level::Debug;
#else
constexpr Log::Level defaultLevel = Level::Notice;
#endif
......@@ -102,8 +104,13 @@ void Log::log(Level level, const std::string& msg) {
while (start < text.size()) {
std::size_t lineWidth = 0;
std::size_t current = start;
while (current < text.size() && lineWidth < width) {
bool inPath = false;
while (current < text.size() && (lineWidth < width || inPath)) {
if (inPath){
if (text[current] == ' ' || text[current] == '\n'){
inPath = false;
}
}
if (text[current] == '\033') {
// Found ANSI escape sequence, skip until 'm'
std::size_t ansiEnd = text.find('m', current);
......@@ -119,6 +126,9 @@ void Log::log(Level level, const std::string& msg) {
// Handle explicit line break
break;
} else {
if(!inPath && (text[current] == '/' || text[current] == '\\')) {
inPath = true;
}
// Normal character, increase line width
++lineWidth;
++current;
......@@ -162,9 +172,9 @@ void Log::log(Level level, const std::string& msg) {
// Get the string representation of the log level
const auto levelStr = EnumStrings<Level>::data[static_cast<std::size_t>(level)];
const std::size_t levelIndentSizes[6] = {10, 9, 11, 12, 10, 10};
const std::size_t width = 80 - levelIndentSizes[static_cast<std::size_t>(level)];
const std::size_t width = 100 - levelIndentSizes[static_cast<std::size_t>(level)];
if (level >= mConsoleLevel) {
if (level >= getConsoleLevel()) {
for (const auto& context : mContext) {
fmt::println("Context: {}", context);
}
......
......@@ -659,9 +659,12 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, conv1, conv2, conv3, conv4, concat, other2}));
GraphView::replace({conv1, conv2, conv3, conv4, concat}, {myConv});
// This doesn't make sense in the general case: replace() cannot possibly
// know how to match the inputs of the 4 conv with the single input of myConv
// The implicit assumption here is that they are the same!
//GraphView::replace({conv1, conv2, conv3, conv4, concat}, {myConv});
REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myConv, other2}));
//REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myConv, other2}));
}
SECTION("replace same input category 1") {
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/GenericOperator.hpp"
#include "aidge/operator/Producer.hpp"
using namespace Aidge;
// Unit test for Node::getNodeDelta(): checks that a node returns itself for
// delta == 0 and its direct child for delta == 1.
TEST_CASE("get Delta") {
std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
// Generic "Conv" nodes; arguments are presumably (type, nbData, nbParam,
// nbOut, name) — TODO confirm against GenericOperator's signature.
std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 0, 1, "c3");
std::shared_ptr<Node> conv3_5 = GenericOperator("Conv", 1, 0, 1, "c3.5");
std::shared_ptr<Node> conv4 = GenericOperator("Conv", 1, 0, 1, "c4");
std::shared_ptr<Node> conv5 = GenericOperator("Conv", 1, 0, 1, "c5");
// Only conv -> conv1 is actually added to the graph; the other nodes are
// currently unused by the sections below.
g1->add(conv);
g1->addChild(conv1, "c");
std::set<Aidge::NodePtr> see;
conv->getNodeDelta(1,see);
SECTION("Self return") {
see.clear();
REQUIRE(conv->getNodeDelta(0,see) == std::set<std::shared_ptr<Node>>{conv});
}
SECTION("child") {
see.clear();
REQUIRE(conv->getNodeDelta(1,see) == std::set<std::shared_ptr<Node>>{conv1});
}
}
\ No newline at end of file
0.5.0
0.5.1