Compare revisions

Commits on Source (24)
Showing 703 additions and 22 deletions
......@@ -103,6 +103,22 @@ class Tensor : public Data,
resize(dims);
}
/**
* @brief Construct a new Tensor object from the one-dimensional Vector helper.
* @tparam T datatype
*/
template <typename T>
constexpr Tensor(Vector<T> &&arr)
: Data(Type),
mDataType(NativeType<T>::type),
mDims({arr.data.size()}),
mStrides({1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {arr.data.size()})),
mSize(arr.data.size())
{
mImpl->copyFromHost(&arr.data[0], arr.data.size());
}
/**
* @brief Construct a new Tensor object from the one-dimensional Array helper.
* @tparam T datatype
......@@ -203,6 +219,12 @@ class Tensor : public Data,
*/
Tensor &operator=(const Tensor& other);
template <typename T>
constexpr Tensor &operator=(Vector<T> &&arr) {
*this = Tensor(std::move(arr));
return *this;
}
template <typename T, std::size_t SIZE_0>
constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
*this = Tensor(std::move(arr));
......
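A minimal usage sketch of the new Vector helper and assignment operator (assuming the "cpu" Tensor implementation is registered, as the constructor requires):

#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ArrayHelpers.hpp"

void vectorHelperExample() {
    using namespace Aidge;
    Tensor t = Vector<float>{{1.0f, 2.0f, 3.0f}};   // 1-D tensor of size 3 built from a std::vector
    t = Vector<float>{{4.0f, 5.0f}};                // goes through the new Tensor::operator=(Vector<T>&&)
}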
......@@ -400,10 +400,17 @@ public:
addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor);
}
inline void updateNodeName(const std::string& oldName, const std::string& newName){
AIDGE_ASSERT(mNodeRegistry.find(oldName) != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted !", oldName, name());
mNodeRegistry[newName] = mNodeRegistry[oldName];
mNodeRegistry.erase(oldName);
inline void updateNodeName(NodePtr nodeToRename, const std::string& newName){
const std::string& oldName = nodeToRename->name();
AIDGE_ASSERT(mNodeRegistry.find(newName) == mNodeRegistry.end(), "Name {} is already used in graph {}.", newName, name());
if (nodeToRename->name() != "") { // Case: the node already had a name
AIDGE_ASSERT(mNodeRegistry.find(oldName) != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted!", oldName, name());
mNodeRegistry[newName] = mNodeRegistry[oldName];
mNodeRegistry.erase(oldName);
} else { // Case: the node did not have a name yet
mNodeRegistry[newName] = nodeToRename;
}
}
/**
......
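A minimal sketch of the new rename path (node and graph names are illustrative); Node::setName(), updated further down in this diff, is the intended caller:

#include <memory>
#include "aidge/graph/GraphView.hpp"
#include "aidge/operator/ReLU.hpp"

void renameExample() {
    auto graph = std::make_shared<Aidge::GraphView>("g");
    auto relu = Aidge::ReLU();      // node created without a name
    graph->add(relu);
    relu->setName("relu0");         // forwards to graph->updateNodeName(relu, "relu0") for every view holding the node
}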
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_FOLD_H_
#define AIDGE_CORE_OPERATOR_FOLD_H_
#include <array>
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <string>
#include <utility> // std::pair
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
enum class FoldAttr { OutputDims, StrideDims, DilationDims, KernelDims };
template <DimIdx_t DIM>
class Fold_Op : public OperatorTensor,
public Registrable<Fold_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Fold_Op<DIM> &)> {
public:
static const std::string Type;
private:
using Attributes_ = StaticAttributes<FoldAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>>;
template <FoldAttr e> using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
public:
Fold_Op() = delete;
constexpr Fold_Op(const std::array<DimSize_t, DIM> &outputDims,
const std::array<DimSize_t, DIM> &kernelDims,
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
: OperatorTensor(Type, {InputCategory::Data}, 1),
mAttributes(std::make_shared<Attributes_>(
attr<FoldAttr::OutputDims>(outputDims),
attr<FoldAttr::StrideDims>(strideDims),
attr<FoldAttr::DilationDims>(dilationDims),
attr<FoldAttr::KernelDims>(kernelDims))) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
* input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Fold_Op(const Fold_Op<DIM> &op)
: OperatorTensor(op),
mAttributes(op.mAttributes)
{
if (!op.backend().empty()) {
SET_IMPL_MACRO(Fold_Op<DIM>, *this, op.backend());
}
else {
mImpl = nullptr;
}
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Fold_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Fold_Op<DIM>>(*this);
}
bool forwardDims(bool /*allowDataDependency*/ = false) override final;
void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
inline std::array<DimSize_t, DIM>& outputDims() const { return mAttributes->template getAttr<FoldAttr::OutputDims>(); }
inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<FoldAttr::StrideDims>(); }
inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<FoldAttr::DilationDims>(); }
inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<FoldAttr::KernelDims>(); }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Fold(const std::array<DimSize_t, DIM> &outputDims,
const std::array<DimSize_t, DIM> &kernelDims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in every case
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Fold, not supported");
return std::make_shared<Node>(std::make_shared<Fold_Op<static_cast<DimIdx_t>(DIM)>>(outputDims, kernelDims, strideDims, dilationDims), name);
}
template <DimSize_t DIM>
inline std::shared_ptr<Node> Fold(
DimSize_t const (&outputDims)[DIM],
DimSize_t const (&kernelDims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Fold, not supported");
return Fold(to_array(outputDims), to_array(kernelDims), name, strideDims, dilationDims);
}
} // namespace Aidge
extern template class Aidge::Fold_Op<2>;
namespace {
template <>
const char *const EnumStrings<Aidge::FoldAttr>::data[] = {
"OutputDims",
"StrideDims",
"DilationDims",
"KernelDims"
};
}
#endif /* AIDGE_CORE_OPERATOR_FOLD_H_ */
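An illustrative Fold node construction (output size and kernel chosen arbitrarily), relying on the raw-array overload to deduce DIM:

#include "aidge/operator/Fold.hpp"

void foldExample() {
    // Folds a {N, C*3*3, L} column tensor back into a {N, C, 32, 32} feature map.
    auto fold = Aidge::Fold({32, 32}, {3, 3}, "fold1");   // DIM = 2 deduced; strides and dilations default to 1
}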
......@@ -108,6 +108,7 @@ public:
Elts_t getNbProducedData(IOIndex_t outputIdx) const override;
void updateConsummerProducer() override;
void resetConsummerProducer() override;
void forward() override;
void backward() override {
AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented yet for a MetaOperator");
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_UNFOLD_H_
#define AIDGE_CORE_OPERATOR_UNFOLD_H_
#include <array>
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <string>
#include <utility> // std::pair
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
template <DimIdx_t DIM>
class Unfold_OpImpl : public OperatorImpl {
public:
Unfold_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
void forward() override;
};
enum class UnfoldAttr { StrideDims, DilationDims, KernelDims };
template <DimIdx_t DIM>
class Unfold_Op : public OperatorTensor,
public Registrable<Unfold_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM> &)> {
public:
static const std::string Type;
private:
using Attributes_ = StaticAttributes<UnfoldAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>>;
template <UnfoldAttr e> using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
public:
Unfold_Op() = delete;
constexpr Unfold_Op(const std::array<DimSize_t, DIM> &kernelDims,
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
: OperatorTensor(Type, {InputCategory::Data}, 1),
mAttributes(std::make_shared<Attributes_>(
attr<UnfoldAttr::StrideDims>(strideDims),
attr<UnfoldAttr::DilationDims>(dilationDims),
attr<UnfoldAttr::KernelDims>(kernelDims)))
{
mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
* input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Unfold_Op(const Unfold_Op<DIM> &op)
: OperatorTensor(op),
mAttributes(op.mAttributes)
{
if (!op.backend().empty()) {
SET_IMPL_MACRO(Unfold_Op<DIM>, *this, op.backend());
}
else {
mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
}
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Unfold_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Unfold_Op>(*this);
}
bool forwardDims(bool /*allowDataDependency*/ = false) override final;
void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<UnfoldAttr::StrideDims>(); }
inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<UnfoldAttr::DilationDims>(); }
inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<UnfoldAttr::KernelDims>(); }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Unfold(const std::array<DimSize_t, DIM> &kernelDims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in every case
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not supported");
return std::make_shared<Node>(std::make_shared<Unfold_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
}
template <DimSize_t DIM>
inline std::shared_ptr<Node> Unfold(
DimSize_t const (&kernelDims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not supported");
return Unfold(to_array(kernelDims), name, strideDims, dilationDims);
}
} // namespace Aidge
extern template class Aidge::Unfold_Op<2>;
namespace {
template <>
const char *const EnumStrings<Aidge::UnfoldAttr>::data[] = {
"StrideDims",
"DilationDims",
"KernelDims"
};
}
#endif /* AIDGE_CORE_OPERATOR_UNFOLD_H_ */
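An illustrative Unfold node construction (kernel and stride chosen arbitrarily), mirroring the Fold example above:

#include "aidge/operator/Unfold.hpp"

void unfoldExample() {
    // Extracts 3x3 patches: a {N, C, H, W} input becomes a {N, C*3*3, L} column tensor.
    auto unfold = Aidge::Unfold({3, 3}, "unfold1", {1, 1});   // DIM = 2 deduced; dilation defaults to 1
}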
......@@ -144,6 +144,13 @@ void expandMetaOps(std::shared_ptr<GraphView> graph, bool recursive = false);
*/
size_t fuseToMetaOps(std::shared_ptr<GraphView> graph, const std::string& query, const std::string& type = "");
/**
* Transform Conv layers into an equivalent Unfold + MatMul + Reshape micro-graph.
* @param graph Graph to manipulate
* @return size_t Number of replacements
*/
size_t convToMatMul(std::shared_ptr<GraphView> graph);
} // namespace Aidge
#endif /* AIDGE_CORE_UTILS_RECIPES_H_ */
......@@ -101,6 +101,13 @@ constexpr std::array<T, N + 1> append(T t, std::array<T, N> a) {
}
// Generic helper for initializing a Tensor
template <typename T>
struct Vector {
Vector(const std::vector<T>& data_) : data(data_) {}
template <typename U> Vector(const std::vector<U>& data_) : data(data_.begin(), data_.end()) {}
std::vector<T> data;
};
template <typename T, std::size_t SIZE_0>
struct Array1D {
T data[SIZE_0];
......
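The templated constructor allows element-wise conversion between integral types, which the convToMatMul recipe below uses to turn std::size_t tensor dims into int64_t shape producers; a short sketch (assuming Vector sits in the Aidge namespace alongside the other array helpers):

#include <cstdint>
#include <vector>
#include "aidge/utils/ArrayHelpers.hpp"

void vectorConversionExample() {
    const std::vector<std::size_t> dims{16, 64, 222, 222};
    Aidge::Vector<std::int64_t> shape(dims);   // converts each element through the templated constructor
}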
......@@ -51,6 +51,7 @@ void addCtor(py::class_<Tensor,
return newTensor;
}), py::arg("array"), py::arg("backend")="cpu")
.def(py::init<T>(), py::arg("val"))
.def("__setitem__", (void (Tensor::*)(std::size_t, T)) &Tensor::set)
.def("__setitem__", (void (Tensor::*)(std::vector<std::size_t>, T)) &Tensor::set)
;
......@@ -73,6 +74,7 @@ void init_Tensor(py::module& m){
(m,"Tensor", py::multiple_inheritance(), py::buffer_protocol());
pyClassTensor.def(py::init<>())
.def(py::init<const std::vector<std::size_t>&>(), py::arg("dims"))
.def(py::self + py::self)
.def(py::self - py::self)
.def(py::self * py::self)
......@@ -86,7 +88,7 @@ void init_Tensor(py::module& m){
.def("dtype", &Tensor::dataType)
.def("size", &Tensor::size)
.def("capacity", &Tensor::capacity)
.def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize)
.def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize, py::arg("dims"), py::arg("strides") = std::vector<DimSize_t>())
.def("has_impl", &Tensor::hasImpl)
.def("get_coord", &Tensor::getCoord)
.def("get_idx", &Tensor::getIdx)
......
......@@ -21,7 +21,7 @@ namespace Aidge {
void init_Softmax(py::module& m) {
py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
.def(py::init<std::size_t>(), py::arg("axis"))
.def(py::init<std::int32_t>(), py::arg("axis"))
.def_static("get_inputs_name", &Softmax_Op::getInputsName)
.def_static("get_outputs_name", &Softmax_Op::getOutputsName);
declare_registrable<Softmax_Op>(m, "SoftmaxOp");
......
......@@ -175,10 +175,16 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
}
size_t inputIdx = 0;
for (auto input : mInputNodes) {
for (const auto& input : mInputNodes) {
if (input.first != nullptr) {
fmt::print(fp.get(), "input{}((in#{})):::inputCls--->|\"&rarr;{}\"|{}_{}\n", inputIdx, inputIdx,
const auto& op_ = std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator());
if (op_->getInput(input.second) && (!op_->getInput(input.second)->empty())) {
fmt::print(fp.get(), "input{}((in#{})):::inputCls--->|\"&rarr;{}{}\"|{}_{}\n", inputIdx, inputIdx,
input.second, op_->getInput(input.second)->dims(), input.first->type(), namePtrTable.at(input.first));
} else {
fmt::print(fp.get(), "input{}((in#{})):::inputCls--->|\"&rarr;{}\"|{}_{}\n", inputIdx, inputIdx,
input.second, input.first->type(), namePtrTable.at(input.first));
}
}
else {
fmt::print(fp.get(), "input{}((in#{})):::inputCls\n", inputIdx, inputIdx);
......@@ -287,13 +293,13 @@ void Aidge::GraphView::setOrderedInputs(const std::vector<std::pair<NodePtr, IOI
// it into account.
if (input.first != nullptr) {
auto it = std::find(ignoredInputs.begin(), ignoredInputs.end(), input);
AIDGE_ASSERT(it != ignoredInputs.end(), "unknown or duplicate input");
AIDGE_ASSERT(it != ignoredInputs.end(), "unknown or duplicate input: {} (of type {})", input.first->name(), input.first->type());
ignoredInputs.erase(it);
++nbInputs;
}
}
AIDGE_ASSERT(nbInputs <= mInputNodes.size(), "too many specified number of inputs");
AIDGE_ASSERT(nbInputs <= mInputNodes.size(), "too many specified number of inputs: {} specified vs {} available", nbInputs, mInputNodes.size());
mInputNodes = inputs;
mInputNodes.insert(mInputNodes.end(), ignoredInputs.begin(), ignoredInputs.end());
......@@ -308,13 +314,13 @@ void Aidge::GraphView::setOrderedOutputs(const std::vector<std::pair<NodePtr, IO
// it into account.
if (output.first != nullptr) {
auto it = std::find(ignoredOutputs.begin(), ignoredOutputs.end(), output);
AIDGE_ASSERT(it != ignoredOutputs.end(), "unknown or duplicate output");
AIDGE_ASSERT(it != ignoredOutputs.end(), "unknown or duplicate output: {} (of type {})", output.first->name(), output.first->type());
ignoredOutputs.erase(it);
++nbOutputs;
}
}
AIDGE_ASSERT(nbOutputs <= mOutputNodes.size(), "too many specified number of outputs");
AIDGE_ASSERT(nbOutputs <= mOutputNodes.size(), "too many specified number of outputs: {} specified vs {} available", nbOutputs, mOutputNodes.size());
mOutputNodes = outputs;
mOutputNodes.insert(mOutputNodes.end(), ignoredOutputs.begin(), ignoredOutputs.end());
......
......@@ -64,7 +64,7 @@ Aidge::Connector Aidge::Node::operator()(const std::vector<Connector>& ctors) {
///////////////////////////////////////////////////////
void Aidge::Node::setName(const std::string& name) {
for (auto graphView : views()) graphView->updateNodeName(mName, name);
for (auto graphView : views()) graphView->updateNodeName(shared_from_this(), name);
mName = name;
}
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/operator/Fold.hpp"
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <stdexcept> // std::runtime_error
#include <string>
#include <utility> // std::pair
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
template <Aidge::DimIdx_t DIM>
const std::string Aidge::Fold_Op<DIM>::Type = "Fold";
template <Aidge::DimIdx_t DIM>
bool Aidge::Fold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
if (inputsAssociated()) {
auto dims(getInput(0)->dims());
DimSize_t k = 1;
DimSize_t l = 1;
for (std::size_t dim = 0; dim < this->kernelDims().size() ; ++dim) {
const DimSize_t kernelExtent = this->dilationDims()[dim] *
(this->kernelDims()[dim] - 1) + 1;
k *= this->kernelDims()[dim];
l *= 1 + static_cast<DimSize_t>(
floor(static_cast<float>(this->outputDims()[dim] - kernelExtent) /
static_cast<float>(this->strideDims()[dim])));
}
AIDGE_ASSERT(dims[dims.size() - 2] % k == 0 , "Fold: input number of channels ({}) is not divisible by the product of provided kernel dims ({})!",
dims[dims.size() - 2], k);
AIDGE_ASSERT(dims[dims.size() - 1] == l, "Fold: mismatch between expected input 3rd dim {} and provided input 3rd dim {}",
dims[dims.size() - 1], l);
dims[dims.size() - 2] /= k;
dims.pop_back();
dims.insert(dims.end(), this->outputDims().begin(), this->outputDims().end());
mOutputs[0]->resize(dims);
return true;
}
return false;
}
template <Aidge::DimIdx_t DIM>
void Aidge::Fold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
SET_IMPL_MACRO(Fold_Op<DIM>, *this, name);
mOutputs[0]->setBackend(name, device);
}
template class Aidge::Fold_Op<2>;
\ No newline at end of file
......@@ -53,7 +53,7 @@ bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
}
AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrices sizes.");
AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrices sizes: {} vs {}.", dims0, dims1);
std::vector<std::size_t> outDims = std::vector<std::size_t>(dims_size-2, 1);
for (std::size_t i = 0; i < dims_size-2; ++i) {
......
......@@ -134,6 +134,20 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getNbProducedData(IOIndex_t outputIdx) con
}
}
void Aidge::MetaOperator_Op::resetConsummerProducer() {
if (mImpl) {
mImpl->resetConsummerProducer();
}
else {
if (!mScheduler) {
// Lazy initialization
mScheduler = std::make_shared<SequentialScheduler>(mGraph, mUpperNode.lock());
}
mScheduler->resetScheduling();
}
}
void Aidge::MetaOperator_Op::updateConsummerProducer() {
if (mImpl) {
mImpl->updateConsummerProducer();
......
......@@ -73,14 +73,12 @@ bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
{
int64_t dimSize = this->shape()[i];
if (dimSize < 0) {
if (foundNegativeDimension) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator.");
}
AIDGE_ASSERT(!foundNegativeDimension, "Found more than one negative dimension in Reshape Operator: {}.", this->shape());
foundNegativeDimension = true;
dimSize = 1;
negativeIndex = static_cast<DimIdx_t>(i);
}
else if (dimSize == 0 && !mAttributes->template getAttr<ReshapeAttr::AllowZero>())
else if (dimSize == 0 && !this->allowZero())
{
dimSize = getInput(0) -> dims()[i];
}
......
......@@ -147,13 +147,13 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
static_cast<DimSize_t>(this->ends()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
const std::int64_t step = this->steps()[i];
AIDGE_ASSERT(step != 0, "Slice_Op: Step must be a non-zero value!");
AIDGE_ASSERT(step != 0, "Slice_Op: Step ({}) must have a non-zero value on axis {}!", this->steps(), axis);
if(step * (static_cast<int64_t>(end) - static_cast<int64_t>(start)) < 0) {
if(step < 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step is negative we must have End < Start", type());
AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step ({}) is negative, we must have End ({}) < Start ({}) on axis {}", type(), step, end, start, axis);
}
else {
AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step is positive we must have Start < End", type());
AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step ({}) is positive, we must have Start ({}) < End ({}) on axis {}", type(), step, start, end, axis);
}
}
......@@ -161,7 +161,8 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
// Check if slice length is valid
if (sliceLength > getInput(0)->dims()[axis])
{
AIDGE_THROW_OR_ABORT(std::runtime_error, "Slice_Op: ROI of Slice operator out of bounds");
AIDGE_THROW_OR_ABORT(std::runtime_error, "Slice_Op: ROI ({}) of Slice operator out of bounds ({}) on axis {}, with (Start, End, Step) = ({}, {}, {})",
sliceLength, getInput(0)->dims()[axis], axis, start, end, step);
}
outDims[axis] = sliceLength;
}
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/operator/Unfold.hpp"
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <stdexcept> // std::runtime_error
#include <string>
#include <utility> // std::pair
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
template <Aidge::DimIdx_t DIM>
void Aidge::Unfold_OpImpl<DIM>::forward() {
const Unfold_Op<DIM>& op = dynamic_cast<const Unfold_Op<DIM>&>(mOp);
const auto kernelDims = op.kernelDims();
const auto dilationDims = op.dilationDims();
const auto strideDims = op.strideDims();
const DimSize_t inHeight = op.getInput(0)->dims()[2];
const DimSize_t inWidth = op.getInput(0)->dims()[3];
const DimSize_t inChannels = op.getInput(0)->dims()[1];
const DimSize_t kernelExtentHeight = op.dilationDims()[0] *
(op.kernelDims()[0] - 1) + 1;
const DimSize_t outHeight = 1 + static_cast<DimSize_t>(
floor(static_cast<float>(inHeight - kernelExtentHeight) /
static_cast<float>(op.strideDims()[0])));
const DimSize_t kernelExtentWidth = op.dilationDims()[1] *
(op.kernelDims()[1] - 1) + 1;
const DimSize_t outWidth = 1 + static_cast<DimSize_t>(
floor(static_cast<float>(inWidth - kernelExtentWidth) /
static_cast<float>(op.strideDims()[1])));
const DimSize_t outChannels = op.getOutput(0)->dims()[1];
for (DimSize_t n = 0; n < op.getOutput(0)->dims()[0]; ++n) {
for (DimSize_t outC = 0; outC < outChannels; ++outC) {
const auto inOffsetW = outC % kernelDims[1];
const auto inOffsetH = (outC / kernelDims[1]) % kernelDims[0];
const auto inC = outC / kernelDims[0] / kernelDims[1];
for (DimSize_t outH = 0; outH < outHeight; ++outH) {
const auto inH = outH * strideDims[0] + inOffsetH * dilationDims[0];
for (DimSize_t outW = 0; outW < outWidth; ++outW) {
const auto inW = outW * strideDims[1] + inOffsetW * dilationDims[1];
op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(((n * inChannels + inC) * inHeight + inH) * inWidth + inW), 1,
((n * outChannels + outC) * outHeight + outH) * outWidth + outW);
}
}
}
}
}
template <Aidge::DimIdx_t DIM>
const std::string Aidge::Unfold_Op<DIM>::Type = "Unfold";
template <Aidge::DimIdx_t DIM>
bool Aidge::Unfold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
if (inputsAssociated()) {
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
DimSize_t k = 1;
DimSize_t l = 1;
for (std::size_t dim = 0; dim < this->kernelDims().size() ; ++dim) {
const DimSize_t kernelExtent = this->dilationDims()[dim] *
(this->kernelDims()[dim] - 1) + 1;
k *= this->kernelDims()[dim];
l *= 1 + static_cast<DimSize_t>(
floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
static_cast<float>(this->strideDims()[dim])));
}
mOutputs[0]->resize({inputDims[0], inputDims[1] * k, l});
return true;
}
return false;
}
template <Aidge::DimIdx_t DIM>
void Aidge::Unfold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
if (Registrar<Unfold_Op<DIM>>::exists({name})){
SET_IMPL_MACRO(Unfold_Op<DIM>, *this, name);
}
else {
mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
}
mOutputs[0]->setBackend(name, device);
}
template class Aidge::Unfold_OpImpl<2>;
template class Aidge::Unfold_Op<2>;
\ No newline at end of file
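For reference, a worked instance of the forwardDims() formula above, with an input size assumed purely for illustration:

// Unfold_Op<2>::forwardDims() with input dims {1, 3, 32, 32}, kernel {3, 3}, stride {1, 1}, dilation {1, 1}:
//   k            = 3 * 3 = 9
//   kernelExtent = 1 * (3 - 1) + 1 = 3 per axis
//   l            = (1 + floor((32 - 3) / 1)) * (1 + floor((32 - 3) / 1)) = 30 * 30 = 900
//   output dims  = {1, 3 * 9, 900} = {1, 27, 900}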
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <memory>
#include "aidge/graph/Node.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Matching.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/Unfold.hpp"
#include "aidge/operator/Fold.hpp"
#include "aidge/operator/Reshape.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/operator/MatMul.hpp"
#include "aidge/recipes/Recipes.hpp"
size_t Aidge::convToMatMul(std::shared_ptr<GraphView> graphView) {
const auto matches = SinglePassGraphMatching(graphView).match("Conv");
size_t nbReplaced = 0;
for (const auto& match : matches) {
const auto convNode = match.startNode;
const std::shared_ptr<Conv_Op<2>> convOp =
std::static_pointer_cast<Conv_Op<2>>(convNode->getOperator());
AIDGE_ASSERT(convOp->getOutput(0) && !convOp->getOutput(0)->empty(),
"Output dims must have been forwarded in order to apply convToMatMul for Conv {}", convNode->name());
//const auto nbDims = convOp->getOutput(0)->dims().size();
//const std::array<DimSize_t, 2> outputDims = {convOp->getOutput(0)->dims()[nbDims - 2], convOp->getOutput(0)->dims()[nbDims - 1]};
const auto wShape = convOp->getInput(1)->dims();
const auto wFlattenSize = std::accumulate(wShape.cbegin() + 1, wShape.cend(), DimSize_t(1), std::multiplies<DimSize_t>());
auto microGraph = std::make_shared<GraphView>();
auto unfold = Unfold(convOp->kernelDims(),
(!convNode->name().empty()) ? convNode->name() + "_unfold" : "",
convOp->strideDims(),
convOp->dilationDims());
auto wReshapeProd = Producer(std::make_shared<Tensor>(Vector<int64_t>{{static_cast<int64_t>(convOp->getInput(1)->dims()[0]), static_cast<int64_t>(wFlattenSize)}}),
(!convNode->name().empty()) ? convNode->name() + "_w_reshape_shape_prod" : "",
true);
auto wReshape = Reshape({},
false,
(!convNode->name().empty()) ? convNode->name() + "_w_reshape" : "");
auto matMul = MatMul((!convNode->name().empty()) ? convNode->name() + "_matmul" : "");
auto reshapeProd = Producer(std::make_shared<Tensor>(Vector<int64_t>(convOp->getOutput(0)->dims())),
(!convNode->name().empty()) ? convNode->name() + "_reshape_shape_prod" : "",
true);
auto reshape = Reshape({},
false,
(!convNode->name().empty()) ? convNode->name() + "_reshape" : "");
//auto fold = Fold(outputDims,
// convOp->kernelDims(),
// (!convNode->name().empty()) ? convNode->name() + "_unfold" : "",
// convOp->strideDims(),
// convOp->dilationDims());
wReshapeProd->addChild(wReshape, 0, 1);
wReshape->addChild(matMul, 0, 0);
unfold->addChild(matMul, 0, 1);
reshapeProd->addChild(reshape, 0, 1);
matMul->addChild(reshape, 0, 0);
//matMul->addChild(fold, 0, 0);
microGraph->add({unfold, wReshapeProd, wReshape, matMul, reshapeProd, reshape}, false);
//microGraph->add({unfold, wReshapeProd, wReshape, matMul, fold}, false);
// Handle bias
if (convOp->getInput(2) && !convOp->getInput(2)->empty()) {
auto add = Add(2, (!convNode->name().empty()) ? convNode->name() + "_add" : "");
auto bReshapeProd = Producer(std::make_shared<Tensor>(Vector<int64_t>{{1, static_cast<int64_t>(convOp->getInput(2)->size()), 1, 1}}),
(!convNode->name().empty()) ? convNode->name() + "_b_reshape_shape_prod" : "",
true);
auto bReshape = Reshape({},
false,
(!convNode->name().empty()) ? convNode->name() + "_b_reshape" : "");
bReshapeProd->addChild(bReshape, 0, 1);
bReshape->addChild(add, 0, 1);
reshape->addChild(add, 0, 0);
//fold->addChild(add, 0, 0);
microGraph->add({reshape, add, bReshapeProd, bReshape}, false);
//microGraph->add({fold, add}, false);
microGraph->setOrderedInputs({{unfold, 0}, {wReshape, 0}, {bReshape, 0}});
}
else {
// Add a dummy 3rd input in order for replace() to work
microGraph->setOrderedInputs({{unfold, 0}, {wReshape, 0}, {nullptr, 0}});
}
auto gConv = std::make_shared<GraphView>();
gConv->add(convNode, false);
const auto success = GraphView::replace(gConv, microGraph);
if (!success) {
Log::notice("Could not replace Conv {} with MatMul", convNode->name());
}
else {
++nbReplaced;
}
}
Log::info("Replaced {} (out of {}) matching Conv with MatMul", nbReplaced, matches.size());
return nbReplaced;
}
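To summarize the rewrite, here is how the shapes flow through the generated micro-graph for the first Conv of the test case below (Conv(3, 32, {3, 3}) on a {16, 3, 224, 224} input); values are derived from that test, not from additional code:

// Shape walkthrough of the Conv -> Unfold / MatMul / Reshape rewrite:
//   input             : {16, 3, 224, 224}
//   Unfold            : {16, 3*3*3, 222*222} = {16, 27, 49284}
//   weight Reshape    : {32, 27}                  (weights flattened to {C_out, C_in*Kh*Kw})
//   MatMul(W, cols)   : {16, 32, 49284}
//   Reshape           : {16, 32, 222, 222}        (the original Conv output dims)
//   optional bias Add : bias reshaped to {1, 32, 1, 1} and broadcast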
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include "aidge/recipes/Recipes.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/graph/OpArgs.hpp"
#include <cstddef>
using namespace Aidge;
TEST_CASE("[ConvToMatMul] conv") {
auto conv1 = Conv(3, 32, {3, 3}, "conv1");
auto conv2 = Conv(32, 64, {3, 3}, "conv2", {1, 1}, {1, 1}, true);
auto conv3 = Conv(64, 10, {1, 1}, "conv3", {2, 2});
auto g1 = Sequential({
Producer({16, 3, 224, 224}, "dataProvider"),
conv1,
conv2,
conv3
});
g1->forwardDims();
g1->save("convToMatMul_before");
REQUIRE(convToMatMul(g1) == 3);
g1->save("convToMatMul_after");
}