Commit 8833a2bd authored by Olivier BICHLER

Merge branch 'genericop_from_op' into 'dev'

Added possibility to create a GenericOp from any Operator

See merge request !272
parents 0da4f0a8 49400010
2 merge requests: !318 [Upd] release version 0.5.0, !272 [Add] Possibility to create a GenericOperator from any Operator
Pipeline #61411 passed
@@ -75,6 +75,10 @@ public:
inline void addAttr(const std::string& name, const T& value) const
{ mAttributes -> template addAttr<T>(name, value); }
inline void setAttrs(const std::map<std::string, future_std::any>& attrs) {
*mAttributes = attrs;
}
// Helper functions that can be used with setForwardDims():
static const ComputeDimsFunc Identity;
static const ComputeDimsFunc InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs);
@@ -84,9 +88,9 @@ public:
};
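A sketch of the two attribute entry points, assuming gop is a std::shared_ptr<GenericOperator_Op> (the attribute name "epsilon" is illustrative):

gop->addAttr<float>("epsilon", 1.0e-5f);                 // add one typed attribute
gop->setAttrs({{"epsilon", future_std::any(1.0e-5f)}});  // replace the whole attribute map (added by this merge request)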
/**
- * @brief Fictive custom operator not associated with any implementation.
+ * @brief Generic operator not associated with any implementation.
* Allows to import unknown operators and simulate new ones.
- * @param type Type of the fictive operator.
+ * @param type Type of the generic operator.
* @param inputCategory List inputs with their category
* @param nbOut Number of output data.
* @param name (optional) name of the Operator.
@@ -96,9 +100,9 @@ std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector
const std::string& name = "");
/**
- * @brief Fictive custom operator not associated with any implementation.
+ * @brief Generic operator not associated with any implementation.
* Allows to import unknown operators and simulate new ones.
- * @param type Type of the fictive operator.
+ * @param type Type of the generic operator.
* @param nbData Number of input data.
* @param nbParam Number of parameters.
* @param nbOut Number of output data.
@@ -107,6 +111,18 @@ std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector
*/
std::shared_ptr<Node> GenericOperator(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
const std::string& name = "");
/**
* @brief Generic operator not associated with any implementation.
* Create a generic operator from another existing operator.
* @param type Type of the generic operator.
* @param op Original operator from which to derive the generic operator.
* @param name (optional) name of the Operator.
* @return std::shared_ptr<Node> Node associated with the Generic Operator.
*/
std::shared_ptr<Aidge::Node> GenericOperator(const std::string& type,
std::shared_ptr<OperatorTensor> op,
const std::string& name = "");
} // namespace Aidge
#endif /* AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_ */
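A minimal sketch of the new overload; Conv and the "Conv2D" type string are taken from the test at the end of this diff, the node names are illustrative:

#include <memory>

#include "aidge/operator/Conv.hpp"
#include "aidge/operator/GenericOperator.hpp"

// Wrap an existing Conv node's operator into a GenericOperator node that
// keeps the same input categories, output count and attributes.
std::shared_ptr<Aidge::Node> makeGenericConv() {
    auto conv = Aidge::Conv(1, 3, {3, 3}, "conv1");
    auto op = std::dynamic_pointer_cast<Aidge::OperatorTensor>(conv->getOperator());
    return Aidge::GenericOperator("Conv2D", op, "conv1_generic");
}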
@@ -196,6 +196,10 @@ public:
return mOperatorType;
}
inline std::vector<InputCategory> inputCategory() const {
return mInputsCategory;
}
inline InputCategory inputCategory(IOIndex_t idx) const {
// AIDGE_ASSERT(idx < mInputsCategory.size(), "Input #{} out of range (number of inputs is {})", idx, mInputsCategory.size());
return mInputsCategory.at(idx);
......
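Both overloads in a sketch, assuming op is a std::shared_ptr<Operator>:

const std::vector<Aidge::InputCategory> allCats = op->inputCategory();  // categories of every input slot
const Aidge::InputCategory firstCat = op->inputCategory(0);             // category of a single input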
@@ -180,6 +180,19 @@ size_t convToMatMul(std::shared_ptr<GraphView> graph);
*/
void adaptToBackend(std::shared_ptr<GraphView> graph);
// /**
// * @brief The node passed contains an operator whose input of index 1 is supposed to be weights of type Int4, Int3, Int2 or binary.
// * This recipe only operates memory transformations on the weight tensor.
// * First, it permutes the dimensions to match the data format NHWC.
// * Second, it compacts the last dimension (channel dimension) into int8_t.
// *
// * @param node Node
// */
// void applyWeightInterleaving(std::shared_ptr<Node> node);
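/**
 * @brief Replace the Operator of a Node by a GenericOperator of the same type,
 * keeping its attributes and, if dims were already forwarded, its input/output
 * dimensions.
 * @param node Node whose Operator is converted in-place.
 */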
void toGenericOp(std::shared_ptr<Node> node);
} // namespace Aidge
#endif /* AIDGE_CORE_UTILS_RECIPES_H_ */
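A short sketch of the recipe in use, mirroring the test at the end of this diff (g is assumed to be a GraphView on which forwardDims() was already called):

auto node = g->getNode("conv2");
Aidge::toGenericOp(node);
// g->getNode("conv2") now holds a GenericOperator_Op, still of type "Conv2D".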
@@ -34,6 +34,23 @@ namespace py = pybind11;
namespace Aidge {
#if __cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
#define AIDGE_DYNATTR_HAVE_CPP17
#endif
#if defined(AIDGE_DYNATTR_HAVE_CPP17) || defined(__cpp_lib_void_t)
using std::void_t;
#else
template <typename...>
using void_t = void;
#endif
// Detection idiom to check if a type T has a less-than operator
template <typename T, typename = void>
struct has_less_than_operator : std::false_type {};
template <typename T>
struct has_less_than_operator<T, void_t<decltype(std::declval<T>() < std::declval<T>())>> : std::true_type {};
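A compile-time sketch of what the trait reports (NoLess is an illustrative type without operator<):

struct NoLess {};

static_assert(has_less_than_operator<int>::value, "int provides operator<");
static_assert(!has_less_than_operator<NoLess>::value, "NoLess does not");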
///\todo store also a fix-sized code that indicates the type
///\todo managing complex types or excluding non-trivial, non-aggregate types
@@ -41,6 +58,10 @@ class DynamicAttributes : public Attributes {
public:
DynamicAttributes() = default;
DynamicAttributes(const std::map<std::string, future_std::any>& attrs): mAttrs(attrs) {}
DynamicAttributes& operator=(const std::map<std::string, future_std::any>& attrs) {
mAttrs = attrs;
return *this;
}
/**
* \brief Returning an Attribute identified by its name
@@ -340,6 +361,14 @@ public:
}
};
template<typename T>
static inline typename std::enable_if<!has_less_than_operator<T>::value, void>::type makeTypeConditionallyAvailable() {}
template<typename T>
static inline typename std::enable_if<has_less_than_operator<T>::value, void>::type makeTypeConditionallyAvailable() {
mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
}
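With the trait above, overload resolution selects the registering version only for comparable types; as a sketch (reusing the illustrative NoLess type from earlier):

DynamicAttributes::makeTypeConditionallyAvailable<int>();     // registers AnyUtils<int> in mAnyUtils
DynamicAttributes::makeTypeConditionallyAvailable<NoLess>();  // no-op: NoLess has no operator<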
// Stores typed utils functions for each attribute type ever used
static std::map<std::type_index, std::unique_ptr<AnyUtils_>> mAnyUtils;
};
@@ -403,6 +432,30 @@ namespace std {
return seed;
}
};
// Special case for std::array
template <typename T, std::size_t N>
struct hash<std::array<T, N>> {
std::size_t operator()(const std::array<T, N>& iterable) const {
std::size_t seed = 0;
for (const auto& v : iterable) {
// Combine the hash of each element into the seed
Aidge::hash_combine(seed, std::hash<T>()(v));
}
return seed;
}
};
// Specialization of std::hash for std::pair<T1, T2>
template <typename T1, typename T2>
struct hash<std::pair<T1, T2>> {
std::size_t operator()(const std::pair<T1, T2>& p) const {
std::size_t seed = 0;
Aidge::hash_combine(seed, std::hash<T1>()(p.first));
Aidge::hash_combine(seed, std::hash<T2>()(p.second));
return seed;
}
};
}
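A sketch of the new specializations in action (both rely on the Aidge::hash_combine helper used above; values are illustrative):

#include <array>
#include <string>
#include <utility>

const std::array<int, 3> arr{1, 2, 3};
const std::pair<int, std::string> p{42, "weight"};

const std::size_t hArr  = std::hash<std::array<int, 3>>{}(arr);
const std::size_t hPair = std::hash<std::pair<int, std::string>>{}(p);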
namespace future_std {
......
@@ -24,6 +24,7 @@
#endif
#include "aidge/utils/Attributes.hpp"
#include "aidge/utils/DynamicAttributes.hpp"
#include "aidge/utils/ErrorHandling.hpp"
namespace Aidge {
@@ -322,7 +323,11 @@ private:
inline typename std::enable_if<I == sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& /*t*/, std::map<std::string, future_std::any>& /*attrs*/) const {}
template<std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I < sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& t, std::map<std::string, future_std::any>& attrs) const {
// Ensure that the type will be known to DynamicAttributes
using ElementType = typename std::tuple_element<I,std::tuple<Tp...>>::type;
DynamicAttributes::makeTypeConditionallyAvailable<ElementType>();
attrs.insert(std::make_pair(EnumStrings<ATTRS_ENUM>::data[I], future_std::any(std::get<I>(t))));
appendAttr<I + 1, Tp...>(t, attrs);
}
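The appendAttr pair above uses index-based compile-time recursion over a tuple; the same pattern in a self-contained sketch (printAll is a made-up example):

#include <cstddef>
#include <iostream>
#include <tuple>
#include <type_traits>

// Base case: past the last element, do nothing.
template <std::size_t I = 0, typename... Tp>
typename std::enable_if<I == sizeof...(Tp), void>::type printAll(const std::tuple<Tp...>&) {}

// Recursive case: handle element I, then recurse on I + 1.
template <std::size_t I = 0, typename... Tp>
typename std::enable_if<I < sizeof...(Tp), void>::type printAll(const std::tuple<Tp...>& t) {
    std::cout << std::get<I>(t) << '\n';
    printAll<I + 1, Tp...>(t);
}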
......
@@ -39,6 +39,30 @@ void init_GenericOperator(py::module& m) {
.def("set_forward_dims", &GenericOperator_Op::setForwardDims, py::arg("computation_function"));
// &GenericOperator
m.def("GenericOperator",
[]( const std::string& type,
const std::vector<Aidge::InputCategory>& inputCategory,
IOIndex_t nbOut,
const std::string& name,
const py::kwargs kwargs){
std::shared_ptr<Node> genericNode = GenericOperator(
type,
inputCategory,
nbOut,
name
);
if (kwargs){
std::shared_ptr<GenericOperator_Op> gop = std::static_pointer_cast<GenericOperator_Op>(genericNode->getOperator());
std::shared_ptr<DynamicAttributes> attr = std::dynamic_pointer_cast<DynamicAttributes>(gop->attributes());
for (auto item : kwargs) {
std::string key = py::cast<std::string>(item.first);
py::object value = py::reinterpret_borrow<py::object>(item.second);
attr->setAttrPy(key, std::move(value));
}
}
return genericNode;
}, py::arg("type"), py::arg("input_category"), py::arg("nb_out"), py::arg("name") = "");
m.def("GenericOperator",
[]( const std::string& type,
IOIndex_t nbData,
@@ -65,6 +89,8 @@ void init_GenericOperator(py::module& m) {
return genericNode;
}, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"), py::arg("name") = "");
m.def("GenericOperator", py::overload_cast<const std::string&, std::shared_ptr<OperatorTensor>, const std::string&>(&GenericOperator), py::arg("type"), py::arg("op"), py::arg("name") = "");
declare_registrable<GenericOperator_Op>(m, "GenericOperatorOp");
}
} // namespace Aidge
@@ -45,7 +45,14 @@ void init_Operator(py::module& m){
.def("get_raw_input", &Operator::getRawInput, py::arg("inputIdx"))
.def("nb_inputs", &Operator::nbInputs)
.def("nb_outputs", &Operator::nbOutputs)
.def("input_category", &Operator::inputCategory, py::arg("idx"),
.def("input_category", static_cast<std::vector<InputCategory>(Operator::*)() const>(&Operator::inputCategory),
R"mydelimiter(
Category of the inputs (Data or Param, optional or not).
Data inputs exclude inputs expecting parameters (weights or bias).
:rtype: list(InputCategory)
)mydelimiter")
.def("input_category", static_cast<InputCategory(Operator::*)(IOIndex_t) const>(&Operator::inputCategory), py::arg("idx"),
R"mydelimiter(
Category of a specific input (Data or Param, optional or not).
Data inputs exclude inputs expecting parameters (weights or bias).
......
@@ -144,6 +144,13 @@ void init_Recipes(py::module &m)
:param graph_view: Graph view on which we want to apply the recipe
:type graph_view: :py:class:`aidge_core.GraphView`
)mydelimiter");
m.def("to_generic_op", toGenericOp, py::arg("node"), R"mydelimiter(
Transform to a Generic Operator.
:param node: Node which Operator will turn into a Generic Operator
:type graph_view: :py:class:`aidge_core.Node`
)mydelimiter");
}
} // namespace Aidge
@@ -22,7 +22,8 @@
Aidge::GenericOperator_Op::GenericOperator_Op(const std::string& type,
const std::vector<Aidge::InputCategory>& inputsCategory,
Aidge::IOIndex_t nbOut)
- : OperatorTensor(type, inputsCategory, nbOut)
+ : OperatorTensor(type, inputsCategory, nbOut),
+   mAttributes(std::make_shared<DynamicAttributes>())
{
mImpl = std::make_shared<OperatorImpl>(*this);
}
@@ -73,7 +74,8 @@ bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) {
}
const auto& outputsDims = mForwardDims(inputsDims);
- AIDGE_ASSERT((outputsDims.size() == nbOutputs()), "The provided ComputeDimsFunc function returns the wrong number of outputs");
+ AIDGE_ASSERT(!outputsDims.empty(), "The provided ComputeDimsFunc cannot compute the output dims (an empty vector was returned)");
+ AIDGE_ASSERT(outputsDims.size() == nbOutputs(), "The provided ComputeDimsFunc function returned the wrong number of outputs: {}, but {} are expected", outputsDims.size(), nbOutputs());
for (std::size_t i = 0; i < nbOutputs(); ++i) {
mOutputs[i]->resize(outputsDims[i]);
}
@@ -117,3 +119,40 @@ std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
const std::string& name) {
return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
}
std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
std::shared_ptr<OperatorTensor> op,
const std::string& name)
{
// Create a generic op with the same inputs/outputs
auto genericOp = std::make_shared<GenericOperator_Op>(type, op->inputCategory(), op->nbOutputs());
// Copy attributes
genericOp->setAttrs(op->attributes()->getAttrs());
// Set a default forward dims if possible
if (op->dimsForwarded()) {
auto opInputDims = std::vector<std::vector<DimSize_t>>(op->nbInputs());
for (size_t i = 0; i < op->nbInputs(); ++i) {
opInputDims[i] = op->getInput(i)->dims();
}
auto opOutputDims = std::vector<std::vector<DimSize_t>>(op->nbOutputs());
for (size_t o = 0; o < op->nbOutputs(); ++o) {
opOutputDims[o] = op->getOutput(o)->dims();
}
genericOp->setForwardDims([opInputDims, opOutputDims](const std::vector<std::vector<std::size_t>>& inputsDims) {
// Check input dims
for (size_t i = 0; i < opInputDims.size(); ++i) {
if (inputsDims[i] != opInputDims[i]) {
// No matching => unable to compute output dims!
return std::vector<std::vector<std::size_t>>();
}
}
return opOutputDims;
});
}
return std::make_shared<Node>(genericOp, name);
}
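The setForwardDims call above freezes the recorded shapes into the closure; the same pattern in isolation, with illustrative dims matching conv1 of the test below (batch 5, 1 input channel, 28x28 input, 3x3 kernel):

const std::vector<std::vector<std::size_t>> knownIn  = {{5, 1, 28, 28}};
const std::vector<std::vector<std::size_t>> knownOut = {{5, 3, 26, 26}};

auto frozenDims = [knownIn, knownOut](const std::vector<std::vector<std::size_t>>& inputsDims) {
    // Reproduce the recorded outputs only for the recorded inputs; an empty
    // result makes GenericOperator_Op::forwardDims() fail its assert.
    return (inputsDims == knownIn) ? knownOut : std::vector<std::vector<std::size_t>>();
};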
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <memory>
#include "aidge/graph/Node.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/operator/GenericOperator.hpp"
#include "aidge/recipes/Recipes.hpp"
void Aidge::toGenericOp(std::shared_ptr<Node> node) {
auto newGenOp = {GenericOperator(node->type(), std::dynamic_pointer_cast<Aidge::OperatorTensor>(node->getOperator()), node->name())};
auto OldOp = {node};
GraphView::replace(OldOp, newGenOp);
}
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <memory>
#include <set>
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/OpArgs.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/FC.hpp"
#include "aidge/operator/ReLU.hpp"
#include "aidge/operator/GenericOperator.hpp"
#include "aidge/recipes/Recipes.hpp"
namespace Aidge {
TEST_CASE("[graph/convert] toGenericOp", "[toGenericOp][recipies]") {
// Build a small sequential Conv/ReLU/FC model
std::shared_ptr<GraphView> g =
Sequential({
Conv(1, 3, {3, 3}, "conv1"),
ReLU(),
Conv(3, 4, {1, 1}, "conv2"),
ReLU(),
Conv(4, 3, {1, 1}, "conv3"),
ReLU(),
FC(2028, 256, false, "fc1"),
ReLU(),
FC(256, 10, false, "fc2")});
// NCHW - MNIST DATA like
g->forwardDims({{5, 1, 28, 28}});
SECTION("Test Operator to Generic Operator") {
auto convOp = g->getNode("conv2");
// Convert to GenericOperator
toGenericOp(convOp);
auto newGenOp = g->getNode("conv2");
// Ensure the conversion
REQUIRE(newGenOp->type() == "Conv2D");
const auto convOpAttr = convOp->getOperator()->attributes()->getAttrs();
const auto newGenOpAttr = (newGenOp->getOperator()->attributes()->getAttrs());
REQUIRE((!(newGenOpAttr < convOpAttr) && !(convOpAttr < newGenOpAttr)));
}
SECTION("Test MetaOperator to Generic Operator") {
const auto nbFused = fuseToMetaOps(g, "Conv2D->ReLU->FC", "ConvReLUFC");
REQUIRE(nbFused == 1);
std::shared_ptr<Node> metaOpNode;
const auto nodes = g->getNodes(); // copy needed: g's node set is modified inside the loop!
for (const auto& nodePtr : nodes)
{
if (nodePtr->type() == "ConvReLUFC")
{
nodePtr->setName("ConvReLUFC_0");
metaOpNode = nodePtr;
// Convert to GenericOperator
toGenericOp(nodePtr);
}
}
REQUIRE(metaOpNode);
REQUIRE(!metaOpNode->getOperator()->isAtomic());
auto newGenOp = g->getNode("ConvReLUFC_0");
// Ensure the conversion
REQUIRE(newGenOp->type() == "ConvReLUFC");
REQUIRE(std::dynamic_pointer_cast<GenericOperator_Op>(newGenOp->getOperator()));
const auto metaOpAttr = *std::static_pointer_cast<DynamicAttributes>(metaOpNode->getOperator()->attributes());
const auto newGenOpAttr = *std::static_pointer_cast<DynamicAttributes>(newGenOp->getOperator()->attributes());
REQUIRE((!(newGenOpAttr < metaOpAttr) && !(metaOpAttr < newGenOpAttr)));
}
}
} // namespace Aidge