Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (35)
Showing with 757 additions and 113 deletions
# Version 0.4.0 (February 2025)
# Version 0.5.1 (February 13, 2025)
# Version 0.5.0 (January 31, 2025)
# Version 0.4.0 (December 2024)
......
@PACKAGE_INIT@
include(CMakeFindDependencyMacro)
find_dependency(fmt @FMT_VERSION@)
find_dependency(fmt @FMT_MIN_VERSION@)
find_dependency(Threads)
set(AIDGE_REQUIRES_PYTHON @AIDGE_REQUIRES_PYTHON@)
set(AIDGE_PYTHON_HAS_EMBED @AIDGE_PYTHON_HAS_EMBED@)
......
......@@ -136,7 +136,8 @@ class StaticAnalysisExt(aidge_core.StaticAnalysis):
bot += serie
else:
plt.bar(names_only, values)
ax.yaxis.minorticks_on()
if callable(getattr(ax.yaxis, 'minorticks_on', None)):
ax.yaxis.minorticks_on() # introduced in matplotlib 3.9.x
plt.grid(axis='y', which='major', linestyle='--', color='gray')
plt.grid(axis='y', which='minor', linestyle=':', color='lightgray')
formatter0 = matplotlib.ticker.EngFormatter(unit='')
......@@ -171,7 +172,8 @@ class StaticAnalysisExt(aidge_core.StaticAnalysis):
left += serie
else:
plt.barh(names_only, values)
ax.xaxis.minorticks_on()
if callable(getattr(ax.xaxis, 'minorticks_on', None)):
ax.xaxis.minorticks_on() # introduced in matplotlib 3.9.x
plt.grid(axis='x', which='major', linestyle='--', color='gray')
plt.grid(axis='x', which='minor', linestyle=':', color='lightgray')
formatter0 = matplotlib.ticker.EngFormatter(unit='')
......
......@@ -13,6 +13,9 @@ import threading
import subprocess
import pathlib
from typing import List
from inspect import signature
from functools import wraps
from typing import Union, _SpecialForm, List, Mapping, Dict, Tuple, get_origin, get_args
def template_docstring(template_keyword, text_to_replace):
......@@ -37,6 +40,108 @@ def template_docstring(template_keyword, text_to_replace):
return dec
def is_instance_of(obj, typ) -> bool:
    """Check if an object is an instance of a type,
    with special handling for subscripted types.
    """
    origin = get_origin(typ)
    args = get_args(typ)
    # If it's not a generic type, fall back to a plain isinstance check
    if origin is None:
        return isinstance(obj, typ)
    # Check if the object is of the expected container type
    if not isinstance(obj, origin):
        return False
    # Handle the specific cases of List, Dict and Tuple
    if origin in (list, set):
        return all(is_instance_of(item, args[0]) for item in obj)
    if origin is dict:
        return all(is_instance_of(k, args[0]) and is_instance_of(v, args[1]) for k, v in obj.items())
    if origin is tuple:
        if len(args) == 2 and args[1] is ...:  # Handles Tuple[X, ...]
            return all(is_instance_of(item, args[0]) for item in obj)
        return len(obj) == len(args) and all(is_instance_of(item, t) for item, t in zip(obj, args))
    raise NotImplementedError(f"Type {origin} is not supported")

def type_to_str(typ) -> str:
    """Return a string describing the type given as an argument,
    with special handling for subscripted types.
    This gives more detail than the __name__ attribute of the type.
    Example: dict[str, list[list[int]]] instead of dict.
    """
    # A plain tuple of types (e.g. built from a Union annotation) is described as an alternative
    if isinstance(typ, tuple):
        return " or ".join(type_to_str(t) for t in typ)
    origin = get_origin(typ)
    args = get_args(typ)
    if origin is None:
        return typ.__name__
    if origin in (list, set):
        return f"{origin.__name__}[{type_to_str(args[0])}]"
    if origin is dict:
        return f"{origin.__name__}[{type_to_str(args[0])}, {type_to_str(args[1])}]"
    if origin is tuple:
        if len(args) == 2 and args[1] is ...:
            return f"{origin.__name__}[{type_to_str(args[0])}, ...]"
        return f"{origin.__name__}[{', '.join(type_to_str(t) for t in args)}]"
    raise NotImplementedError(f"Type {origin} is not supported")

def var_to_type_str(var) -> str:
    """Return a string describing the type of a variable,
    with special handling for subscripted types.
    """
    typ = type(var)
    if typ is list and var:
        return f"list[{var_to_type_str(var[0])}]"
    if typ is set and var:
        return f"set[{var_to_type_str(next(iter(var)))}]"
    if typ is dict and var:
        key_type = var_to_type_str(next(iter(var.keys())))
        value_type = var_to_type_str(next(iter(var.values())))
        return f"dict[{key_type}, {value_type}]"
    if typ is tuple and var:
        return f"tuple[{', '.join(var_to_type_str(v) for v in var)}]"
    return typ.__name__

def check_types(f):
    """Decorator used to automatically check the argument types of functions/methods.
    To do so, we use the type annotations available since Python 3.5: https://docs.python.org/3/library/typing.html.
    Type checking handles subscripted types (List, Dict, Tuple).
    """
    sig = signature(f)
    # Dictionary mapping parameter names to their annotations
    args_types = {p.name: p.annotation for p in sig.parameters.values()}

    @wraps(f)
    def decorated(*args, **kwargs):
        bind = sig.bind(*args, **kwargs)
        obj_name = ""
        # Check if we are in a method
        if "self" in sig.parameters:
            obj_name = f"{bind.args[0].__class__.__name__}."
        for value, typ in zip(bind.args, args_types.items()):
            annotation_type = typ[1]
            if annotation_type == sig.empty:  # unannotated parameters are not checked
                continue
            if type(annotation_type) is _SpecialForm and annotation_type._name == "Any":  # check if Any
                continue
            if value is None:  # a None value is always accepted
                continue
            if hasattr(annotation_type, "__origin__") and annotation_type.__origin__ is Union:  # check if Union
                # Member types are contained in the __args__ attribute;
                # isinstance only supports a type or a tuple, so convert to tuple
                annotation_type = tuple(annotation_type.__args__)
            if not is_instance_of(value, annotation_type):
                raise TypeError(f'In {obj_name}{f.__name__}: "{typ[0]}" parameter must be of type {type_to_str(annotation_type)} but is of type {var_to_type_str(value)} instead.')
        return f(*args, **kwargs)
    return decorated
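For illustration, here is a minimal usage sketch of the check_types decorator (not part of the diff; scale_all is a hypothetical function, and Dict/List come from the module's existing typing imports):

from typing import Dict, List

@check_types
def scale_all(values: Dict[str, List[int]], factor: int) -> List[int]:
    # Hypothetical helper, used only to exercise the decorator
    return [v * factor for vs in values.values() for v in vs]

scale_all({"a": [1, 2]}, 3)       # OK
scale_all({"a": ["1", "2"]}, 3)   # TypeError: "values" parameter must be of type
                                  # dict[str, list[int]] but is of type
                                  # dict[str, list[str]] instead.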
def run_command(command: List[str], cwd: pathlib.Path = None):
......
......@@ -47,6 +47,7 @@
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/ConvDepthWise.hpp"
#include "aidge/operator/Div.hpp"
#include "aidge/operator/Equal.hpp"
#include "aidge/operator/Erf.hpp"
#include "aidge/operator/FC.hpp"
#include "aidge/operator/Gather.hpp"
......
......@@ -459,17 +459,6 @@ public:
return node->clone();
}
/**
 * @brief Get the set of pointers to connected nodes at a distance of a given delta.
 * @details The recursion is cut at the given distance.
 * Returns nullptr if nothing is found.
 * @param delta Input delta.
 * @return std::shared_ptr<Node>
 */
std::set<NodePtr> getNodeDelta(int delta, std::set<Aidge::NodePtr> nodeSee);
#ifdef PYBIND
std::string repr() const {
std::string nodeString{fmt::format("Node(name='{}', optype='{}'", name(), type())};
......
......@@ -33,12 +33,21 @@ enum class AvgPoolingAttr {
* Specifies the step size of the sliding window along each spatial dimension.
*/
StrideDims,
/**
* @brief Dilation along each spatial axis. Default value is 1.
*/
Dilations,
/**
* @brief Kernel dimensions for the pooling operation.
* Specifies the size of the pooling window along each spatial dimension.
*/
KernelDims
KernelDims,
/**
* @brief Flag indicating whether to use ceil or floor when calculating output size.
* - `true`: Use `ceil` for output size calculation.
* - `false`: Use `floor` for output size calculation.
*/
CeilMode
};
/**
......@@ -46,11 +55,30 @@ enum class AvgPoolingAttr {
*
* The AvgPooling operation computes the average value within sliding windows of specified size
* (kernel dimensions) over the input tensor. The stride dimensions determine how the window
* moves across the input. This operation is commonly used in neural networks to reduce the spatial
* dimensions while preserving features.
* moves across the input. The dilation parameter allows spacing between kernel elements, and
* `ceil_mode` determines whether to use ceiling instead of floor when computing the output shape.
* This operation is commonly used in neural networks to reduce spatial dimensions while preserving features.
*
* @tparam DIM Number of dimensions for the pooling operation.
*
* ### Output Shape Calculation
* - If `ceil_mode` is false:
* `output_size = floor((input_size - dilation * (kernel_size - 1) - 1) / stride + 1)`
* - If `ceil_mode` is true:
* `output_size = ceil((input_size - dilation * (kernel_size - 1) - 1) / stride + 1)`
*
* @example Example usage:
* - Input shape: (1, 3, 32, 32) // Batch size 1, 3 channels, 32x32 spatial dimensions
* - KernelDims: (2, 2)
* - StrideDims: (2, 2)
* - Dilation: (1, 1)
* - CeilMode: false
* - Output shape: (1, 3, 16, 16)
*
* @see OperatorTensor
* @see Registrable
*/
template <DimIdx_t DIM>
class AvgPooling_Op : public OperatorTensor,
public Registrable<AvgPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>> {
......@@ -67,7 +95,9 @@ private:
*/
using Attributes_ = StaticAttributes<AvgPoolingAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>>;
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
bool>;
template <AvgPoolingAttr e>
using attr = typename Attributes_::template attr<e>;
......@@ -84,21 +114,27 @@ public:
/**
* @brief Constructs an AvgPooling operation with specified kernel and stride dimensions.
* @param kernel_dims Size of the pooling window for each spatial dimension.
* @param stride_dims Step size (stride) for sliding the pooling window across the input dimensions.
* @param[in] kernel_dims Size of the pooling window for each spatial dimension.
* @param[in] stride_dims Step size (stride) for sliding the pooling window across the input dimensions.
* Defaults to 1 for each dimension.
* @param[in] dilations Spatial dilations for the pooling operation.
* @param[in] ceil_mode Indicates whether to use ceil mode for output size calculation.
*/
constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1))
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t, DIM>(1),
bool ceil_mode = false)
: OperatorTensor(Type, {InputCategory::Data}, 1),
mAttributes(std::make_shared<Attributes_>(
attr<AvgPoolingAttr::StrideDims>(stride_dims),
attr<AvgPoolingAttr::KernelDims>(kernel_dims)))
attr<AvgPoolingAttr::KernelDims>(kernel_dims),
attr<AvgPoolingAttr::Dilations>(dilations),
attr<AvgPoolingAttr::CeilMode>(ceil_mode)))
{}
/**
* @brief Copy-constructor.
* @param op AvgPooling_Op to copy.
* @param[in] op AvgPooling_Op to copy.
* @details Copies the operator attributes and its output tensor(s), but not
* its input tensors. The new operator has no associated input.
*/
......@@ -112,16 +148,16 @@ public:
/**
* @brief Calculates the output dimensions based on the input dimensions and operator attributes.
* @param allowDataDependency If true, considers data-dependent operations. Defaults to false.
* @param[in] allowDataDependency If true, considers data-dependent operations. Defaults to false.
* @return True if the dimensions are successfully calculated.
*/
bool forwardDims(bool /*allowDataDependency*/ = false) override final;
/**
* @brief Computes the receptive field of the operator.
* @param firstEltDims Dimensions of the first element.
* @param outputDims Dimensions of the output tensor.
* @param outputIdx Index of the output tensor. Defaults to 0.
* @param[in] firstEltDims Dimensions of the first element.
* @param[in] outputDims Dimensions of the output tensor.
* @param[in] outputIdx Index of the output tensor. Defaults to 0.
* @return A vector of pairs representing the receptive fields.
*/
std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
......@@ -131,8 +167,8 @@ public:
/**
* @brief Sets the backend for the operation.
* @param name Name of the backend.
* @param device Device index. Defaults to 0.
* @param[in] name Name of the backend.
* @param[in] device Device index. Defaults to 0.
*/
void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
......@@ -155,11 +191,23 @@ public:
inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<AvgPoolingAttr::StrideDims>(); }
/**
* @brief Accessor for the kernel dimensions.
* @return An array representing the kernel dimensions.
* @brief Accessor for dilations.
* @return An array representing spatial dilations.
*/
inline std::array<DimSize_t, DIM>& dilations() const { return mAttributes->template getAttr<AvgPoolingAttr::Dilations>(); }
/**
* @brief Accessor for kernel dimensions.
* @return An array representing kernel dimensions.
*/
inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<AvgPoolingAttr::KernelDims>(); }
/**
* @brief Accessor for ceil mode flag.
* @return Boolean value indicating whether ceil mode is enabled.
*/
inline bool& ceilMode() const { return mAttributes->template getAttr<AvgPoolingAttr::CeilMode>(); }
/**
* @brief Retrieves the names of the input tensors.
* @return A vector of strings representing the input tensors names.
......@@ -180,31 +228,39 @@ public:
/**
* @brief Creates an AvgPooling operator node.
* @tparam DIM Number of dimensions for the pooling operation.
* @param kernel_dims Size of the pooling window for each spatial dimension.
* @param name Name of the operator node. Defaults to an empty string.
* @param stride_dims Step size (stride) for sliding the pooling window across the input dimensions. Defaults to 1 for each dimension.
* @param[in] kernel_dims Size of the pooling window for each spatial dimension.
* @param[in] name Name of the operator node. Defaults to an empty string.
* @param[in] stride_dims Step size (stride) for sliding the pooling window across the input dimensions. Defaults to 1 for each dimension.
* @param[in] dilations Spatial dilations for the pooling operation.
* @param[in] ceil_mode Indicates whether to use ceil mode for output size calculation.
* @return A shared pointer to the created operator node.
*/
template <std::array<DimSize_t, 1>::size_type DIM>
std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1));
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
bool ceil_mode=false);
/**
* @brief Overload of AvgPooling for C-style arrays.
* @tparam DIM Number of dimensions for the pooling operation.
* @param kernel_dims C-style array specifying the kernel dimensions.
* @param name Name of the operator node. Defaults to an empty string.
* @param stride_dims Step size (stride) for sliding the pooling window across the input dimensions. Defaults to 1 for each dimension.
* @param[in] kernel_dims C-style array specifying the kernel dimensions.
* @param[in] name Name of the operator node. Defaults to an empty string.
* @param[in] stride_dims Step size (stride) for sliding the pooling window across the input dimensions. Defaults to 1 for each dimension.
* @param[in] dilations Spatial dilations for the pooling operation.
* @param[in] ceil_mode Indicates whether to use ceil mode for output size calculation.
* @return A shared pointer to the created operator node.
*/
template <DimSize_t DIM>
inline std::shared_ptr<Node> AvgPooling(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
bool ceil_mode=false) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
return AvgPooling(to_array(kernel_dims), name, stride_dims);
return AvgPooling(to_array(kernel_dims), name, stride_dims, dilations, ceil_mode);
}
} // namespace Aidge
......@@ -221,10 +277,7 @@ namespace {
* @brief String representation of the AvgPooling attributes.
*/
template <>
const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
"stride_dims",
"kernel_dims"
};
const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = { "stride_dims", "dilations", "kernel_dims", "ceil_mode" };
}
#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_EQUAL_H_
#define AIDGE_CORE_OPERATOR_EQUAL_H_
#include <memory>
#include <string>
#include <vector>
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
/**
* @brief Tensor element-wise logical equal operation.
*/
class Equal_Op : public OperatorTensor,
public Registrable<Equal_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Equal_Op&)>> {
public:
static const std::string Type;
/**
* @brief Compute element-wise Equal operation on two given inputs.
* @details supports broadcasting of both operands.
*/
Equal_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
* but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Equal_Op(const Equal_Op& op)
: OperatorTensor(op)
{
if (op.mImpl) {
SET_IMPL_MACRO(Equal_Op, *this, op.backend());
} else {
mImpl = nullptr;
}
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Equal_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Equal_Op>(*this);
}
bool forwardDims(bool allowDataDependency = false) override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
std::set<std::string> getAvailableBackends() const override;
static const std::vector<std::string> getInputsName(){
return {"data_input_1", "data_input_2"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
inline std::shared_ptr<Node> Equal(const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Equal_Op>(), name);
}
} // namespace Aidge
#endif /* AIDGE_CORE_OPERATOR_EQUAL_H_ */
......@@ -41,7 +41,10 @@ enum class MaxPoolingAttr {
* Must be positive integers.
*/
StrideDims,
/**
* @brief Dilation along each spatial axis. Default value is 1.
*/
Dilations,
/**
* @brief Kernel dimensions specifying the size of the pooling window for each spatial dimension.
* For example, common kernel dimensions include 2x2 or 3x3.
......@@ -63,24 +66,28 @@ enum class MaxPoolingAttr {
* @brief Implements the MaxPooling operation over a specified input tensor.
*
* MaxPooling reduces spatial dimensions by applying a max filter over a sliding window.
* The resulting output tensor contains the maximum value within each window.
* The stride dimensions determine how the window moves across the input. The dilation
* parameter allows spacing between kernel elements, and `ceil_mode` determines whether
* to use ceiling instead of floor when computing the output shape.
*
* ### Output Shape Calculation
* - If `CeilMode` is false:
* `output_size = floor((input_size - kernel_size) / stride + 1)`
* - If `CeilMode` is true:
* `output_size = ceil((input_size - kernel_size) / stride + 1)`
* - If `ceil_mode` is false:
* `output_size = floor((input_size - dilation * (kernel_size - 1) - 1) / stride + 1)`
* - If `ceil_mode` is true:
* `output_size = ceil((input_size - dilation * (kernel_size - 1) - 1) / stride + 1)`
*
* @example Example usage:
* - Input shape: (1, 3, 32, 32) // Batch size 1, 3 channels, 32x32 spatial dimensions
* - KernelDims: (2, 2)
* - StrideDims: (2, 2)
* - Dilation: (1, 1)
* - CeilMode: false
* - Output shape: (1, 3, 16, 16)
*
* @see OperatorTensor
* @see Registrable
*/
template <DimIdx_t DIM>
class MaxPooling_Op : public OperatorTensor,
public Registrable<MaxPooling_Op<DIM>,
......@@ -91,6 +98,7 @@ public:
static const std::string Type; ///< Static identifier for this operator type.
using Attributes_ = StaticAttributes<MaxPoolingAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
bool>;
......@@ -107,15 +115,17 @@ public:
* @brief Constructor.
* @param[in] kernel_dims Size of the pooling window for each spatial dimension.
* @param[in] stride_dims Step size (stride) for sliding the pooling window across input dimensions.
* @param[in] dilations Spatial dilations for the pooling operation.
* @param[in] ceil_mode Indicates whether to use ceil or floor for output size calculation.
*/
MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t, DIM>(1),
bool ceil_mode = false);
/**
* @brief Copy-constructor.
* @param op MaxPooling_Op to copy.
* @param[in] op MaxPooling_Op to copy.
* @details Copies the operator attributes and its output tensor(s), but not
* its input tensors. The new operator has no associated input.
*/
......@@ -159,6 +169,12 @@ public:
*/
inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); }
/**
* @brief Accessor for dilations.
* @return An array representing spatial dilations.
*/
inline std::array<DimSize_t, DIM>& dilations() const { return mAttributes->template getAttr<MaxPoolingAttr::Dilations>(); }
/**
* @brief Accessor for kernel dimensions.
* @return An array representing kernel dimensions.
......@@ -197,6 +213,7 @@ extern template class Aidge::MaxPooling_Op<3>;
* @param[in] kernel_dims Kernel dimensions specifying the size of the pooling window.
* @param[in] name Optional name for the operation.
* @param[in] stride_dims Stride dimensions specifying the step size for the pooling window.
* @param[in] dilations Spatial dilations for the pooling operation.
* @param[in] ceil_mode Indicates whether to use ceil mode for output size calculation.
* @return A shared pointer to a Node representing the MaxPooling operation.
*/
......@@ -204,6 +221,7 @@ template <std::array<DimSize_t, 1>::size_type DIM>
std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
bool ceil_mode=false);
/**
......@@ -212,6 +230,7 @@ std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
* @param[in] kernel_dims C-style array of kernel dimensions.
* @param[in] name Optional name for the operation.
* @param[in] stride_dims Stride dimensions specifying the step size for the pooling window.
* @param[in] dilations Spatial dilations for the pooling operation.
* @param[in] ceil_mode Indicates whether to use ceil mode for output size calculation.
* @return A shared pointer to a Node representing the MaxPooling operation.
*/
......@@ -220,9 +239,10 @@ inline std::shared_ptr<Node> MaxPooling(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
bool ceil_mode = false) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
return MaxPooling(to_array(kernel_dims), name, stride_dims, ceil_mode);
return MaxPooling(to_array(kernel_dims), name, stride_dims, dilations, ceil_mode);
}
} // namespace Aidge
......@@ -232,7 +252,7 @@ namespace {
* @brief String representations of MaxPooling attributes for debugging and logging.
*/
template <>
const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "kernel_dims", "ceil_mode"};
const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "dilations", "kernel_dims", "ceil_mode"};
}
#endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
......@@ -69,10 +69,7 @@ public:
*
* @param op The operator to copy.
*/
MetaOperator_Op(const MetaOperator_Op& op)
: OperatorTensor(op),
mGraph(op.mGraph->clone()) // Clone the micro-graph for isolation
{}
MetaOperator_Op(const MetaOperator_Op& op);
/**
* @brief Set the node for scheduling.
......@@ -206,13 +203,8 @@ public:
/**
* @brief Perform the backward pass for the operator.
*
* @note Currently not implemented.
*/
void backward() override {
AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented yet for a MetaOperator");
}
void backward() override;
/**
* @brief Check if the operator is atomic.
*
......
......@@ -164,14 +164,18 @@ PaddedConvDepthWise(const DimSize_t nb_channels,
* @param[in] kernel_dims The dimensions of the pooling window.
* @param[in] name Optional name for the operation.
* @param[in] stride_dims The stride dimensions for pooling (default is 1).
* @param[in] dilations The spatial dilations for pooling (default is 1).
* @param[in] padding_dims Padding dimensions before pooling (default is 0).
* @param[in] ceil_mode Whether to use ceiling mode for pooling (default is false).
* @return A shared pointer to the Node representing the padded average pooling operation.
*/
template <std::array<DimSize_t, 1>::size_type DIM>
extern std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
bool ceil_mode = false);
/**
* @brief Creates a padded average pooling operation as a MetaOperator.
......@@ -180,13 +184,17 @@ extern std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &
*
* @param[in] kernel_dims The dimensions of the pooling window.
* @param[in] stride_dims The stride dimensions for pooling (default is 1).
* @param[in] dilations The spatial dilations for pooling (default is 1).
* @param[in] padding_dims Padding dimensions before pooling (default is 0).
* @param[in] ceil_mode Whether to use ceiling mode for pooling (default is false).
* @return A shared pointer to the MetaOperator_Op representing the padded average pooling operation.
*/
template <std::array<DimSize_t, 1>::size_type DIM>
extern std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
bool ceil_mode = false);
// Helper function for average pooling with C-style array for kernel_dims, enabling automatic DIM deduction.
template <DimSize_t DIM>
......@@ -195,8 +203,11 @@ PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
const std::string &name = "",
const std::array<DimSize_t, DIM> &stride_dims =
create_array<DimSize_t, DIM>(1),
const std::array<DimSize_t, DIM> &dilations =
create_array<DimSize_t, DIM>(1),
const std::array<DimSize_t, 2 * DIM> &padding_dims =
create_array<DimSize_t, 2 * DIM>(0));
create_array<DimSize_t, 2 * DIM>(0),
bool ceil_mode = false);
////////////////////////////////////////////////////////////////////////////////
......@@ -208,6 +219,7 @@ PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
* @param[in] kernel_dims The dimensions of the pooling window.
* @param[in] name Optional name for the operation.
* @param[in] stride_dims The stride dimensions for pooling (default is 1).
* @param[in] dilations The spatial dilations for pooling (default is 1).
* @param[in] padding_dims Padding dimensions before pooling (default is 0).
* @param[in] ceil_mode Whether to use ceiling mode for pooling (default is false).
* @return A shared pointer to the Node representing the padded max pooling operation.
......@@ -216,11 +228,12 @@ template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
bool ceil_mode = false) {
auto graph = Sequential({
Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims, ceil_mode)
MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims, dilations, ceil_mode)
});
return MetaOperator(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph, {}, name);
......@@ -233,6 +246,7 @@ inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &
*
* @param[in] kernel_dims The dimensions of the pooling window.
* @param[in] stride_dims The stride dimensions for pooling (default is 1).
* @param[in] dilations The spatial dilations for pooling (default is 1).
* @param[in] padding_dims Padding dimensions before pooling (default is 0).
* @param[in] ceil_mode Whether to use ceiling mode for pooling (default is false).
* @return A shared pointer to the MetaOperator_Op representing the padded max pooling operation.
......@@ -240,11 +254,12 @@ inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<MetaOperator_Op> PaddedMaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
bool ceil_mode = false) {
auto graph = Sequential({
Pad<DIM>(padding_dims, ""),
MaxPooling(kernel_dims, "", stride_dims, ceil_mode)
MaxPooling(kernel_dims, "", stride_dims, dilations, ceil_mode)
});
return std::make_shared<MetaOperator_Op>(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph);
}
......@@ -255,11 +270,23 @@ inline std::shared_ptr<Node> PaddedMaxPooling(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
bool ceil_mode= false) {
return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims, ceil_mode);
return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, dilations, padding_dims, ceil_mode);
}
/**
* @brief Creates an LSTM (Long Short-Term Memory) operation as a MetaOperator.
*
* This function creates an LSTM operation as a MetaOperator for use in graph-based computation.
*
* @param[in] seq_length The length of the input sequence.
* @param[in] name Optional name for the operation.
* @return A shared pointer to the MetaOperator_Op representing the LSTM operation.
*/
std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length,
const std::string &name = "");
/**
* @brief Creates an LSTM (Long Short-Term Memory) operator.
*
......@@ -278,16 +305,6 @@ std::shared_ptr<Node> LSTM(DimSize_t in_channels,
bool noBias = false,
const std::string &name = "");
/**
* @brief Creates an LSTM (Long Short-Term Memory) operation as a MetaOperator.
*
* This function creates an LSTM operation as a MetaOperator for use in graph-based computation.
*
* @param[in] seq_length The length of the input sequence.
* @return A shared pointer to the MetaOperator_Op representing the LSTM operation.
*/
std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length);
std::shared_ptr<MetaOperator_Op> LeakyOp();
std::shared_ptr<Node> Leaky(const int nbTimeSteps,
const float beta,
......
......@@ -118,12 +118,12 @@ public:
*/
Operator(const Operator& op):
std::enable_shared_from_this<Operator>(),
mType(op.mType),
mOperatorType(op.mOperatorType),
mInputsCategory(op.mInputsCategory),
mNbOut(op.mNbOut),
mBackEdges(op.mBackEdges)
{
mType = op.mType;
mImpl = nullptr;
// Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation.
// See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion.
......
......@@ -47,11 +47,19 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
:param stride_dims: The stride of the pooling operation. Specifies how much the kernel moves in each step.
By default, the stride is set to 1 for all dimensions.
:type stride_dims: List[int], optional
:param dilations: The dilation value along each spatial axis of the filter.
:type dilations: List[int], optional
:param ceil_mode: Whether to use ceil or floor when calculating the output dimensions.
:type ceil_mode: bool, optional
)mydelimiter")
.def(py::init<const std::array<DimSize_t, DIM> &,
const std::array<DimSize_t, DIM> &>(),
const std::array<DimSize_t, DIM> &,
const std::array<DimSize_t, DIM> &,
bool>(),
py::arg("kernel_dims"),
py::arg("stride_dims") = create_array<DimSize_t, DIM>(1))
py::arg("stride_dims") = create_array<DimSize_t, DIM>(1),
py::arg("dilations") = create_array<DimSize_t, DIM>(1),
py::arg("ceil_mode") = false)
.def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
.def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
.def_readonly_static("Type", &AvgPooling_Op<DIM>::Type);
......@@ -60,14 +68,19 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
const std::vector<DimSize_t>& stride_dims) {
const std::vector<DimSize_t>& stride_dims,
const std::vector<DimSize_t>& dilations,
bool ceil_mode) {
AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
AIDGE_ASSERT(dilations.size() == DIM, "dilations size [{}] does not match DIM [{}]", dilations.size(), DIM);
return AvgPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()));
return AvgPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilations.begin()), ceil_mode);
}, py::arg("kernel_dims"),
py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM, 1),
py::arg("dilations") = std::vector<DimSize_t>(DIM, 1),
py::arg("ceil_mode") = false,
R"mydelimiter(
Initialize a node containing an AvgPooling operator.
......@@ -75,6 +88,10 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
:param kernel_dims: Size of the kernel applied during pooling.
:type kernel_dims: List[int]
:param dilations: The dilation value along each spatial axis of the filter.
:type dilations: List[int]
:param ceil_mode: Whether to use ceil or floor when calculating the output dimensions.
:type ceil_mode: bool
:param name: Name of the operator node (optional).
:type name: str
:param stride_dims: Stride dimensions for the pooling operation.
......
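To show the extended Python signature in context, here is a minimal sketch (not part of the diff; it assumes an aidge_core build that includes this change, and the node name is arbitrary):

import aidge_core

# The two new keyword arguments introduced by this change
pool = aidge_core.AvgPooling2D(
    kernel_dims=[2, 2],
    name="pool0",
    stride_dims=[2, 2],
    dilations=[1, 1],   # spacing between kernel elements
    ceil_mode=False,    # floor-based output size calculation
)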
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Equal.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Equal(py::module& m) {
py::class_<Equal_Op, std::shared_ptr<Equal_Op>, OperatorTensor>(m, "Equal_Op", py::multiple_inheritance(),
R"mydelimiter( Initialize an Equal operator.)mydelimiter")
.def(py::init<>())
.def_static("get_inputs_name", &Equal_Op::getInputsName)
.def_static("get_outputs_name", &Equal_Op::getOutputsName);
declare_registrable<Equal_Op>(m, "EqualOp");
m.def("Equal", &Equal, py::arg("name") = "",
R"mydelimiter(
Initialize a node containing an Equal operator.
:param name: Name of the node.
)mydelimiter");
}
} // namespace Aidge
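A short usage sketch of the new Equal binding (assuming a module built from this revision):

import aidge_core

# Static accessors bound via def_static on Equal_Op
print(aidge_core.Equal_Op.get_inputs_name())   # ['data_input_1', 'data_input_2']
print(aidge_core.Equal_Op.get_outputs_name())  # ['data_output']

# Create a graph node wrapping an Equal operator
eq_node = aidge_core.Equal(name="equal0")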
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <memory>
#include <pybind11/pybind11.h>
#include <string>
#include <vector>
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Flatten.hpp"
#include "aidge/utils/Attributes.hpp"
#include "aidge/utils/Types.h"
namespace py = pybind11;
namespace Aidge {
void init_Flatten(py::module &m) {
py::class_<Flatten_Op, std::shared_ptr<Flatten_Op>, OperatorTensor>(
m, "FlattenOp", py::multiple_inheritance(),
R"mydelimiter(
Initialize a Flatten operator.
:param axis: Input dimensions up to axis (exclusive) are flattened into the outer dimension of the output;
must be in [-r, r-1] with r = input_tensor.nbDims().
:type axis: int
)mydelimiter")
.def("get_inputs_name", &Flatten_Op::getInputsName)
.def("get_outputs_name", &Flatten_Op::getOutputsName)
.def("axis", &Flatten_Op::axis);
// Here we bind the constructor of the Flatten Node. We add an argument
// for each attribute of the operator (in here we only have 'axis') and
// the last argument is the node's name.
m.def("Flatten", &Flatten, py::arg("axis") = 1,
py::arg("name") = "",
R"mydelimiter(
Initialize a node containing a flatten operator.
:param axis: Input dimensions up to axis (exclusive) are flattened into the outer dimension of the output;
must be in [-r, r-1] with r = input_tensor.nbDims().
:type axis: int
:param name: Name of the node.
)mydelimiter");
}
} // namespace Aidge
......@@ -37,14 +37,18 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
:type kernel_dims: List[int]
:param stride_dims: The stride (step size) to move the kernel over the input.
:type stride_dims: List[int]
:param dilations: The dilation value along each spatial axis of the filter.
:type dilations: List[int]
:param ceil_mode: Whether to use ceil or floor when calculating the output dimensions.
:type ceil_mode: bool
)mydelimiter")
.def(py::init<const std::array<DimSize_t, DIM> &,
const std::array<DimSize_t, DIM> &,
const std::array<DimSize_t, DIM> &,
bool>(),
py::arg("kernel_dims"),
py::arg("stride_dims"),
py::arg("dilations"),
py::arg("ceil_mode"))
.def_static("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
.def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
......@@ -55,14 +59,17 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
const std::vector<DimSize_t> &stride_dims,
const std::vector<DimSize_t> &dilations,
bool ceil_mode) {
AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
AIDGE_ASSERT(dilations.size() == DIM, "dilations size [{}] does not match DIM [{}]", dilations.size(), DIM);
return MaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), ceil_mode);
return MaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilations.begin()), ceil_mode);
}, py::arg("kernel_dims"),
py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM, 1),
py::arg("dilations") = std::vector<DimSize_t>(DIM, 1),
py::arg("ceil_mode") = false,
R"mydelimiter(
Initialize a node containing a MaxPooling operator.
......@@ -75,6 +82,8 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
:type kernel_dims: List[int]
:param stride_dims: The stride (step size) to move the kernel over the input.
:type stride_dims: List[int]
:param dilations: The dilation value along each spatial axis of the filter.
:type dilations: List[int]
:param ceil_mode: Whether to use ceil or floor when calculating the output dimensions.
:type ceil_mode: bool
:param name: Name of the node (optional).
......
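A companion sketch for MaxPooling, illustrating how ceil_mode interacts with the output-size formula documented above (same assumptions: an aidge_core build containing this change; names are arbitrary):

import aidge_core

# Input 32x32, kernel 3x3, stride 2, dilation 1:
#   (32 - 1*(3 - 1) - 1) / 2 + 1 = 15.5
#   floor -> 15x15 output, ceil -> 16x16 output
pool_floor = aidge_core.MaxPooling2D([3, 3], name="p_floor",
                                     stride_dims=[2, 2],
                                     dilations=[1, 1],
                                     ceil_mode=False)
pool_ceil = aidge_core.MaxPooling2D([3, 3], name="p_ceil",
                                    stride_dims=[2, 2],
                                    dilations=[1, 1],
                                    ceil_mode=True)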
......@@ -50,9 +50,11 @@ void init_Conv(py::module&);
void init_ConvDepthWise(py::module&);
void init_DepthToSpace(py::module&);
void init_Div(py::module&);
void init_Equal(py::module&);
void init_Erf(py::module&);
void init_Expand(py::module&);
void init_FC(py::module&);
void init_Flatten(py::module&);
void init_Gather(py::module&);
void init_GenericOperator(py::module&);
void init_GlobalAveragePooling(py::module&);
......@@ -149,9 +151,11 @@ void init_Aidge(py::module& m) {
init_ConstantOfShape(m);
init_DepthToSpace(m);
init_Div(m);
init_Equal(m);
init_Erf(m);
init_Expand(m);
init_FC(m);
init_Flatten(m);
init_Gather(m);
init_GenericOperator(m);
init_GlobalAveragePooling(m);
......
......@@ -74,13 +74,6 @@ Aidge::ImplSpec Aidge::OperatorImpl::getRequiredSpec() const {
requiredSpec.outputs.push_back({opTensor.getOutput(i)->dataType(), opTensor.getOutput(i)->dataFormat(), dims});
}
// Attributes
if (!mOp.isAtomic()) {
requiredSpec.attrs.setAttr("type:!", mOp.type()); // :! mandatory qualifier
}
else {
requiredSpec.attrs.setAttr("type", mOp.type());
}
const auto& inhAttrs = mOp.inheritedAttributes();
if (inhAttrs) {
......
......@@ -266,7 +266,12 @@ void Aidge::GraphView::logOutputs(const std::string& dirName) const {
AIDGE_THROW_OR_ABORT(std::runtime_error,
"Could not create graph view log file: {}", inputPath);
}
fmt::print(fp.get(), "{}\n", nodePtr->getOperator()->getRawOutput(outIdx)->toString().c_str());
auto oTensor = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator())->getOutput(outIdx);
std::shared_ptr<Tensor> fallback;
const Tensor& localTensor = oTensor->refFrom(fallback, "cpu");
fmt::print(fp.get(), "{}\n", localTensor.toString().c_str());
}
}
}
......@@ -1109,6 +1114,28 @@ void Aidge::GraphView::insertParent(NodePtr childNode,
add(newParentNode);
}
/**
* Inputs conditions:
* | old \ new | 1 node, 1 input | >1 node, 1 input | 1 node, >1 inputs | >1 node, >1 inputs |
* | ------------------- | ---------------- | ----------------- | ------------------ | ------------------ |
* | 1 node, 1 input | trivial | trivial | broadcast | broadcast |
* | >1 node, 1 input | trivial | trivial | broadcast | broadcast |
* | 1 node, >1 inputs | (take first) | (take first) | same order | X |
* | >1 node, >1 inputs | X | X | X | X |
*
* Outputs conditions:
* | old \ new | 1 node, 1 output | >1 node, 1 output | 1 node, >1 outputs | >1 node, >1 outputs |
* | ------------------- | ---------------- | ----------------- | ------------------ | ------------------- |
* | 1 node, 1 output | trivial | trivial | take first | X |
* | >1 node, 1 output | trivial | trivial | take first | X |
* | 1 node, >1 outputs | (take first) | (take first) | same order | X |
* | >1 node, >1 outputs | X | X | X | X |
*
* Only the X cases cannot be resolved deterministically from sets of nodes;
* these cases are therefore forbidden for the set-based `replace()` interface.
* The remaining cases are handled by the GraphView-based `replace()` interface;
* if a case is not supported, the function returns false.
*/
bool Aidge::GraphView::replace(const std::set<Aidge::NodePtr>& oldNodes, const std::set<Aidge::NodePtr>& newNodes) {
// (1) create GraphViews from both sets of Nodes
auto oldG = std::make_shared<GraphView>("oldG");
......@@ -1116,6 +1143,14 @@ bool Aidge::GraphView::replace(const std::set<Aidge::NodePtr>& oldNodes, const s
auto newG = std::make_shared<GraphView>("newG");
newG->add(newNodes, false);
AIDGE_ASSERT(!((oldNodes.size() > 1 && oldG->getOrderedInputs().size() > 1) || (newNodes.size() > 1 && newG->getOrderedInputs().size() > 1 && oldG->getOrderedInputs().size() > 1)),
"GraphView::replace(): don't know how to match {} input(s) from {} node(s) (old set) to {} input(s) from {} node(s) (new set). Use GraphView instead of set in this case.",
oldG->getOrderedInputs().size(), oldNodes.size(), newG->getOrderedInputs().size(), newNodes.size());
AIDGE_ASSERT(!((oldNodes.size() > 1 && oldG->getOrderedOutputs().size() > 1) || (newNodes.size() > 1 && newG->getOrderedOutputs().size() > 1)),
"GraphView::replace(): don't know how to match {} output(s) from {} node(s) (old set) to {} output(s) from {} node(s) (new set). Use GraphView instead of set in this case.",
oldG->getOrderedOutputs().size(), oldNodes.size(), newG->getOrderedOutputs().size(), newNodes.size());
return GraphView::replace(oldG, newG);
}
......