Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • eclipse/aidge/aidge_core
  • hrouis/aidge_core
  • mszczep/aidge_core
  • oantoni/aidge_core
  • cguillon/aidge_core
  • jeromeh/aidge_core
  • axelfarr/aidge_core
  • cmoineau/aidge_core
  • noamzerah/aidge_core
  • lrakotoarivony/aidge_core
  • silvanosky/aidge_core
  • maab05/aidge_core
  • mick94/aidge_core
  • lucaslopez/aidge_core_ll
  • wboussella/aidge_core
  • farnez/aidge_core
  • mnewson/aidge_core
17 results
Show changes
Commits on Source (52)
Showing
with 394 additions and 167 deletions
"""
Copyright (c) 2023 CEA-List
This program and the accompanying materials are made available under the
terms of the Eclipse Public License 2.0 which is available at
http://www.eclipse.org/legal/epl-2.0.
SPDX-License-Identifier: EPL-2.0
"""
import unittest
import aidge_core
import inspect
import re
def is_snake_case(s: str) -> bool:
    """Return True if ``s`` is snake_case: lowercase alphabetic words separated
    by single underscores (digits are deliberately not accepted).
    """
    # re.fullmatch already anchors at both ends of the string, so the
    # previous '^'/'$' anchors were redundant.
    return bool(re.fullmatch(r'[a-z]+(_[a-z]+)*', s))
class test_naming(unittest.TestCase):
    """Check that operator attribute names exposed to Python are snake_case.

    Iterates over every ``Operator`` subclass exported by ``aidge_core`` that
    provides ``attributes_name()`` and asserts each reported attribute name is
    snake_case, keeping the Python-facing API consistent.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_attributes_name(self):
        for name, cls in inspect.getmembers(aidge_core):
            # Only concrete Operator subclasses that expose attributes_name().
            if (inspect.isclass(cls)
                    and issubclass(cls, aidge_core.Operator)
                    and cls is not aidge_core.Operator
                    and hasattr(cls, "attributes_name")):
                # Debug prints removed; subTest reports every offending
                # attribute instead of stopping at the first failure.
                for attr_name in cls.attributes_name():
                    with self.subTest(operator=name, attribute=attr_name):
                        self.assertTrue(
                            is_snake_case(attr_name),
                            f"Operator {name} has an attribute {attr_name} that is not in snake_case.")
# Run the suite when this file is executed directly (e.g. `python this_file.py`).
if __name__ == '__main__':
    unittest.main()
......@@ -462,12 +462,33 @@ public:
* data is copy-transposed.
*/
void setDataFormat(const DataFormat df, bool copyTrans = true) {
if (mImpl && copyTrans && (dataFormat() != df) && df != DataFormat::Default && dataFormat() != DataFormat::Default) {
copyTranspose(*this, getDataFormatTranspose(dataFormat(), df));
if (!copyTrans || df == dataFormat()) {
mDataFormat = df;
return;
}
const auto transpose = getDataFormatTranspose(dataFormat(), df);
if (mImpl) {
copyTranspose(*this, transpose);
} else {
std::vector<DimSize_t> newDims;
for (std::size_t i = 0; i < dims().size(); ++i) {
newDims.push_back(dims()[transpose[i]]);
}
std::vector<std::size_t> newStrides(dims().size(), 1);
for (size_t i = 0; i < dims().size(); ++i) {
for (size_t j = i + 1; j < dims().size(); ++j) {
newStrides[i] *= newDims[j];
}
}
mDims = std::move(newDims);
mStrides = std::move(newStrides);
}
mDataFormat = df;
}
/**
* @brief Get the Impl object
* @return constexpr const std::shared_ptr<TensorImpl>&
......
......@@ -41,20 +41,28 @@ enum class ArgMaxAttr {
*/
SelectLastIndex
};
} // namespace Aidge
/**
* @brief Provides string representations for the ArgMaxAttr enumeration.
*/
namespace {
template <>
const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
}
namespace Aidge {
/**
* @brief Description of the ArgMax operation on a Tensor.
*
* The ArgMax operation identifies the index of the maximum value along a specified axis of a Tensor.
*
* The output of the ArgMax operation can retain the dimensionality of the input Tensor or reduce
* it by removing the specified axis. Additionally, in cases where multiple maximum values exist,
* The output of the ArgMax operation can retain the dimensionality of the input Tensor or reduce
* it by removing the specified axis. Additionally, in cases where multiple maximum values exist,
* the user can specify whether to select the first or the last occurrence of the maximum value.
*
* Attributes:
* - `Axis`: The axis along which the ArgMax operation is performed. For example, if the axis is `0`,
* the operation is applied along rows; if it is `1`, it is applied along columns.
* - `KeepDims`: A boolean indicating whether to retain the reduced axis as a dimension of size `1`
* - `KeepDims`: A boolean indicating whether to retain the reduced axis as a dimension of size `1`
* (`true`) or to completely remove it (`false`).
* - `SelectLastIndex`: A boolean indicating how to handle ties (multiple maximum values along the axis):
* - If `true`, the last index of the maximum value is selected.
......@@ -177,6 +185,14 @@ public:
static const std::vector<std::string> getOutputsName() {
return {"data_output"};
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the ArgMaxAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::ArgMaxAttr>::data;
}
};
/**
......@@ -198,12 +214,6 @@ std::shared_ptr<Node> ArgMax(std::int32_t axis = 0,
} // namespace Aidge
/**
* @brief Provides string representations for the ArgMaxAttr enumeration.
*/
namespace {
template <>
const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
}
#endif /* AIDGE_CORE_OPERATOR_ARGMAX_H_ */
......@@ -49,13 +49,23 @@ enum class AvgPoolingAttr {
*/
CeilMode
};
} // namespace Aidge
namespace {
/**
* @brief String representation of the AvgPooling attributes.
*/
template <>
const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
"stride_dims", "kernel_dims", "dilations", "ceil_mode"
};
}
namespace Aidge {
/**
* @brief Class representing an Average Pooling operation.
*
* The AvgPooling operation computes the average value within sliding windows of specified size
* (kernel dimensions) over the input tensor. The stride dimensions determine how the window
* moves across the input. The dilation parameter allows spacing between kernel elements, and
* moves across the input. The dilation parameter allows spacing between kernel elements, and
* `ceil_mode` determines whether to use ceiling instead of floor when computing the output shape.
* This operation is commonly used in neural networks to reduce spatial dimensions while preserving features.
*
......@@ -223,6 +233,14 @@ public:
static const std::vector<std::string> getOutputsName() {
return {"data_output"};
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the AvgPoolingAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::AvgPoolingAttr>::data;
}
};
/**
......@@ -272,12 +290,4 @@ extern template class Aidge::AvgPooling_Op<2>;
extern template class Aidge::AvgPooling_Op<3>;
extern template class Aidge::AvgPooling_Op<4>;
namespace {
/**
* @brief String representation of the AvgPooling attributes.
*/
template <>
const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = { "stride_dims", "kernel_dims", "dilations", "ceil_mode" };
}
#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
......@@ -50,7 +50,12 @@ enum class BatchNormAttr {
*/
TrainingMode
};
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum", "training_mode" };
}
namespace Aidge {
/**
* @class BatchNorm_Op
* @brief Implements the Batch Normalization (BN) operation, a technique used to normalize the inputs of a layer.
......@@ -152,6 +157,14 @@ public:
static const std::vector<std::string> getOutputsName() {
return {"data_output"};
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the BatchNormAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::BatchNormAttr>::data;
}
};
extern template class Aidge::BatchNorm_Op<2>;
......@@ -170,9 +183,4 @@ extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t
extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const bool, const std::string&);
extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const bool, const std::string&);
namespace {
template <>
const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum", "training_mode" };
}
#endif /* AIDGE_CORE_OPERATOR_BATCHNORM_H_ */
......@@ -28,11 +28,19 @@ namespace Aidge {
enum class BitShiftAttr {
/**
* @brief Direction of the bitwise shift, either left or right
* (exposed to bindings as the string "bit_shift_direction").
* NOTE(review): enumerator casing (BitShiftdirection) is inconsistent with
* the PascalCase used by other attribute enums; renaming would break
* existing callers, so it is only flagged here.
*/
BitShiftdirection
};
}
namespace {
/**
* @brief Specialization of `EnumStrings` for `BitShiftAttr`.
*/
template <>
const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = {"bit_shift_direction"};
}
namespace Aidge {
/**
* @class BitShift_Op
* @brief A tensor operator to perform element-wise bitwise shift operations on tensors.
......@@ -41,7 +49,7 @@ enum class BitShiftAttr {
* - **InputTensor**: The tensor whose elements will be shifted.
* - **ShiftAmount**: The tensor specifying the shift amount for each element.
*
* The shift is applied in the direction specified by the attribute `BitShiftdirection`,
* The shift is applied in the direction specified by the attribute `BitShiftdirection`,
* which can either be `left` or `right`.
*
* @see OperatorTensor
......@@ -147,6 +155,14 @@ public:
static const std::vector<std::string> getOutputsName() {
return { "OutputTensor" };
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the BitShiftAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::BitShiftAttr>::data;
}
};
/**
......@@ -161,12 +177,6 @@ inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direc
} // namespace Aidge
namespace {
/**
* @brief Specialization of `EnumStrings` for `BitShiftAttr`.
*/
template <>
const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = { "BitShiftdirection" };
}
#endif /* AIDGE_CORE_OPERATOR_BITSHIFT_H_ */
......@@ -40,7 +40,12 @@ enum class CastAttr {
*/
TargetType
};
} // namespace Aidge
namespace {
template <>
const char* const EnumStrings<Aidge::CastAttr>::data[] = { "target_type" };
}
namespace Aidge {
/**
* @brief Description of the Cast operation to convert a tensor's data type.
*
......@@ -137,6 +142,14 @@ public:
static const std::vector<std::string> getOutputsName() {
return {"data_output"};
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the CastAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::CastAttr>::data;
}
};
/**
......@@ -149,9 +162,4 @@ std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name =
} // namespace Aidge
namespace {
template <>
const char* const EnumStrings<Aidge::CastAttr>::data[] = { "target_type" };
}
#endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
......@@ -33,14 +33,23 @@ enum class ClipAttr {
Min, /**< Minimum value for clipping. */
Max /**< Maximum value for clipping. */
};
}
namespace {
/**
* @brief Specialization of EnumStrings for ClipAttr.
*/
template <>
const char* const EnumStrings<Aidge::ClipAttr>::data[] = { "min", "max" };
}
namespace Aidge {
/**
* @brief Description of the Clip operation to limit tensor values within a specified range.
*
* The Clip operator ensures tensor elements are within the range `[min, max]`.
* - Values less than `min` are set to `min`.
* - Values greater than `max` are set to `max`.
*
*
* The input and output Tensors have the same dimensions.
*
* ### Attributes:
......@@ -148,6 +157,14 @@ public:
static const std::vector<std::string> getOutputsName() {
return { "data_output" };
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the ClipAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::ClipAttr>::data;
}
};
/**
......@@ -165,12 +182,4 @@ std::shared_ptr<Aidge::Node> Clip(
} // namespace Aidge
namespace {
/**
* @brief Specialization of EnumStrings for ClipAttr.
*/
template <>
const char* const EnumStrings<Aidge::ClipAttr>::data[] = { "min", "max" };
}
#endif /* AIDGE_CORE_OPERATOR_CLIP_H_ */
......@@ -56,9 +56,19 @@ enum class ConcatAttr {
*
* The specified axis determines the direction of concatenating.
*/
Axis
Axis
};
} // namespace Aidge
namespace {
/**
* @brief Specialization of EnumStrings for ConcatAttr.
*/
template <>
const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
"axis"
};
}
namespace Aidge {
/**
* @class Concat_Op
* @brief Implements the Concat operation to concatenate multiple tensors along a specified axis.
......@@ -107,7 +117,7 @@ public:
* @param[in] nbIn Number of input tensors.
* @param[in] axis Axis along which concatenation is performed.
*/
Concat_Op(const IOIndex_t nbIn, const std::int32_t axis);
Concat_Op(const IOIndex_t nbIn, const std::int32_t axis = 0);
/**
* @brief Copy-constructor. Copies the operator attributes and its output tensors,
......@@ -169,6 +179,14 @@ public:
static const std::vector<std::string> getOutputsName() {
return { "data_output" };
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the ConcatAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::ConcatAttr>::data;
}
};
/**
......@@ -182,14 +200,4 @@ std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0,
} // namespace Aidge
namespace {
/**
* @brief Specialization of EnumStrings for ConcatAttr.
*/
template <>
const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
"axis"
};
}
#endif /* AIDGE_CORE_OPERATOR_CONCAT_H_ */
......@@ -39,6 +39,13 @@ enum class ConstantOfShapeAttr {
*/
Value,
};
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"value"};
} //namespace
namespace Aidge {
/**
* @brief This operator's purpose is to generate a tensor of shape given via
......@@ -63,7 +70,7 @@ private:
public:
/**
* @brief constructor for ConstantOfShape_op
* @param[in] value : a scalar tensor which holds the value that will
* @param[in] value : a scalar tensor which holds the value that will
* fill the output tensor
*/
ConstantOfShape_Op(const Tensor &value = Tensor(0.f))
......@@ -116,6 +123,14 @@ public:
static const std::vector<std::string> getOutputsName() {
return {"constant_of_shape"};
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the ConstantOfShapeAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::ConstantOfShapeAttr>::data;
}
};
// helper with C-style array instead of std::array for kernel_dims to allow
......@@ -127,10 +142,5 @@ inline std::shared_ptr<Node> ConstantOfShape(const Tensor value = Tensor(0.f),
}
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"Value"};
}
#endif // AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
......@@ -40,15 +40,24 @@ enum class ConvAttr {
DilationDims, // The dilation dimensions
KernelDims // The kernel dimensions
};
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
"stride_dims",
"dilation_dims",
"kernel_dims"
};
}
namespace Aidge {
/**
* @class Conv_Op
* @brief Convolution operator for performing a multi-dimensional convolution.
*
* The Conv_Op class implements a convolution operator for tensors with customizable
* kernel dimensions, stride, and dilation values. The operator performs a convolution
*
* The Conv_Op class implements a convolution operator for tensors with customizable
* kernel dimensions, stride, and dilation values. The operator performs a convolution
* operation on the input tensor and produces an output tensor.
*
*
* ### Attributes:
* - `strideDims`: Stride for each dimension of the input.
* - `dilationDims`: Dilation for each dimension of the input.
......@@ -63,7 +72,7 @@ enum class ConvAttr {
* - Stride dimensions: {1, 1} (stride of 1 in both height and width)
* - Dilation dimensions: {1, 1} (no dilation)
* - Padding: None
* - Output shape:
* - Output shape:
* (1, 64, (32−3+2×0)/1+1, (32−3+2×0)/1+1) = (1, 64, 30, 30)
*
* @see OperatorTensor
......@@ -163,6 +172,11 @@ public:
if (!getInput(1)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of input channel imposed.");
}
// check format
if(getInput(1)->dataFormat()==Aidge::DataFormat::NHWC)
return getInput(1)->template dims<DIM+2>()[DIM+1];
// default format is NCHW
return getInput(1)->template dims<DIM+2>()[1];
}
......@@ -175,6 +189,7 @@ public:
if (!getInput(1)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of output channel imposed.");
}
// first weight dimension for both NCHW (Cout,Cin,H,W) and NHWC (Cout,H,W,Cin) data format
return getInput(1)->template dims<DIM+2>()[0];
}
......@@ -209,6 +224,14 @@ public:
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the ConvAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::ConvAttr>::data;
}
};
/**
......@@ -260,13 +283,5 @@ inline std::shared_ptr<Node> Conv(
extern template class Aidge::Conv_Op<1>;
extern template class Aidge::Conv_Op<2>;
namespace {
template <>
const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
"stride_dims",
"dilation_dims",
"kernel_dims"
};
}
#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
......@@ -34,15 +34,24 @@ enum class ConvDepthWiseAttr {
DilationDims, // The dilation dimensions for the convolution.
KernelDims // The kernel dimensions for the convolution.
};
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {
"stride_dims",
"dilation_dims",
"kernel_dims"
};
}
namespace Aidge {
/**
* @class ConvDepthWise_Op
* @brief Depthwise Convolution operator for performing a multi-dimensional depthwise convolution.
*
* The ConvDepthWise_Op class implements a depthwise convolution operator for tensors with customizable
* kernel dimensions, stride, and dilation values. It performs a depthwise convolution operation on the
*
* The ConvDepthWise_Op class implements a depthwise convolution operator for tensors with customizable
* kernel dimensions, stride, and dilation values. It performs a depthwise convolution operation on the
* input tensor and produces an output tensor.
*
*
* ### Attributes:
* - strideDims: Stride for each dimension of the input.
* - dilationDims: Dilation for each dimension of the input.
......@@ -189,6 +198,14 @@ public:
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the ConvDepthWiseAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::ConvDepthWiseAttr>::data;
}
};
/**
......@@ -237,13 +254,4 @@ inline std::shared_ptr<Node> ConvDepthWise(
extern template class Aidge::ConvDepthWise_Op<1>;
extern template class Aidge::ConvDepthWise_Op<2>;
namespace {
template <>
const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {
"stride_dims",
"dilation_dims",
"kernel_dims"
};
}
#endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
......@@ -51,7 +51,12 @@ enum class DepthToSpaceAttr {
BlockSize, /**< The block size for rearranging depth to spatial dimensions. */
Mode /**< The mode for depth-to-space transformation. */
};
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = { "block_size", "mode" };
}
namespace Aidge{
/**
* @class DepthToSpace_Op
* @brief Represents the DepthToSpace operation to rearrange data from depth to spatial dimensions.
......@@ -164,6 +169,14 @@ public:
static const std::vector<std::string> getOutputsName() {
return {"data_output"};
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the DepthToSpaceAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::DepthToSpaceAttr>::data;
}
};
/**
......@@ -179,9 +192,5 @@ std::shared_ptr<Node> DepthToSpace(const std::uint32_t blockSize,
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = { "block_size", "mode" };
}
#endif //AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
......@@ -54,7 +54,12 @@ enum class FlattenAttr {
*/
Axis
};
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::FlattenAttr>::data[] = { "axis" };
}
namespace Aidge {
/**
* @brief Description the Flatten operation to reshape a tensor into a 2D matrix.
*
......@@ -155,6 +160,14 @@ public:
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the FlattenAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::FlattenAttr>::data;
}
};
/**
......@@ -171,9 +184,5 @@ std::shared_ptr<Node> Flatten(std::int64_t axis = 1,
const std::string &name = "");
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::FlattenAttr>::data[] = { "axis" };
}
#endif /* AIDGE_CORE_OPERATOR_FLATTEN_H_ */
......@@ -64,7 +64,17 @@ enum class FoldAttr {
*/
KernelDims
};
} // namespace Aidge
namespace {
template <>
const char* const EnumStrings<Aidge::FoldAttr>::data[] = {
"output_dims",
"stride_dims",
"dilation_dims",
"kernel_dims"
};
}
namespace Aidge {
/**
* @class Fold_Op
* @brief Implements the Fold operation to combine or transform tensor dimensions.
......@@ -82,7 +92,7 @@ enum class FoldAttr {
* output height (out_h) = floor((input height - kernel height) / stride height) + 1
* output width (out_w) = floor((input width - kernel width) / stride width) + 1
* - The exact output shape will depend on these calculations for each spatial dimension (height, width) and the number of output channels.
*
*
* @example:
* - Input shape: (1, 16, 32, 32) // Batch size: 1, Channels: 16, Height: 32, Width: 32
* - Kernel dimensions: (3, 3) // 3x3 kernel
......@@ -210,11 +220,19 @@ public:
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the FoldAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::FoldAttr>::data;
}
};
/**
* @brief Create a Fold operation node.
*
*
* This function creates a Fold operation node that applies a fold transformation
* to a tensor based on the specified attributes.
*
......@@ -247,14 +265,4 @@ extern template class Aidge::Fold_Op<2>;
} // namespace Aidge
namespace {
template <>
const char* const EnumStrings<Aidge::FoldAttr>::data[] = {
"output_dims",
"stride_dims",
"dilation_dims",
"kernel_dims"
};
}
#endif /* AIDGE_CORE_OPERATOR_FOLD_H_ */
......@@ -61,6 +61,12 @@ enum class GatherAttr {
GatheredShape
};
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"axis", "indices", "gathered_shape"};
}
namespace Aidge {
/**
* @brief Description for the Gather operation on an input tensor.
*
......@@ -184,6 +190,14 @@ public:
static const std::vector<std::string> getOutputsName() {
return {"data_output"};
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the GatherAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::GatherAttr>::data;
}
};
/**
......@@ -205,9 +219,5 @@ std::shared_ptr<Node> Gather(std::int8_t axis = 0,
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"axis", "indices", "gathered_shape"};
}
#endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
......@@ -29,6 +29,16 @@ enum class GridSampleAttr {
PaddingMode, // Specifies how to handle out-of-boundary grid values.
AlignCorners // Determines whether grid values are normalized to align with the image corners.
};
} // namespace Aidge
namespace {
template <>
const char* const EnumStrings<Aidge::GridSampleAttr>::data[] = {
"mode",
"padding_mode",
"align_corners"
};
}
namespace Aidge {
/**
* @class GridSample_Op
......@@ -170,6 +180,14 @@ public:
static const std::vector<std::string> getOutputsName() {
return {"data_output"};
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the GridSampleAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::GridSampleAttr>::data;
}
};
/**
......@@ -189,13 +207,4 @@ std::shared_ptr<Node> GridSample(
} // namespace Aidge
namespace {
template <>
const char* const EnumStrings<Aidge::GridSampleAttr>::data[] = {
"mode",
"padding_mode",
"align_corners"
};
}
#endif /* AIDGE_CORE_OPERATOR_GRIDSAMPLE_H_ */
......@@ -31,6 +31,15 @@ enum class HeavisideAttr {
*/
Value
};
} // namespace Aidge
namespace {
/**
* @brief Define string representations for Heaviside attributes.
*/
template <>
const char *const EnumStrings<Aidge::HeavisideAttr>::data[] = {"value"};
}
namespace Aidge {
/**
* @class Heaviside_Op
......@@ -110,6 +119,14 @@ public:
return {"output"};
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the HeavisideAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::HeavisideAttr>::data;
}
/**
* @brief Get the attributes of the operator.
*/
......@@ -141,12 +158,5 @@ std::shared_ptr<Node> Heaviside(float value, const std::string &name = "");
} // namespace Aidge
namespace {
/**
* @brief Define string representations for Heaviside attributes.
*/
template <>
const char *const EnumStrings<Aidge::HeavisideAttr>::data[] = {"value"};
}
#endif /* AIDGE_CORE_OPERATOR_HEAVISIDE_H_ */
......@@ -30,20 +30,28 @@ enum class LRNAttr {
Bias, ///< Constant bias added to the normalization term.
Size ///< Number of channels to normalize over.
};
} // namespace Aidge
namespace {
/**
* @brief EnumStrings specialization for LRNAttr.
* NOTE(review): this array carries a trailing nullptr terminator while every
* other EnumStrings<...>::data[] in this changeset (ArgMax, AvgPooling, Conv,
* ...) does not — confirm whether consumers rely on null-termination or on
* the enum count, and make all specializations consistent either way.
*/
template <>
const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size", nullptr};
}
namespace Aidge {
/**
* @brief Description of a Local Response Normalization (LRN) operation on an input Tensor.
*
* LRN is a normalization technique that applies across channels in a local region
* to enhance generalization and promote competition between neurons. It is commonly
* LRN is a normalization technique that applies across channels in a local region
* to enhance generalization and promote competition between neurons. It is commonly
* used in Convolutional Neural Networks (CNNs).
*
* For each element x in the input Tensor, the function is defined as:
* `f(x) = x / (bias + alpha * sum(x_i^2))^beta`, where:
* - `x` is the current element being normalized.
* - The summation `sum(x_i^2)` is taken over a local region of `size` channels
* - The summation `sum(x_i^2)` is taken over a local region of `size` channels
* surrounding `x` (both before and after the current channel, if available).
* - `bias`, `alpha`, and `beta` are scalar hyperparameters controlling the
* - `bias`, `alpha`, and `beta` are scalar hyperparameters controlling the
* normalization behavior.
*
* Parameters:
......@@ -52,7 +60,7 @@ enum class LRNAttr {
* - `alpha`: A scaling factor for the squared sum of elements in the local region.
* - `beta`: The exponent applied to the normalization term.
*
* The input and output Tensors have the same shape. If the input Tensor has shape `(N, C, H, W)`,
* The input and output Tensors have the same shape. If the input Tensor has shape `(N, C, H, W)`,
* the output Tensor will also have shape `(N, C, H, W)`.
*
* @see OperatorTensor
......@@ -158,6 +166,14 @@ public:
static const std::vector<std::string> getOutputsName() {
return {"data_output"};
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the LRNAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::LRNAttr>::data;
}
};
/**
......@@ -171,12 +187,4 @@ std::shared_ptr<Node> LRN(std::int32_t size, const std::string& name = "");
} // namespace Aidge
namespace {
/**
* @brief EnumStrings specialization for LRNAttr.
*/
template <>
const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size"};
}
#endif /* AIDGE_CORE_OPERATOR_LRN_H_ */
......@@ -30,7 +30,13 @@ enum class LeakyReLUAttr {
*/
NegativeSlope
};
} // namespace Aidge
namespace {
template <>
const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
= {"negative_slope"};
}
namespace Aidge{
/**
* @class LeakyReLU_Op
* @brief Implements the LeakyReLU activation function.
......@@ -77,7 +83,7 @@ public:
/**
* @brief Copy-constructor.
* @param[in] op LeakyReLU_Op to copy.
* @details Copies the operator attributes and its output tensor(s), but not its input tensors.
* @details Copies the operator attributes and its output tensor(s), but not its input tensors.
* The new operator has no associated input.
*/
LeakyReLU_Op(const LeakyReLU_Op& op);
......@@ -115,6 +121,14 @@ public:
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
/**
* @brief Retrieves the names of the attributes for the operator.
* @return Pointer to the static array of C-string attribute names, in the
* order of the LeakyReLUAttr enumeration (not a std::vector).
*/
static const char* const* attributesName(){
return EnumStrings<Aidge::LeakyReLUAttr>::data;
}
};
/**
......@@ -127,10 +141,4 @@ public:
std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "");
}
namespace {
template <>
const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
= {"negative_slope"};
}
#endif /* AIDGE_CORE_OPERATOR_LEAKYRELU_H_ */