From 41ede6260005859c49a8780ffef00a6bb1e1fbe4 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 4 Jan 2024 11:13:11 +0100
Subject: [PATCH] Remove unnecessary std::array (minor, style)

---
 include/aidge/operator/BatchNorm.hpp        | 10 +++++-----
 include/aidge/operator/Conv.hpp             |  3 +--
 include/aidge/operator/ConvDepthWise.hpp    |  2 +-
 include/aidge/operator/FC.hpp               |  6 +++---
 include/aidge/operator/MatMul.hpp           |  2 +-
 include/aidge/operator/MetaOperatorDefs.hpp |  4 ++--
 6 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 076739198..055c1b308 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -87,7 +87,7 @@ public:
                 if(getInput(i)->size() != nbFeatures) {
                     // /!\ Input size should be handled BEFORE calling this function
                     // This should raise an error
-                    getInput(i)->resize(std::array<DimSize_t, 1>({getInput(0)->dims()[1]}));
+                    getInput(i)->resize({getInput(0)->dims()[1]});
                 }
             }
             mOutputs[0]->resize(getInput(0)->dims());
@@ -133,10 +133,10 @@ inline std::shared_ptr<Node> BatchNorm(const DimSize_t nbFeatures,
                                        const std::string& name = "") {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by BatchNorm, not supported");
     auto batchNorm = std::make_shared<Node>(std::make_shared<BatchNorm_Op<static_cast<DimIdx_t>(DIM)>>(epsilon, momentum), name);
-    addProducer(batchNorm, 1, std::array<DimSize_t,1>({nbFeatures}), "scale");
-    addProducer(batchNorm, 2, std::array<DimSize_t,1>({nbFeatures}), "shift");
-    addProducer(batchNorm, 3, std::array<DimSize_t,1>({nbFeatures}), "batch_mean");
-    addProducer(batchNorm, 4, std::array<DimSize_t,1>({nbFeatures}), "batch_variance");
+    addProducer(batchNorm, 1, {nbFeatures}, "scale");
+    addProducer(batchNorm, 2, {nbFeatures}, "shift");
+    addProducer(batchNorm, 3, {nbFeatures}, "batch_mean");
+    addProducer(batchNorm, 4, {nbFeatures}, "batch_variance");
     return batchNorm;
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 1ebda2c59..cbed859d1 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -209,9 +209,8 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
     auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims), name);
-    // addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w");
     addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
-    addProducer(conv, 2, std::array<DimSize_t, 1>({outChannels}), "b");
+    addProducer(conv, 2, {outChannels}, "b");
     return conv;
 }

diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index c97bbd21e..c9f172718 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -203,7 +203,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
     auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims), name);
     addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
-    addProducer(convDW, 2, std::array<DimSize_t, 1>({nbChannels}), "b");
+    addProducer(convDW, 2, {nbChannels}, "b");
     return convDW;
 }

diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 545e923fb..52297525e 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -77,7 +77,7 @@ public:
         }
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
         if (inputIdx == 0 && getInput(0)->nbDims() == 1)
-            mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, getInput(inputIdx)->size()}));
+            mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()});
     }

     void computeOutputDims() override final {
@@ -123,8 +123,8 @@ public:
 inline std::shared_ptr<Node> FC(DimSize_t inChannels, DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(outChannels, noBias), name);
-    addProducer(fc, 1, std::array<DimSize_t, 2>({outChannels, inChannels}), "w");
-    addProducer(fc, 2, (noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({outChannels})), "b"); // already sets bias dims
+    addProducer(fc, 1, {outChannels, inChannels}, "w");
+    addProducer(fc, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
     return fc;
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 5b733f6a5..10488ed99 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -99,7 +99,7 @@ public:
 inline std::shared_ptr<Node> MatMul(DimSize_t inChannels, DimSize_t outChannels, const std::string& name = "") {
     // FIXME: properly handle default w initialization in every cases
     auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(outChannels), name);
-    addProducer(matmul, 1, std::array<DimSize_t, 2>({outChannels, inChannels}), "w");
+    addProducer(matmul, 1, {outChannels, inChannels}, "w");
     return matmul;
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 615b89604..7c1df0994 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -66,8 +66,8 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
     auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");

     auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
-    addProducer(metaOp, 1, std::array<DimSize_t,0>({}), "w");
-    addProducer(metaOp, 2, std::array<DimSize_t,0>({}), "b");
+    addProducer(metaOp, 1, {}, "w");
+    addProducer(metaOp, 2, {}, "b");
     return metaOp;
 }

--
GitLab
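The braced-init-list calls above rely on a standard C++ idiom: when the receiving parameter is (or converts to) a container with an std::initializer_list constructor, such as std::vector, a braced list at the call site builds it in place, so wrapping the dimensions in an explicit std::array adds nothing. Below is a minimal, self-contained sketch of that idiom; addProducerDims and its std::vector<DimSize_t> parameter are hypothetical stand-ins, not the actual Aidge addProducer or Tensor::resize signatures.

// Sketch only: a toy callee whose dims parameter is a std::vector,
// not the real Aidge API.
#include <cstddef>
#include <iostream>
#include <vector>

using DimSize_t = std::size_t;   // assumed to mirror Aidge's DimSize_t

// Hypothetical stand-in for the dims parameter of addProducer().
void addProducerDims(const std::vector<DimSize_t>& dims) {
    std::cout << "rank " << dims.size() << '\n';
}

int main() {
    const DimSize_t outChannels = 16;
    const DimSize_t inChannels  = 8;
    addProducerDims({outChannels});              // 1-D bias dims, as for the Conv/FC "b" producers
    addProducerDims({outChannels, inChannels});  // 2-D weight dims, as for the FC/MatMul "w" producers
    addProducerDims({});                         // empty dims, as in the PaddedConvDepthWise case
    return 0;
}

Note that a braced list cannot deduce DIM for a templated std::array<DimSize_t, DIM> parameter, so the simplification presumes an overload (vector-like or fixed-rank) that accepts the list directly.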