diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 076739198a816e47990b9a594ef9703fb39a4302..055c1b308470e3fe65693138c8e1e8f72ea62d5e 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -87,7 +87,7 @@ public:
             if(getInput(i)->size() != nbFeatures) {
                 // /!\ Input size should be handled BEFORE calling this function
                 // This should raise an error
-                getInput(i)->resize(std::array<DimSize_t, 1>({getInput(0)->dims()[1]}));
+                getInput(i)->resize({getInput(0)->dims()[1]});
             }
         }
         mOutputs[0]->resize(getInput(0)->dims());
@@ -133,10 +133,10 @@ inline std::shared_ptr<Node> BatchNorm(const DimSize_t nbFeatures,
                                        const std::string& name = "") {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by BatchNorm, not supported");
     auto batchNorm = std::make_shared<Node>(std::make_shared<BatchNorm_Op<static_cast<DimIdx_t>(DIM)>>(epsilon, momentum), name);
-    addProducer(batchNorm, 1, std::array<DimSize_t,1>({nbFeatures}), "scale");
-    addProducer(batchNorm, 2, std::array<DimSize_t,1>({nbFeatures}), "shift");
-    addProducer(batchNorm, 3, std::array<DimSize_t,1>({nbFeatures}), "batch_mean");
-    addProducer(batchNorm, 4, std::array<DimSize_t,1>({nbFeatures}), "batch_variance");
+    addProducer(batchNorm, 1, {nbFeatures}, "scale");
+    addProducer(batchNorm, 2, {nbFeatures}, "shift");
+    addProducer(batchNorm, 3, {nbFeatures}, "batch_mean");
+    addProducer(batchNorm, 4, {nbFeatures}, "batch_variance");
     return batchNorm;
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 1ebda2c599023c300e258c2c45123d23a478a351..cbed859d122722c62e6b73af510b3f0c83ead749 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -209,9 +209,8 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
     auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims), name);
-    // addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w");
     addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
-    addProducer(conv, 2, std::array<DimSize_t, 1>({outChannels}), "b");
+    addProducer(conv, 2, {outChannels}, "b");
     return conv;
 }

diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index c97bbd21e664c0365b081da4e57dd3200e37ef8c..c9f1727183f63ba94ac404606e9d048414477954 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -203,7 +203,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
     auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims), name);
     addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
-    addProducer(convDW, 2, std::array<DimSize_t, 1>({nbChannels}), "b");
+    addProducer(convDW, 2, {nbChannels}, "b");
     return convDW;
 }

diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 545e923fb08a8d71077340da2b0d2b3f052abc4b..52297525ec92063d7be6123b1853ff01af4ddbd5 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -77,7 +77,7 @@ public:
         }
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
         if (inputIdx == 0 && getInput(0)->nbDims() == 1)
-            mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, getInput(inputIdx)->size()}));
+            mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()});
     }

     void computeOutputDims() override final {
@@ -123,8 +123,8 @@ public:
 inline std::shared_ptr<Node> FC(DimSize_t inChannels, DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(outChannels, noBias), name);
-    addProducer(fc, 1, std::array<DimSize_t, 2>({outChannels, inChannels}), "w");
-    addProducer(fc, 2, (noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({outChannels})), "b"); // already sets bias dims
+    addProducer(fc, 1, {outChannels, inChannels}, "w");
+    addProducer(fc, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
     return fc;
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 5b733f6a57edb08cc35f912960398486f48acd27..10488ed99a049c0d90169bc6f9c848fe9081498f 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -99,7 +99,7 @@ public:
 inline std::shared_ptr<Node> MatMul(DimSize_t inChannels, DimSize_t outChannels, const std::string& name = "") {
     // FIXME: properly handle default w initialization in every cases
     auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(outChannels), name);
-    addProducer(matmul, 1, std::array<DimSize_t, 2>({outChannels, inChannels}), "w");
+    addProducer(matmul, 1, {outChannels, inChannels}, "w");
     return matmul;
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 615b8960403270efa1fe97235dbfeeb129338d5b..7c1df0994a46dc295d8147206371209f65590669 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -66,8 +66,8 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,

     auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
     auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
-    addProducer(metaOp, 1, std::array<DimSize_t,0>({}), "w");
-    addProducer(metaOp, 2, std::array<DimSize_t,0>({}), "b");
+    addProducer(metaOp, 1, {}, "w");
+    addProducer(metaOp, 2, {}, "b");
     return metaOp;
 }

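
Why the braced lists compile: a braced initializer such as {outChannels, inChannels} cannot deduce the DIM parameter of a std::array function argument (it is a non-deduced context), but it can deduce the bound of a C-style array reference. The standalone sketch below is a hypothetical illustration of the technique, not the actual Aidge declarations: addProducerDemo stands in for the addProducer overload set, and DimSize_t is assumed to be std::size_t. Note that the empty list {} used for the PaddedConvDepthWise producers would still need a dedicated overload, since a zero-length C array cannot be deduced.

    #include <algorithm>
    #include <array>
    #include <cstddef>
    #include <iostream>
    #include <iterator>

    using DimSize_t = std::size_t;

    // Overload taking std::array, as at the original call sites.
    template <std::size_t DIM>
    void addProducerDemo(const std::array<DimSize_t, DIM>& dims, const char* name) {
        std::cout << name << ": rank " << DIM << '\n';
    }

    // C-style array reference overload: deduction from a braced list fails for
    // std::array but succeeds for DimSize_t const (&)[DIM], enabling the
    // shorter call syntax seen in the patch.
    template <std::size_t DIM>
    void addProducerDemo(DimSize_t const (&dims)[DIM], const char* name) {
        std::array<DimSize_t, DIM> arr;
        std::copy(std::begin(dims), std::end(dims), arr.begin());
        addProducerDemo(arr, name);  // forward to the std::array overload
    }

    int main() {
        const DimSize_t inChannels = 3, outChannels = 8;
        addProducerDemo({outChannels, inChannels}, "w");  // DIM deduced as 2
        addProducerDemo({outChannels}, "b");              // DIM deduced as 1
    }

The same deduction rule is what allows the Tensor::resize calls to drop their explicit std::array wrappers, assuming resize has (or gains) an equivalent array-reference or fixed-size overload.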