Commit 41ede626 authored by Olivier BICHLER

Remove unnecessary std::array (minor, style)

parent b282d8a0
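
Context for the change, since the same pattern repeats in every hunk below: the callee's dims parameter evidently has a concrete, brace-constructible type (a fixed-extent std::array, std::vector, or similar), so the call site can pass a braced-init list directly and the explicit std::array temporary is redundant. A minimal standalone sketch of the equivalence, using a hypothetical Tensor::resize stand-in rather than the actual Aidge signature:

#include <array>
#include <cstddef>

using DimSize_t = std::size_t;

// Hypothetical stand-in for Tensor::resize with a fixed one-dimensional
// dims parameter; the real Aidge signature may differ.
struct Tensor {
    void resize(const std::array<DimSize_t, 1>& /*dims*/) {}
};

int main() {
    Tensor t;
    t.resize(std::array<DimSize_t, 1>({5})); // before: spells out the parameter type
    t.resize({5});                           // after: list-initializes the same parameter
    return 0;
}

The caveat is template argument deduction: a bare {5} cannot deduce the extent of a std::array template parameter, so the shorthand only compiles where the parameter type is fixed at the call site.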
@@ -87,7 +87,7 @@ public:
             if(getInput(i)->size() != nbFeatures) {
                 // /!\ Input size should be handled BEFORE calling this function
                 // This should raise an error
-                getInput(i)->resize(std::array<DimSize_t, 1>({getInput(0)->dims()[1]}));
+                getInput(i)->resize({getInput(0)->dims()[1]});
             }
         }
         mOutputs[0]->resize(getInput(0)->dims());
@@ -133,10 +133,10 @@ inline std::shared_ptr<Node> BatchNorm(const DimSize_t nbFeatures,
                                        const std::string& name = "") {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by BatchNorm, not supported");
     auto batchNorm = std::make_shared<Node>(std::make_shared<BatchNorm_Op<static_cast<DimIdx_t>(DIM)>>(epsilon, momentum), name);
-    addProducer(batchNorm, 1, std::array<DimSize_t,1>({nbFeatures}), "scale");
-    addProducer(batchNorm, 2, std::array<DimSize_t,1>({nbFeatures}), "shift");
-    addProducer(batchNorm, 3, std::array<DimSize_t,1>({nbFeatures}), "batch_mean");
-    addProducer(batchNorm, 4, std::array<DimSize_t,1>({nbFeatures}), "batch_variance");
+    addProducer(batchNorm, 1, {nbFeatures}, "scale");
+    addProducer(batchNorm, 2, {nbFeatures}, "shift");
+    addProducer(batchNorm, 3, {nbFeatures}, "batch_mean");
+    addProducer(batchNorm, 4, {nbFeatures}, "batch_variance");
     return batchNorm;
 }
 } // namespace Aidge
@@ -209,9 +209,8 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
     auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims), name);
-    // addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w");
     addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
-    addProducer(conv, 2, std::array<DimSize_t, 1>({outChannels}), "b");
+    addProducer(conv, 2, {outChannels}, "b");
     return conv;
 }
@@ -203,7 +203,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
     auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims), name);
     addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
-    addProducer(convDW, 2, std::array<DimSize_t, 1>({nbChannels}), "b");
+    addProducer(convDW, 2, {nbChannels}, "b");
     return convDW;
 }
@@ -77,7 +77,7 @@ public:
         }
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
         if (inputIdx == 0 && getInput(0)->nbDims() == 1)
-            mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, getInput(inputIdx)->size()}));
+            mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()});
     }

     void computeOutputDims() override final {
@@ -123,8 +123,8 @@ public:
 inline std::shared_ptr<Node> FC(DimSize_t inChannels, DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(outChannels, noBias), name);
-    addProducer(fc, 1, std::array<DimSize_t, 2>({outChannels, inChannels}), "w");
-    addProducer(fc, 2, (noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({outChannels})), "b"); // already sets bias dims
+    addProducer(fc, 1, {outChannels, inChannels}, "w");
+    addProducer(fc, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
     return fc;
 }
 } // namespace Aidge
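
The bias hunk above does slightly more than drop the type name: the conditional moves inside the braced initializer instead of duplicating the std::array construction on each branch. A quick standalone check that both spellings yield the same one-element dims list (the values here are illustrative only):

#include <array>
#include <cstddef>

using DimSize_t = std::size_t;

int main() {
    const bool noBias = true;
    const DimSize_t outChannels = 16;
    // Before: the ternary selects between two fully spelled-out arrays.
    const std::array<DimSize_t, 1> before =
        noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({outChannels});
    // After: the ternary selects the single element instead.
    const std::array<DimSize_t, 1> after = {(noBias ? DimSize_t(0) : outChannels)};
    return (before == after) ? 0 : 1; // exits 0: the two are equal
}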
@@ -99,7 +99,7 @@ public:
 inline std::shared_ptr<Node> MatMul(DimSize_t inChannels, DimSize_t outChannels, const std::string& name = "") {
     // FIXME: properly handle default w initialization in every cases
     auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(outChannels), name);
-    addProducer(matmul, 1, std::array<DimSize_t, 2>({outChannels, inChannels}), "w");
+    addProducer(matmul, 1, {outChannels, inChannels}, "w");
     return matmul;
 }
 } // namespace Aidge
@@ -66,8 +66,8 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
     auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
     auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
-    addProducer(metaOp, 1, std::array<DimSize_t,0>({}), "w");
-    addProducer(metaOp, 2, std::array<DimSize_t,0>({}), "b");
+    addProducer(metaOp, 1, {}, "w");
+    addProducer(metaOp, 2, {}, "b");
     return metaOp;
 }