From d8349d4531eaa6703602ffd507f62c852f816a7f Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Wed, 30 Aug 2023 14:21:29 +0200
Subject: [PATCH] Dropped usage of const char* for name, use std::string instead

---
 .gitlab/ci/test.gitlab-ci.yml                    |  1 +
 include/aidge/operator/Add.hpp                   |  2 +-
 include/aidge/operator/FC.hpp                    |  2 +-
 include/aidge/operator/LeakyReLU.hpp             |  2 +-
 include/aidge/operator/Matmul.hpp                |  2 +-
 include/aidge/operator/Producer.hpp              |  6 +++---
 include/aidge/operator/ReLU.hpp                  |  2 +-
 include/aidge/operator/Softmax.hpp               |  2 +-
 python_binding/operator/pybind_AvgPooling.cpp    | 10 +++++-----
 python_binding/operator/pybind_Conv.cpp          | 12 ++++++------
 python_binding/operator/pybind_ConvDepthWise.cpp | 12 ++++++------
 python_binding/operator/pybind_Producer.cpp      |  4 ++--
 src/scheduler/Scheduler.cpp                      |  7 +++----
 13 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/.gitlab/ci/test.gitlab-ci.yml b/.gitlab/ci/test.gitlab-ci.yml
index 1e67ce273..924fd995a 100644
--- a/.gitlab/ci/test.gitlab-ci.yml
+++ b/.gitlab/ci/test.gitlab-ci.yml
@@ -37,6 +37,7 @@ test:windows_cpp:
     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
     # Install dependencies
     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    - choco install python -Y
     # Update PATH
     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
   script:
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index c96b2c571..ff3d1888c 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -141,7 +141,7 @@ public:
 };

 template <std::size_t NUM>
-inline std::shared_ptr<Node> Add(const char* name = nullptr) {
+inline std::shared_ptr<Node> Add(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Add_Op<NUM>>(), name);
 }
 }
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 6e4c54a03..db92dc9c7 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -139,7 +139,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };

-inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const char* name = nullptr) {
+inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name);
     addProducer(fc, 1, {out_channels, 1}, "w");
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 64587d51d..1dff2550a 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -117,7 +117,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };

-inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const char* name = nullptr) {
+inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
 }
diff --git a/include/aidge/operator/Matmul.hpp b/include/aidge/operator/Matmul.hpp
index b44e8a9b9..639b36691 100644
--- a/include/aidge/operator/Matmul.hpp
+++ b/include/aidge/operator/Matmul.hpp
@@ -129,7 +129,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };

-inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const char* name = nullptr) {
+inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     auto matmul = std::make_shared<Node>(std::make_shared<Matmul_Op>(out_channels), name);
     addProducer(matmul, 1, {1, out_channels}, "w");
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 1f77400ce..2a2f8c053 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -128,17 +128,17 @@ inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, cons
 }

 template <std::array<DimSize_t, 1>::size_type DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const char* extension) {
+void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const std::string& extension) {
     assert(inputIdx != gk_IODefaultIndex);
     static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
-    const char* prodName = otherNode->name().empty() ? nullptr : (otherNode->name() + std::string("_") + std::string(extension)).c_str();
+    const std::string prodName = (otherNode->name().empty()) ? "" : (otherNode->name() + std::string("_") + extension);
     auto prod = Producer(dims, prodName);
     prod->addChild(otherNode, 0, inputIdx);
     otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
 }

 template <std::size_t DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const char* extension) {
+void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) {
     addProducer(otherNode, inputIdx, to_array(dims), extension);
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 3ea90462c..141bd3ae1 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -106,7 +106,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };

-inline std::shared_ptr<Node> ReLU(const char* name = nullptr) {
+inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
 }
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 93eb262f7..64e713b33 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -106,7 +106,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };

-inline std::shared_ptr<Node> Softmax(const char* name = nullptr) {
+inline std::shared_ptr<Node> Softmax(const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     return std::make_shared<Node>(std::make_shared<Softmax_Op>(), name);
 }
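Beyond swapping the name type, the Producer.hpp hunk above also removes a real lifetime bug: the old code built the name in a temporary std::string and kept only its c_str(), so prodName was already dangling when the next statement, Producer(dims, prodName), ran. Returning a std::string by value removes the lifetime problem. A minimal standalone sketch of the old and new patterns (illustrative helper names, not Aidge code):

    #include <cstdio>
    #include <string>

    // Old pattern (mirrors the removed line): operator+ yields a temporary
    // std::string; c_str() points into it, and that pointer dangles as soon
    // as the full expression ends.
    const char* makeNameDangling(const std::string& base, const std::string& ext) {
        return (base + "_" + ext).c_str();  // do not use: dangling pointer
    }

    // New pattern (mirrors the added line): the returned std::string owns its
    // characters for as long as the caller keeps it alive.
    std::string makeName(const std::string& base, const std::string& ext) {
        return base.empty() ? std::string("") : base + "_" + ext;
    }

    int main() {
        const std::string prodName = makeName("conv1", "w");
        std::printf("%s\n", prodName.c_str());  // prints "conv1_w"
    }
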
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 66dadba72..ecbb743d3 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -37,10 +37,10 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
         py::arg("stride_dims"),
         py::arg("padding_dims"));

-  m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](std::vector<DimSize_t>& kernel_dims,
-                                                                const char* name,
-                                                                std::vector<DimSize_t> &stride_dims,
-                                                                std::vector<DimSize_t> &padding_dims) {
+  m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+                                                                const std::string& name,
+                                                                const std::vector<DimSize_t> &stride_dims,
+                                                                const std::vector<DimSize_t> &padding_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
         // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
         if (kernel_dims.size() != DIM) {
@@ -69,7 +69,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
         const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
         return AvgPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
     }, py::arg("kernel_dims"),
-       py::arg("name") = nullptr,
+       py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));

diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 3cf5d818f..7e366305f 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -44,11 +44,11 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {

   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
                                                          DimSize_t out_channels,
-                                                         std::vector<DimSize_t>& kernel_dims,
-                                                         const char* name,
-                                                         std::vector<DimSize_t> &stride_dims,
-                                                         std::vector<DimSize_t> &padding_dims,
-                                                         std::vector<DimSize_t> &dilation_dims) {
+                                                         const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::string& name,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &padding_dims,
+                                                         const std::vector<DimSize_t> &dilation_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
         // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
         if (kernel_dims.size() != DIM) {
@@ -87,7 +87,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
     }, py::arg("in_channels"),
        py::arg("out_channels"),
        py::arg("kernel_dims"),
-       py::arg("name") = nullptr,
+       py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index b64409bdb..8a81e7ba1 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -39,11 +39,11 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         py::arg("padding_dims"),
         py::arg("dilation_dims"));

-  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](std::vector<DimSize_t>& kernel_dims,
-                                                                  const char* name,
-                                                                  std::vector<DimSize_t> &stride_dims,
-                                                                  std::vector<DimSize_t> &padding_dims,
-                                                                  std::vector<DimSize_t> &dilation_dims) {
+  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+                                                                  const std::string& name,
+                                                                  const std::vector<DimSize_t> &stride_dims,
+                                                                  const std::vector<DimSize_t> &padding_dims,
+                                                                  const std::vector<DimSize_t> &dilation_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
         // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
         if (kernel_dims.size() != DIM) {
@@ -80,7 +80,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         const DimSize_t (&dilation_dims_array)[DIM] = tmp_dilation_dims_array;
         return ConvDepthWise<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array), to_array(dilation_dims_array));
     }, py::arg("kernel_dims"),
-       py::arg("name") = nullptr,
+       py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 5757891a3..ea9880800 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -25,7 +25,7 @@ namespace Aidge {
 template <DimIdx_t DIM>
 void declare_Producer(py::module &m) {
     // m.def(("Producer_" + std::to_string(DIM)+"D").c_str(), py::overload_cast<shared_ptr<Node>&>(&Producer<DIM>), py::arg("dims"), py::arg("name"));
-    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const char*)>(&Producer), py::arg("dims"), py::arg("name") = nullptr);
+    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const std::string&)>(&Producer), py::arg("dims"), py::arg("name") = "");
 }


@@ -36,7 +36,7 @@ void init_Producer(py::module &m) {
         "ProducerOp", py::multiple_inheritance())
     .def("dims", &Producer_Op::dims);

-    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const char*)>(&Producer), py::arg("tensor"), py::arg("name") = nullptr);
+    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&)>(&Producer), py::arg("tensor"), py::arg("name") = "");

     declare_Producer<1>(m);
     declare_Producer<2>(m);
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index fce46397f..def7185f4 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -20,7 +20,7 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"

-void drawProgressBar(double progress, int barWidth, const char* additionalInfo = nullptr) {
+void drawProgressBar(double progress, int barWidth, const std::string& additionalInfo = "") {
     putchar('[');
     int pos = static_cast<int>(barWidth * progress);
     for (int i = 0; i < barWidth; ++i) {
@@ -29,7 +29,7 @@ void drawProgressBar(double progress, int barWidth, const char* additionalInfo =
         else
             putchar(' ');
     }
-    printf("] %d%% | %s\r", static_cast<int>(progress * 100), (additionalInfo ? additionalInfo : ""));
+    printf("] %d%% | %s\r", static_cast<int>(progress * 100), additionalInfo);
     fflush(stdout);
 }

@@ -122,8 +122,7 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
         else
             drawProgressBar(static_cast<float>(computationOver.size()) / static_cast<float>(computationNumber), 50,
                             (std::string("running ") + runnable->type() + "_" +
-                             std::to_string(reinterpret_cast<uintptr_t>(runnable.get())))
-                                .c_str());
+                             std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))));
         const auto tStart = std::chrono::high_resolution_clock::now();
         runnable->forward();
         const auto tEnd = std::chrono::high_resolution_clock::now();
-- 
GitLab
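One caveat worth noting in the Scheduler.cpp hunk: after the change, additionalInfo is a std::string but is still handed directly to printf's %s, which expects a const char*; the call would need to go through c_str() (or switch to an iostream-based formulation). A minimal standalone sketch of the corrected call (illustrative function and values, not part of this patch):

    #include <cstdio>
    #include <string>

    // Progress-bar tail formatting with a std::string payload: %s must be
    // fed a C string, so the argument goes through c_str().
    void printProgressTail(double progress, const std::string& additionalInfo) {
        std::printf("] %d%% | %s\r", static_cast<int>(progress * 100), additionalInfo.c_str());
        std::fflush(stdout);
    }

    int main() {
        printProgressTail(0.42, "running Conv_0x55aa");  // prints "] 42% | running Conv_0x55aa"
    }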