Commit d8349d45 authored by Olivier BICHLER

Dropped usage of const char* for name, use std::string instead

parent ea1886d8
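The bulk of this commit replaces `const char*` name parameters (defaulting to `nullptr`) with `const std::string&` (defaulting to `""`) throughout the factory functions and Python bindings. A minimal sketch of the pattern, with illustrative names that are not part of the Aidge codebase, shows why the string version is safer: the empty string is a regular value, so neither callers nor implementers need a null guard.

```cpp
#include <iostream>
#include <string>

// Old pattern: every use of `name` must guard against nullptr.
void reportOld(const char* name = nullptr) {
    std::cout << (name ? name : "<unnamed>") << '\n';
}

// New pattern: "" is an ordinary value; no special case required.
void reportNew(const std::string& name = "") {
    std::cout << (name.empty() ? "<unnamed>" : name) << '\n';
}

int main() {
    reportOld();          // relies on the internal null check
    reportNew();          // empty string, handled uniformly
    reportNew("conv1");   // string literals convert implicitly
    return 0;
}
```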
```diff
@@ -37,6 +37,7 @@ test:windows_cpp:
     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
     # Install dependencies
     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    - choco install python -Y
     # Update PATH
     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
   script:
```
```diff
@@ -141,7 +141,7 @@ public:
 };
 template <std::size_t NUM>
-inline std::shared_ptr<Node> Add(const char* name = nullptr) {
+inline std::shared_ptr<Node> Add(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Add_Op<NUM>>(), name);
 }
 }
```
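For illustration, hypothetical call sites for the new `Add` signature (these calls are not part of the diff and assume the header above is included):

```cpp
// A string literal converts implicitly to std::string:
auto named = Aidge::Add<2>("add1");
// Omitting the argument yields an empty name, i.e. an unnamed node:
auto anonymous = Aidge::Add<2>();
```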
```diff
@@ -139,7 +139,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
-inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const char* name = nullptr) {
+inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every case
     auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name);
     addProducer(fc, 1, {out_channels, 1}, "w");
```
```diff
@@ -117,7 +117,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
-inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const char* name = nullptr) {
+inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every case
     return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
 }
```
```diff
@@ -129,7 +129,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
-inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const char* name = nullptr) {
+inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every case
     auto matmul = std::make_shared<Node>(std::make_shared<Matmul_Op>(out_channels), name);
     addProducer(matmul, 1, {1, out_channels}, "w");
```
```diff
@@ -128,17 +128,17 @@ inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, cons
 }
 template <std::array<DimSize_t, 1>::size_type DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const char* extension) {
+void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const std::string& extension) {
     assert(inputIdx != gk_IODefaultIndex);
     static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
-    const char* prodName = otherNode->name().empty() ? nullptr : (otherNode->name() + std::string("_") + std::string(extension)).c_str();
+    const std::string prodName = (otherNode->name().empty()) ? "" : (otherNode->name() + std::string("_") + extension);
     auto prod = Producer(dims, prodName);
     prod->addChild(otherNode, 0, inputIdx);
     otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
 }
 template <std::size_t DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const char* extension) {
+void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) {
     addProducer(otherNode, inputIdx, to_array(dims), extension);
 }
 } // namespace Aidge
```
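The `prodName` change in this hunk fixes a genuine lifetime bug, not just style: the old line called `.c_str()` on the temporary `std::string` produced by the concatenation, so `prodName` dangled as soon as the statement ended. A self-contained sketch of the pitfall (illustrative names, not Aidge code):

```cpp
#include <cstdio>
#include <string>

int main() {
    const std::string base = "fc1";

    // BUG (old pattern): the concatenation yields a temporary std::string
    // that is destroyed at the end of this statement, so `dangling` points
    // at freed memory afterwards.
    const char* dangling = (base + "_w").c_str();
    (void)dangling; // actually reading it would be undefined behavior

    // OK (new pattern): a named std::string owns its buffer for as long
    // as the name is needed.
    const std::string safe = base + "_w";
    printf("%s\n", safe.c_str());
    return 0;
}
```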
```diff
@@ -106,7 +106,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
-inline std::shared_ptr<Node> ReLU(const char* name = nullptr) {
+inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every case
     return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
 }
```
```diff
@@ -106,7 +106,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
-inline std::shared_ptr<Node> Softmax(const char* name = nullptr) {
+inline std::shared_ptr<Node> Softmax(const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every case
     return std::make_shared<Node>(std::make_shared<Softmax_Op>(), name);
 }
```
```diff
@@ -37,10 +37,10 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
     py::arg("stride_dims"),
     py::arg("padding_dims"));
-  m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](std::vector<DimSize_t>& kernel_dims,
-                                                               const char* name,
-                                                               std::vector<DimSize_t> &stride_dims,
-                                                               std::vector<DimSize_t> &padding_dims) {
+  m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+                                                               const std::string& name,
+                                                               const std::vector<DimSize_t> &stride_dims,
+                                                               const std::vector<DimSize_t> &padding_dims) {
     // Lambda function wrapper because PyBind fails to convert const array.
     // So we use a vector that we convert in this function to a const DimSize_t [DIM] array.
     if (kernel_dims.size() != DIM) {
@@ -69,7 +69,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
     const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
     return AvgPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
   }, py::arg("kernel_dims"),
-     py::arg("name") = nullptr,
+     py::arg("name") = "",
      py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
      py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
```
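On the Python side, the keyword default for `name` becomes an empty string rather than `nullptr`, since pybind11 converts `py::arg(...)` defaults through the same type caster as regular arguments and `nullptr` is not a valid `std::string`. A minimal, self-contained sketch of the pattern (module and function names are illustrative):

```cpp
#include <pybind11/pybind11.h>
#include <string>

namespace py = pybind11;

// Mirrors the py::arg("name") = "" idiom used in the bindings above.
PYBIND11_MODULE(example, m) {
    m.def("label",
          [](const std::string& name) {
              return name.empty() ? std::string("<unnamed>") : name;
          },
          py::arg("name") = "");
}
```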
```diff
@@ -44,11 +44,11 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
                                                          DimSize_t out_channels,
-                                                         std::vector<DimSize_t>& kernel_dims,
-                                                         const char* name,
-                                                         std::vector<DimSize_t> &stride_dims,
-                                                         std::vector<DimSize_t> &padding_dims,
-                                                         std::vector<DimSize_t> &dilation_dims) {
+                                                         const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::string& name,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &padding_dims,
+                                                         const std::vector<DimSize_t> &dilation_dims) {
     // Lambda function wrapper because PyBind fails to convert const array.
     // So we use a vector that we convert in this function to a const DimSize_t [DIM] array.
     if (kernel_dims.size() != DIM) {
@@ -87,7 +87,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
   }, py::arg("in_channels"),
      py::arg("out_channels"),
      py::arg("kernel_dims"),
-     py::arg("name") = nullptr,
+     py::arg("name") = "",
      py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
      py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0),
      py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
```
```diff
@@ -39,11 +39,11 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
     py::arg("padding_dims"),
     py::arg("dilation_dims"));
-  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](std::vector<DimSize_t>& kernel_dims,
-                                                                  const char* name,
-                                                                  std::vector<DimSize_t> &stride_dims,
-                                                                  std::vector<DimSize_t> &padding_dims,
-                                                                  std::vector<DimSize_t> &dilation_dims) {
+  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+                                                                  const std::string& name,
+                                                                  const std::vector<DimSize_t> &stride_dims,
+                                                                  const std::vector<DimSize_t> &padding_dims,
+                                                                  const std::vector<DimSize_t> &dilation_dims) {
     // Lambda function wrapper because PyBind fails to convert const array.
     // So we use a vector that we convert in this function to a const DimSize_t [DIM] array.
     if (kernel_dims.size() != DIM) {
@@ -80,7 +80,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
     const DimSize_t (&dilation_dims_array)[DIM] = tmp_dilation_dims_array;
     return ConvDepthWise<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array), to_array(dilation_dims_array));
   }, py::arg("kernel_dims"),
-     py::arg("name") = nullptr,
+     py::arg("name") = "",
      py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
      py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0),
      py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
```
```diff
@@ -25,7 +25,7 @@ namespace Aidge {
 template <DimIdx_t DIM>
 void declare_Producer(py::module &m) {
     // m.def(("Producer_" + std::to_string(DIM)+"D").c_str(), py::overload_cast<shared_ptr<Node>&>(&Producer<DIM>), py::arg("dims"), py::arg("name"));
-    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const char*)>(&Producer), py::arg("dims"), py::arg("name") = nullptr);
+    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const std::string&)>(&Producer), py::arg("dims"), py::arg("name") = "");
 }
@@ -36,7 +36,7 @@ void init_Producer(py::module &m) {
         "ProducerOp",
         py::multiple_inheritance())
     .def("dims", &Producer_Op::dims);
-    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const char*)>(&Producer), py::arg("tensor"), py::arg("name") = nullptr);
+    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&)>(&Producer), py::arg("tensor"), py::arg("name") = "");
     declare_Producer<1>(m);
     declare_Producer<2>(m);
```
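`Producer` is overloaded in C++, so the bindings select an overload by `static_cast`-ing `&Producer` to an explicit function-pointer type; after this change, that type spells `const std::string&` instead of `const char*`. A minimal sketch of the disambiguation idiom (illustrative names, not the Aidge API):

```cpp
#include <pybind11/pybind11.h>
#include <string>

namespace py = pybind11;

// Two C++ overloads, analogous to the Producer overloads bound above.
int build(int size) { return size; }
int build(const std::string& name) { return static_cast<int>(name.size()); }

PYBIND11_MODULE(example_overload, m) {
    // static_cast to the exact function-pointer type selects the overload.
    m.def("build", static_cast<int (*)(int)>(&build), py::arg("size"));
    m.def("build", static_cast<int (*)(const std::string&)>(&build),
          py::arg("name") = "");
}
```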
```diff
@@ -20,7 +20,7 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
-void drawProgressBar(double progress, int barWidth, const char* additionalInfo = nullptr) {
+void drawProgressBar(double progress, int barWidth, const std::string& additionalInfo = "") {
     putchar('[');
     int pos = static_cast<int>(barWidth * progress);
     for (int i = 0; i < barWidth; ++i) {
@@ -29,7 +29,7 @@ void drawProgressBar(double progress, int barWidth, const char* additionalInfo =
         else
             putchar(' ');
     }
-    printf("] %d%% | %s\r", static_cast<int>(progress * 100), (additionalInfo ? additionalInfo : ""));
+    printf("] %d%% | %s\r", static_cast<int>(progress * 100), additionalInfo.c_str());
     fflush(stdout);
 }
```
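One subtlety in `drawProgressBar`: a `std::string` object cannot travel through `printf`'s C varargs, where `%s` expects a `const char*`, so the call must still go through `.c_str()`. A two-line illustration:

```cpp
#include <cstdio>
#include <string>

int main() {
    const std::string info = "running FC_0";
    printf("%s\n", info.c_str()); // %s needs a NUL-terminated char array
    return 0;
}
```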
```diff
@@ -122,8 +122,7 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
         else
             drawProgressBar(static_cast<float>(computationOver.size()) / static_cast<float>(computationNumber), 50,
                 (std::string("running ") + runnable->type() + "_" +
-                 std::to_string(reinterpret_cast<uintptr_t>(runnable.get())))
-                    .c_str());
+                 std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))));
         const auto tStart = std::chrono::high_resolution_clock::now();
         runnable->forward();
         const auto tEnd = std::chrono::high_resolution_clock::now();
```
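The trailing `.c_str()` can be dropped at this call site because the concatenated temporary `std::string` now binds directly to `drawProgressBar`'s `const std::string&` parameter, and a temporary bound to a reference parameter outlives the full call expression. A compact illustration (illustrative names):

```cpp
#include <iostream>
#include <string>

// The const reference keeps the caller's temporary alive for the whole call.
void show(const std::string& info) { std::cout << info << '\n'; }

int main() {
    const std::string type = "Conv";
    show("running " + type + "_42"); // temporary stays valid inside show()
    return 0;
}
```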