Commit 5f3ac9c8 authored by Maxence Naud

Merge remote-tracking branch 'origin/main' into fix/GenericOp

parents 3a091045 49c815ad
2 merge requests: !15 Remove CParameter memory leak, !3 [Fix] Memory leak due to containers serialization
Pipeline #31163 failed
Showing 43 additions and 42 deletions
@@ -37,6 +37,7 @@ test:windows_cpp:
- Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
# Install dependencies
- choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+ - choco install python -Y
# Update PATH
- $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
script:
......
@@ -208,7 +208,7 @@ public:
* @brief Get the Nodes pointed to by the GraphView object.
* @return std::set<NodePtr>
*/
-inline std::set<NodePtr> getNodes() const { return mNodes; }
+inline const std::set<NodePtr>& getNodes() const { return mNodes; }
/**
* @brief Get the operator with the corresponding name if it is in the
@@ -217,7 +217,7 @@ public:
* @return NodePtr returns a new empty node if the one asked for
* was not found.
*/
-NodePtr getNode(const char *nodeName) const;
+NodePtr getNode(const std::string& nodeName) const;
/**
* @brief Remove a Node from the current GraphView scope without affecting its connections.
......
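Note on the two GraphView changes above: returning the node set by const reference avoids copying the whole std::set on every call, and getNode() now accepts any std::string. A minimal usage sketch, assuming the Aidge headers are available and a graphView that holds a node named "conv1" (both hypothetical):

// Binding to a const reference reuses the set owned by the GraphView;
// the previous by-value signature copied it on every call.
const std::set<Aidge::NodePtr>& nodes = graphView->getNodes();

// getNode() now takes a std::string; a string literal converts implicitly.
Aidge::NodePtr conv = graphView->getNode("conv1");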
@@ -62,7 +62,7 @@ public:
* @param op Operator giving the Node its number of connections.
* @param name (optional) name for the Node.
*/
-Node(std::shared_ptr<Operator> op, const char *name = nullptr);
+Node(std::shared_ptr<Operator> op, const std::string& name = "");
virtual ~Node() = default;
......
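With the constructor now taking const std::string&, an unnamed Node is represented by an empty string rather than a null pointer, so the "no name" case no longer involves a null check. A short sketch; the operator type and node name are made up, and GenericOperator_Op is used only because it appears elsewhere in this commit:

// A named node: the string literal converts implicitly to std::string.
auto named = std::make_shared<Aidge::Node>(
    std::make_shared<Aidge::GenericOperator_Op>("MyOp", 1, 1, 1), "my_node");

// An unnamed node: name() simply returns the empty string.
auto anon = std::make_shared<Aidge::Node>(
    std::make_shared<Aidge::GenericOperator_Op>("MyOp", 1, 1, 1));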
@@ -141,7 +141,7 @@ public:
};
template <std::size_t NUM>
-inline std::shared_ptr<Node> Add(const char* name = nullptr) {
+inline std::shared_ptr<Node> Add(const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Add_Op<NUM>>(), name);
}
}
......
@@ -146,7 +146,7 @@ public:
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
-const char *name = nullptr,
+const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
// FIXME: properly handle default w&b initialization in every cases
@@ -158,7 +158,7 @@ inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel
template <DimSize_t DIM>
inline std::shared_ptr<Node> AvgPooling(
DimSize_t const (&kernel_dims)[DIM],
-const char *name = nullptr,
+const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
......
@@ -144,7 +144,7 @@ public:
template <DimSize_t DIM>
inline std::shared_ptr<Node> BatchNorm(const float epsilon = 1.0e-5F,
const float momentum = 0.1F,
-const char *name = nullptr) {
+const std::string& name = "") {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by BatchNorm, not supported");
auto batchNorm = std::make_shared<Node>(std::make_shared<BatchNorm_Op<static_cast<DimIdx_t>(DIM)>>(epsilon, momentum), name);
addProducer(batchNorm, 1, std::array<DimSize_t,0>({}), "scale");
......
@@ -166,7 +166,7 @@ template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Conv(DimSize_t in_channels,
DimSize_t out_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
-const char *name = nullptr,
+const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
@@ -184,7 +184,7 @@ inline std::shared_ptr<Node> Conv(
DimSize_t in_channels,
DimSize_t out_channels,
DimSize_t const (&kernel_dims)[DIM],
-const char *name = nullptr,
+const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
......
@@ -165,7 +165,7 @@ class ConvDepthWise_Op : public Operator,
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
-const char *name = nullptr,
+const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
@@ -180,7 +180,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &ker
template <DimSize_t DIM>
inline std::shared_ptr<Node> ConvDepthWise(
DimSize_t const (&kernel_dims)[DIM],
-const char *name = nullptr,
+const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
......
@@ -139,7 +139,7 @@ public:
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};
-inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const char* name = nullptr) {
+inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "") {
// FIXME: properly handle default w&b initialization in every cases
auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name);
addProducer(fc, 1, {out_channels, 1}, "w");
......
@@ -163,7 +163,7 @@ class GenericOperator_Op
* @return std::shared_ptr<Node> Node associated with the Generic Operator.
*/
inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut,
-const char *name = nullptr) {
+const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbDataIn, nbIn, nbOut), name);
}
} // namespace Aidge
......
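Since this merge targets the fix/GenericOp branch, a quick sketch of calling the updated factory; the operator type and node name below are illustrative, not taken from the repository:

// 1 data input, 1 input in total, 1 output; the name is now a std::string
// whose default is the empty string rather than a nullptr.
auto flatten = Aidge::GenericOperator("Flatten", 1, 1, 1, "flatten_0");
auto unnamed = Aidge::GenericOperator("Flatten", 1, 1, 1); // name left empty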
@@ -117,7 +117,7 @@ public:
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};
-inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const char* name = nullptr) {
+inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "") {
// FIXME: properly handle default w&b initialization in every cases
return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
}
......
@@ -129,7 +129,7 @@ public:
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};
-inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const char* name = nullptr) {
+inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const std::string& name = "") {
// FIXME: properly handle default w&b initialization in every cases
auto matmul = std::make_shared<Node>(std::make_shared<Matmul_Op>(out_channels), name);
addProducer(matmul, 1, {1, out_channels}, "w");
......
@@ -113,32 +113,32 @@ public:
};
template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const char *name = nullptr) {
+inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "") {
static_assert(DIM<=MaxDim,"Too many tensor dimensions required by Producer, not supported");
return std::make_shared<Node>(std::make_shared<Producer_Op>(dims), name);
}
template <std::size_t DIM>
-inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const char *name = nullptr) {
+inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const std::string& name = "") {
return Producer(to_array(dims), name);
}
-inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const char *name = nullptr) {
+inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor), name);
}
template <std::array<DimSize_t, 1>::size_type DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const char* extension) {
+void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const std::string& extension) {
assert(inputIdx != gk_IODefaultIndex);
static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
-const char* prodName = otherNode->name().empty() ? nullptr : (otherNode->name() + std::string("_") + std::string(extension)).c_str();
+const std::string prodName = (otherNode->name().empty()) ? "" : (otherNode->name() + std::string("_") + extension);
auto prod = Producer(dims, prodName);
prod->addChild(otherNode, 0, inputIdx);
otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
}
template <std::size_t DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const char* extension) {
+void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) {
addProducer(otherNode, inputIdx, to_array(dims), extension);
}
} // namespace Aidge
......
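One note on the prodName line above: the old version called .c_str() on a temporary std::string, so the stored const char* pointed at memory that was already released by the time Producer(dims, prodName) ran. A standalone sketch of that hazard, with illustrative names:

#include <iostream>
#include <string>

int main() {
    std::string nodeName = "conv1";

    // Old pattern: the concatenation yields a temporary std::string that is
    // destroyed at the end of the full expression, so the pointer returned by
    // c_str() dangles immediately. Kept commented out on purpose.
    // const char* prodName = (nodeName + "_w").c_str();

    // New pattern, as in the updated addProducer(): keep the std::string
    // itself alive for as long as the name is needed.
    const std::string prodName = nodeName + "_w";
    std::cout << prodName << '\n';
    return 0;
}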
@@ -106,7 +106,7 @@ public:
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};
-inline std::shared_ptr<Node> ReLU(const char* name = nullptr) {
+inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
// FIXME: properly handle default w&b initialization in every cases
return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
}
......
@@ -106,7 +106,7 @@ public:
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};
-inline std::shared_ptr<Node> Softmax(const char* name = nullptr) {
+inline std::shared_ptr<Node> Softmax(const std::string& name = "") {
// FIXME: properly handle default w&b initialization in every cases
return std::make_shared<Node>(std::make_shared<Softmax_Op>(), name);
}
......
@@ -23,7 +23,7 @@ namespace Aidge {
template <std::size_t NUM> void declare_Add(py::module &m) {
py::class_<Add_Op<NUM>, std::shared_ptr<Add_Op<NUM>>, Operator>(m, "Add_Op", py::multiple_inheritance());
-m.def("Add", &Add<NUM>, py::arg("name") = nullptr);
+m.def("Add", &Add<NUM>, py::arg("name") = "");
}
void init_Add(py::module &m) {
......
@@ -37,10 +37,10 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
py::arg("stride_dims"),
py::arg("padding_dims"));
-m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](std::vector<DimSize_t>& kernel_dims,
-const char* name,
-std::vector<DimSize_t> &stride_dims,
-std::vector<DimSize_t> &padding_dims) {
+m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+const std::string& name,
+const std::vector<DimSize_t> &stride_dims,
+const std::vector<DimSize_t> &padding_dims) {
// Lambda function wrapper because PyBind fails to convert const array.
// So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
if (kernel_dims.size() != DIM) {
@@ -69,7 +69,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
return AvgPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
}, py::arg("kernel_dims"),
-py::arg("name") = nullptr,
+py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
......
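For reference, the pattern these binding lambdas rely on (copying a runtime std::vector into a fixed-size C array so an array-reference overload such as AvgPooling<DIM> can be selected) can be illustrated outside the project like this; the names, the DimSize_t alias and the dimension count of 2 are purely illustrative:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Illustrative stand-in for Aidge::DimSize_t.
using DimSize_t = std::size_t;

// Overload that, like AvgPooling<DIM>, only accepts a fixed-size array.
template <std::size_t DIM>
void useDims(const DimSize_t (&dims)[DIM]) {
    for (DimSize_t d : dims) std::cout << d << ' ';
    std::cout << '\n';
}

int main() {
    // Runtime-sized input, as received from Python through pybind11.
    const std::vector<DimSize_t> kernel_dims{3, 3};

    // Copy into a temporary C array, then bind a reference so the
    // array-reference overload can be called; this mirrors the
    // tmp_*_dims_array pattern used in the lambdas above.
    DimSize_t tmp_kernel_dims_array[2];
    std::copy_n(kernel_dims.begin(), 2, tmp_kernel_dims_array);
    const DimSize_t (&kernel_dims_array)[2] = tmp_kernel_dims_array;
    useDims(kernel_dims_array);
    return 0;
}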
@@ -24,7 +24,7 @@ template <DimSize_t DIM>
void declare_BatchNormOp(py::module& m) {
py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, PyAbstractParametrizable>(m, ("BatchNorm_Op" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance());
-m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = nullptr);
+m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
}
void init_BatchNorm(py::module &m) {
......
@@ -44,11 +44,11 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
DimSize_t out_channels,
-std::vector<DimSize_t>& kernel_dims,
-const char* name,
-std::vector<DimSize_t> &stride_dims,
-std::vector<DimSize_t> &padding_dims,
-std::vector<DimSize_t> &dilation_dims) {
+const std::vector<DimSize_t>& kernel_dims,
+const std::string& name,
+const std::vector<DimSize_t> &stride_dims,
+const std::vector<DimSize_t> &padding_dims,
+const std::vector<DimSize_t> &dilation_dims) {
// Lambda function wrapper because PyBind fails to convert const array.
// So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
if (kernel_dims.size() != DIM) {
@@ -87,7 +87,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
}, py::arg("in_channels"),
py::arg("out_channels"),
py::arg("kernel_dims"),
-py::arg("name") = nullptr,
+py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0),
py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
......
@@ -39,11 +39,11 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
py::arg("padding_dims"),
py::arg("dilation_dims"));
-m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](std::vector<DimSize_t>& kernel_dims,
-const char* name,
-std::vector<DimSize_t> &stride_dims,
-std::vector<DimSize_t> &padding_dims,
-std::vector<DimSize_t> &dilation_dims) {
+m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+const std::string& name,
+const std::vector<DimSize_t> &stride_dims,
+const std::vector<DimSize_t> &padding_dims,
+const std::vector<DimSize_t> &dilation_dims) {
// Lambda function wrapper because PyBind fails to convert const array.
// So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
if (kernel_dims.size() != DIM) {
@@ -80,7 +80,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
const DimSize_t (&dilation_dims_array)[DIM] = tmp_dilation_dims_array;
return ConvDepthWise<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array), to_array(dilation_dims_array));
}, py::arg("kernel_dims"),
-py::arg("name") = nullptr,
+py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0),
py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
......