Skip to content
Snippets Groups Projects
Commit 6cf3bdc3 authored by Maxence Naud's avatar Maxence Naud
Browse files

[Fix] set FC/ConvDepthWise parameter sizes at construction by adding the...

[Fix] set FC/ConvDepthWise parameter sizes at construction by adding the number of input channels in construction parameters
parent ca67f0bc
No related branches found
No related tags found
No related merge requests found
...@@ -49,13 +49,14 @@ public: ...@@ -49,13 +49,14 @@ public:
template <ConvDepthWiseAttr e> template <ConvDepthWiseAttr e>
using attr = typename Attributes_::template attr<e>; using attr = typename Attributes_::template attr<e>;
constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims, constexpr ConvDepthWise_Op(const DimSize_t nbChannels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
: OperatorTensor(Type, 1, 2, 1), : OperatorTensor(Type, 1, 2, 1),
Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims), Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
attr<ConvDepthWiseAttr::DilationDims>(dilation_dims), attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
attr<ConvDepthWiseAttr::Channels>(0), attr<ConvDepthWiseAttr::Channels>(nbChannels),
attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {} attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {}
/** /**
...@@ -101,7 +102,6 @@ public: ...@@ -101,7 +102,6 @@ public:
floor(static_cast<float>(inputDims[dim+2] - kernelExtent) / floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim]))); static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
} }
this->template getAttr<ConvDepthWiseAttr::Channels>() = inputDims[1];
// std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>())); // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>()));
// if (mInputs[1]->empty()) { // if (mInputs[1]->empty()) {
// mInputs[1]->resize(weightDims); // mInputs[1]->resize(weightDims);
...@@ -177,7 +177,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels, ...@@ -177,7 +177,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
// FIXME: properly handle default w&b initialization in every cases // FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported"); static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims), name); auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims), name);
addProducer(convDW, 1, append(nbChannels, append(1, kernelDims)), "w"); addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
addProducer(convDW, 2, std::array<DimSize_t, 1>({nbChannels}), "b"); addProducer(convDW, 2, std::array<DimSize_t, 1>({nbChannels}), "b");
return convDW; return convDW;
} }
......
...@@ -56,7 +56,8 @@ inline std::shared_ptr<Node> PaddedConv( ...@@ -56,7 +56,8 @@ inline std::shared_ptr<Node> PaddedConv(
} }
template <std::array<DimSize_t, 1>::size_type DIM> template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims, inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "", const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0), const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
...@@ -64,7 +65,7 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM ...@@ -64,7 +65,7 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM
{ {
// Construct micro-graph // Construct micro-graph
auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0); auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : ""); auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
// Need to specify the ordered list of input operators // Need to specify the ordered list of input operators
const std::vector<NodePtr> orderedInputNodes = {pad, conv}; const std::vector<NodePtr> orderedInputNodes = {pad, conv};
...@@ -77,13 +78,14 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM ...@@ -77,13 +78,14 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM> template <DimSize_t DIM>
inline std::shared_ptr<Node> PaddedConvDepthWise( inline std::shared_ptr<Node> PaddedConvDepthWise(
const DimSize_t nb_channels,
DimSize_t const (&kernel_dims)[DIM], DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "", const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0), const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
{ {
return PaddedConvDepthWise(to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims); return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
} }
template <std::array<DimSize_t, 1>::size_type DIM> template <std::array<DimSize_t, 1>::size_type DIM>
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/ConvDepthWise.hpp" #include "aidge/operator/ConvDepthWise.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
...@@ -26,19 +26,22 @@ namespace py = pybind11; ...@@ -26,19 +26,22 @@ namespace py = pybind11;
namespace Aidge { namespace Aidge {
template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) { template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Operator, Attributes>( py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor, Attributes>(
m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(), m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
py::multiple_inheritance()) py::multiple_inheritance())
.def(py::init<const std::array<DimSize_t, DIM> &, .def(py::init<const DimSize_t,
const std::array<DimSize_t, DIM> &,
const std::array<DimSize_t, DIM> &, const std::array<DimSize_t, DIM> &,
const std::array<DimSize_t, DIM> &>(), const std::array<DimSize_t, DIM> &>(),
py::arg("nb_channels"),
py::arg("kernel_dims"), py::arg("kernel_dims"),
py::arg("stride_dims"), py::arg("stride_dims"),
py::arg("dilation_dims")) py::arg("dilation_dims"))
.def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName) .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
.def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName); .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName);
m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
const std::vector<DimSize_t>& kernel_dims,
const std::string& name, const std::string& name,
const std::vector<DimSize_t> &stride_dims, const std::vector<DimSize_t> &stride_dims,
const std::vector<DimSize_t> &dilation_dims) { const std::vector<DimSize_t> &dilation_dims) {
...@@ -46,8 +49,9 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) { ...@@ -46,8 +49,9 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM); AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM); AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
return ConvDepthWise<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin())); return ConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
}, py::arg("kernel_dims"), }, py::arg("nb_channenls"),
py::arg("kernel_dims"),
py::arg("name") = "", py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1), py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1)); py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
......
...@@ -13,18 +13,18 @@ ...@@ -13,18 +13,18 @@
#include "aidge/operator/FC.hpp" #include "aidge/operator/FC.hpp"
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
namespace py = pybind11; namespace py = pybind11;
namespace Aidge { namespace Aidge {
void declare_FC(py::module &m) { void declare_FC(py::module &m) {
py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, Attributes>(m, "FCOp", py::multiple_inheritance()) py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor, Attributes>(m, "FCOp", py::multiple_inheritance())
.def("get_inputs_name", &FC_Op::getInputsName) .def("get_inputs_name", &FC_Op::getInputsName)
.def("get_outputs_name", &FC_Op::getOutputsName); .def("get_outputs_name", &FC_Op::getOutputsName);
m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = ""); m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
} }
void init_FC(py::module &m) { void init_FC(py::module &m) {
......
...@@ -18,7 +18,6 @@ ...@@ -18,7 +18,6 @@
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/MetaOperatorDefs.hpp" #include "aidge/operator/MetaOperatorDefs.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
namespace py = pybind11; namespace py = pybind11;
...@@ -49,7 +48,8 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) { ...@@ -49,7 +48,8 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
} }
template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) { template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
m.def(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, m.def(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
const std::vector<DimSize_t>& kernel_dims,
const std::string& name, const std::string& name,
const std::vector<DimSize_t> &stride_dims, const std::vector<DimSize_t> &stride_dims,
const std::vector<DimSize_t> &padding_dims, const std::vector<DimSize_t> &padding_dims,
...@@ -60,8 +60,9 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) { ...@@ -60,8 +60,9 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [%ld] does not match DIM [%d]", padding_dims.size(), 2*DIM); AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [%ld] does not match DIM [%d]", padding_dims.size(), 2*DIM);
AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM); AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
return PaddedConvDepthWise<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin())); return PaddedConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
}, py::arg("kernel_dims"), }, py::arg("nb_channels"),
py::arg("kernel_dims"),
py::arg("name") = "", py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1), py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0), py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment