Commit 12241358 authored by Cyril Moineau, committed by Maxence Naud

Update Conv, ConvDW, PaddedConv & PaddedConvDW to handle the noBias parameter.

parent 93500677
2 merge requests: !105 "version 0.2.0", !98 "Conv no bias"
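The commit threads a trailing `noBias` flag (default `false`) through every constructor and factory shown below, so existing call sites keep compiling. A minimal usage sketch against the new `Conv` factory signature from the first diff (the header path is assumed from Aidge's layout; names and values are illustrative):

```cpp
#include <array>
#include <memory>

#include "aidge/operator/Conv.hpp"  // header path assumed from Aidge's layout

using namespace Aidge;

int main() {
    // The trailing noBias argument is the parameter this commit introduces;
    // leaving it out keeps the old behaviour (bias Producer of size outChannels).
    std::shared_ptr<Node> conv = Conv<2>(/*inChannels=*/3,
                                         /*outChannels=*/16,
                                         std::array<DimSize_t, 2>{3, 3},  // kernelDims
                                         "conv1",
                                         std::array<DimSize_t, 2>{1, 1},  // strideDims
                                         std::array<DimSize_t, 2>{1, 1},  // dilationDims
                                         /*noBias=*/true);
    return 0;
}
```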
@@ -27,21 +27,31 @@
 #include "aidge/utils/Types.h"

 namespace Aidge {
-enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims };
+enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, NoBias };

 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
                 public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
-                public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
-                                        DimSize_t, std::array<DimSize_t, DIM>> {
+                public StaticAttributes<ConvAttr,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>,
+                                        DimSize_t,
+                                        DimSize_t,
+                                        std::array<DimSize_t, DIM>,
+                                        bool> {
 public:
     static const std::string Type;

     Conv_Op() = delete;

-    using Attributes_ = StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
-                                         DimSize_t, DimSize_t, std::array<DimSize_t, DIM>>;
+    using Attributes_ = StaticAttributes<ConvAttr,
+                                         std::array<DimSize_t, DIM>,
+                                         std::array<DimSize_t, DIM>,
+                                         DimSize_t,
+                                         DimSize_t,
+                                         std::array<DimSize_t, DIM>,
+                                         bool>;
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
@@ -49,13 +59,15 @@ public:
                       DimSize_t outChannels,
                       const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
+                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+                      bool noBias = false)
         : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvAttr::StrideDims>(strideDims),
                       attr<ConvAttr::DilationDims>(dilationDims),
                       attr<ConvAttr::InChannels>(inChannels),
                       attr<ConvAttr::OutChannels>(outChannels),
-                      attr<ConvAttr::KernelDims>(kernelDims)) {}
+                      attr<ConvAttr::KernelDims>(kernelDims),
+                      attr<ConvAttr::NoBias>(noBias)) {}

     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -163,15 +175,17 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> co
         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
         weightIdxDims[0] = firstEltDims[1];

-        // Bias
-        const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-        const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
         // Result
         std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
         res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
         res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
-        res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+        // Bias
+        if (! this->template getAttr<ConvAttr::NoBias>()){
+            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
+            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+        }
         return res;
     }
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
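One consequence of the conditional push above: the receptive-field result no longer always holds three (index, dims) pairs, so downstream code must not hard-code the bias entry. A small caller-side sketch of the new contract (hypothetical helper, not part of the commit; entries keep the visible push order of input, weight, then optional bias):

```cpp
#include <utility>
#include <vector>

#include "aidge/utils/Types.h"  // DimSize_t, as included at the top of this file

// Hypothetical helper illustrating the new contract: the vector carries
// 2 entries when NoBias is set, 3 otherwise.
using Field = std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>;

bool hasBiasField(const std::vector<Field>& res) {
    return res.size() == 3;  // {input, weight} vs {input, weight, bias}
}
```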
@@ -215,12 +229,14 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
                                   const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+                                  bool noBias = false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims), name);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims, noBias), name);
     addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
-    addProducer(conv, 2, {outChannels}, "b");
+    addProducer(conv, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
     return conv;
 }
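Note the design choice in the hunk above: with `noBias` set, the bias input is not removed from the graph; the factory still attaches a `"b"` Producer, just with zero elements, so input indices stay stable. A sketch of what that implies (assuming `Node::getParent` as in aidge_core; not part of the commit):

```cpp
#include <array>
#include <memory>

#include "aidge/operator/Conv.hpp"  // assumed header path

using namespace Aidge;

void biasWiring() {
    auto biased   = Conv<2>(3, 16, std::array<DimSize_t, 2>{3, 3}, "c0");
    auto biasless = Conv<2>(3, 16, std::array<DimSize_t, 2>{3, 3}, "c1",
                            std::array<DimSize_t, 2>{1, 1},
                            std::array<DimSize_t, 2>{1, 1},
                            /*noBias=*/true);
    // Both nodes still have a third input wired to a Producer:
    //   biased->getParent(2)   -> Producer with dims {16}
    //   biasless->getParent(2) -> Producer with dims {0} (empty bias tensor)
}
```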
@@ -232,9 +248,10 @@ inline std::shared_ptr<Node> Conv(
     DimSize_t const (&kernelDims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+    bool noBias = false) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims);
+    return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias);
 }
 } // namespace Aidge
@@ -245,7 +262,8 @@ const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
     "DilationDims",
     "InChannels",
     "OutChannels",
-    "KernelDims"
+    "KernelDims",
+    "NoBias"
 };
 }
@@ -26,7 +26,7 @@
 #include "aidge/utils/Types.h"

 namespace Aidge {
-enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims, NoBias };

 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
@@ -35,7 +35,8 @@ class ConvDepthWise_Op : public OperatorTensor,
                          std::array<DimSize_t, DIM>,
                          std::array<DimSize_t, DIM>,
                          DimSize_t,
-                         std::array<DimSize_t, DIM>> {
+                         std::array<DimSize_t, DIM>,
+                         bool> {
 public:
     static const std::string Type;
@@ -45,19 +46,22 @@ public:
                          std::array<DimSize_t, DIM>,
                          std::array<DimSize_t, DIM>,
                          DimSize_t,
-                         std::array<DimSize_t, DIM>>;
+                         std::array<DimSize_t, DIM>,
+                         bool>;
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;

     constexpr ConvDepthWise_Op(const DimSize_t nbChannels,
                                const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+                               bool no_bias=false)
         : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
                       attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
                       attr<ConvDepthWiseAttr::Channels>(nbChannels),
-                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {}
+                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
+                      attr<ConvDepthWiseAttr::NoBias>(no_bias)) {}

     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -157,15 +161,17 @@ public:
         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
         weightIdxDims[0] = firstEltDims[1];

-        // Bias
-        const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-        const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
         // Result
         std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
         res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
         res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
-        res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+        // Bias
+        if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){
+            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
+            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+        }
         return res;
     }
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
@@ -196,12 +202,13 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            const std::array<DimSize_t, DIM> &kernelDims,
                                            const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                           const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+                                           const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+                                           bool noBias=false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims), name);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims, noBias), name);
     addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
-    addProducer(convDW, 2, {nbChannels}, "b");
+    addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b");
    return convDW;
 }
@@ -212,16 +219,17 @@ inline std::shared_ptr<Node> ConvDepthWise(
     DimSize_t const (&kernelDims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+    bool noBias=false) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims);
+    return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias);
 }
 } // namespace Aidge

 namespace {
 template <>
 const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims", "Channels",
-                                                                   "KernelDims"};
+                                                                   "KernelDims", "NoBias"};
 }

 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
@@ -35,11 +35,12 @@ inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
                                         const std::string& name = "",
                                         const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                         const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                        const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+                                        const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+                                        bool no_bias = false)
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
     auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
@@ -56,9 +57,10 @@ inline std::shared_ptr<Node> PaddedConv(
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+    bool no_bias = false)
 {
-    return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+    return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
 }

 template <std::array<DimSize_t, 1>::size_type DIM>
@@ -67,11 +69,12 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
                                                  const std::string& name = "",
                                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                                 const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+                                                 const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+                                                 bool no_bias = false)
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
     auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(nb_channels, append(DimSize_t(1), kernel_dims)), "w");
@@ -87,9 +90,10 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+    bool no_bias = false)
 {
-    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
 }

 template <std::array<DimSize_t, 1>::size_type DIM>
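For the padded meta-operators, the flag simply rides along into the inner `Conv_Op`/`ConvDepthWise_Op` of the Pad then Conv micro-graph. A usage sketch against the `PaddedConvDepthWise` signature above (header path assumed; values illustrative):

```cpp
#include <array>

#include "aidge/operator/MetaOperatorDefs.hpp"  // assumed header for the Padded* factories

using namespace Aidge;

void build() {
    // 3x3 depthwise convolution, 1-pixel zero padding on every border, no bias.
    // padding_dims carries 2*DIM values (begin/end padding per spatial dim).
    auto dw = PaddedConvDepthWise<2>(/*nb_channels=*/32,
                                     std::array<DimSize_t, 2>{3, 3},        // kernel_dims
                                     "dw1",
                                     std::array<DimSize_t, 2>{1, 1},        // stride_dims
                                     std::array<DimSize_t, 4>{1, 1, 1, 1},  // padding_dims
                                     std::array<DimSize_t, 2>{1, 1},        // dilation_dims
                                     /*no_bias=*/true);
    (void)dw;
}
```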
@@ -23,6 +23,9 @@ template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
     const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D");
     py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, pyClassName.c_str(), py::multiple_inheritance())
+    .def(py::init<float, float>(),
+         py::arg("epsilon"),
+         py::arg("momentum"))
     .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
     .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
     .def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
@@ -33,12 +33,14 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
                   DimSize_t,
                   const std::array<DimSize_t, DIM> &,
                   const std::array<DimSize_t, DIM> &,
-                  const std::array<DimSize_t, DIM> &>(),
+                  const std::array<DimSize_t, DIM> &,
+                  bool>(),
         py::arg("in_channels"),
         py::arg("out_channels"),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("dilation_dims"))
+        py::arg("dilation_dims"),
+        py::arg("no_bias"))
     .def("get_inputs_name", &Conv_Op<DIM>::getInputsName)
     .def("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
     .def("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
@@ -51,18 +53,20 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
                   const std::vector<DimSize_t>& kernel_dims,
                   const std::string& name,
                   const std::vector<DimSize_t> &stride_dims,
-                  const std::vector<DimSize_t> &dilation_dims) {
+                  const std::vector<DimSize_t> &dilation_dims,
+                  bool noBias) {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
-        return Conv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        return Conv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), noBias);
     }, py::arg("in_channels"),
        py::arg("out_channels"),
        py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("no_bias") = false);
 }
@@ -33,11 +33,13 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
     .def(py::init<const DimSize_t,
                   const std::array<DimSize_t, DIM> &,
                   const std::array<DimSize_t, DIM> &,
-                  const std::array<DimSize_t, DIM> &>(),
+                  const std::array<DimSize_t, DIM> &,
+                  bool>(),
         py::arg("nb_channels"),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("dilation_dims"))
+        py::arg("dilation_dims"),
+        py::arg("no_bias"))
     .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
     .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
     .def("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName);
@@ -46,17 +48,19 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
                   const std::vector<DimSize_t>& kernel_dims,
                   const std::string& name,
                   const std::vector<DimSize_t> &stride_dims,
-                  const std::vector<DimSize_t> &dilation_dims) {
+                  const std::vector<DimSize_t> &dilation_dims,
+                  bool no_bias) {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
-        return ConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        return ConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
     }, py::arg("nb_channenls"),
        py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("no_bias")= false);
 }
@@ -30,21 +30,23 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
                   const std::string& name,
                   const std::vector<DimSize_t> &stride_dims,
                   const std::vector<DimSize_t> &padding_dims,
-                  const std::vector<DimSize_t> &dilation_dims)
+                  const std::vector<DimSize_t> &dilation_dims,
+                  bool no_bias)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
-        return PaddedConv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        return PaddedConv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
     }, py::arg("in_channels"),
        py::arg("out_channels"),
        py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("no_bias")= false);
 }

 template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
@@ -53,20 +55,22 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
                   const std::string& name,
                   const std::vector<DimSize_t> &stride_dims,
                   const std::vector<DimSize_t> &padding_dims,
-                  const std::vector<DimSize_t> &dilation_dims)
+                  const std::vector<DimSize_t> &dilation_dims,
+                  bool no_bias)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
-        return PaddedConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        return PaddedConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
     }, py::arg("nb_channels"),
        py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("no_bias") = false);
 }