Commit 3d9fb355 authored by Grégoire Kubler, committed by Maxence Naud

feat : [ADD] PaddedConvTranspose operator

parent 6793dc5f
This commit is part of merge request !319.
...@@ -211,6 +211,48 @@ PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
////////////////////////////////////////////////////////////////////////////////
template <std::array<DimSize_t, 1>::size_type DIM>
extern std::shared_ptr<Node> PaddedConvTranspose(
const DimSize_t &inChannels,
const DimSize_t &outChannels,
const std::array<DimSize_t, DIM> &kernelDims,
const std::array<DimSize_t, DIM> &strideDims =
create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims =
create_array<DimSize_t,DIM>(1),
const bool noBias = false,
const std::array<DimSize_t, 2*DIM> &paddingDims =
create_array<DimSize_t,2*DIM>(0),
const std::string& name = "");
template <std::array<DimSize_t, 1>::size_type DIM>
extern std::shared_ptr<Node> PaddedConvTranspose(const DimSize_t &inChannels,
const DimSize_t &outChannels,
DimSize_t const (&kernel_dims)[DIM],
const std::array<DimSize_t, DIM> &strideDims =
create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims =
create_array<DimSize_t,DIM>(1),
const bool noBias = false,
const std::array<DimSize_t, 2*DIM> &paddingDims =
create_array<DimSize_t,2*DIM>(0),
const std::string& name = "");
template <std::array<DimSize_t, 1>::size_type DIM>
extern std::shared_ptr<MetaOperator_Op> PaddedConvTranspose_Op(const DimSize_t &inChannels,
const DimSize_t &outChannels,
const std::array<DimSize_t, DIM> &kernelDims,
const std::array<DimSize_t, DIM> &strideDims =
create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims =
create_array<DimSize_t,DIM>(1),
const bool noBias = false,
const std::array<DimSize_t, 2*DIM> &paddingDims =
create_array<DimSize_t,2*DIM>(0),
const std::string& name = "");
////////////////////////////////////////////////////////////////////////////////
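For reference, a minimal usage sketch of the factory function declared above; the channel counts, kernel size, stride, and padding values are illustrative and not taken from this commit:

// Builds a "PaddedConvTranspose2D" meta-operator node that pads its input
// by 1 on each side before applying a 3x3 transposed convolution with stride 2.
auto deconv = Aidge::PaddedConvTranspose<2>(
    /*inChannels=*/16,
    /*outChannels=*/8,
    /*kernelDims=*/{3, 3},
    /*strideDims=*/{2, 2},
    /*dilationDims=*/{1, 1},
    /*noBias=*/false,
    /*paddingDims=*/{1, 1, 1, 1},
    /*name=*/"deconv1");

As the implementation further down shows, the weight and bias Producers are attached by the factory itself, so the returned node only expects the data input.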
/**
 * @brief Creates a padded max pooling operation.
 *
......
...@@ -14,9 +14,7 @@
#include <string>
#include <vector>
#include <array>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/MetaOperatorDefs.hpp" #include "aidge/operator/MetaOperatorDefs.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
...@@ -193,6 +191,71 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) { ...@@ -193,6 +191,71 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
)mydelimiter"); )mydelimiter");
} }
template <DimIdx_t DIM> void declare_PaddedConvTransposeOp(py::module &m) {
m.def(("PaddedConvTranspose" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t &in_channels,
const DimSize_t &out_channels,
const std::vector<DimSize_t>& kernel_dims,
const std::vector<DimSize_t> &stride_dims,
const std::vector<DimSize_t> &dilation_dims,
const bool no_bias,
const std::vector<DimSize_t> &padding_dims,
const std::string& name)
{
AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
return PaddedConvTranspose<DIM>(in_channels,
out_channels,
to_array<DIM>(kernel_dims.begin()),
to_array<DIM>(stride_dims.begin()),
to_array<DIM>(dilation_dims.begin()),
no_bias,
to_array<2*DIM>(padding_dims.begin()),
name);
}, py::arg("in_channels"),
py::arg("out_channels"),
py::arg("kernel_dims"),
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("no_bias")= false,
py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
py::arg("name") = "");
m.def(("PaddedConvTranspose" + std::to_string(DIM) + "DOp").c_str(), [](
const DimSize_t &inChannels,
const DimSize_t &outChannels,
const std::vector<DimSize_t>& kernel_dims,
const std::vector<DimSize_t> &stride_dims,
const std::vector<DimSize_t> &dilation_dims,
const bool no_bias,
const std::vector<DimSize_t> &padding_dims,
const std::string &name)
{
AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
return PaddedConvTranspose_Op<DIM>(inChannels,
outChannels,
to_array<DIM>(kernel_dims.begin()),
to_array<DIM>(stride_dims.begin()),
to_array<DIM>(dilation_dims.begin()),
no_bias,
to_array<2*DIM>(padding_dims.begin()),
name);
}, py::arg("in_channels"),
py::arg("out_channels"),
py::arg("kernel_dims"),
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
py::arg("no_bias") = false,
py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
py::arg("name") = "");
}
template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
m.def(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
...@@ -446,6 +509,8 @@ void init_MetaOperatorDefs(py::module &m) {
declare_PaddedConvDepthWiseOp<1>(m);
declare_PaddedConvDepthWiseOp<2>(m);
// declare_PaddedConvDepthWiseOp<3>(m);
declare_PaddedConvTransposeOp<1>(m);
declare_PaddedConvTransposeOp<2>(m);
// declare_PaddedAvgPoolingOp<1>(m);
declare_PaddedAvgPoolingOp<2>(m);
// declare_PaddedAvgPoolingOp<3>(m);
......
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/operator/MetaOperatorDefs.hpp"
#include <array>
#include <memory>
#include "aidge/graph/Node.hpp"
#include "aidge/graph/OpArgs.hpp"
#include "aidge/operator/ConvTranspose.hpp"
#include "aidge/operator/MetaOperator.hpp"
#include "aidge/operator/Pad.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
//////////////////////////////////
// Node functions
//////////////////////////////////
template <std::array<DimIdx_t, 1>::size_type DIM>
std::shared_ptr<Node>
PaddedConvTranspose(const DimSize_t &inChannels,
const DimSize_t &outChannels,
const std::array<DimSize_t, DIM> &kernelDims,
const std::array<DimSize_t, DIM> &strideDims,
const std::array<DimSize_t, DIM> &dilationDims,
const bool noBias,
const std::array<DimSize_t, 2 * DIM> &paddingDims,
const std::string &name) {
auto graph = Sequential(
{Pad<DIM>(paddingDims, (!name.empty()) ? name + "_pad" : ""),
ConvTranspose(inChannels,
outChannels,
kernelDims,
strideDims,
dilationDims,
noBias,
!name.empty() ? name + "_convTranspose" : "")});
auto metaOpNode = MetaOperator(
("PaddedConvTranspose" + std::to_string(DIM) + "D").c_str(),
graph,
{},
name);
addProducer(metaOpNode,
1,
append(outChannels, append(inChannels, kernelDims)),
"w");
if (!noBias) {
addProducer(metaOpNode, 2, {outChannels}, "b");
}
return metaOpNode;
}
template std::shared_ptr<Node>
PaddedConvTranspose<1>(const DimSize_t &,
const DimSize_t &,
const std::array<DimSize_t, 1> &,
const std::array<DimSize_t, 1> &,
const std::array<DimSize_t, 1> &,
const bool,
const std::array<DimSize_t, 2> &,
const std::string &);
template std::shared_ptr<Node>
PaddedConvTranspose<2>(const DimSize_t &,
const DimSize_t &,
const std::array<DimSize_t, 2> &,
const std::array<DimSize_t, 2> &,
const std::array<DimSize_t, 2> &,
const bool,
const std::array<DimSize_t, 4> &,
const std::string &);
template <std::array<DimIdx_t, 1>::size_type DIM>
std::shared_ptr<Node>
PaddedConvTranspose(const DimSize_t &inChannels,
const DimSize_t &outChannels,
DimSize_t const (&kernelDims)[DIM],
const std::array<DimSize_t, DIM> &strideDims,
const std::array<DimSize_t, DIM> &dilationDims,
const bool noBias,
const std::array<DimSize_t, 2 * DIM> &paddingDims,
const std::string &name) {
return PaddedConvTranspose<DIM>(inChannels,
outChannels,
to_array(kernelDims),
strideDims,
dilationDims,
noBias,
paddingDims,
name);
}
template std::shared_ptr<Node>
PaddedConvTranspose<1>(const DimSize_t &,
const DimSize_t &,
DimSize_t const (&)[1],
const std::array<DimSize_t, 1> &,
const std::array<DimSize_t, 1> &,
const bool,
const std::array<DimSize_t, 2> &,
const std::string &);
template std::shared_ptr<Node>
PaddedConvTranspose<2>(const DimSize_t &,
const DimSize_t &,
const DimSize_t (&)[2],
const std::array<DimSize_t, 2> &,
const std::array<DimSize_t, 2> &,
const bool,
const std::array<DimSize_t, 4> &,
const std::string &);
//////////////////////////////////
// Operator functions
//////////////////////////////////
template <std::array<DimIdx_t, 1>::size_type DIM>
std::shared_ptr<MetaOperator_Op>
PaddedConvTranspose_Op(const DimSize_t &inChannels,
const DimSize_t &outChannels,
const std::array<DimSize_t, DIM> &kernelDims,
const std::array<DimSize_t, DIM> &strideDims,
const std::array<DimSize_t, DIM> &dilationDims,
const bool noBias,
const std::array<DimSize_t, 2 * DIM> &paddingDims,
const std::string &name) {
auto pad = Pad<DIM>(paddingDims,
!name.empty() ? name + "_pad" : "pad",
PadBorderType::Constant,
0.0);
auto convTranspose = ConvTranspose(
inChannels,
outChannels,
kernelDims,
strideDims,
dilationDims,
noBias,
!name.empty() ? name + "_convtranspose" : "convTranspose");
return std::make_shared<MetaOperator_Op>(
("PaddedConvTranspose" + std::to_string(DIM) + "D").c_str(),
Sequential({pad, convTranspose}));
}
template std::shared_ptr<MetaOperator_Op>
PaddedConvTranspose_Op<1>(const DimSize_t &,
const DimSize_t &,
const std::array<DimSize_t, 1> &,
const std::array<DimSize_t, 1> &,
const std::array<DimSize_t, 1> &,
const bool,
const std::array<DimSize_t, 2> &,
const std::string &);
template std::shared_ptr<MetaOperator_Op>
PaddedConvTranspose_Op<2>(const DimSize_t &,
const DimSize_t &,
const std::array<DimSize_t, 2> &,
const std::array<DimSize_t, 2> &,
const std::array<DimSize_t, 2> &,
const bool,
const std::array<DimSize_t, 4> &,
const std::string &);
} // namespace Aidge
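The _Op variant returns the bare MetaOperator_Op and, unlike the Node-returning PaddedConvTranspose(), does not attach weight or bias Producers. A hedged sketch of calling it, with illustrative values and relying on the default arguments declared in MetaOperatorDefs.hpp:

// Bare padded transposed-convolution operator; the caller is responsible for
// wiring the weight (and optional bias) inputs when building the graph.
auto op = Aidge::PaddedConvTranspose_Op<2>(
    /*inChannels=*/3,
    /*outChannels=*/8,
    /*kernelDims=*/{5, 5});
// strideDims, dilationDims, noBias, paddingDims and name fall back to the
// header defaults (stride 1, dilation 1, bias enabled, zero padding, empty name).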