Commit 661f01fa authored by Olivier BICHLER

Removed padding from conv and pool and added Pad operator

parent 46767f41
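With this commit, Conv, ConvDepthWise, AvgPooling and MaxPooling lose their PaddingDims parameter; explicit padding is now expressed by placing the new Pad operator in front of them. A minimal migration sketch in C++ (illustrative only: the Node::addChild connection call is assumed from the wider Aidge API and is not part of this diff):

    #include "aidge/operator/Conv.hpp"
    #include "aidge/operator/Pad.hpp"

    using namespace Aidge;

    // Before this commit, padding was an argument of the Conv factory, e.g.:
    //   auto conv = Conv(3, 16, {3, 3}, "conv1", {1, 1}, {1, 1, 1, 1});
    // After it, an explicit zero-padding node feeds an unpadded Conv:
    auto pad  = ZeroPad<2>(std::array<DimSize_t, 2>{1, 1}, "conv1_pad"); // 1 pixel per border
    auto conv = Conv(3, 16, {3, 3}, "conv1");                            // in_ch, out_ch, kernel
    pad->addChild(conv); // assumed graph-connection API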
@@ -26,15 +26,14 @@
#include "aidge/utils/Types.h"
namespace Aidge {
-enum class AvgPoolingParam { StrideDims, KernelDims, PaddingDims };
+enum class AvgPoolingParam { StrideDims, KernelDims };
template <DimIdx_t DIM>
class AvgPooling_Op : public Operator,
public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
public Parameterizable<AvgPoolingParam,
std::array<DimSize_t, DIM>,
-std::array<DimSize_t, DIM>,
-std::array<DimSize_t, (DIM<<1) >> {
+std::array<DimSize_t, DIM>> {
private:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -47,19 +46,15 @@ public:
using Parameterizable_ = Parameterizable<AvgPoolingParam,
std::array<DimSize_t, DIM>,
-std::array<DimSize_t, DIM>,
-std::array<DimSize_t, (DIM<<1)> >;
+std::array<DimSize_t, DIM>>;
template <AvgPoolingParam e>
using param = typename Parameterizable_::template param<e>;
constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
-const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
+const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
: Operator(Type),
Parameterizable_(param<AvgPoolingParam::StrideDims>(stride_dims),
-param<AvgPoolingParam::KernelDims>(kernel_dims),
-param<AvgPoolingParam::PaddingDims>(padding_dims)),
-mOutput(std::make_shared<Tensor>()) {
+param<AvgPoolingParam::KernelDims>(kernel_dims)) {
setDatatype(DataType::Float32);
}
@@ -78,9 +73,7 @@ public:
for (std::size_t dim = 0; dim < this->template get<AvgPoolingParam::KernelDims>().size() ; ++dim) {
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
std::floor(static_cast<float>(mInput->dims()[dim+2] -
-this->template get<AvgPoolingParam::KernelDims>()[dim] +
-this->template get<AvgPoolingParam::PaddingDims>()[dim] +
-this->template get<AvgPoolingParam::PaddingDims>()[dim+DIM]) /
+this->template get<AvgPoolingParam::KernelDims>()[dim]) /
static_cast<float>(this->template get<AvgPoolingParam::StrideDims>()[dim])));
}
outputDims[1] = mInput->dims()[1];
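In other words, the pooled extent is now out = 1 + floor((in - kernel) / stride), with no padding term. For in = 32, kernel = 3, stride = 2 this gives 1 + floor(29 / 2) = 15; the former PaddingDims = (1, 1) variant gave 1 + floor(31 / 2) = 16, so that extra border must now come from an explicit Pad node upstream.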
@@ -147,11 +140,10 @@ public:
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
-const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
-auto avgPool = std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
+auto avgPool = std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
return avgPool;
}
@@ -159,17 +151,16 @@ template <DimSize_t DIM>
inline std::shared_ptr<Node> AvgPooling(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
-const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
-return AvgPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+return AvgPooling(to_array(kernel_dims), name, stride_dims);
}
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::AvgPoolingParam>::data[] = {"StrideDims",
-"KernelDims", "PaddingDims"};
+"KernelDims"};
}
#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
@@ -26,13 +26,13 @@
#include "aidge/utils/Types.h"
namespace Aidge {
-enum class ConvParam { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, PaddingDims };
+enum class ConvParam { StrideDims, DilationDims, InChannels, OutChannels, KernelDims };
template <DimIdx_t DIM>
class Conv_Op : public Operator,
public Registrable<Conv_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
public Parameterizable<ConvParam, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
-DimSize_t, std::array<DimSize_t, DIM>, std::array<DimSize_t, (DIM<<1) >> {
+DimSize_t, std::array<DimSize_t, DIM>> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
@@ -45,7 +45,7 @@ public:
Conv_Op() = delete;
using Parameterizable_ = Parameterizable<ConvParam, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
-DimSize_t, DimSize_t, std::array<DimSize_t, DIM>, std::array<DimSize_t, (DIM<<1) >>;
+DimSize_t, DimSize_t, std::array<DimSize_t, DIM>>;
template <ConvParam e>
using param = typename Parameterizable_::template param<e>;
@@ -53,16 +53,13 @@ public:
DimSize_t out_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
: Operator(Type),
Parameterizable_(param<ConvParam::StrideDims>(stride_dims),
param<ConvParam::DilationDims>(dilation_dims),
param<ConvParam::InChannels>(in_channels),
param<ConvParam::OutChannels>(out_channels),
-param<ConvParam::KernelDims>(kernel_dims),
-param<ConvParam::PaddingDims>(padding_dims)),
-mOutput(std::make_shared<Tensor>()) {
+param<ConvParam::KernelDims>(kernel_dims)) {
setDatatype(DataType::Float32);
}
@@ -96,9 +93,7 @@ public:
1;
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent +
-this->template get<ConvParam::PaddingDims>()[dim] +
-this->template get<ConvParam::PaddingDims>()[dim+DIM]) /
+floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
static_cast<float>(this->template get<ConvParam::StrideDims>()[dim])));
}
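Same formula for convolution, except that kernelExtent is the dilated kernel span, dilation * (kernel - 1) + 1, computed just above this hunk: a kernel of 3 with dilation 2 covers 5 input elements, so in = 32 with stride 1 yields 1 + floor((32 - 5) / 1) = 28 outputs, again with no padding contribution.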
@@ -168,11 +163,10 @@ inline std::shared_ptr<Node> Conv(DimSize_t in_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, padding_dims, dilation_dims), name);
+auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), name);
// addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w");
addProducer(conv, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
addProducer(conv, 2, {out_channels}, "b");
@@ -186,17 +180,16 @@ inline std::shared_ptr<Node> Conv(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-return Conv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+return Conv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, dilation_dims);
}
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::ConvParam>::data[] = {"StrideDims", "DilationDims", "InChannels", "OutChannels",
-"KernelDims", "PaddingDims"};
+"KernelDims"};
}
#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
@@ -26,7 +26,7 @@
#include "aidge/utils/Types.h"
namespace Aidge {
-enum class ConvDepthWiseParam { StrideDims, DilationDims, Channels, KernelDims, PaddingDims };
+enum class ConvDepthWiseParam { StrideDims, DilationDims, Channels, KernelDims };
template <DimIdx_t DIM>
class ConvDepthWise_Op : public Operator,
@@ -35,8 +35,7 @@ class ConvDepthWise_Op : public Operator,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
DimSize_t,
-std::array<DimSize_t, DIM>,
-std::array<DimSize_t, (DIM<<1) >> {
+std::array<DimSize_t, DIM>> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
@@ -52,22 +51,18 @@ class ConvDepthWise_Op : public Operator,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
DimSize_t,
-std::array<DimSize_t, DIM>,
-std::array<DimSize_t, (DIM<<1) >>;
+std::array<DimSize_t, DIM>>;
template <ConvDepthWiseParam e>
using param = typename Parameterizable_::template param<e>;
constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
: Operator(Type),
Parameterizable_(param<ConvDepthWiseParam::StrideDims>(stride_dims),
param<ConvDepthWiseParam::DilationDims>(dilation_dims),
param<ConvDepthWiseParam::Channels>(0),
-param<ConvDepthWiseParam::KernelDims>(kernel_dims),
-param<ConvDepthWiseParam::PaddingDims>(padding_dims)),
-mOutput(std::make_shared<Tensor>()) {
+param<ConvDepthWiseParam::KernelDims>(kernel_dims)) {
setDatatype(DataType::Float32);
}
@@ -88,9 +83,7 @@ class ConvDepthWise_Op : public Operator,
1;
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent +
-this->template get<ConvDepthWiseParam::PaddingDims>()[dim] +
-this->template get<ConvDepthWiseParam::PaddingDims>()[dim+DIM]) /
+floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
static_cast<float>(this->template get<ConvDepthWiseParam::StrideDims>()[dim])));
}
this->template get<ConvDepthWiseParam::Channels>() = mInputs[0]->dims()[1];
@@ -167,11 +160,10 @@ template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims, dilation_dims), name);
+auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), name);
addProducer(convDW, 1, std::array<DimSize_t,0>({}), "w");
addProducer(convDW, 2, std::array<DimSize_t,0>({}), "b");
return convDW;
@@ -182,17 +174,16 @@ inline std::shared_ptr<Node> ConvDepthWise(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-return ConvDepthWise(to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+return ConvDepthWise(to_array(kernel_dims), name, stride_dims, dilation_dims);
}
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::ConvDepthWiseParam>::data[] = {"StrideDims", "DilationDims", "Channels",
-"KernelDims", "PaddingDims"};
+"KernelDims"};
}
#endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
@@ -26,15 +26,14 @@
#include "aidge/utils/Types.h"
namespace Aidge {
-enum class MaxPoolingParam { StrideDims, KernelDims, PaddingDims };
+enum class MaxPoolingParam { StrideDims, KernelDims };
template <DimIdx_t DIM>
class MaxPooling_Op : public Operator,
public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
public Parameterizable<MaxPoolingParam,
std::array<DimSize_t, DIM>,
-std::array<DimSize_t, DIM>,
-std::array<DimSize_t, (DIM<<1) >> {
+std::array<DimSize_t, DIM>> {
private:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -47,18 +46,15 @@ public:
using Parameterizable_ = Parameterizable<MaxPoolingParam,
std::array<DimSize_t, DIM>,
-std::array<DimSize_t, DIM>,
-std::array<DimSize_t, (DIM<<1)> >;
+std::array<DimSize_t, DIM>>;
template <MaxPoolingParam e>
using param = typename Parameterizable_::template param<e>;
constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
-const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
+const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
: Operator(Type),
Parameterizable_(param<MaxPoolingParam::StrideDims>(stride_dims),
-param<MaxPoolingParam::KernelDims>(kernel_dims),
-param<MaxPoolingParam::PaddingDims>(padding_dims)),
+param<MaxPoolingParam::KernelDims>(kernel_dims)),
mOutput(std::make_shared<Tensor>()) {
setDatatype(DataType::Float32);
}
@@ -78,9 +74,7 @@ public:
for (std::size_t dim = 0; dim < this->template get<MaxPoolingParam::KernelDims>().size() ; ++dim) {
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
std::floor(static_cast<float>(mInput->dims()[dim+2] -
-this->template get<MaxPoolingParam::KernelDims>()[dim] +
-this->template get<MaxPoolingParam::PaddingDims>()[dim] +
-this->template get<MaxPoolingParam::PaddingDims>()[dim+DIM]) /
+this->template get<MaxPoolingParam::KernelDims>()[dim]) /
static_cast<float>(this->template get<MaxPoolingParam::StrideDims>()[dim])));
}
outputDims[1] = mInput->dims()[1];
@@ -147,11 +141,10 @@ public:
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
-const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
-auto avgPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
+auto avgPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
return avgPool;
}
@@ -159,16 +152,15 @@ template <DimSize_t DIM>
inline std::shared_ptr<Node> MaxPooling(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
-const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
-return MaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+return MaxPooling(to_array(kernel_dims), name, stride_dims);
}
} // namespace Aidge
namespace {
template <>
-const char *const EnumStrings<Aidge::MaxPoolingParam>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
+const char *const EnumStrings<Aidge::MaxPoolingParam>::data[] = {"StrideDims", "KernelDims"};
}
#endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_PAD_H_
#define AIDGE_CORE_OPERATOR_PAD_H_
#include <array>
#include <numeric>
#include <vector>
#include <cmath>
#include <cstring> // strcmp(), used in associateInput()
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/Parameter.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
enum class PadParam { BeginEndTuples, Type, Value };
enum class PadParamType { Constant, Replicate, Reflect, Wrap };
template <DimIdx_t DIM>
class Pad_Op : public Operator,
public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
public Parameterizable<PadParam,
std::array<std::array<DimSize_t, 2>, DIM>,
PadParamType,
double> {
private:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char *Type = "Pad";
Pad_Op() = delete;
using Parameterizable_ = Parameterizable<PadParam,
std::array<std::array<DimSize_t, 2>, DIM>,
PadParamType,
double>;
template <PadParam e>
using param = typename Parameterizable_::template param<e>;
constexpr Pad_Op(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
const PadParamType &type = PadParamType::Constant,
double value = 0.0)
: Operator(Type),
Parameterizable_(param<PadParam::BeginEndTuples>(beginEndTuples),
param<PadParam::Type>(type),
param<PadParam::Value>(value)) {
setDatatype(DataType::Float32);
}
/**
* @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Pad_Op(const Pad_Op& op)
: Operator(Type),
Parameterizable_(op),
mOutput(std::make_shared<Tensor>(*op.mOutput))
{
// cpy-ctor
setDatatype(op.mOutput->dataType());
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Pad_Op
* @param op Operator to copy.
*/
Operator* clone() const override {
return new Pad_Op<DIM>(*this);
}
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 1 && "Pad operator supports only 1 input");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
constexpr void computeOutputDims() override final {
if (!mInput->empty()) {
std::array<DimSize_t, DIM + 2> outputDims = {};
for (std::size_t dim = 0; dim < DIM; ++dim) {
// Padded extent = input extent + begin padding + end padding.
outputDims[dim+2] = mInput->dims()[dim+2]
+ this->template get<PadParam::BeginEndTuples>()[dim][0]
+ this->template get<PadParam::BeginEndTuples>()[dim][1];
}
outputDims[1] = mInput->dims()[1];
outputDims[0] = mInput->dims()[0];
mOutput->resize(outputDims);
}
}
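// Worked example (editor's illustration, not part of the original header):
// an NCHW input of dims {1, 3, 32, 32} with beginEndTuples = {{1, 1}, {2, 2}}
// resizes the output to {1, 3, 32 + 1 + 1, 32 + 2 + 2} = {1, 3, 34, 36}.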
bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "Pad operator supports only 1 input");
(void) inputIdx; // avoid unused warning
return *(mInput.get());
}
inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "Pad Operators supports only 1 inputs");
(void) inputIdx; // avoid unused warning
return mInput;
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "Pad Operators has only 1 outputs");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx == 0 && "Pad operator supports only 1 input");
(void) inputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mInput);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string &name) {
mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType &datatype) {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
mInput->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Pad(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
const std::string& name = "",
const PadParamType &type = PadParamType::Constant,
double value = 0.0)
{
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, type, value), name);
return pad;
}
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, DIM> &dimBeginEnd,
const std::string& name = "",
const PadParamType &type = PadParamType::Constant,
double value = 0.0)
{
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
std::array<std::array<DimSize_t, 2>, DIM> beginEndTuples;
for (size_t i = 0; i < DIM; ++i) {
beginEndTuples[i] = {dimBeginEnd[i], dimBeginEnd[i]};
}
auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, type, value), name);
return pad;
}
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> ZeroPad(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
const std::string& name = "")
{
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadParamType::Constant, 0.0), name);
return pad;
}
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> ZeroPad(const std::array<DimSize_t, DIM> &dimBeginEnd,
const std::string& name = "")
{
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
std::array<std::array<DimSize_t, 2>, DIM> beginEndTuples;
for (size_t i = 0; i < DIM; ++i) {
beginEndTuples[i] = {dimBeginEnd[i], dimBeginEnd[i]};
}
auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadParamType::Constant, 0.0), name);
return pad;
}
template <DimSize_t DIM>
inline std::shared_ptr<Node> Pad(
std::array<DimSize_t, 2> const (&beginEndTuples)[DIM],
const std::string& name = "",
const PadParamType &type = PadParamType::Constant,
double value = 0.0)
{
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
return Pad(to_array(beginEndTuples), name, type, value);
}
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::PadParam>::data[] = {"BeginEndTuples", "Type", "Value"};
template <>
const char *const EnumStrings<Aidge::PadParamType>::data[] = {"Constant", "Replicate", "Reflect", "Wrap"};
}
#endif /* AIDGE_CORE_OPERATOR_PAD_H_ */
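A short usage sketch for the new header (factory signatures taken from this commit; treat it as illustrative, since the diff includes no caller):

    #include "aidge/operator/Pad.hpp"

    using namespace Aidge;

    // Asymmetric padding: one (begin, end) pair per spatial dimension.
    std::array<std::array<DimSize_t, 2>, 2> borders{{{0, 1}, {2, 3}}};
    auto pad1 = Pad<2>(borders, "pad1", PadParamType::Replicate);

    // Symmetric shorthand: one value per dimension, applied to both sides.
    // ZeroPad fixes the mode to PadParamType::Constant with value 0.0.
    auto pad2 = ZeroPad<2>(std::array<DimSize_t, 2>{1, 1}, "pad2");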