Skip to content
Snippets Groups Projects
Commit 05be732d authored by Grégoire Kubler's avatar Grégoire Kubler
Browse files

feat : [ADD] convtranspose forward 1D & 2D

parent f6266ac0
No related branches found
No related tags found
No related merge requests found
This commit is part of merge request !142. Comments created here will be created in the context of that merge request.
......@@ -27,6 +27,7 @@
#include "aidge/backend/cpu/operator/ClipImpl.hpp"
#include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
#include "aidge/backend/cpu/operator/ConvImpl.hpp"
#include "aidge/backend/cpu/operator/ConvTransposeImpl.hpp"
#include "aidge/backend/cpu/operator/ConstantOfShapeImpl.hpp"
#include "aidge/backend/cpu/operator/DivImpl.hpp"
#include "aidge/backend/cpu/operator/EqualImpl.hpp"
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_CONVTRANSPOSEIMPL_H_
#define AIDGE_CPU_OPERATOR_CONVTRANSPOSEIMPL_H_
#include <array>
#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
#include "aidge/operator/ConvTranspose.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
using std::array;
// Operator implementation entry point for the backend
using ConvTranspose1D_Op = ConvTranspose_Op<1>;
// 1D kernel signature:
//   (stride, dilation, kernelDim, inputDims, outputDims,
//    input, weights, biases, output)
// with 3-D dim arrays laid out {batch, channels, width}.
using ConvTransposeImpl1D_cpu =
OperatorImpl_cpu<ConvTranspose1D_Op,
void(const DimSize_t &,
const DimSize_t &,
const DimSize_t &,
const array<DimSize_t, 3> &,
const array<DimSize_t, 3> &,
const void *,
const void *,
const void *,
void *)>;
using ConvTranspose2D_Op = ConvTranspose_Op<2>;
// 2D kernel signature:
//   (strides, dilations, kernelDims, inputDims, outputDims,
//    input, weights, biases, output)
// with 4-D dim arrays laid out {batch, channels, height, width}.
using ConvTransposeImpl2D_cpu =
OperatorImpl_cpu<ConvTranspose2D_Op,
void(const array<DimSize_t, 2> &,
const array<DimSize_t, 2> &,
const array<DimSize_t, 2> &,
const array<DimSize_t, 4> &,
const array<DimSize_t, 4> &,
const void *,
const void *,
const void *,
void *)>;
// Implementation entry point registration to Operator
REGISTRAR(ConvTranspose1D_Op, "cpu", ConvTransposeImpl1D_cpu::create);
REGISTRAR(ConvTranspose2D_Op, "cpu", ConvTransposeImpl2D_cpu::create);
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_CONVTRANSPOSEIMPL_H_ */
/********************************************************************************
* Copyright (c) 2025 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_CONVTRANSPOSEIMPL_KERNELS_H_
#define AIDGE_CPU_OPERATOR_CONVTRANSPOSEIMPL_KERNELS_H_
#include <array>
#include "aidge/backend/cpu/operator/ConvTransposeImpl.hpp"
#include "aidge/utils/Registrar.hpp"
#include <aidge/backend/cpu/operator/ConvImpl_kernels.hpp>
#include <aidge/data/Data.hpp>
#include <aidge/data/half.hpp>
#include <aidge/scheduler/ProdConso.hpp>
#include <aidge/utils/Types.h>
namespace Aidge {
using std::array;
////////////////////////////////////////////////////////
////////////////////////////////////////////////////////
// 1D
////////////////////////////////////////////////////////
////////////////////////////////////////////////////////
/**
* @brief performs forward bias operation for convtranspose operator
*
* @tparam B Bias data type.
* @tparam O Output data type.
* @param[in] bias bias values
* @param[in] oDims dimensions of the output
* @param[in] oStrides nb of elements contained per dimension of the output
* @param[out] output
*/
template <class B, class O>
static void convTranspose1DForwardBias(const B *biases,
const array<DimSize_t, 3> &oDims,
const array<DimSize_t, 2> &oStrides,
O *output) {
array<DimSize_t, 2> outOffsets{0, 0};
for (DimSize_t batch = 0; batch < oDims[0]; ++batch) {
outOffsets[0] = batch * oStrides[0];
for (DimSize_t outCh = 0; outCh < oDims[1]; ++outCh) {
outOffsets[1] = outCh * oStrides[1] + outOffsets[0];
// If bias = nullptr, set B(0)
B biasVal = (biases != nullptr) ? biases[outCh] : B(0);
std::fill(output + outOffsets[1],
output + (outOffsets[1] + oDims[2]),
biasVal);
}
}
}
/**
 * @brief forward kernel for convtranspose (1D)
 * @note ConvTranspose forward is simply the convolution backward-input
 * kernel. Check the convolution functions (ConvImpl_kernels.hpp) for more
 * in-depth details on how the subfunctions are built.
 * @tparam I Input data type.
 * @tparam W Weight data type.
 * @tparam B Bias data type.
 * @tparam O Output data type.
 * @param[in] stride stride parameter of the convTranspose operator
 * @param[in] dilation dilation parameter of the convTranspose operator
 * @param[in] kernelDim size of the single spatial kernel dimension
 * @param[in] inputDims input dimensions {batch, inChannels, width}
 * @param[in] outputDims output tensor dimensions {batch, outChannels, width}
 * @param[in] input_ input values
 * @param[in] weights_ weight values
 * @param[in] biases_ bias values; may be nullptr (no bias)
 * @param[out] output_ result buffer
 */
template <class I, class W, class B, class O>
void ConvTransposeImpl1D_cpu_forward_kernel(
const DimSize_t &stride,
const DimSize_t &dilation,
const DimSize_t &kernelDim,
const array<DimSize_t, 3> &inputDims,
const array<DimSize_t, 3> &outputDims,
const void *input_,
const void *weights_,
const void *biases_,
void *output_) {
const I *input = static_cast<const I *>(input_);
const W *weights = static_cast<const W *>(weights_);
O *output = static_cast<O *>(output_);
// {batch_stride, channel_stride} (innermost width stride is implicitly 1)
const array<DimSize_t, 2> inputStrides{inputDims[1] * inputDims[2],
inputDims[2]};
// {batch_stride, channel_stride}
const array<DimSize_t, 2> outputStrides{outputDims[1] * outputDims[2],
outputDims[2]};
// NOTE: kernel dims = {inChannels, outChannels, kernelDim}, hence
// strides = {outChannels * kernelDim, kernelDim}
const array<DimSize_t, 2> kernelStrides{
outputDims[1] * kernelDim,
kernelDim,
};
// Pre-fill the output with the bias so the scatter pass accumulates on it.
// NOTE(review): when biases_ is nullptr the output buffer is NOT touched
// here -- this assumes the buffer is already zero-initialized or that
// conv1DBackwardInput overwrites rather than accumulates. TODO confirm.
if (biases_ != nullptr) {
const B *biases = static_cast<const B *>(biases_);
convTranspose1DForwardBias(biases, outputDims, outputStrides, output);
}
conv1DBackwardInput(stride,
dilation,
kernelDim,
kernelStrides,
weights,
inputDims,
inputStrides,
input,
outputDims,
outputStrides,
output);
}
// Kernel registrations for the 1D ConvTranspose: one entry per supported
// output data type (Int32 / Float16 / Float32 / Float64). The input spec
// accepts any type; data is cast to the output type before the kernel runs.
// No backward kernel is registered (nullptr).
REGISTRAR(ConvTransposeImpl1D_cpu,
{{DataType::Any, DataFormat::NCHW},
{DataType::Int32, DataFormat::NCHW}},
{ProdConso::inPlaceModel,
ConvTransposeImpl1D_cpu_forward_kernel<std::int32_t,
std::int32_t,
std::int32_t,
std::int32_t>,
nullptr});
REGISTRAR(ConvTransposeImpl1D_cpu,
{{DataType::Any, DataFormat::NCHW},
{DataType::Float32, DataFormat::NCHW}},
{ProdConso::inPlaceModel,
ConvTransposeImpl1D_cpu_forward_kernel<float, float, float, float>,
nullptr});
REGISTRAR(ConvTransposeImpl1D_cpu,
{{DataType::Any, DataFormat::NCHW},
{DataType::Float16, DataFormat::NCHW}},
{ProdConso::inPlaceModel,
ConvTransposeImpl1D_cpu_forward_kernel<half_float::half,
half_float::half,
half_float::half,
half_float::half>,
nullptr});
REGISTRAR(
ConvTransposeImpl1D_cpu,
{{DataType::Any, DataFormat::NCHW}, {DataType::Float64, DataFormat::NCHW}},
{ProdConso::inPlaceModel,
ConvTransposeImpl1D_cpu_forward_kernel<double, double, double, double>,
nullptr});
////////////////////////////////////////////////////////
////////////////////////////////////////////////////////
// 2D
////////////////////////////////////////////////////////
////////////////////////////////////////////////////////
/**
* @brief performs forward bias operation for convtranspose operator
*
* @tparam B Bias data type.
* @tparam O Output data type.
* @param[in] bias bias values
* @param[in] oDims dimensions of the output
* @param[in] oStrides nb of elements contained per dimension of the output
* @param[out] output
*/
template <class B, class O>
static void convTranspose2DForwardBias(const B *biases,
const array<DimSize_t, 4> &oDims,
const array<DimSize_t, 3> &oStrides,
O *output) {
array<DimSize_t, 2> outOffsets{0, 0};
for (DimSize_t batch = 0; batch < oDims[0]; ++batch) {
outOffsets[0] = batch * oStrides[0];
for (DimSize_t outCh = 0; outCh < oDims[1]; ++outCh) {
outOffsets[1] = outCh * oStrides[1] + outOffsets[0];
// If bias = nullptr, set B(0)
B biasVal = (biases != nullptr) ? biases[outCh] : B(0);
std::fill(output + outOffsets[1],
(output + outOffsets[1]) + oStrides[1],
biasVal);
}
}
}
/**
 * @brief forward kernel for convtranspose (2D)
 * @note ConvTranspose forward is simply the convolution backward-input
 * kernel. Check the convolution functions (ConvImpl_kernels.hpp) for more
 * in-depth details on how the subfunctions are built.
 * @tparam I Input data type.
 * @tparam W Weight data type.
 * @tparam B Bias data type.
 * @tparam O Output data type.
 * @param[in] stride stride parameters of the convTranspose operator
 * @param[in] dilation dilation parameters of the convTranspose operator
 * @param[in] kernelDims spatial kernel dimensions {height, width}
 * @param[in] inputDims input dimensions {batch, inChannels, height, width}
 * @param[in] outputDims output dimensions {batch, outChannels, height, width}
 * @param[in] input_ input values
 * @param[in] weights_ weight values
 * @param[in] biases_ bias values; may be nullptr (no bias)
 * @param[out] output_ result buffer
 */
template <class I, class W, class B, class O>
void ConvTransposeImpl2D_cpu_forward_kernel(
const array<DimSize_t, 2> &stride,
const array<DimSize_t, 2> &dilation,
const array<DimSize_t, 2> &kernelDims,
const array<DimSize_t, 4> &inputDims,
const array<DimSize_t, 4> &outputDims,
const void *input_,
const void *weights_,
const void *biases_,
void *output_) {
auto input = static_cast<const I *>(input_);
auto weights = static_cast<const W *>(weights_);
auto output = static_cast<O *>(output_);
// {batch_stride, channel_stride, row_stride} (innermost stride is 1)
const array<DimSize_t, 3> inputStrides{
inputDims[1] * inputDims[2] * inputDims[3],
inputDims[2] * inputDims[3],
inputDims[3]};
// {batch_stride, channel_stride, row_stride}
const array<DimSize_t, 3> outputStrides{
outputDims[1] * outputDims[2] * outputDims[3],
outputDims[2] * outputDims[3],
outputDims[3]};
// NOTE: kernel dims = {inChannels, outChannels, kernelDims[0], kernelDims[1]},
// hence strides = {outChannels * kH * kW, kH * kW, kW}
const array<DimSize_t, 3> kernelStrides{
outputDims[1] * kernelDims[0] * kernelDims[1],
kernelDims[0] * kernelDims[1],
kernelDims[1],
};
// Pre-fill the output with the bias so the scatter pass accumulates on it.
// NOTE(review): when biases_ is nullptr the output buffer is NOT touched
// here -- this assumes the buffer is already zero-initialized or that
// conv2DBackwardInput overwrites rather than accumulates. TODO confirm.
if (biases_ != nullptr) {
auto biases = static_cast<const B *>(biases_);
convTranspose2DForwardBias(biases, outputDims, outputStrides, output);
}
conv2DBackwardInput(stride,
dilation,
kernelDims,
kernelStrides,
weights,
inputDims,
inputStrides,
input,
outputDims,
outputStrides,
output);
}
// Kernel registrations for the 2D ConvTranspose: one entry per supported
// output data type (Int32 / Float16 / Float32 / Float64). The input spec
// accepts any type; data is cast to the output type before the kernel runs.
// No backward kernel is registered (nullptr).
REGISTRAR(ConvTransposeImpl2D_cpu,
{{DataType::Any, DataFormat::NCHW},
{DataType::Int32, DataFormat::NCHW}},
{ProdConso::inPlaceModel,
ConvTransposeImpl2D_cpu_forward_kernel<std::int32_t,
std::int32_t,
std::int32_t,
std::int32_t>,
nullptr});
REGISTRAR(ConvTransposeImpl2D_cpu,
{{DataType::Any, DataFormat::NCHW},
{DataType::Float16, DataFormat::NCHW}},
{ProdConso::inPlaceModel,
ConvTransposeImpl2D_cpu_forward_kernel<half_float::half,
half_float::half,
half_float::half,
half_float::half>,
nullptr});
REGISTRAR(ConvTransposeImpl2D_cpu,
{{DataType::Any, DataFormat::NCHW},
{DataType::Float32, DataFormat::NCHW}},
{ProdConso::inPlaceModel,
ConvTransposeImpl2D_cpu_forward_kernel<float, float, float, float>,
nullptr});
REGISTRAR(
ConvTransposeImpl2D_cpu,
{{DataType::Any, DataFormat::NCHW}, {DataType::Float64, DataFormat::NCHW}},
{ProdConso::inPlaceModel,
ConvTransposeImpl2D_cpu_forward_kernel<double, double, double, double>,
nullptr});
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_CONVTRANSPOSEIMPL_KERNELS_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/backend/cpu/operator/ConvTransposeImpl.hpp"
#include "aidge/backend/cpu/operator/ConvTransposeImpl_kernels.hpp"
#include "aidge/operator/Conv.hpp"
template <> void Aidge::ConvTransposeImpl1D_cpu::forward() {
    const auto &op = static_cast<const ConvTranspose_Op<1> &>(mOp);
    // Input #0 is data and #1 is the weight (mandatory). Input #2 is the
    // optional bias: the code below explicitly supports its absence, so it
    // must not be hard-asserted here. (The previous asserts both mislabeled
    // input #1 as "bias" and rejected valid bias-less graphs.)
    AIDGE_ASSERT(op.getInput(0), "{}: missing data input (#0).", op.type());
    AIDGE_ASSERT(op.getInput(1), "{}: missing weight input (#1).", op.type());

    std::shared_ptr<Tensor> inputDataFallback, inputWeightFallback,
        inputBiasFallback;
    // Cast/copy each input to the output's backend & data type when needed.
    const auto &inputData =
        op.getInput(0)->refCastFrom(inputDataFallback, *op.getOutput(0));
    const auto &inputWeight =
        op.getInput(1)->refCastFrom(inputWeightFallback, *op.getOutput(0));
    const auto &inputBias =
        (op.getInput(2))
            ? op.getInput(2)->refCastFrom(inputBiasFallback, *op.getOutput(0))
            : Tensor();

    // Select the best-matching registered kernel and call it.
    const auto impl = Registrar<ConvTransposeImpl1D_cpu>::create(
        getBestMatch(getRequiredSpec()));
    impl.forward(op.strideDims()[0],
                 op.dilationDims()[0],
                 op.kernelDims()[0],
                 op.getInput(0)->template dims<3>(),
                 op.getOutput(0)->template dims<3>(),
                 inputData.getImpl()->hostPtr(),
                 inputWeight.getImpl()->hostPtr(),
                 op.getInput(2) ? inputBias.getImpl()->hostPtr() : nullptr,
                 op.getOutput(0)->getImpl()->rawPtr());
}
template <> void Aidge::ConvTransposeImpl1D_cpu::backward() {
    // Not implemented yet: fail loudly instead of silently doing nothing.
    // (Message fixed: it previously named Conv_Op<1> instead of
    // ConvTranspose_Op<1>.)
    AIDGE_THROW_OR_ABORT(
        std::runtime_error,
        "Backward not yet implemented for ConvTranspose_Op<1> on backend cpu");
}
template <> void Aidge::ConvTransposeImpl2D_cpu::forward() {
    const auto &op = static_cast<const ConvTranspose_Op<2> &>(mOp);
    // Input #0 is data and #1 is the weight (mandatory). Input #2 is the
    // optional bias: the code below explicitly supports its absence, so it
    // must not be hard-asserted here. (The previous asserts both mislabeled
    // input #1 as "bias" and rejected valid bias-less graphs.)
    AIDGE_ASSERT(op.getInput(0), "{}: missing data input (#0).", op.type());
    AIDGE_ASSERT(op.getInput(1), "{}: missing weight input (#1).", op.type());

    std::shared_ptr<Tensor> inputDataFallback, inputWeightFallback,
        inputBiasFallback;
    // Cast/copy each input to the output's backend & data type when needed.
    const auto &inputData =
        op.getInput(0)->refCastFrom(inputDataFallback, *op.getOutput(0));
    const auto &inputWeight =
        op.getInput(1)->refCastFrom(inputWeightFallback, *op.getOutput(0));
    const auto &inputBias =
        (op.getInput(2))
            ? op.getInput(2)->refCastFrom(inputBiasFallback, *op.getOutput(0))
            : Tensor();

    // Select the best-matching registered kernel and call it.
    const auto impl = Registrar<ConvTransposeImpl2D_cpu>::create(
        getBestMatch(getRequiredSpec()));
    impl.forward(op.strideDims(),
                 op.dilationDims(),
                 op.kernelDims(),
                 op.getInput(0)->template dims<4>(),
                 op.getOutput(0)->template dims<4>(),
                 inputData.getImpl()->hostPtr(),
                 inputWeight.getImpl()->hostPtr(),
                 op.getInput(2) ? inputBias.getImpl()->hostPtr() : nullptr,
                 op.getOutput(0)->getImpl()->rawPtr());
}
template <> void Aidge::ConvTransposeImpl2D_cpu::backward() {
    // Not implemented yet: fail loudly instead of silently doing nothing.
    // (Message fixed: it previously named Conv_Op<2> instead of
    // ConvTranspose_Op<2>.)
    AIDGE_THROW_OR_ABORT(
        std::runtime_error,
        "Backward not yet implemented for ConvTranspose_Op<2> on backend cpu");
}
This diff is collapsed.
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment