Commit 9a0248f6 authored by Olivier BICHLER

Unified parameters

parent ba9ba4b7
2 merge requests: !10 Unified interface for attributes, !6 Tensor setter getter
Pipeline #32211 failed
Showing 27 additions and 27 deletions
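
Each operator in the hunks below exposes its compile-time parameters through a nested Params type and a getStaticParameters() accessor, and the CPU kernels are declared against that type. A rough sketch of what such a unified interface can look like follows; only the Params and getStaticParameters() names come from this diff, the rest is illustrative:

#include <tuple>

// Illustrative only: a holder that an operator such as AvgPooling_Op<2> could
// derive from so that Op::Params and op.getStaticParameters() are available.
template <typename... PARAMS>
class StaticParamsHolder {
public:
    using Params = std::tuple<PARAMS...>;

    const Params& getStaticParameters() const { return mParams; }

protected:
    Params mParams;
};
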
@@ -29,11 +29,11 @@ namespace Aidge {
class AvgPoolingImpl2DForward_cpu
: public Registrable<AvgPoolingImpl2DForward_cpu,
std::tuple<DataType, DataType>,
-void(const AvgPooling_Op<2>::Parameters &, const std::array<DimSize_t, 4> &, const void *, void *)> {};
+void(const AvgPooling_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *, void *)> {};
class AvgPoolingImpl2DBackward_cpu
: public Registrable<AvgPoolingImpl2DBackward_cpu,
std::tuple<DataType, DataType>,
-void(const AvgPooling_Op<2>::Parameters &, const std::array<DimSize_t, 4> &, const void *, void *)> {};
+void(const AvgPooling_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *, void *)> {};
class AvgPoolingImpl2D_cpu : public OperatorImpl {
private:
......
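
For context, AvgPoolingImpl2DForward_cpu is a registry keyed by the (input, output) data types, mapping to a kernel with the signature declared above. A typed kernel is registered against that key and later retrieved in AvgPoolingImpl2D_cpu::forward(). A minimal sketch of both sides; only the Registrar<...>::create() lookup appears in this diff, the static-registration form is assumed:

// Assumed registration form: a static Registrar object binds a key to a kernel.
static Registrar<AvgPoolingImpl2DForward_cpu> registrarAvgPooling2DForwardFloat32(
        {DataType::Float32, DataType::Float32},
        Aidge::AvgPoolingImpl2D_cpu_forward_kernel<float, float>);

// Run-time lookup, keyed on the actual tensor data types (as in forward() below):
auto kernelFunc = Registrar<AvgPoolingImpl2DForward_cpu>::create(
        {mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()});
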
@@ -32,7 +32,7 @@ namespace Aidge {
* @param output_ Output Tensor.
*/
template <class I, class O>
-void AvgPoolingImpl2D_cpu_forward_kernel(const AvgPooling_Op<2>::Parameters &params,
+void AvgPoolingImpl2D_cpu_forward_kernel(const AvgPooling_Op<2>::Params &params,
const std::array<DimSize_t, 4> &dims,
const void *input_,
void *output_) {
......
@@ -29,7 +29,7 @@ namespace Aidge {
class BatchNormImpl2DForward_cpu
: public Registrable<BatchNormImpl2DForward_cpu,
std::tuple<DataType, DataType, DataType>,
-void(const BatchNorm_Op<2>::Parameters &,
+void(const BatchNorm_Op<2>::Params &,
const std::array<DimSize_t, 4> &,
const void *,
const void *,
@@ -41,7 +41,7 @@ class BatchNormImpl2DForward_cpu
class BatchNormImpl2DBackward_cpu
: public Registrable<BatchNormImpl2DBackward_cpu,
std::tuple<DataType, DataType, DataType>,
-void(const BatchNorm_Op<2>::Parameters &,
+void(const BatchNorm_Op<2>::Params &,
const std::array<DimSize_t, 4> &,
const void *,
const void *,
......
@@ -37,7 +37,7 @@ namespace Aidge {
* @param output_ Output Tensor.
*/
template <class I, class P, class O>
-void BatchNormImpl2D_cpu_forward_kernel(const BatchNorm_Op<2>::Parameters &params, const std::array<DimSize_t, 4> &dims,
+void BatchNormImpl2D_cpu_forward_kernel(const BatchNorm_Op<2>::Params &params, const std::array<DimSize_t, 4> &dims,
const void *input_, const void *scale_, const void *shift_, void *batchMean_, void *batchVar_, void *output_, const bool freeze) {
// FIXME: missing convolution parameters as arguments
const I *input = static_cast<const I *>(input_);
......
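
The batch-norm kernel above receives the input, scale, shift, and running statistics as untyped pointers plus the Params pack. Per element, the frozen-statistics (inference) computation is the usual normalization; a minimal sketch, assuming the epsilon value is read from params (not shown in this diff):

#include <cmath>

// Illustrative per-element batch normalization (inference / frozen statistics).
template <class I, class P, class O>
O batchNormPoint(I x, P scale, P shift, P mean, P var, double epsilon) {
    return static_cast<O>(scale * (x - mean) / std::sqrt(var + epsilon) + shift);
}
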
@@ -29,12 +29,12 @@ namespace Aidge {
class ConvDepthWiseImpl2DForward_cpu
: public Registrable<ConvDepthWiseImpl2DForward_cpu,
std::tuple<DataType, DataType, DataType, DataType>,
-void(const ConvDepthWise_Op<2>::Parameters &, const std::array<DimSize_t, 4> &, const void *,
+void(const ConvDepthWise_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *,
const void *, const void *, void *)> {};
class ConvDepthWiseImpl2DBackward_cpu
: public Registrable<ConvDepthWiseImpl2DBackward_cpu,
std::tuple<DataType, DataType, DataType, DataType>,
-void(const ConvDepthWise_Op<2>::Parameters &, const std::array<DimSize_t, 4> &, const void *,
+void(const ConvDepthWise_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *,
const void *, const void *, void *)> {};
class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
......
@@ -35,7 +35,7 @@ namespace Aidge {
* @param output_ Output Tensor.
*/
template <class I, class W, class B, class O>
-void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Parameters &params, const std::array<DimSize_t, 4> &dims,
+void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Params &params, const std::array<DimSize_t, 4> &dims,
const void *input_, const void *weights_, const void *biases_, void *output_) {
// FIXME: missing convolution parameters as arguments
const I *input = static_cast<const I *>(input_);
......
@@ -29,12 +29,12 @@ namespace Aidge {
class ConvImpl2DForward_cpu
: public Registrable<ConvImpl2DForward_cpu,
std::tuple<DataType, DataType, DataType, DataType>,
-void(const Conv_Op<2>::Parameters &, const std::array<DimSize_t, 4> &, const void *,
+void(const Conv_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *,
const void *, const void *, void *)> {};
class ConvImpl2DBackward_cpu
: public Registrable<ConvImpl2DBackward_cpu,
std::tuple<DataType, DataType, DataType, DataType>,
-void(const Conv_Op<2>::Parameters &, const std::array<DimSize_t, 4> &, const void *,
+void(const Conv_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *,
const void *, const void *, void *)> {};
class ConvImpl2D_cpu : public OperatorImpl {
......
@@ -35,7 +35,7 @@ namespace Aidge {
* @param output_ Output Tensor.
*/
template <class I, class W, class B, class O>
-void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Parameters &params, const std::array<DimSize_t, 4> &dims,
+void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Params &params, const std::array<DimSize_t, 4> &dims,
const void *input_, const void *weights_, const void *biases_, void *output_) {
// FIXME: missing convolution parameters as arguments
const I *input = static_cast<const I *>(input_);
......
@@ -26,11 +26,11 @@ namespace Aidge {
// compute kernel registry for forward and backward
class FCImplForward_cpu : public Registrable<FCImplForward_cpu,
std::tuple<DataType, DataType, DataType, DataType>,
-void(const FC_Op::Parameters &, const DimSize_t, const DimSize_t,
+void(const FC_Op::Params &, const DimSize_t, const DimSize_t,
const void *, const void *, const void *, void *)> {};
class FCImplBackward_cpu : public Registrable<FCImplBackward_cpu,
std::tuple<DataType, DataType, DataType, DataType>,
-void(const FC_Op::Parameters &, const DimSize_t, const DimSize_t,
+void(const FC_Op::Params &, const DimSize_t, const DimSize_t,
const void *, const void *, const void *, void *)> {};
class FCImpl_cpu : public OperatorImpl {
......
@@ -19,7 +19,7 @@
namespace Aidge {
// template <class I, class W, class B, class O>
-// void FCImpl_cpu_forward_kernel(const FC_Op::Parameters& params, const std::array<DimSize_t, 4>& dims,
+// void FCImpl_cpu_forward_kernel(const FC_Op::Params& params, const std::array<DimSize_t, 4>& dims,
// const void* input_, const void* weights_, const void* biases_, void* output_) {
// // FIXME: missing FC parameters as arguments
// const I* input = static_cast<const I*>(input_);
@@ -53,7 +53,7 @@ namespace Aidge {
// }
// template <class I, class W, class B, class O>
-// void FCImpl_cpu_forward_kernel(const FC_Op::Parameters& params, const std::array<DimSize_t, 2>& dims,
+// void FCImpl_cpu_forward_kernel(const FC_Op::Params& params, const std::array<DimSize_t, 2>& dims,
// const void* input_, const void* weights_, const void* biases_, void* output_) {
// // FIXME: missing FC parameters as arguments
// const I* input = static_cast<const I*>(input_);
@@ -83,7 +83,7 @@ namespace Aidge {
// }
template <class I, class W, class B, class O>
-void FCImpl_cpu_forward_kernel(const FC_Op::Parameters& params, const DimSize_t batchSize, const DimSize_t oneInputSize,
+void FCImpl_cpu_forward_kernel(const FC_Op::Params& params, const DimSize_t batchSize, const DimSize_t oneInputSize,
const void* input_, const void* weights_, const void* biases_, void* output_) {
// FIXME: missing FC parameters as arguments
const I* input = static_cast<const I*>(input_);
......
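
The FC kernel above is called with the batch size, the flattened per-sample input size, and raw pointers for input, weights, and bias. A naive sketch of what such a kernel body can look like; the assumption that the first Params entry holds the number of output channels, and that bias is always present, is for illustration only:

#include <cstddef>
#include <tuple>

// Illustrative dense forward pass: output[b, o] = bias[o] + sum_i W[o, i] * x[b, i].
template <class I, class W, class B, class O>
void FCImpl_cpu_forward_kernel_sketch(const FC_Op::Params& params,
                                      const DimSize_t batchSize,
                                      const DimSize_t oneInputSize,
                                      const void* input_, const void* weights_,
                                      const void* biases_, void* output_) {
    const I* input   = static_cast<const I*>(input_);
    const W* weights = static_cast<const W*>(weights_);
    const B* biases  = static_cast<const B*>(biases_);
    O* output        = static_cast<O*>(output_);

    // Assumed for this sketch: the first Params entry is the output channel count.
    const DimSize_t outSize = std::get<0>(params);

    for (DimSize_t b = 0; b < batchSize; ++b) {
        for (DimSize_t o = 0; o < outSize; ++o) {
            O acc = static_cast<O>(biases[o]);  // bias assumed present
            for (DimSize_t i = 0; i < oneInputSize; ++i) {
                acc += static_cast<O>(weights[o * oneInputSize + i]) * static_cast<O>(input[b * oneInputSize + i]);
            }
            output[b * outSize + o] = acc;
        }
    }
}
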
@@ -24,10 +24,10 @@ namespace Aidge {
// compute kernel registry for forward and backward
class LeakyReLUImplForward_cpu
-: public Registrable<LeakyReLUImplForward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Parameters&, std::size_t, const void*, void*)> {
+: public Registrable<LeakyReLUImplForward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Params&, std::size_t, const void*, void*)> {
};
class LeakyReLUImplBackward_cpu
-: public Registrable<LeakyReLUImplBackward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Parameters&, std::size_t, const void*, void*)> {
+: public Registrable<LeakyReLUImplBackward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Params&, std::size_t, const void*, void*)> {
};
class LeakyReLUImpl_cpu : public OperatorImpl {
......
@@ -18,7 +18,7 @@
namespace Aidge {
template <class I, class O>
-void LeakyReLUImpl_cpu_forward_kernel(const LeakyReLU_Op::Parameters& params,
+void LeakyReLUImpl_cpu_forward_kernel(const LeakyReLU_Op::Params& params,
std::size_t inputLenght,
const void* input_,
void* output_) {
......
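
The LeakyReLU kernel is the simplest of the group: it only needs the element count, the two raw pointers, and the negative slope carried in Params. A minimal sketch; treating the first Params entry as the negative slope is an assumption for the example:

#include <cstddef>
#include <tuple>

// Illustrative element-wise LeakyReLU: y = x if x >= 0, else negativeSlope * x.
template <class I, class O>
void LeakyReLUImpl_cpu_forward_kernel_sketch(const LeakyReLU_Op::Params& params,
                                             std::size_t inputLength,
                                             const void* input_, void* output_) {
    const I* input = static_cast<const I*>(input_);
    O* output      = static_cast<O*>(output_);
    const I negativeSlope = static_cast<I>(std::get<0>(params));  // assumed index

    for (std::size_t i = 0; i < inputLength; ++i) {
        output[i] = static_cast<O>(input[i] >= I(0) ? input[i] : negativeSlope * input[i]);
    }
}
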
@@ -70,7 +70,7 @@ void Aidge::AvgPoolingImpl2D_cpu::forward() {
Registrar<AvgPoolingImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()});
// Call kernel
-kernelFunc(mOp.getParams(),
+kernelFunc(mOp.getStaticParameters(),
mOp.getInput(0)->dims<4>(),
mOp.getInput(0)->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
......
@@ -76,7 +76,7 @@ void Aidge::BatchNormImpl2D_cpu::forward() {
mOp.getOutput(0)->dataType()});
// Call kernel
-kernelFunc(mOp.getParams(),
+kernelFunc(mOp.getStaticParameters(),
mOp.getInput(0)->dims<4>(),
mOp.getInput(0)->getImpl()->rawPtr(),
mOp.getInput(1)->getImpl()->rawPtr(),
......
@@ -77,7 +77,7 @@ void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
mOp.getInput(2)->dataType(), mOp.getOutput(0)->dataType()});
// Call kernel
-kernelFunc(mOp.getParams(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
+kernelFunc(mOp.getStaticParameters(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
}
......
@@ -75,7 +75,7 @@ void Aidge::ConvImpl2D_cpu::forward() {
mOp.getInput(2)->dataType(), mOp.getOutput(0)->dataType()});
// Call kernel
-kernelFunc(mOp.getParams(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
+kernelFunc(mOp.getStaticParameters(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
......
@@ -98,7 +98,7 @@ void Aidge::FCImpl_cpu::forward()
// Call kernel
// if (mOp.getInput(0)->nbDims() == 4) {
// kernelFunc(
-// mOp.getParams(),
+// mOp.getStaticParameters(),
// std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
// mOp.getInput(0)->getImpl()->rawPtr(),
// mOp.mInputs[1]->getImpl()->rawPtr(),
@@ -107,7 +107,7 @@ void Aidge::FCImpl_cpu::forward()
// }
// else
kernelFunc(
-mOp.getParams(),
+mOp.getStaticParameters(),
mOp.getInput(0)->dims()[0],
mOp.getInput(0)->sizeM1(),
mOp.getInput(0)->getImpl()->rawPtr(),
......
@@ -65,7 +65,7 @@ void Aidge::LeakyReLUImpl_cpu::forward() {
mOp.getOutput(0)->dataType()});
// Call kernel
-kernelFunc(mOp.getParams(),
+kernelFunc(mOp.getStaticParameters(),
std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
mOp.getInput(0)->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
......