Commit e7a524b0 authored by Olivier BICHLER

Added default implementation for several operators

parent 76ed60c4
2 merge requests: !61 0.2.2, !54 Make forwardDims() optional and handle data dependency & moved several operators impl to core
Pipeline #43447 failed
Showing 2 additions and 548 deletions
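This commit drops the CPU-specific implementations of Concat, Memorize, Pop and Reshape because aidge_core now provides default implementations for them. For orientation only, here is a minimal sketch of what such a core-side default implementation could look like; the class name Concat_OpImpl and its wiring are assumptions for illustration, not code from this commit (only OperatorImpl and Concat_Op appear in the diff below).

// Hypothetical sketch: Concat_OpImpl and its constructor wiring are assumptions,
// not code from this commit.
#include <string>

#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Concat.hpp"

namespace Aidge {
// A backend-agnostic implementation living in core: forward() is written once against
// the generic Tensor API, so aidge_backend_cpu no longer needs its own ConcatImpl_cpu.
class Concat_OpImpl : public OperatorImpl {
public:
    Concat_OpImpl(const Operator& op, const std::string& backend = "")
        : OperatorImpl(op, backend) {}
    void forward() override;  // generic element copy along the concatenation axis
};
} // namespace Aidge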
@@ -16,7 +16,6 @@
 #include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp"
 #include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp"
 #include "aidge/backend/cpu/operator/BatchNormImpl.hpp"
-#include "aidge/backend/cpu/operator/ConcatImpl.hpp"
 #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
 #include "aidge/backend/cpu/operator/ConvImpl.hpp"
 #include "aidge/backend/cpu/operator/DivImpl.hpp"
@@ -26,14 +25,11 @@
 #include "aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp"
 #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
-#include "aidge/backend/cpu/operator/MemorizeImpl.hpp"
 #include "aidge/backend/cpu/operator/MulImpl.hpp"
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
-#include "aidge/backend/cpu/operator/PopImpl.hpp"
 #include "aidge/backend/cpu/operator/PowImpl.hpp"
 #include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
-#include "aidge/backend/cpu/operator/ReshapeImpl.hpp"
 #include "aidge/backend/cpu/operator/ScalingImpl.hpp"
 #include "aidge/backend/cpu/operator/SigmoidImpl.hpp"
 #include "aidge/backend/cpu/operator/SliceImpl.hpp"
...
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_CONCATIMPL_H_
#define AIDGE_CPU_OPERATOR_CONCATIMPL_H_
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Concat.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include <memory>
#include <vector>
namespace Aidge {
// class Concat_Op<2>;
// compute kernel registry for forward and backward
class ConcatImplForward_cpu
    : public Registrable<ConcatImplForward_cpu, std::tuple<DataType, DataType>,
                         void(const Concat_Op::Attrs&,
                              const std::vector<DimSize_t>,
                              const std::vector<DimSize_t>&,
                              const std::vector<const void*>,
                              void*)> {};

class ConcatImplBackward_cpu
    : public Registrable<ConcatImplBackward_cpu, std::tuple<DataType, DataType>,
                         void(const Concat_Op::Attrs&,
                              const std::vector<DimSize_t>,
                              const std::vector<DimSize_t>&,
                              const std::vector<const void*>,
                              void*)> {};

class ConcatImpl_cpu : public OperatorImpl {
public:
    ConcatImpl_cpu(const Concat_Op& op) : OperatorImpl(op, "cpu") {}

    static std::unique_ptr<ConcatImpl_cpu> create(const Concat_Op& op) {
        return std::make_unique<ConcatImpl_cpu>(op);
    }

public:
    void forward() override;

    void backward() override;
};
namespace {
static Registrar<Concat_Op> registrarConcatImpl_cpu("cpu", Aidge::ConcatImpl_cpu::create);
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_CONCATIMPL_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_CONCATIMPL_FORWARD_KERNEL_H_
#define AIDGE_CPU_OPERATOR_CONCATIMPL_FORWARD_KERNEL_H_
#include <algorithm>
#include <numeric>
#include <cstddef>
#include <vector>
#include "aidge/backend/cpu/operator/ConcatImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/operator/Concat.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
namespace Aidge {
template <class I, class O>
void ConcatImpl_cpu_forward_kernel(const Concat_Op::Attrs& attrs,
                                   const std::vector<DimSize_t>& dimsFirstInput,
                                   const std::vector<DimSize_t>& concatAxisValues,
                                   const std::vector<const void*>& inputs_,
                                   void* output_)
{
    // FIXME: missing Concat attributes as arguments
    std::vector<const I*> inputs;
    for (const auto& input_ : inputs_) {
        inputs.push_back(static_cast<const I*>(input_));
    }
    O* output = static_cast<O*>(output_);

    DimSize_t outputAxisValue = std::accumulate(concatAxisValues.begin(), concatAxisValues.end(), 0);

    DimSize_t prodDimLower = 1;
    for (DimIdx_t i = 0; i < std::get<0>(attrs); ++i) {
        prodDimLower *= dimsFirstInput[i];
    }
    DimSize_t prodDimHigher = 1;
    for (DimIdx_t i = std::get<0>(attrs) + 1; static_cast<std::size_t>(i) < dimsFirstInput.size(); ++i) {
        prodDimHigher *= dimsFirstInput[i];
    }

    std::size_t oIndexStart = 0;
    std::size_t oIndex = 0;
    for (std::size_t inputId = 0; inputId < inputs.size(); ++inputId) {
        oIndex = oIndexStart;
        const DimSize_t iOffset = prodDimHigher*concatAxisValues[inputId];
        for (std::size_t iIndex = 0; iIndex < prodDimLower; ++iIndex) {
            std::copy(inputs[inputId] + iIndex*iOffset, inputs[inputId] + (iIndex+1)*iOffset, output + oIndex);
            oIndex += prodDimHigher*outputAxisValue;
        }
        oIndexStart += concatAxisValues[inputId]*prodDimHigher;
    }
}
namespace {
static Registrar<ConcatImplForward_cpu> registrarConcatImplForward_cpu_Float32(
{DataType::Float32, DataType::Float32}, Aidge::ConcatImpl_cpu_forward_kernel<float, float>);
static Registrar<ConcatImplForward_cpu> registrarConcatImplForward_cpu_Int32(
{DataType::Int32, DataType::Int32}, Aidge::ConcatImpl_cpu_forward_kernel<int, int>);
static Registrar<ConcatImplForward_cpu> registrarConcatImplForward_cpu_Float64(
{DataType::Float64, DataType::Float64},
Aidge::ConcatImpl_cpu_forward_kernel<double, double>);
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_CONCATIMPL_FORWARD_KERNEL_H_ */
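To make the indexing in ConcatImpl_cpu_forward_kernel easier to follow, here is a standalone, dependency-free rewrite of the same double loop (not part of the commit) for two inputs of shape (2, 2, 4) and (2, 3, 4) concatenated along axis 1, so prodDimLower = 2, prodDimHigher = 4 and outputAxisValue = 5:

// Standalone illustration of the indexing used by ConcatImpl_cpu_forward_kernel above;
// plain standard C++ only, no Aidge types.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
    const std::size_t axis = 1;
    const std::vector<std::size_t> dimsFirstInput{2, 2, 4};  // shape of input #0
    const std::vector<std::size_t> concatAxisValues{2, 3};   // axis-1 extent of each input

    // Distinct fill values so the interleaving is visible in the output.
    std::vector<float> in0(2 * 2 * 4, 0.f), in1(2 * 3 * 4, 1.f);
    const std::vector<const float*> inputs{in0.data(), in1.data()};

    const std::size_t outputAxisValue =
        std::accumulate(concatAxisValues.begin(), concatAxisValues.end(), std::size_t(0));  // 5
    std::size_t prodDimLower = 1;   // product of dims before the axis -> 2
    for (std::size_t i = 0; i < axis; ++i) prodDimLower *= dimsFirstInput[i];
    std::size_t prodDimHigher = 1;  // product of dims after the axis -> 4
    for (std::size_t i = axis + 1; i < dimsFirstInput.size(); ++i) prodDimHigher *= dimsFirstInput[i];

    std::vector<float> output(prodDimLower * outputAxisValue * prodDimHigher);  // shape (2, 5, 4)

    // Same double loop as the kernel: each input contributes contiguous chunks of
    // prodDimHigher * concatAxisValues[inputId] elements; the write position advances
    // by the full output axis extent between chunks.
    std::size_t oIndexStart = 0;
    for (std::size_t inputId = 0; inputId < inputs.size(); ++inputId) {
        std::size_t oIndex = oIndexStart;
        const std::size_t chunk = prodDimHigher * concatAxisValues[inputId];
        for (std::size_t iIndex = 0; iIndex < prodDimLower; ++iIndex) {
            std::copy(inputs[inputId] + iIndex * chunk,
                      inputs[inputId] + (iIndex + 1) * chunk,
                      output.begin() + oIndex);
            oIndex += prodDimHigher * outputAxisValue;
        }
        oIndexStart += chunk;
    }

    // Expected layout: 8 zeros, 12 ones, 8 zeros, 12 ones.
    for (float v : output) std::printf("%g ", v);
    std::printf("\n");
    return 0;
}

The chunk size read from each input differs per input, which is why the kernel indexes concatAxisValues[inputId], while the write stride between chunks always uses the accumulated outputAxisValue.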
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_MEMORIZEIMPL_H_
#define AIDGE_CPU_OPERATOR_MEMORIZEIMPL_H_
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Memorize.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include <memory>
#include <vector>
namespace Aidge {
class MemorizeImpl_cpu : public OperatorImpl {
public:
    MemorizeImpl_cpu(const Memorize_Op& op) : OperatorImpl(op, "cpu") {}

    static std::unique_ptr<MemorizeImpl_cpu> create(const Memorize_Op& op) {
        return std::make_unique<MemorizeImpl_cpu>(op);
    }

    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
    Elts_t getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                             const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const override final;
    void updateConsummerProducer() override final;

    void forward() override;
};
namespace {
static Registrar<Memorize_Op> registrarMemorizeImpl_cpu("cpu", Aidge::MemorizeImpl_cpu::create);
}
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_MEMORIZEIMPL_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_POPIMPL_H_
#define AIDGE_CPU_OPERATOR_POPIMPL_H_
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Pop.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include <memory>
#include <vector>
namespace Aidge {
// class Pop_Op;
// compute kernel registry for forward and backward
class PopImplForward_cpu
    : public Registrable<PopImplForward_cpu, std::tuple<DataType, DataType>,
                         void(const std::size_t, const void*, void*)> {};

class PopImplBackward_cpu
    : public Registrable<PopImplBackward_cpu, std::tuple<DataType, DataType>,
                         void(const std::size_t, const void*, void*)> {};

class PopImpl_cpu : public OperatorImpl {
public:
    PopImpl_cpu(const Pop_Op& op) : OperatorImpl(op, "cpu") {}

    static std::unique_ptr<PopImpl_cpu> create(const Pop_Op& op) {
        return std::make_unique<PopImpl_cpu>(op);
    }

    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
    void forward() override;
};
namespace {
static Registrar<Pop_Op> registrarPopImpl_cpu("cpu", Aidge::PopImpl_cpu::create);
}
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_POPIMPL_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_RESHAPEIMPL_H_
#define AIDGE_CPU_OPERATOR_RESHAPEIMPL_H_
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Reshape.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include <memory>
#include <vector>
namespace Aidge {
// class Reshape_Op;
// compute kernel registry for forward and backward
class ReshapeImplForward_cpu
    : public Registrable<ReshapeImplForward_cpu, std::tuple<DataType, DataType>,
                         void(std::size_t, const void*, void*)> {};

class ReshapeImplBackward_cpu
    : public Registrable<ReshapeImplBackward_cpu, std::tuple<DataType, DataType>,
                         void(std::size_t, const void*, void*)> {};

class ReshapeImpl_cpu : public OperatorImpl {
public:
    ReshapeImpl_cpu(const Reshape_Op& op) : OperatorImpl(op, "cpu") {}

    static std::unique_ptr<ReshapeImpl_cpu> create(const Reshape_Op& op) {
        return std::make_unique<ReshapeImpl_cpu>(op);
    }

    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
    void forward() override;
};
namespace {
static Registrar<Reshape_Op> registrarReshapeImpl_cpu("cpu", Aidge::ReshapeImpl_cpu::create);
}
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_RESHAPEIMPL_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_RESHAPEIMPL_FORWARD_KERNEL_H_
#define AIDGE_CPU_OPERATOR_RESHAPEIMPL_FORWARD_KERNEL_H_
#include "aidge/utils/Registrar.hpp"
#include <cmath>
#include "aidge/backend/cpu/operator/ReshapeImpl.hpp"
namespace Aidge {
template <class I, class O>
void ReshapeImpl_cpu_forward_kernel(std::size_t inputLength,
                                    const void* input_,
                                    void* output_) {
    const I* input = static_cast<const I*>(input_);
    O* output = static_cast<O*>(output_);

    std::copy_n(input, inputLength, output);
}
namespace {
static Registrar<ReshapeImplForward_cpu> registrarReshapeImplForward_cpu_Float32(
{DataType::Float32, DataType::Float32},
Aidge::ReshapeImpl_cpu_forward_kernel<float, float>);
static Registrar<ReshapeImplForward_cpu> registrarReshapeImplForward_cpu_Int32(
{DataType::Int32, DataType::Int32},
Aidge::ReshapeImpl_cpu_forward_kernel<int, int>);
static Registrar<ReshapeImplForward_cpu> registrarReshapeImplForward_cpu_Float64(
{DataType::Float64, DataType::Float64},
Aidge::ReshapeImpl_cpu_forward_kernel<double, double>);
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_RESHAPEIMPL_FORWARD_KERNEL_H_ */
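Since the kernel above is nothing more than std::copy_n, a tiny standalone example (not part of the commit) shows why reshaping contiguous row-major data needs no element reordering, here for a (2, 3) buffer viewed as (3, 2):

// Standalone illustration: for contiguous row-major storage, Reshape is a flat copy.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    const std::vector<float> input{0, 1, 2, 3, 4, 5};  // logical shape (2, 3)
    std::vector<float> output(input.size());           // logical shape (3, 2)
    std::copy_n(input.data(), input.size(), output.data());  // what the kernel does

    // Element (i, j) of the (3, 2) view is output[i*2 + j]; values keep their flat order.
    std::printf("output[2*2 + 1] = %g (same flat index as input[1*3 + 2])\n", output[2 * 2 + 1]);
    return 0;
}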
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <numeric> // std::accumulate
#include <vector>
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/backend/cpu/operator/ConcatImpl.hpp"
#include "aidge/backend/cpu/operator/ConcatImpl_forward_kernels.hpp"
void Aidge::ConcatImpl_cpu::forward() {
    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input in Concat operator");
    DataType datatypeFirstInput = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType();
    for (IOIndex_t i = 1; i < mOp.nbInputs(); ++i) {
        assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(i)) && "missing input in Concat operator");
        assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->dataType() == datatypeFirstInput);
    }

    auto kernelFunc = Registrar<ConcatImplForward_cpu>::create({
        datatypeFirstInput,
        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});

    std::vector<const void*> opInputs;
    std::vector<DimSize_t> opInputAxis;
    for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
        opInputs.push_back(getCPUPtr(mOp.getRawInput(i)));
        opInputAxis.push_back(std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->dims()
            [dynamic_cast<const Concat_Op&>(mOp).template getAttr<DimSize_t>("Axis")]);
    }

    kernelFunc(dynamic_cast<const Concat_Op&>(mOp).getStaticAttributes(),
               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
               opInputAxis,
               opInputs,
               getCPUPtr(mOp.getRawOutput(0)));
}

void Aidge::ConcatImpl_cpu::backward() { fmt::print("Not implemented yet.\n"); }
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <chrono> // std::chrono::milliseconds
#include <numeric> // std::accumulate
#include <thread> // std::this_thread::sleep_for
#include <vector>
#include "aidge/operator/Memorize.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/backend/cpu/operator/MemorizeImpl.hpp"
Aidge::Elts_t Aidge::MemorizeImpl_cpu::getNbRequiredData(
    Aidge::IOIndex_t inputIdx) const
{
    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();

    if (scheduleStep == 0 && inputIdx == 0) {
        // No data input is required for the initial step.
        // Initialization data is required however.
        return Elts_t::NoneElts();
    }
    else if (scheduleStep > 0 && inputIdx == 1) {
        // No initialization data is required after the initial step.
        return Elts_t::NoneElts();
    }
    else {
        return OperatorImpl::getNbRequiredData(inputIdx);
    }
}

Aidge::Elts_t Aidge::MemorizeImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
    assert(mOp.getRawOutput(outputIdx) && "requires valid output");
    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();

    if (endStep > 0 && outputIdx == 1 && scheduleStep >= endStep) {
        return Elts_t::NoneElts();
    }
    else {
        return Elts_t::DataElts(std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx))->size());
    }
}

void Aidge::MemorizeImpl_cpu::updateConsummerProducer() {
    OperatorImpl::updateConsummerProducer();

    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
    AIDGE_ASSERT(endStep == 0 || scheduleStep <= endStep, "cannot update consumer producer anymore, number of cycles exceeded");
}

void Aidge::MemorizeImpl_cpu::forward() {
    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
    const unsigned int forwardStep = op.template getAttr<MemorizeAttr::ForwardStep>();
    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
    AIDGE_ASSERT(endStep == 0 || forwardStep <= endStep, "cannot forward anymore, number of cycles exceeded");

    if (forwardStep == 0) {
        op.getOutput(0)->getImpl()->copy(op.getInput(1)->getImpl()->rawPtr(), op.getInput(1)->size());
    }
    else {
        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
    }
}
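The scheduling rules encoded in getNbRequiredData and forward above are easier to see on a concrete run. A standalone sketch (not part of the commit) replays the same conditions for an end step of 3: the initialization input (#1) is only consumed at step 0, the recurrent input (#0) only afterwards.

// Standalone replay of the Memorize consumption rules above (no Aidge dependency).
#include <cstdio>

int main() {
    const unsigned int endStep = 3;
    for (unsigned int scheduleStep = 0; scheduleStep <= endStep; ++scheduleStep) {
        // Mirrors getNbRequiredData: input #0 is skipped at step 0,
        // input #1 is skipped for every step after the first.
        const bool consumesInput0 = (scheduleStep > 0);
        const bool consumesInput1 = (scheduleStep == 0);
        std::printf("step %u: input#0 %s, input#1 %s\n", scheduleStep,
                    consumesInput0 ? "consumed" : "none",
                    consumesInput1 ? "consumed" : "none");
    }
    return 0;
}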
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <chrono> // std::chrono::milliseconds
#include <numeric> // std::accumulate
#include <thread> // std::this_thread::sleep_for
#include <vector>
#include "aidge/operator/Pop.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/backend/cpu/operator/PopImpl.hpp"
Aidge::Elts_t Aidge::PopImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
    assert(mOp.getRawInput(inputIdx) && "requires valid input");

    return Elts_t::DataElts(std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->size()
                            / std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->dims()[0]);
}

void Aidge::PopImpl_cpu::forward() {
    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");

    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
    const unsigned int forwardStep = op.template getAttr<PopAttr::ForwardStep>();

    *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))
        = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->extract({forwardStep});
}
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/backend/cpu/operator/ReshapeImpl.hpp"
#include "aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Reshape.hpp"
#include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
Aidge::Elts_t Aidge::ReshapeImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
    // this implementation can be in-place
    return Elts_t::DataElts(0);
}

void Aidge::ReshapeImpl_cpu::forward() {
    const Reshape_Op& op_ = static_cast<const Reshape_Op&>(mOp);
    AIDGE_ASSERT(op_.getInput(0)->size() == op_.getOutput(0)->size(),
                 "input must have the same overall size as shape");

    // Find the correct kernel type
    auto kernelFunc = Registrar<ReshapeImplForward_cpu>::create({
        op_.getInput(0)->dataType(),
        op_.getOutput(0)->dataType()});

    // Call kernel
    kernelFunc(op_.getInput(0)->size(),
               op_.getInput(0)->getImpl()->rawPtr(),
               op_.getOutput(0)->getImpl()->rawPtr());
}
@@ -36,11 +36,6 @@ void Aidge::SliceImpl_cpu::forward() {
         std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()
     );
-    // each input is consumed by the minimum amount for a forward pass
-    mNbConsumedData[0] += getNbRequiredData(0);
-    mNbProducedData[0] += getRequiredMemory(0, {});
 }

 void Aidge::SliceImpl_cpu::backward() { fmt::print("Not implemented yet.\n"); }
...
@@ -13,6 +13,7 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Add.hpp"
+#include "aidge/operator/Concat.hpp"
 #include "aidge/backend/cpu.hpp"
...
@@ -17,6 +17,7 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/Memorize.hpp"
 #include "aidge/scheduler/SequentialScheduler.hpp"
 #include "aidge/scheduler/ParallelScheduler.hpp"
...