Commit cd4ce708 authored by Thibault Allenet

Merge branch 'master' into dataloader

Parents: 3444c174 9676eac9
Merge requests: !50 version 0.2.0, !2 Scheduler ConnectInput and tensor filling with offset tests
Pipeline #35469 failed
Showing 243 additions and 141 deletions
@@ -15,7 +15,7 @@ stages:
 include:
   - local: '/.gitlab/ci/_global.gitlab-ci.yml'
-  - local: '/.gitlab/ci/static_analysis.gitlab-ci.yml'
+  # - local: '/.gitlab/ci/static_analysis.gitlab-ci.yml'
   - local: '/.gitlab/ci/build.gitlab-ci.yml'
   - local: '/.gitlab/ci/test.gitlab-ci.yml'
-  - local: '/.gitlab/ci/coverage.gitlab-ci.yml'
+  # - local: '/.gitlab/ci/coverage.gitlab-ci.yml'
@@ -49,24 +49,24 @@ class test_recipies(unittest.TestCase):
         np_shift = np.array([0.05]).astype(np.float32)
         np_mean = np.array([0.05]).astype(np.float32)
         np_var = np.array([0.05]).astype(np.float32)
-        conv.input(1)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_weights))
-        conv.input(2)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_bias))
-        bn.input(1)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_scale))
-        bn.input(2)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_shift))
-        bn.input(3)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_mean))
-        bn.input(4)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_var))
+        conv.input(1)[0].get_operator().set_output(0, aidge_core.Tensor(np_weights))
+        conv.input(2)[0].get_operator().set_output(0, aidge_core.Tensor(np_bias))
+        bn.input(1)[0].get_operator().set_output(0, aidge_core.Tensor(np_scale))
+        bn.input(2)[0].get_operator().set_output(0, aidge_core.Tensor(np_shift))
+        bn.input(3)[0].get_operator().set_output(0, aidge_core.Tensor(np_mean))
+        bn.input(4)[0].get_operator().set_output(0, aidge_core.Tensor(np_var))

         scheduler0 = aidge_core.SequentialScheduler(graph_view)
         scheduler0.forward()

         for outNode in graph_view.get_output_nodes():
-            output_aidge0 = outNode.get_operator().output(0)
+            output_aidge0 = outNode.get_operator().get_output(0)

         aidge_core.fuse_batchnorm(graph_view)

         scheduler1 = aidge_core.SequentialScheduler(graph_view)
         scheduler1.forward()

         for outNode in graph_view.get_output_nodes():
-            output_aidge1 = outNode.get_operator().output(0)
+            output_aidge1 = outNode.get_operator().get_output(0)

         self.assertTrue(aidge_core.approx_eq(output_aidge0, output_aidge1, 0.000001, 0.0001))
@@ -22,30 +22,30 @@ class test_scheduler(unittest.TestCase):
         gv.add(relu)
         gv.add(input_node)
+        input_node.add_child(relu)

         gv.set_datatype(aidge_core.DataType.Int32)
         gv.set_backend("cpu")

-        input_node.add_child(relu)
         scheduler = aidge_core.SequentialScheduler(gv)
         scheduler.forward()

-        out_tensor = relu.get_operator().output(0)
+        out_tensor = relu.get_operator().get_output(0)
         expected_out = [0,0,0,0,1,2]
         for i in range(len(expected_out)):
             self.assertEqual(expected_out[i], out_tensor[i])

     def test_sequential_scheduling(self):
-        input_data = np.array([]).astype(np.float32)
+        input_data = np.array([0]).astype(np.float32)
         input_tensor = aidge_core.Tensor(input_data)
         input_node = aidge_core.Producer(input_tensor, "X")

         graph_view = aidge_core.sequential([
-            aidge_core.FC(50, name='0'),
-            aidge_core.FC(50, name='1'),
-            aidge_core.FC(10, name='2'),
+            aidge_core.FC(1, 50, name='0'),
+            aidge_core.FC(50, 50, name='1'),
+            aidge_core.FC(50, 10, name='2'),
         ])
         EXPECTED_SCHEDULE = ['0', '1', '2']

@@ -64,14 +64,14 @@ class test_scheduler(unittest.TestCase):
     def test_parallel_scheduling(self):
-        input_data = np.array([]).astype(np.float32)
+        input_data = np.array([0]).astype(np.float32)
         input_tensor = aidge_core.Tensor(input_data)
         input_node = aidge_core.Producer(input_tensor, "X")

         graph_view = aidge_core.sequential([
-            aidge_core.FC(50, name='0'),
-            aidge_core.parallel([aidge_core.FC(50, name='1'), aidge_core.FC(50, name='3')]),
-            aidge_core.Add(name='2'),
+            aidge_core.FC(1, 50, name='0'),
+            aidge_core.parallel([aidge_core.FC(50, 50, name='1'), aidge_core.FC(50, 50, name='3')]),
+            aidge_core.Add(2, name='2'),
         ])
         EXPECTED_SCHEDULE = [['0', '1', '3', '2'], ['0', '3', '1', '2']]  # Both schedulings are valid!
@@ -17,6 +17,7 @@
 #include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp"
 #include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp"
 #include "aidge/backend/cpu/operator/BatchNormImpl.hpp"
+#include "aidge/backend/cpu/operator/ConcatImpl.hpp"
 #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
 #include "aidge/backend/cpu/operator/ConvImpl.hpp"
 #include "aidge/backend/cpu/operator/DivImpl.hpp"
@@ -29,6 +30,7 @@
 #include "aidge/backend/cpu/operator/ProducerImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/ScalingImpl.hpp"
+#include "aidge/backend/cpu/operator/SliceImpl.hpp"
 #include "aidge/backend/cpu/operator/SqrtImpl.hpp"
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_DATA_GETCPUPTR_H_
#define AIDGE_CPU_DATA_GETCPUPTR_H_

#include "aidge/data/Tensor.hpp"

namespace Aidge {
inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) {
    return std::static_pointer_cast<Tensor>(data)->getImpl()->rawPtr();
}
} // namespace Aidge

#endif // AIDGE_CPU_DATA_GETCPUPTR_H_
\ No newline at end of file
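
For context, a minimal usage sketch of the new helper. This is not part of the commit: the Array1D initializer and Tensor::setBackend spellings are assumptions based on the surrounding Aidge API.

#include <memory>
#include "aidge/data/Tensor.hpp"
#include "aidge/backend/cpu/data/GetCPUPtr.h"

int main() {
    // Build a small Float32 tensor on the CPU backend. Array1D and
    // setBackend are assumed here, not taken from this diff.
    auto t = std::make_shared<Aidge::Tensor>(Aidge::Array1D<float, 4>{{1.f, 2.f, 3.f, 4.f}});
    t->setBackend("cpu");

    // getCPUPtr accepts any std::shared_ptr<Aidge::Data> and returns the
    // raw buffer of the underlying implementation via getImpl()->rawPtr().
    float* raw = static_cast<float*>(Aidge::getCPUPtr(t));
    raw[0] = 0.f;  // writes straight through to the tensor's storage
    return 0;
}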
@@ -16,6 +16,7 @@
 #include "aidge/operator/Add.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
@@ -23,87 +24,39 @@ namespace Aidge {
 // class Add_Op<2>;

 // compute kernel registry for forward and backward
-template <DimIdx_t NUM>
-class AddImplForward_cpu;
-template <DimIdx_t NUM>
-class AddImplBackward_cpu;
-
-template <>
-class AddImplForward_cpu<1>
-    : public Registrable<AddImplForward_cpu<1>, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {};
-template <>
-class AddImplBackward_cpu<1>
-    : public Registrable<AddImplBackward_cpu<1>, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {};
-
-template <>
-class AddImplForward_cpu<2> : public Registrable<AddImplForward_cpu<2>, std::tuple<DataType, DataType, DataType>,
-                                                 void(const std::size_t, const void*, const void*, void*)> {};
-template <>
-class AddImplBackward_cpu<2> : public Registrable<AddImplBackward_cpu<2>, std::tuple<DataType, DataType, DataType>,
-                                                  void(const std::size_t, const void*, const void*, void*)> {};
-
-template <>
-class AddImplForward_cpu<3> : public Registrable<AddImplForward_cpu<3>, std::tuple<DataType, DataType, DataType, DataType>,
-                                                 void(const std::size_t, const void*, const void*, const void*, void*)> {};
-template <>
-class AddImplBackward_cpu<3>
-    : public Registrable<AddImplBackward_cpu<3>, std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const std::size_t, const void*, const void*, const void*, void*)> {};
+class AddImplForward_cpu
+    : public Registrable<AddImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const std::vector<const void*>, void*)> {};
+class AddImplBackward_cpu
+    : public Registrable<AddImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const std::vector<const void*>, void*)> {};

-template <DimIdx_t NUM>
 class AddImpl_cpu : public OperatorImpl {
 public:
-    AddImpl_cpu(const Add_Op<NUM>& op) : OperatorImpl(op) {}
+    AddImpl_cpu(const Add_Op& op) : OperatorImpl(op) {}

-    static std::unique_ptr<AddImpl_cpu<NUM>> create(const Add_Op<NUM>& op) {
-        return std::make_unique<AddImpl_cpu<NUM>>(op);
+    static std::unique_ptr<AddImpl_cpu> create(const Add_Op& op) {
+        return std::make_unique<AddImpl_cpu>(op);
     }
-};
-
-template <>
-class AddImpl_cpu<1> : public OperatorImpl {
-public:
-    AddImpl_cpu(const Add_Op<1>& op) : OperatorImpl(op) {}
-
-    static std::unique_ptr<AddImpl_cpu<1>> create(const Add_Op<1>& op) {
-        return std::make_unique<AddImpl_cpu<1>>(op);
-    }
-
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
-    void forward() override;
-};
-
-template <>
-class AddImpl_cpu<2> : public OperatorImpl {
-public:
-    AddImpl_cpu(const Add_Op<2>& op) : OperatorImpl(op) {}
-
-    static std::unique_ptr<AddImpl_cpu<2>> create(const Add_Op<2>& op) {
-        return std::make_unique<AddImpl_cpu<2>>(op);
-    }
-
-    NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
-    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
-};
-
-template <>
-class AddImpl_cpu<3> : public OperatorImpl {
-public:
-    AddImpl_cpu(const Add_Op<3>& op) : OperatorImpl(op) {}
-
-    static std::unique_ptr<AddImpl_cpu<3>> create(const Add_Op<3>& op) {
-        return std::make_unique<AddImpl_cpu<3>>(op);
-    }
-
-    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
-    void forward() override;
-};
+
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
+    NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+    void updateConsummerProducer() override final;
+    void forward() override;
+};

 namespace {
-static Registrar<Add_Op<1>> registrarAddImpl1I_cpu("cpu", Aidge::AddImpl_cpu<1>::create);
-static Registrar<Add_Op<2>> registrarAddImpl2I_cpu("cpu", Aidge::AddImpl_cpu<2>::create);
-static Registrar<Add_Op<3>> registrarAddImpl3I_cpu("cpu", Aidge::AddImpl_cpu<3>::create);
+static Registrar<Add_Op> registrarAddImpl_cpu("cpu", Aidge::AddImpl_cpu::create);
 }  // namespace
 }  // namespace Aidge
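
To see why the registry key shrinks to an {input type, output type} pair, here is a sketch of how the unified AddImpl_cpu::forward() can now dispatch regardless of input count. Illustrative only: the mOp accessors (nbInputs, getRawInput, getRawOutput) are assumptions about the surrounding Operator API, not code from this diff.

#include <memory>
#include <vector>
#include "aidge/backend/cpu/operator/AddImpl.hpp"
#include "aidge/backend/cpu/data/GetCPUPtr.h"

// Hypothetical body for the unified forward(): one registry lookup, then a
// single kernel call over all inputs gathered into one vector.
void Aidge::AddImpl_cpu::forward() {
    const auto in0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0));   // assumed accessor
    const auto out = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0));  // assumed accessor

    // The key no longer depends on the number of inputs.
    auto kernelFunc = Registrar<AddImplForward_cpu>::create({in0->dataType(), out->dataType()});

    // Gather every input as a raw CPU pointer via the new GetCPUPtr helper.
    std::vector<const void*> opInputs;
    for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {                         // assumed accessor
        opInputs.push_back(getCPUPtr(mOp.getRawInput(i)));
    }

    kernelFunc(in0->size(), opInputs, getCPUPtr(mOp.getRawOutput(0)));
}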
@@ -18,70 +18,30 @@
 namespace Aidge {
-template <class I1, class O>
-void AddImpl1I_cpu_forward_kernel(const std::size_t inputLength, const void* input1_, void* output_) {
-    // FIXME: missing Add attributes as arguments
-    const I1* input1 = static_cast<const I1*>(input1_);
-    O* output = static_cast<O*>(output_);
-
-    for (std::size_t oIndex = 0; oIndex < inputLength; ++oIndex) {
-        output[oIndex] = input1[oIndex];
-    }
-}
-
-template <class I1, class I2, class O>
-void AddImpl2I_cpu_forward_kernel(const std::size_t inputLength, const void* input1_, const void* input2_,
-                                  void* output_) {
-    // FIXME: missing Add attributes as arguments
-    const I1* input1 = static_cast<const I1*>(input1_);
-    const I2* input2 = static_cast<const I2*>(input2_);
-    O* output = static_cast<O*>(output_);
-
-    for (std::size_t oIndex = 0; oIndex < inputLength; ++oIndex) {
-        output[oIndex] = input1[oIndex] + input2[oIndex];
-    }
-}
-
-template <class I1, class I2, class I3, class O>
-void AddImpl3I_cpu_forward_kernel(const std::size_t inputLength, const void* input1_, const void* input2_,
-                                  const void* input3_, void* output_) {
-    // FIXME: missing Add attributes as arguments
-    const I1* input1 = static_cast<const I1*>(input1_);
-    const I2* input2 = static_cast<const I2*>(input2_);
-    const I3* input3 = static_cast<const I3*>(input3_);
-    O* output = static_cast<O*>(output_);
-
-    for (std::size_t oIndex = 0; oIndex < inputLength; ++oIndex) {
-        output[oIndex] = input1[oIndex] + input2[oIndex] + input3[oIndex];
-    }
-}
+template <class I, class O>
+void AddImpl_cpu_forward_kernel(const std::size_t inputLength, const std::vector<const void*> inputs_, void* output_) {
+    // FIXME: missing Add attributes as arguments
+    std::vector<const I*> inputs;
+    for (const auto& input_ : inputs_) {
+        inputs.push_back(static_cast<const I*>(input_));
+    }
+    O* output = static_cast<O*>(output_);
+
+    for (std::size_t iIndex = 0; iIndex < inputs.size(); ++iIndex) {
+        for (std::size_t oIndex = 0; oIndex < inputLength; ++oIndex) {
+            output[oIndex] += inputs[iIndex][oIndex];
+        }
+    }
+}

 namespace {
-static Registrar<AddImplForward_cpu<1>> registrarAddImpl1IForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::AddImpl1I_cpu_forward_kernel<float, float>);
-static Registrar<AddImplForward_cpu<1>> registrarAddImpl1IForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::AddImpl1I_cpu_forward_kernel<int, int>);
-static Registrar<AddImplForward_cpu<1>> registrarAddImpl1IForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::AddImpl1I_cpu_forward_kernel<double, double>);
-
-static Registrar<AddImplForward_cpu<2>> registrarAddImpl2IForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32, DataType::Float32},
-        Aidge::AddImpl2I_cpu_forward_kernel<float, float, float>);
-static Registrar<AddImplForward_cpu<2>> registrarAddImpl2IForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32, DataType::Int32}, Aidge::AddImpl2I_cpu_forward_kernel<int, int, int>);
-static Registrar<AddImplForward_cpu<2>> registrarAddImpl2IForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64, DataType::Float64}, Aidge::AddImpl2I_cpu_forward_kernel<double, double, double>);
-
-static Registrar<AddImplForward_cpu<3>> registrarAddImpl3IForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
-        Aidge::AddImpl3I_cpu_forward_kernel<float, float, float, float>);
-static Registrar<AddImplForward_cpu<3>> registrarAddImpl3IForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32},
-        Aidge::AddImpl3I_cpu_forward_kernel<int, int, int, int>);
-static Registrar<AddImplForward_cpu<3>> registrarAddImpl3IForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
-        Aidge::AddImpl3I_cpu_forward_kernel<double, double, double, double>);
+static Registrar<AddImplForward_cpu> registrarAddImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::AddImpl_cpu_forward_kernel<float, float>);
+static Registrar<AddImplForward_cpu> registrarAddImplForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::AddImpl_cpu_forward_kernel<int, int>);
+static Registrar<AddImplForward_cpu> registrarAddImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::AddImpl_cpu_forward_kernel<double, double>);
 }  // namespace
 }  // namespace Aidge

-#endif /* AIDGE_CPU_OPERATOR_ADDIMPL_CPU_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_ADDIMPL_CPU_FORWARD_KERNEL_H_ */
\ No newline at end of file
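
One caveat worth noting in the new kernel: it only ever applies += to output, including for the first input, so it is correct only if the output buffer starts zeroed. Below is a dependency-free repro of the loop structure with the zero-fill made explicit (plain C++, no Aidge types; the assumption is that the real forward() hands the kernel an initialized buffer).

#include <cstddef>
#include <iostream>
#include <vector>

// Same shape as AddImpl_cpu_forward_kernel, minus the Aidge plumbing.
template <class I, class O>
void add_kernel(const std::size_t inputLength, const std::vector<const void*>& inputs_, void* output_) {
    std::vector<const I*> inputs;
    for (const auto& input_ : inputs_) {
        inputs.push_back(static_cast<const I*>(input_));
    }
    O* output = static_cast<O*>(output_);
    // Explicit zero-fill: the committed kernel relies on the caller for this,
    // since it only ever applies "+=" to output[oIndex].
    for (std::size_t oIndex = 0; oIndex < inputLength; ++oIndex) {
        output[oIndex] = O(0);
    }
    for (std::size_t iIndex = 0; iIndex < inputs.size(); ++iIndex) {
        for (std::size_t oIndex = 0; oIndex < inputLength; ++oIndex) {
            output[oIndex] += inputs[iIndex][oIndex];
        }
    }
}

int main() {
    const float a[3] = {1.f, 2.f, 3.f};
    const float b[3] = {10.f, 20.f, 30.f};
    float out[3];
    add_kernel<float, float>(3, {a, b}, out);
    std::cout << out[0] << " " << out[1] << " " << out[2] << "\n";  // 11 22 33
}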
@@ -21,6 +21,7 @@
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"

 namespace Aidge {
 // class AvgPooling_Op;
@@ -16,6 +16,7 @@
 #include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/data/Data.hpp"
 #include <array>
 #include <tuple>
@@ -21,6 +21,7 @@
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"

 namespace Aidge {
 // class BatchNorm_Op;
@@ -16,6 +16,7 @@
 #include "aidge/backend/cpu/operator/BatchNormImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <array>
 #include <cmath>
 #include <algorithm>
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_CONCATIMPL_H_
#define AIDGE_CPU_OPERATOR_CONCATIMPL_H_
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Concat.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include <memory>
#include <vector>
namespace Aidge {
// class Concat_Op<2>;
// compute kernel registry for forward and backward
class ConcatImplForward_cpu
    : public Registrable<ConcatImplForward_cpu, std::tuple<DataType, DataType>, void(const Concat_Op::Attrs&,
                                                                                     const std::vector<DimSize_t>,
                                                                                     const std::vector<DimSize_t>&,
                                                                                     const std::vector<const void*>,
                                                                                     void*)> {};
class ConcatImplBackward_cpu
    : public Registrable<ConcatImplBackward_cpu, std::tuple<DataType, DataType>, void(const Concat_Op::Attrs&,
                                                                                      const std::vector<DimSize_t>,
                                                                                      const std::vector<DimSize_t>&,
                                                                                      const std::vector<const void*>,
                                                                                      void*)> {};

class ConcatImpl_cpu : public OperatorImpl {
public:
    ConcatImpl_cpu(const Concat_Op& op) : OperatorImpl(op) {}

    static std::unique_ptr<ConcatImpl_cpu> create(const Concat_Op& op) {
        return std::make_unique<ConcatImpl_cpu>(op);
    }

public:
    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
    NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
    void updateConsummerProducer() override final;

    void forward() override;

    void backward() override;
};
namespace {
static Registrar<Concat_Op> registrarConcatImpl_cpu("cpu", Aidge::ConcatImpl_cpu::create);
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_CONCATIMPL_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_CONCATIMPL_FORWARD_KERNEL_H_
#define AIDGE_CPU_OPERATOR_CONCATIMPL_FORWARD_KERNEL_H_
#include <algorithm>
#include <numeric>
#include <cstddef>
#include <vector>
#include "aidge/backend/cpu/operator/ConcatImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/operator/Concat.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
namespace Aidge {
template <class I, class O>
void ConcatImpl_cpu_forward_kernel(const Concat_Op::Attrs& attrs,
                                   const std::vector<DimSize_t>& dimsFirstInput,
                                   const std::vector<DimSize_t>& concatAxisValues,
                                   const std::vector<const void*>& inputs_,
                                   void* output_)
{
    // FIXME: missing Concat attributes as arguments
    std::vector<const I*> inputs;
    for (const auto& input_ : inputs_) {
        inputs.push_back(static_cast<const I*>(input_));
    }
    O* output = static_cast<O*>(output_);

    DimSize_t outputAxisValue = std::accumulate(concatAxisValues.begin(), concatAxisValues.end(), 0);

    DimSize_t prodDimLower = 1;
    for (DimIdx_t i = 0; i < std::get<0>(attrs); ++i) {
        prodDimLower *= dimsFirstInput[i];
    }
    DimSize_t prodDimHigher = 1;
    for (DimIdx_t i = std::get<0>(attrs) + 1; static_cast<std::size_t>(i) < dimsFirstInput.size(); ++i) {
        prodDimHigher *= dimsFirstInput[i];
    }

    std::size_t oIndexStart = 0;
    std::size_t oIndex = 0;
    for (std::size_t inputId = 0; inputId < inputs.size(); ++inputId) {
        oIndex = oIndexStart;
        const DimSize_t iOffset = prodDimHigher*concatAxisValues[inputId];
        for (std::size_t iIndex = 0; iIndex < prodDimLower; ++iIndex) {
            std::copy(inputs[inputId] + iIndex*iOffset, inputs[inputId] + (iIndex+1)*iOffset, output + oIndex);
            oIndex += prodDimHigher*outputAxisValue;
        }
        oIndexStart += concatAxisValues[inputId]*prodDimHigher;
    }
}
namespace {
static Registrar<ConcatImplForward_cpu> registrarConcatImplForward_cpu_Float32(
{DataType::Float32, DataType::Float32}, Aidge::ConcatImpl_cpu_forward_kernel<float, float>);
static Registrar<ConcatImplForward_cpu> registrarConcatImplForward_cpu_Int32(
{DataType::Int32, DataType::Int32}, Aidge::ConcatImpl_cpu_forward_kernel<int, int>);
static Registrar<ConcatImplForward_cpu> registrarConcatImplForward_cpu_Float64(
{DataType::Float64, DataType::Float64},
Aidge::ConcatImpl_cpu_forward_kernel<double, double>);
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_CONCATIMPL_FORWARD_KERNEL_H_ */
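
A standalone walkthrough of the index arithmetic above (plain C++, Aidge types replaced by std::size_t): prodDimLower is the product of the dimensions before the concat axis, prodDimHigher the product after it, and each input contributes a block of iOffset = prodDimHigher * axisSize contiguous elements per lower-dimension slice. Here we concatenate A (2x1x3) and B (2x2x3) along axis 1 to get C (2x3x3).

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    const float A[6]  = {0, 1, 2,  3, 4, 5};                                  // 2x1x3
    const float B[12] = {10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21};     // 2x2x3
    float C[18];                                                              // 2x3x3

    const std::size_t axis = 1;
    const std::vector<std::size_t> dimsFirstInput = {2, 1, 3};
    const std::vector<std::size_t> concatAxisValues = {1, 2};  // axis size of each input
    const std::size_t outputAxisValue = 3;                     // 1 + 2

    // prodDimLower: product of dims before the axis; prodDimHigher: after it.
    std::size_t prodDimLower = 1;
    for (std::size_t i = 0; i < axis; ++i) prodDimLower *= dimsFirstInput[i];            // = 2
    std::size_t prodDimHigher = 1;
    for (std::size_t i = axis + 1; i < dimsFirstInput.size(); ++i) prodDimHigher *= dimsFirstInput[i];  // = 3

    // Same loop nest as the kernel: copy one iOffset-sized block per lower
    // slice, stepping the output cursor by a full output row each time.
    const std::vector<const float*> inputs = {A, B};
    std::size_t oIndexStart = 0;
    for (std::size_t inputId = 0; inputId < inputs.size(); ++inputId) {
        std::size_t oIndex = oIndexStart;
        const std::size_t iOffset = prodDimHigher * concatAxisValues[inputId];
        for (std::size_t iIndex = 0; iIndex < prodDimLower; ++iIndex) {
            std::copy(inputs[inputId] + iIndex * iOffset,
                      inputs[inputId] + (iIndex + 1) * iOffset,
                      C + oIndex);
            oIndex += prodDimHigher * outputAxisValue;
        }
        oIndexStart += concatAxisValues[inputId] * prodDimHigher;
    }

    for (float v : C) std::cout << v << ' ';  // 0 1 2 10 11 12 13 14 15 3 4 5 16 17 18 19 20 21
    std::cout << '\n';
}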
@@ -21,6 +21,7 @@
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"

 namespace Aidge {
 // class ConvDepthWise_Op;
@@ -16,7 +16,9 @@
 #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <cmath>
+#include <cstddef>
 #include <array>
 #include <algorithm>
@@ -21,6 +21,7 @@
 #include "aidge/operator/Conv.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"

 namespace Aidge {
 // class Conv_Op;
@@ -16,6 +16,7 @@
 #include "aidge/backend/cpu/operator/ConvImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <cmath>
 #include <array>
 #include <algorithm>
@@ -16,6 +16,7 @@
 #include "aidge/operator/Div.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
@@ -16,6 +16,7 @@
 #include "aidge/operator/FC.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 #include <array>
@@ -16,6 +16,7 @@
 #include "aidge/operator/LeakyReLU.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>