Commit e93fe640 authored by Olivier BICHLER

Initial commit

parent 78246fa2
Merge requests: !93 Release v0.3.0, !72 Im2col
Pipeline #49570 failed
include/aidge/backend/cpu.hpp

@@ -21,6 +21,7 @@
 #include "aidge/backend/cpu/operator/DivImpl.hpp"
 #include "aidge/backend/cpu/operator/ErfImpl.hpp"
 #include "aidge/backend/cpu/operator/FCImpl.hpp"
+#include "aidge/backend/cpu/operator/FoldImpl.hpp"
 #include "aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp"
 #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"

include/aidge/backend/cpu/operator/FoldImpl.hpp
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_FOLDIMPL_H_
#define AIDGE_CPU_OPERATOR_FOLDIMPL_H_

#include <array>
#include <memory>
#include <tuple>
#include <vector>

#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Fold.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"

namespace Aidge {
// Forward-kernel registrable, keyed on the (input, output) data-type pair.
class FoldImpl2DForward_cpu
    : public Registrable<FoldImpl2DForward_cpu,
                         std::tuple<DataType, DataType>,
                         void(const Fold_Op<2>::Attrs &, const std::array<DimSize_t, 4> &,
                              const void *, void *)> {};

class FoldImpl2D_cpu : public OperatorImpl {
public:
    FoldImpl2D_cpu(const Fold_Op<2> &op) : OperatorImpl(op, "cpu") {}

    static std::unique_ptr<FoldImpl2D_cpu> create(const Fold_Op<2> &op) {
        return std::make_unique<FoldImpl2D_cpu>(op);
    }

    void forward() override;
};
namespace {
// add cpu backend to Fold_Op<2> implementation registry
static Registrar<Fold_Op<2>> registrarFoldImpl2D_cpu("cpu", Aidge::FoldImpl2D_cpu::create);
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_FOLDIMPL_H_ */
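For orientation, a minimal sketch of how this registration is consumed; the node name and attribute values are illustrative, and the exact core-side calls may differ across Aidge versions:

    // Once registrarFoldImpl2D_cpu has run, selecting the "cpu" backend on a
    // Fold_Op<2> resolves to FoldImpl2D_cpu::create through the registry.
    auto myFold = Fold({3, 3}, {3, 3}, "myfold");  // outputDims, kernelDims (illustrative)
    auto op = std::static_pointer_cast<OperatorTensor>(myFold->getOperator());
    op->setDataType(DataType::Float32);
    op->setBackend("cpu");  // Registrar<Fold_Op<2>> lookup -> FoldImpl2D_cpu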
include/aidge/backend/cpu/operator/FoldImpl_forward_kernels.hpp

/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_FOLDIMPL_FORWARD_KERNEL_H_
#define AIDGE_CPU_OPERATOR_FOLDIMPL_FORWARD_KERNEL_H_

#include <algorithm>  // std::fill_n
#include <array>
#include <cmath>      // std::floor
#include <tuple>      // std::get

#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/backend/cpu/operator/FoldImpl.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
template <class I, class O>
void FoldImpl2D_cpu_forward_kernel(const Fold_Op<2>::Attrs &attrs, const std::array<DimSize_t, 4> &dims,
                                   const void *input_, void *output_)
{
    const I *input = static_cast<const I *>(input_);
    O *output = static_cast<O *>(output_);

    // Attribute order of Fold_Op<2>, matching the std::get<> indices below:
    // OutputDims, StrideDims, DilationDims, KernelDims. Fold is the inverse
    // of Unfold (col2im), so the "in*" names refer to the *folded* image and
    // the "out*" names to the column input produced by Unfold.
    const auto kernelDims = std::get<3>(attrs);
    const auto dilationDims = std::get<2>(attrs);
    const auto strideDims = std::get<1>(attrs);
    const DimSize_t inHeight = std::get<0>(attrs)[0];
    const DimSize_t inWidth = std::get<0>(attrs)[1];

    // Number of sliding-window positions per axis, from the dilated kernel extent.
    const DimSize_t kernelExtentHeight = dilationDims[0] * (kernelDims[0] - 1) + 1;
    const DimSize_t outHeight = 1 + static_cast<DimSize_t>(
                    std::floor(static_cast<float>(inHeight - kernelExtentHeight) /
                               static_cast<float>(strideDims[0])));
    const DimSize_t kernelExtentWidth = dilationDims[1] * (kernelDims[1] - 1) + 1;
    const DimSize_t outWidth = 1 + static_cast<DimSize_t>(
                    std::floor(static_cast<float>(inWidth - kernelExtentWidth) /
                               static_cast<float>(strideDims[1])));

    const DimSize_t outChannels = dims[1];  // C * kH * kW columns per position
    const DimSize_t inChannels = outChannels / kernelDims[0] / kernelDims[1];

    // Zero the *folded* output (C x H x W per batch), not an input-sized
    // region: with overlapping patches the column input is larger than the
    // image, and zeroing input-sized storage would overrun the output buffer.
    std::fill_n(output, dims[0] * inChannels * inHeight * inWidth, O(0));

    for (DimSize_t n = 0; n < dims[0]; ++n) {
        // Per-batch base pointers; without this loop only batch 0 was written.
        const I *inBatch = input + n * outChannels * outHeight * outWidth;
        O *outBatch = output + n * inChannels * inHeight * inWidth;

        for (DimSize_t outC = 0; outC < outChannels; ++outC) {
            // Columns are assumed laid out row-major, c * kH * kW + kh * kW + kw
            // (the usual im2col/Unfold convention), so the width offset is the
            // fastest-varying index.
            const auto inOffsetW = outC % kernelDims[1];
            const auto inOffsetH = (outC / kernelDims[1]) % kernelDims[0];
            const auto inC = outC / kernelDims[0] / kernelDims[1];

            for (DimSize_t outH = 0; outH < outHeight; ++outH) {
                // Heights use the [0] stride/dilation components, widths the [1] ones.
                const auto inH = outH * strideDims[0] + inOffsetH * dilationDims[0];
                for (DimSize_t outW = 0; outW < outWidth; ++outW) {
                    const auto inW = outW * strideDims[1] + inOffsetW * dilationDims[1];

                    // Overlapping patch entries accumulate, as in col2im.
                    outBatch[(inC * inHeight + inH) * inWidth + inW] +=
                        inBatch[(outC * outHeight + outH) * outWidth + outW];
                }
            }
        }
    }
}
namespace {
static Registrar<FoldImpl2DForward_cpu> registrarFoldImpl2DForward_cpu_Float32(
{DataType::Float32, DataType::Float32},
Aidge::FoldImpl2D_cpu_forward_kernel<float, float>);
static Registrar<FoldImpl2DForward_cpu> registrarFoldImpl2DForward_cpu_Int32(
{DataType::Int32, DataType::Int32},
Aidge::FoldImpl2D_cpu_forward_kernel<int, int>);
static Registrar<FoldImpl2DForward_cpu> registrarFoldImpl2DForward_cpu_Float64(
{DataType::Float64, DataType::Float64},
Aidge::FoldImpl2D_cpu_forward_kernel<double, double>);
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_FOLDIMPL_FORWARD_KERNEL_H_ */
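To make the kernel contract concrete, a minimal direct-call sketch; the attribute order ({OutputDims, StrideDims, DilationDims, KernelDims}) is inferred from the std::get<> indices above, and every value here is illustrative rather than part of this commit:

    // One batch, one channel, 3x3 kernel exactly covering a 3x3 output:
    // the column tensor is (1, 1*3*3, 1, 1), a single window position.
    Aidge::Fold_Op<2>::Attrs attrs{
        std::array<Aidge::DimSize_t, 2>{3, 3},   // OutputDims
        std::array<Aidge::DimSize_t, 2>{1, 1},   // StrideDims
        std::array<Aidge::DimSize_t, 2>{1, 1},   // DilationDims
        std::array<Aidge::DimSize_t, 2>{3, 3}};  // KernelDims
    std::vector<float> cols(9, 1.0f);  // Unfold-style columns
    std::vector<float> img(9, 0.0f);   // folded 3x3 result
    Aidge::FoldImpl2D_cpu_forward_kernel<float, float>(
        attrs, {1, 9, 1, 1}, cols.data(), img.data());
    // img is now all ones: with a single window position nothing overlaps,
    // so Fold degenerates to reshaping the 9 column entries into the image.

With overlapping positions (stride smaller than the dilated kernel extent) the same output cells would instead accumulate several column entries.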
src/operator/FoldImpl.cpp

/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>

#include "aidge/data/Tensor.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/backend/cpu/operator/FoldImpl.hpp"
#include "aidge/backend/cpu/operator/FoldImpl_forward_kernels.hpp"
void Aidge::FoldImpl2D_cpu::forward() {
    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");

    // Find the correct kernel type
    auto kernelFunc = Registrar<FoldImpl2DForward_cpu>::create(
        {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});

    // Call kernel
    kernelFunc(dynamic_cast<const Fold_Op<2>&>(mOp).getStaticAttributes(),
               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
               getCPUPtr(mOp.getRawInput(0)),
               getCPUPtr(mOp.getRawOutput(0)));
}
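A note on dispatch: the registry key is the (input, output) data-type pair, so only the three identical-type pairs registered in FoldImpl_forward_kernels.hpp resolve; anything else fails at lookup time. For instance (the Float16 line is a hypothetical failure case, not a registered kernel):

    // Resolves: a {Float32, Float32} kernel is registered.
    auto fn = Aidge::Registrar<Aidge::FoldImpl2DForward_cpu>::create(
        {Aidge::DataType::Float32, Aidge::DataType::Float32});
    // Would fail: no {Float16, Float16} kernel exists in the registry.
    // Aidge::Registrar<Aidge::FoldImpl2DForward_cpu>::create(
    //     {Aidge::DataType::Float16, Aidge::DataType::Float16});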
unit_tests/operator/Test_FoldImpl.cpp

/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cstdlib>
#include <memory>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/scheduler/SequentialScheduler.hpp"
#include "aidge/operator/Fold.hpp"
#include "aidge/operator/Unfold.hpp"
#include "aidge/operator/MatMul.hpp"
#include "aidge/operator/Reshape.hpp"
#include "aidge/backend/cpu.hpp"
using namespace Aidge;
TEST_CASE("[cpu/operator] Fold(forward)", "[Fold][CPU]") {
    // Unfold -> MatMul -> Fold is the im2col formulation of a 3->4 channel,
    // 3x3 convolution (see the identity after this test case).
    std::shared_ptr<Node> myUnfold = Unfold({3,3}, "myunfold");
    // The (4,3,3,3) weights must flatten to (4, 27) so that
    // MatMul((4,27), (2,27,9)) yields (2,4,9); a (9,12) reshape has the right
    // element count but the wrong shape for the multiplication.
    std::shared_ptr<Node> myReshape = Reshape({4, 27}, "myreshape");
    std::shared_ptr<Node> myMatMul = MatMul("mymatmul");
    std::shared_ptr<Node> myFold = Fold({3,3}, {3,3}, "myfold");

    myUnfold->addChild(myMatMul, 0, 1);   // unfolded input -> MatMul input #1
    myReshape->addChild(myMatMul, 0, 0);  // flattened weights -> MatMul input #0
    myMatMul->addChild(myFold, 0, 0);
std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
{
{
{{ 0, 1, 2},
{ 3, 4, 5},
{ 6, 7, 8}},
{{ 9, 10, 11},
{ 12, 13, 14},
{ 15, 16, 17}},
{{ 18, 19, 20},
{ 21, 22, 23},
{ 24, 25, 26}}
},
{
{{ 27, 28, 29},
{ 30, 31, 32},
{ 33, 34, 35}},
{{ 36, 37, 38},
{ 39, 40, 41},
{ 42, 43, 44}},
{{ 45, 46, 47},
{ 48, 49, 50},
{ 51, 52, 53}}
},
{
{{ 54, 55, 56},
{ 57, 58, 59},
{ 60, 61, 62}},
{{ 63, 64, 65},
{ 66, 67, 68},
{ 69, 70, 71}},
{{ 72, 73, 74},
{ 75, 76, 77},
{ 78, 79, 80}}
},
{
{{ 81, 82, 83},
{ 84, 85, 86},
{ 87, 88, 89}},
{{ 90, 91, 92},
{ 93, 94, 95},
{ 96, 97, 98}},
{{ 99, 100, 101},
{102, 103, 104},
{105, 106, 107}}
}
}
});
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
{
{
{{ 0, 1, 2, 3, 4},
{ 5, 6, 7, 8, 9},
{ 10, 11, 12, 13, 14},
{ 15, 16, 17, 18, 19},
{ 20, 21, 22, 23, 24}},
{{ 25, 26, 27, 28, 29},
{ 30, 31, 32, 33, 34},
{ 35, 36, 37, 38, 39},
{ 40, 41, 42, 43, 44},
{ 45, 46, 47, 48, 49}},
{{ 50, 51, 52, 53, 54},
{ 55, 56, 57, 58, 59},
{ 60, 61, 62, 63, 64},
{ 65, 66, 67, 68, 69},
{ 70, 71, 72, 73, 74}}
},
{
{{ 75, 76, 77, 78, 79},
{ 80, 81, 82, 83, 84},
{ 85, 86, 87, 88, 89},
{ 90, 91, 92, 93, 94},
{ 95, 96, 97, 98, 99}},
{{100, 101, 102, 103, 104},
{105, 106, 107, 108, 109},
{110, 111, 112, 113, 114},
{115, 116, 117, 118, 119},
{120, 121, 122, 123, 124}},
{{125, 126, 127, 128, 129},
{130, 131, 132, 133, 134},
{135, 136, 137, 138, 139},
{140, 141, 142, 143, 144},
{145, 146, 147, 148, 149}}
}
}
});
    // Expected values are those of the equivalent 3->4 channel, 3x3
    // convolution of myInput with myWeights; no node in this graph adds a
    // bias, so none is included here.
    std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> {
        {
            {
                {{ 15219,  15570,  15921},
                 { 16974,  17325,  17676},
                 { 18729,  19080,  19431}},
                {{ 37818,  38898,  39978},
                 { 43218,  44298,  45378},
                 { 48618,  49698,  50778}},
                {{ 60417,  62226,  64035},
                 { 69462,  71271,  73080},
                 { 78507,  80316,  82125}},
                {{ 83016,  85554,  88092},
                 { 95706,  98244, 100782},
                 {108396, 110934, 113472}}
            },
            {
                {{ 41544,  41895,  42246},
                 { 43299,  43650,  44001},
                 { 45054,  45405,  45756}},
                {{118818, 119898, 120978},
                 {124218, 125298, 126378},
                 {129618, 130698, 131778}},
                {{196092, 197901, 199710},
                 {205137, 206946, 208755},
                 {214182, 215991, 217800}},
                {{273366, 275904, 278442},
                 {286056, 288594, 291132},
                 {298746, 301284, 303822}}
            }
        }
    });
    auto opUnfold = std::static_pointer_cast<OperatorTensor>(myUnfold->getOperator());
    auto opReshape = std::static_pointer_cast<OperatorTensor>(myReshape->getOperator());
    auto opMatMul = std::static_pointer_cast<OperatorTensor>(myMatMul->getOperator());
    auto opFold = std::static_pointer_cast<OperatorTensor>(myFold->getOperator());

    opUnfold->associateInput(0, myInput);
    opReshape->associateInput(0, myWeights);

    auto g = getConnectedGraphView(myMatMul);
    g->setDataType(DataType::Int32);
    g->setBackend("cpu");
    g->save("unfold_matmul_fold");

    g->forwardDims();
    g->save("unfold_matmul_fold");

    SequentialScheduler scheduler(g);
    scheduler.forward();

    // opFold->getOutput(0)->print();
    REQUIRE(*(opFold->getOutput(0)) == *myOutput);
}
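What this test exercises is the standard im2col identity: with W_flat the (C_out, C_in*kh*kw) reshape of the weights,

    Conv(X, W) = Fold(W_flat . Unfold(X))

Here C_in = 3, C_out = 4 and kh = kw = 3, so W_flat is the (4, 27) reshape used above and Unfold(X) is (2, 27, 9) for the (2, 3, 5, 5) input; the MatMul result (2, 4, 9) is folded back into the (2, 4, 3, 3) convolution output.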