Commit 9090eb3c authored by Olivier BICHLER

Initial commit

parent 0d33ca05
3 merge requests: !279 v0.4.0, !253 v0.4.0, !244 Add MatMulTiling recipe
@@ -126,8 +126,12 @@ static Registrar<Tensor> registrarTensorImpl_cpu_Float16(
{"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int64(
{"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<int64_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt64(
{"cpu", DataType::UInt64}, Aidge::TensorImpl_cpu<uint64_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
{"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int32_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt32(
{"cpu", DataType::UInt32}, Aidge::TensorImpl_cpu<uint32_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int16(
{"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
...
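The new registrations extend the CPU backend to the added integer types. A minimal sketch of how a registered implementation is resolved (the same call shape the Tensor(Vector<T>&&) constructor uses further down in this commit; the device index 0 and the 8-element shape are illustrative):

// Hedged sketch: resolve the newly registered CPU implementation for UInt32.
auto impl = Registrar<Tensor>::create({"cpu", DataType::UInt32})(0, {8});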
@@ -103,6 +103,22 @@ class Tensor : public Data,
resize(dims);
}
/**
* @brief Construct a new Tensor object from the 1-dimension Vector helper.
* @tparam T datatype
*/
template <typename T>
constexpr Tensor(Vector<T> &&arr)
: Data(Type),
mDataType(NativeType<T>::type),
mDims({arr.data.size()}),
mStrides({1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {arr.data.size()})),
mSize(arr.data.size())
{
mImpl->copyFromHost(&arr.data[0], arr.data.size());
}
/**
* @brief Construct a new Tensor object from the 1-dimension Array helper.
* @tparam T datatype
@@ -199,6 +215,12 @@
*/
Tensor &operator=(const Tensor& other);
template <typename T>
constexpr Tensor &operator=(Vector<T> &&arr) {
*this = Tensor(std::move(arr));
return *this;
}
template <typename T, std::size_t SIZE_0>
constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
*this = Tensor(std::move(arr));
...
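A hedged usage sketch of the new Vector overloads: unlike the fixed-size Array1D helpers, Vector wraps a std::vector, so a Tensor can now be initialized and assigned from runtime-sized 1-D data:

// Construct and assign from the runtime-sized Vector helper (sketch).
Tensor a = Tensor(Vector<int>{{1, 2, 3, 4}});
Tensor b;
b = Vector<float>{{0.5f, 1.5f}};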
@@ -123,6 +123,8 @@ void explicitCastMove(std::shared_ptr<GraphView> graphView);
*/
void expandMetaOps(std::shared_ptr<GraphView> graph, bool recursive = false);
/**
 * @brief Recursively tile a MatMul operation so that no output matrix dimension exceeds the given maximum.
 * @param matMul MatMul node to tile.
 * @param maxDims Maximum size of each of the two output matrix dimensions.
*/
void matMulTiling(NodePtr matMul, const std::vector<DimSize_t>& maxDims);
} // namespace Aidge
#endif /* AIDGE_CORE_UTILS_RECIPES_H_ */
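A hedged call sketch for the new recipe (here `g` and `matmul` stand for a GraphView and a MatMul node, as in the test at the bottom of this commit); the implementation requires shapes to be propagated first:

g->forwardDims();               // required: the recipe checks dimsForwarded()
matMulTiling(matmul, {16, 16}); // tile until no output matrix dim exceeds 16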
@@ -101,6 +101,11 @@ constexpr std::array<T, N + 1> append(T t, std::array<T, N> a) {
}
// Generic helper for initializing a Tensor
template <typename T>
struct Vector {
std::vector<T> data;
};
template <typename T, std::size_t SIZE_0>
struct Array1D {
T data[SIZE_0];
...
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <memory>
#include <set>
#include <string>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/operator/MatMul.hpp"
#include "aidge/operator/Slice.hpp"
#include "aidge/operator/Identity.hpp"
#include "aidge/operator/Concat.hpp"
#include "aidge/recipes/Recipes.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
// see https://en.wikipedia.org/wiki/Matrix_multiplication_algorithm
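// Editor's note (sketch of the technique): tiling along the first output
// dimension relies on the row-block identity A·B = [A0; A1]·B = [A0·B; A1·B].
// A is split at `splitIndex`, each half is multiplied by the shared, unsplit
// B (routed through identity1), and the two partial products are concatenated
// along axis 0. The slice coordinates below assume 2-D inputs.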
void Aidge::matMulTiling(NodePtr matMul, const std::vector<DimSize_t>& maxDims) {
// AIDGE_INTERNAL_ASSERT(expr) checks a condition; passing only a string
// literal can never fail, so the preconditions are checked with AIDGE_ASSERT.
AIDGE_ASSERT(matMul->getOperator()->type() == "MatMul", "Operator should be a MatMul.");
AIDGE_ASSERT(matMul->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
const auto op = std::static_pointer_cast<OperatorTensor>(matMul->getOperator());
AIDGE_ASSERT(op->dimsForwarded(), "Dimensions must be forwarded before any tiling.");
const auto& in0Tensor = op->getInput(0);
const auto& in1Tensor = op->getInput(1);
const auto& outTensor = op->getOutput(0);
const auto& input0Dims = in0Tensor->dims();
const auto& input1Dims = in1Tensor->dims();
const auto& outputDims = outTensor->dims();
const auto outputMatDims = std::vector<std::size_t>(outputDims.end() - 2, outputDims.end());
if (outputMatDims[0] > maxDims[0]) {
const size_t axis = 0;
const auto splitIndex = outputMatDims[axis] / 2;
auto identity0 = Identity();
auto slice00 = Slice();
auto slice00_starts = Producer(std::make_shared<Tensor>(Vector<DimSize_t>{{0, 0}}), "", true);
slice00_starts->addChild(slice00, 0, 1);
auto slice00_ends = Producer(std::make_shared<Tensor>(Vector<DimSize_t>{{splitIndex, input0Dims[1]}}), "", true);
slice00_ends->addChild(slice00, 0, 2);
auto slice00_axes = Producer(std::make_shared<Tensor>(Vector<DimSize_t>{{0, 1}}), "", true);
slice00_axes->addChild(slice00, 0, 3);
auto matMul00 = MatMul();
auto identity1 = Identity();
auto slice01 = Slice();
auto slice01_starts = Producer(std::make_shared<Tensor>(Vector<DimSize_t>{{splitIndex, 0}}), "", true);
slice01_starts->addChild(slice01, 0, 1);
auto slice01_ends = Producer(std::make_shared<Tensor>(Vector<DimSize_t>{{input0Dims[0], input0Dims[1]}}), "", true);
slice01_ends->addChild(slice01, 0, 2);
auto slice01_axes = Producer(std::make_shared<Tensor>(Vector<DimSize_t>{{0, 1}}), "", true);
slice01_axes->addChild(slice01, 0, 3);
auto matMul01 = MatMul();
auto concat0 = Concat(2, axis);
identity0->addChild(slice00, 0, 0);
identity0->addChild(slice01, 0, 0);
identity1->addChild(matMul00, 0, 1);
identity1->addChild(matMul01, 0, 1);
slice00->addChild(matMul00, 0, 0);
slice01->addChild(matMul01, 0, 0);
matMul00->addChild(concat0, 0, 0);
matMul01->addChild(concat0, 0, 1);
auto gMatMul = std::make_shared<GraphView>();
gMatMul->add({matMul});
auto g = std::make_shared<GraphView>();
g->add({identity0, identity1});
g->add({slice00, matMul00, matMul01, slice01, concat0});
g->save("micrograph");
auto replaced = GraphView::replace(gMatMul, g);
if (replaced) {
g->forwardDims();
// Recursive tiling
matMulTiling(matMul00, maxDims);
matMulTiling(matMul01, maxDims);
}
else {
Log::warn("Unable to split MatMul {}", matMul->name());
}
}
else if (outputMatDims[1] > maxDims[1]) {
// TODO: tiling along the second output dimension (columns) is not implemented yet.
}
}
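Worked example (editor's sketch, using the shapes from the test below): for a 224x224 output matrix and maxDims = {16, 16}, the first call splits the rows at splitIndex = 112; the recursion then halves each tile, 112 → 56 → 28 → 14, and stops once a tile has at most 16 rows, yielding sixteen 14-row tiles. Column tiling (the empty else-if branch above) is not implemented yet, so the 224 columns are left untouched.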
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include "aidge/recipes/Recipes.hpp"
#include "aidge/operator/MatMul.hpp"
#include "aidge/operator/AvgPooling.hpp"
#include "aidge/operator/MaxPooling.hpp"
#include "aidge/operator/GenericOperator.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/graph/OpArgs.hpp"
#include <cstddef>
using namespace Aidge;
TEST_CASE("[MatMulTiling]") {
auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
auto w1 = Producer({16, 3, 224, 224}, "w1");
auto matmul1 = MatMul("matmul1");
auto w2 = Producer({16, 3, 224, 224}, "w2");
auto matmul2 = MatMul("matmul2");
auto w3 = Producer({16, 3, 224, 224}, "w3");
auto matmul3 = MatMul("matmul3");
dataProvider->addChild(matmul1, 0, 0);
w1->addChild(matmul1, 0, 1);
matmul1->addChild(matmul2, 0, 0);
w2->addChild(matmul2, 0, 1);
matmul2->addChild(matmul3, 0, 0);
w3->addChild(matmul3, 0, 1);
auto g1 = getConnectedGraphView(matmul1);
g1->forwardDims();
g1->save("MatMulSplitting_graph");
matMulTiling(matmul1, {16, 16});
g1->save("MatMulSplitting_graph_split");
}