Commit 76f158b0 authored by Olivier BICHLER

Moved transpose default impl to core

parent 04e67d3f
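In short, this commit replaces the DIM-templated Transpose_Op<DIM> (whose permutation was a std::array<DimSize_t, DIM> attribute) with a single non-templated Transpose_Op that stores the permutation as a std::vector<DimSize_t>, and it adds a backend-agnostic Transpose_OpImpl to core that is used whenever no backend-specific kernel is registered. The sketch below is illustrative only (the node names and the main() wrapper are not part of the commit); it shows how a caller's construction code changes, assuming the aidge_core headers are on the include path.

// Illustrative sketch, not part of the commit.
#include "aidge/operator/Transpose.hpp"

int main() {
    // Before this commit: the rank was a template parameter and the
    // permutation a std::array, e.g.
    //   auto node = Aidge::Transpose<3>({0, 2, 1}, "transpose_old");

    // After this commit: a single rank-agnostic factory taking a
    // std::vector permutation; the constructor installs the generic
    // Transpose_OpImpl from core as the default implementation.
    auto node = Aidge::Transpose({0, 2, 1}, "transpose_new");
    return node ? 0 : 1;
}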
@@ -26,40 +26,47 @@
 #include "aidge/utils/Types.h"

 namespace Aidge {
+class Transpose_OpImpl : public OperatorImpl {
+public:
+    Transpose_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
 enum class TransposeAttr { OutputDimsOrder };

-template <DimIdx_t DIM>
 class Transpose_Op : public OperatorTensor,
-                public Registrable<Transpose_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
-                public StaticAttributes<TransposeAttr,
-                                        std::array<DimSize_t, DIM>> {
+                public Registrable<Transpose_Op, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op&)>,
+                public StaticAttributes<TransposeAttr, std::vector<DimSize_t>> {

 public:
     static const std::string Type;

     Transpose_Op() = delete;

-    using Attributes_ = StaticAttributes<TransposeAttr,
-                                         std::array<DimSize_t, DIM>>;
+    using Attributes_ = StaticAttributes<TransposeAttr, std::vector<DimSize_t>>;
     template <TransposeAttr e>
     using attr = typename Attributes_::template attr<e>;

-    constexpr Transpose_Op(const std::array<DimSize_t, DIM> &output_dims_order)
+    Transpose_Op(const std::vector<DimSize_t> &output_dims_order)
         : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<TransposeAttr::OutputDimsOrder>(output_dims_order)) { }
+          Attributes_(attr<TransposeAttr::OutputDimsOrder>(output_dims_order))
+    {
+        mImpl = std::make_shared<Transpose_OpImpl>(*this);
+    }

     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Transpose_Op(const Transpose_Op<DIM>& op)
+    Transpose_Op(const Transpose_Op& op)
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Transpose_Op<DIM>, *this, op.backend());
-        }else{
-            mImpl = nullptr;
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
+        }
+        else {
+            mImpl = std::make_shared<Transpose_OpImpl>(*this);
         }
     }
@@ -68,27 +75,12 @@ class Transpose_Op : public OperatorTensor,
      * @see Operator::Transpose_Op
      */
     std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Transpose_Op<DIM>>(*this);
+        return std::make_shared<Transpose_Op>(*this);
     }

-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        if (!getInput(0)->empty()) {
-            auto attr = (this)->getStaticAttributes();
-            const std::array<DimSize_t, DIM>& outDimsOrder = static_cast<const std::array<DimSize_t, DIM>&>(std::get<0>(attr));
-            std::vector<DimSize_t> outputDims;
-            for (std::size_t i = 0; i < DIM; ++i) {
-                outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);
-            }
-            mOutputs[0]->resize(outputDims);
-            return true;
-        }
-        return false;
-    }
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;

-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Transpose_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;

     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -98,26 +90,10 @@ class Transpose_Op : public OperatorTensor,
     }
 };

-template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Transpose(const std::array<DimSize_t, DIM> &output_dims_order,
+inline std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &output_dims_order,
                                        const std::string& name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Transpose, not supported");
-    return std::make_shared<Node>(std::make_shared<Transpose_Op<static_cast<DimIdx_t>(DIM)>>(output_dims_order), name);
-}
-
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
-template <DimSize_t DIM>
-inline std::shared_ptr<Node> Transpose(
-    DimSize_t const (&output_dims_order)[DIM],
-    const std::string& name = "") {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Transpose, not supported");
-    return Transpose(to_array(output_dims_order), name);
+    return std::make_shared<Node>(std::make_shared<Transpose_Op>(output_dims_order), name);
 }

-template <DimIdx_t DIM>
-const std::string Transpose_Op<DIM>::Type = "Transpose";
-
 } // namespace Aidge

 namespace {
@@ -25,32 +25,19 @@
 namespace py = pybind11;
 namespace Aidge {

-template <DimIdx_t DIM>
 void declare_Transpose(py::module &m) {
-  const std::string pyClassName("TransposeOp" + std::to_string(DIM) + "D");
-  py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Attributes, OperatorTensor>(
-    m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
-  .def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName)
-  .def("attributes_name", &Transpose_Op<DIM>::staticGetAttrsName);
-  declare_registrable<Transpose_Op<DIM>>(m, pyClassName);
-  m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order,
-                                                              const std::string& name) {
-        AIDGE_ASSERT(output_dims_order.size() == DIM, "output_dims_order size [{}] does not match DIM [{}]", output_dims_order.size(), DIM);
-        return Transpose<DIM>(to_array<DIM>(output_dims_order.begin()), name);
-    }, py::arg("output_dims_order"),
-       py::arg("name") = "");
+  const std::string pyClassName("TransposeOp");
+  py::class_<Transpose_Op, std::shared_ptr<Transpose_Op>, Attributes, OperatorTensor>(
+    m, ("TransposeOp").c_str(), py::multiple_inheritance())
+  .def("get_inputs_name", &Transpose_Op::getInputsName)
+  .def("get_outputs_name", &Transpose_Op::getOutputsName)
+  .def("attributes_name", &Transpose_Op::staticGetAttrsName);
+  declare_registrable<Transpose_Op>(m, pyClassName);
+  m.def("Transpose", &Transpose, py::arg("output_dims_order"), py::arg("name") = "");
 }

 void init_Transpose(py::module &m) {
-  declare_Transpose<2>(m);
-  declare_Transpose<3>(m);
-  declare_Transpose<4>(m);
-  declare_Transpose<5>(m);
-  declare_Transpose<6>(m);
+  declare_Transpose(m);
 }
 } // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/operator/Transpose.hpp"
#include <cstddef> // std::size_t
#include <cstdint> // std::int64_t
#include <memory>
#include <stdexcept> // std::runtime_error
#include <string>
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
void Aidge::Transpose_OpImpl::forward() {
const Transpose_Op& op = dynamic_cast<const Transpose_Op&>(mOp);
const auto inputDims = op.getInput(0)->dims();
const auto outputDims = op.getOutput(0)->dims();
std::vector<std::size_t> outStrides(outputDims.size(), 1);
for (size_t i = 0; i < outputDims.size(); ++i) {
for (size_t j = i+1; j < outputDims.size(); ++j)
{
outStrides[i] *= outputDims[j];
}
}
std::vector<size_t> indices(outputDims.size(), 0);
for (size_t i = 0; i < op.getInput(0)->size(); ++i) {
size_t idx = 0;
// Permute indices based on OutputDimsOrder attr
for (int j = outputDims.size() -1; j >=0; --j) {
idx += indices[op.getAttr<std::vector<DimSize_t>>(0)[j]] * outStrides[j];
}
// Copy the value in output
op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i), 1, idx);
// Update indices for the next iteration
for (int j = outputDims.size() - 1; j >= 0; --j) {
if (indices[j] < inputDims[j] - 1) {
indices[j]++;
break;
} else {
indices[j] = 0;
}
}
}
}
const std::string Aidge::Transpose_Op::Type = "Transpose";
bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
// check input has been associated
if (!getInput(0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
}
if (!getInput(0)->empty()) {
const auto& outDimsOrder = getAttr<std::vector<DimSize_t>>(0);
std::vector<DimSize_t> outputDims;
for (std::size_t i = 0; i < outDimsOrder.size(); ++i) {
outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);
}
mOutputs[0]->resize(outputDims);
return true;
}
return false;
}
void Aidge::Transpose_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
if (Registrar<Transpose_Op>::exists({name})){
SET_IMPL_MACRO(Transpose_Op, *this, name);
}
else {
mImpl = std::make_shared<Transpose_OpImpl>(*this);
}
mOutputs[0]->setBackend(name, device);
}
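The Transpose_OpImpl::forward() added above is backend-agnostic: it walks the input buffer linearly while maintaining a multi-dimensional counter over the input dimensions, and computes each element's destination offset from the output strides and the OutputDimsOrder permutation. The following standalone sketch (plain C++ over std::vector<float>, not the Aidge API; the transpose() helper and the 2x3 example are made up for illustration) reproduces the same indexing scheme:

// Standalone illustration of the stride-based permutation used above.
#include <cstddef>
#include <iostream>
#include <vector>

std::vector<float> transpose(const std::vector<float>& in,
                             const std::vector<std::size_t>& inDims,
                             const std::vector<std::size_t>& order) {
    // Output dims: outDims[j] = inDims[order[j]]
    std::vector<std::size_t> outDims(inDims.size());
    for (std::size_t j = 0; j < inDims.size(); ++j) {
        outDims[j] = inDims[order[j]];
    }
    // Row-major strides of the output tensor.
    std::vector<std::size_t> outStrides(outDims.size(), 1);
    for (std::size_t j = outDims.size(); j-- > 1;) {
        outStrides[j - 1] = outStrides[j] * outDims[j];
    }
    std::vector<float> out(in.size());
    std::vector<std::size_t> indices(inDims.size(), 0); // multi-index over the input
    for (std::size_t i = 0; i < in.size(); ++i) {
        // The input index along axis order[j] becomes the output index along axis j.
        std::size_t idx = 0;
        for (std::size_t j = 0; j < outDims.size(); ++j) {
            idx += indices[order[j]] * outStrides[j];
        }
        out[idx] = in[i];
        // Advance the input multi-index in row-major order.
        for (std::size_t j = inDims.size(); j-- > 0;) {
            if (++indices[j] < inDims[j]) break;
            indices[j] = 0;
        }
    }
    return out;
}

int main() {
    // 2x3 input [[1,2,3],[4,5,6]] with permutation {1,0} -> 3x2 output.
    const std::vector<float> in = {1, 2, 3, 4, 5, 6};
    const auto out = transpose(in, {2, 3}, {1, 0});
    for (float v : out) std::cout << v << ' ';   // prints: 1 4 2 5 3 6
    std::cout << '\n';
    return 0;
}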
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <memory>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Transpose.hpp"
using namespace Aidge;
TEST_CASE("[cpu/operator] Transpose(forward)") {
SECTION("3D Tensor") {
std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array3D<float,2,3,4> {
{
{{0.42507452, 0.11244237, 0.43243718, 0.62354952},
{0.90250170, 0.48719984, 0.45781207, 0.92536664},
{0.06348717, 0.91678733, 0.64452291, 0.00484818}},
{{0.66873497, 0.99508536, 0.55714869, 0.84887981},
{0.41666120, 0.92365038, 0.80034822, 0.38721532},
{0.52037925, 0.53937608, 0.66380072, 0.36330253}}
}
});
std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array3D<float,2,4,3> {
{
{{0.42507452, 0.90250170, 0.06348717},
{0.11244237, 0.48719984, 0.91678733},
{0.43243718, 0.45781207, 0.64452291},
{0.62354952, 0.92536664, 0.00484818}},
{{0.66873497, 0.41666120, 0.52037925},
{0.99508536, 0.92365038, 0.53937608},
{0.55714869, 0.80034822, 0.66380072},
{0.84887981, 0.38721532, 0.36330253}}
}
});
std::shared_ptr<Node> myTranspose = Transpose({0,2,1});
auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator());
op->associateInput(0,input);
op->setDataType(DataType::Float32);
op->setBackend("cpu");
myTranspose->forward();
REQUIRE(*(op->getOutput(0)) == *output);
}
SECTION("4D Tensor") {
std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,2,3,1,4> {
{
{
{
{1, 2, 3, 4}
},
{
{5, 6, 7, 8}
},
{
{9, 10, 11, 12}
}
},
{
{
{13, 14, 15, 16}
},
{
{17, 18, 19, 20}
},
{
{21, 22, 23, 24}
}
}
}
});
std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array4D<int,2,4,1,3> {
{
{
{
{1, 5, 9}
},
{
{2, 6, 10}
},
{
{3, 7, 11}
},
{
{4, 8, 12}
}
},
{
{
{13, 17, 21}
},
{
{14, 18, 22}
},
{
{15, 19, 23}
},
{
{16, 20, 24}
}
}
}
});
std::shared_ptr<Node> myTranspose = Transpose({0,3,2,1});
auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator());
op->associateInput(0,input);
op->setDataType(DataType::Int32);
op->setBackend("cpu");
myTranspose->forward();
REQUIRE(*(op->getOutput(0)) == *output);
}
}
\ No newline at end of file