Commit c131ada5 authored by Thibault Allenet

Merge dev into dataloader

parents 7fc2dd95 95077929
@@ -26,7 +26,7 @@ namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
- py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor, Attributes>(
+ py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>(
m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
py::multiple_inheritance())
.def(py::init<const std::array<DimSize_t, DIM> &,
@@ -36,7 +36,8 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
py::arg("stride_dims"),
py::arg("ceil_mode"))
.def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
.def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName);
.def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
.def("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName);
m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
......
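For orientation, here is a minimal Python sketch of how the attribute bindings touched in this hunk are typically used. It is not part of the commit; the module name aidge_core, the get_operator() accessor, and the exact factory arguments are assumptions based on the signatures visible above.

import aidge_core  # assumed module name built from these pybind11 bindings

# Hypothetical example: create a 2D max-pooling node via the bound factory
# (kernel_dims and name are the arguments visible in the truncated lambda).
pool_node = aidge_core.MaxPooling2D([2, 2], "pool1")
pool_op = pool_node.get_operator()  # assumed accessor returning the MaxPoolingOp2D

# Generic access through the Attributes base class (bound further down in this commit):
for attr_name in pool_op.get_attrs_name():
    print(attr_name, pool_op.get_attr(attr_name))

# The new "attributes_name" binding exposes MaxPooling_Op::staticGetAttrsName,
# i.e. the static list of attribute names declared by the operator.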
@@ -25,7 +25,7 @@ namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
- py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Operator, Attributes>(
+ py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>(
m, ("PadOp" + std::to_string(DIM) + "D").c_str(),
py::multiple_inheritance())
.def(py::init<const std::array<DimSize_t, 2*DIM> &,
@@ -36,6 +36,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
py::arg("borderValue") = 0.0)
.def("get_inputs_name", &Pad_Op<DIM>::getInputsName)
.def("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
.def("attributes_name", &Pad_Op<DIM>::staticGetAttrsName)
;
m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
......
@@ -30,13 +30,14 @@ void declare_Producer(py::module &m) {
void init_Producer(py::module &m) {
- py::class_<Producer_Op, std::shared_ptr<Producer_Op>, OperatorTensor, Attributes>(
+ py::class_<Producer_Op, std::shared_ptr<Producer_Op>, Attributes, OperatorTensor>(
m,
"ProducerOp",
py::multiple_inheritance())
.def("dims", &Producer_Op::dims)
.def("get_inputs_name", &Producer_Op::getInputsName)
.def("get_outputs_name", &Producer_Op::getOutputsName);
.def("get_outputs_name", &Producer_Op::getOutputsName)
.def("attributes_name", &Producer_Op::staticGetAttrsName);
m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&, bool)>(&Producer), py::arg("tensor"), py::arg("name") = "", py::arg("constant") = false);
declare_Producer<1>(m);
......
@@ -24,10 +24,11 @@ namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) {
- py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, OperatorTensor, Attributes>(
+ py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, Attributes, OperatorTensor>(
m, ("ReduceMeanOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
.def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName)
.def("get_outputs_name", &ReduceMean_Op<DIM>::getOutputsName)
.def("attributes_name", &ReduceMean_Op<DIM>::staticGetAttrsName)
;
m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), [](const std::vector<int>& axes,
......
@@ -19,9 +19,10 @@ namespace py = pybind11;
namespace Aidge {
void init_Softmax(py::module& m) {
- py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor, Attributes>(m, "SoftmaxOp", py::multiple_inheritance())
+ py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Attributes, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
.def("get_inputs_name", &Softmax_Op::getInputsName)
.def("get_outputs_name", &Softmax_Op::getOutputsName);
.def("get_outputs_name", &Softmax_Op::getOutputsName)
.def("attributes_name", &Softmax_Op::staticGetAttrsName);
m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
}
......
@@ -25,12 +25,13 @@
namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM>
void declare_Transpose(py::module &m) {
- py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, OperatorTensor, Attributes>(
+ py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Attributes, OperatorTensor>(
m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
.def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
.def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName);
.def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName)
.def("attributes_name", &Transpose_Op<DIM>::staticGetAttrsName);
m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order,
const std::string& name) {
......
@@ -11,6 +11,9 @@
#include <pybind11/pybind11.h>
#include "aidge/backend/cpu/data/TensorImpl.hpp" // This include add Tensor
namespace py = pybind11;
namespace Aidge {
......
#include <pybind11/pybind11.h>
#include "aidge/utils/Attributes.hpp"
#include "aidge/utils/DynamicAttributes.hpp"
#include "aidge/utils/StaticAttributes.hpp"
namespace py = pybind11;
namespace Aidge {
@@ -21,11 +22,13 @@ void init_Attributes(py::module& m){
.def("has_attr", &Attributes::hasAttr, py::arg("name"))
.def("get_attr_type", &Attributes::getAttrType, py::arg("name"))
.def("get_attrs_name", &Attributes::getAttrsName)
.def("get_attr", &Attributes::getAttrPy, py::arg("name"));
.def("get_attr", &Attributes::getAttrPy, py::arg("name"))
.def("__getattr__", &Attributes::getAttrPy, py::arg("name"))
.def("set_attr", &Attributes::setAttrPy, py::arg("name"), py::arg("value"))
.def("__setattr__", &Attributes::setAttrPy, py::arg("name"), py::arg("value"));
py::class_<DynamicAttributes, std::shared_ptr<DynamicAttributes>, Attributes>(m, "DynamicAttributes")
.def("add_attr", &DynamicAttributes::addAttrPy, py::arg("name"), py::arg("value"))
.def("set_attr", &DynamicAttributes::setAttrPy, py::arg("name"), py::arg("value"))
.def("del_attr", &DynamicAttributes::delAttr, py::arg("name"));
m.def("test_DynamicAttributes_binding", &test_DynamicAttributes_binding);
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <array>
#include <catch2/catch_test_macros.hpp>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"
using namespace Aidge;

TEST_CASE("Tensor creation") {
  SECTION("from const array") {
    Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
    Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
    Tensor xFloat =
        Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};

    SECTION("Tensor features") {
      REQUIRE(x.nbDims() == 3);
      REQUIRE(x.dims()[0] == 2);
      REQUIRE(x.dims()[1] == 2);
      REQUIRE(x.dims()[2] == 2);
      REQUIRE(x.size() == 8);
    }

    SECTION("Access to array") {
      REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
      REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
    }

    SECTION("get function") {
      REQUIRE(x.get<int>({0, 0, 0}) == 1);
      REQUIRE(x.get<int>({0, 0, 1}) == 2);
      REQUIRE(x.get<int>({0, 1, 1}) == 4);
      REQUIRE(x.get<int>({1, 1, 0}) == 7);
      x.set<int>({1, 1, 1}, 36);
      REQUIRE(x.get<int>({1, 1, 1}) == 36);
    }

    SECTION("Pretty printing for debug") { REQUIRE_NOTHROW(x.print()); }

    SECTION("Tensor (in)equality") {
      REQUIRE(x == xCopy);
      REQUIRE_FALSE(x == xFloat);
    }
  }
}

TEST_CASE("Tensor methods") {
  Tensor x = Array3D<int, 2, 2, 2>{{
      {{1, 2},
       {3, 4}},
      {{5, 6},
       {7, 8}}
  }};
  Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
  Tensor xFloat =
      Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};

  SECTION("Tensor sharing") {
    Tensor xCopyCtor(x);
    REQUIRE(xCopyCtor.getImpl() == x.getImpl());

    Tensor xEqOp = x;
    REQUIRE(xEqOp.getImpl() == x.getImpl());

    Tensor xCloned = x.clone();
    REQUIRE(xCloned.getImpl() != x.getImpl());
    REQUIRE(xCloned == x);
  }

  SECTION("Tensor extract") {
    Tensor y = x.extract({0, 1});
    REQUIRE(y.getImpl() == x.getImpl());
    REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
    REQUIRE(y.isContiguous());

    Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1});
    REQUIRE(y2.getImpl() == x.getImpl());
    REQUIRE(!y2.isContiguous());

    Tensor y3 = y2.clone();
    REQUIRE(y3.isContiguous());
    REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
  }
}