Commit 0f1fb3b0 authored by Michal Szczepanski

Required input conversion to optional input for Resize operator, pybind update

parent 78467ffe
@@ -18,6 +18,7 @@
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
@@ -27,7 +28,7 @@ namespace Aidge {
enum class ResizeAttr { NoROI, NoScales, NoSizes };
class Resize_Op : public OperatorTensor,
-public Registrable<Resize_Op, std::string, std::unique_ptr<OperatorImpl>(const Resize_Op&)>
+public Registrable<Resize_Op, std::string, std::unique_ptr<OperatorImpl>(const Resize_Op&)>,
public StaticAttributes<ResizeAttr,
bool,
bool,
@@ -45,19 +46,20 @@ public:
template <ResizeAttr e>
using attr = typename Attributes_::template attr<e>;
-constexpr Resize_Op(const bool noROI, const bool noScales, const bool noSizes)
+Resize_Op(const bool noROI, const bool noScales, const bool noSizes)
: OperatorTensor(Type, 1, 3, 1),
// input tensor, onnx optional input [roi/scales/sizes] *constant, output
-Attributes_(attr<ResizeAttr::NoROI>( noROI),
-attr<ResizeAttr::NoScales>( noScales),
-attr<ResizeAttr::NoSizes>( noSizes)) {}
+Attributes_(attr<ResizeAttr::NoROI>(noROI),
+attr<ResizeAttr::NoScales>(noScales),
+attr<ResizeAttr::NoSizes>(noSizes)) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
* but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Resize_Op(const Resize_Op& op)
-: OperatorTensor(op)
+: OperatorTensor(op),
+Attributes_(op)
{
// copy an operator
if (!op.backend().empty()) {
@@ -95,7 +97,7 @@ inline std::shared_ptr<Node> Resize(const std::size_t nbInputDims,
const bool noScales,
const bool noSizes,
const std::string &name = "") {
-resize_node = std::make_shared<Node>(std::make_shared<Resize_Op>(noROI, noScales, noSizes), name);
+auto resize_node = std::make_shared<Node>(std::make_shared<Resize_Op>(noROI, noScales, noSizes), name);
// create empty producers of the same as the rank of input size [nbInputDims]
addProducer(resize_node, 1, std::array<DimSize_t, 1>({noROI ? 0 : nbInputDims}), "roi"); // already sets roi dims
addProducer(resize_node, 2, std::array<DimSize_t, 1>({noScales ? 0 : nbInputDims}), "scales"); // already sets scales dims
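For reference, a minimal sketch of how the updated Resize() factory could be used from C++ once this change is in place; the function name, flag values, and node name below are illustrative, not part of this commit:

#include <memory>
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Resize.hpp"

// Sketch only: build a Resize node that will be driven by its "sizes" input;
// roi and scales are declared absent, so their producers stay empty.
std::shared_ptr<Aidge::Node> makeResizeBySizes() {
    return Aidge::Resize(/*nbInputDims=*/4,
                         /*noROI=*/true,
                         /*noScales=*/true,
                         /*noSizes=*/false,
                         "resize0");
}

Only the data tensor then remains to be connected to input 0; the roi and scales producers (inputs 1 and 2) are attached by the factory itself, as shown in the hunk above.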
@@ -111,7 +113,7 @@ const char *const EnumStrings<Aidge::ResizeAttr>::data[] = {
"noROI",
"noScales",
"noSizes"
-}; // do we keep it?
+};
}
@@ -18,10 +18,10 @@ namespace py = pybind11;
namespace Aidge {
void init_Resize(py::module& m) {
-py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(m, "ResizeOp", py::multiple_inheritance())
-.def("get_inputs_name", &Resize_Op::getInputsName)
-.def("get_outputs_name", &Resize_Op::getOutputsName);
+py::class_<Resize_Op, std::shared_ptr<Resize_Op>, Attributes, OperatorTensor>(m, "ResizeOp", py::multiple_inheritance())
+.def_static("get_inputs_name", &Resize_Op::getInputsName)
+.def_static("get_outputs_name", &Resize_Op::getOutputsName);
-m.def("Resize", &Resize, py::arg("name") = "");
+m.def("Resize", &Resize, py::arg("nb_input_dims"), py::arg("no_roi"), py::arg("no_scales"), py::arg("no_sizes"), py::arg("name") = "");
}
} // namespace Aidge
@@ -53,10 +53,12 @@ bool Aidge::Resize_Op::forwardDims(bool /*allowDataDependency*/) {
float* scales = static_cast<float*>(getInput(1)->getImpl()->rawPtr()); //TODO cast according to tensor type
-// std::vector<DimSize_t> outDims[ width_out = width_input_tensor*scales[3], \
-// height_out = heigh_input_tensor*scales[2], \
-// depth_input_tensor = depth_input_tensor*scales[1] \
-// batch_input_tensor = batch_input_tensor*scales[0] ];
+/*
+std::vector<DimSize_t> outDims[ width_out = width_input_tensor*scales[3], \
+height_out = heigh_input_tensor*scales[2], \
+depth_input_tensor = depth_input_tensor*scales[1] \
+batch_input_tensor = batch_input_tensor*scales[0] ];
+*/
for (std::size_t dim=0; dim < getInput(1)->size(); ++dim) {
// todo verify if batch and depth is not 1 !
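The commented-out pseudo-code above boils down to multiplying each input dimension by the matching entry of the scales tensor. Here is a standalone sketch of that computation, assuming the same raw float* view of the scales input as in the code above; the flooring is an assumption about the intended rounding, not something this diff fixes:

#include <cmath>
#include <cstddef>
#include <vector>

// Sketch only: output dims as input dims scaled per axis.
std::vector<std::size_t> dimsFromScales(const std::vector<std::size_t>& inDims,
                                        const float* scales) {
    std::vector<std::size_t> outDims(inDims.size());
    for (std::size_t d = 0; d < inDims.size(); ++d) {
        outDims[d] = static_cast<std::size_t>(
            std::floor(static_cast<float>(inDims[d]) * scales[d]));
    }
    return outDims;
}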
@@ -71,10 +73,15 @@ bool Aidge::Resize_Op::forwardDims(bool /*allowDataDependency*/) {
}
if ((!getInput(0)->empty()) && (getInput(1)->empty()) && !getInput(2)->empty()) {
-// condition 2. input tensor + sizes
-// to verify 2 arg
-// "Input 0 and 2 must be provided and input 1 must not be provided."
-// "data_input" and "sizes"
+/*
+condition 2. input tensor + sizes
+to verify 2 arg
+"Input 0 and 2 must be provided and input 1 must not be provided."
+"data_input" and "sizes"
+*/
std::vector<DimSize_t> outDims = getInput(0)->dims();
fmt::print("input dims 0 = {}\n", outDims);
@@ -86,10 +93,12 @@ bool Aidge::Resize_Op::forwardDims(bool /*allowDataDependency*/) {
float* sizes = static_cast<float*>(getInput(2)->getImpl()->rawPtr()); //TODO cast according to tensor type
-// std::vector<DimSize_t> outDims[ width_out = sizes[0], \
+/*
+std::vector<DimSize_t> outDims[ width_out = sizes[0], \
// height_out = sizes[1], \
// depth_input_tensor, \
// batch_input_tensor];
+*/
for (std::size_t dim=0; dim < getInput(1)->size(); ++dim) {
// todo verify if batch and depth is not 1 !
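In the sizes branch the output shape is not computed at all: it is read directly from the sizes input. A hedged sketch of that step, mirroring the float* cast used above; the rank argument and axis ordering are illustrative assumptions:

#include <cstddef>
#include <vector>

// Sketch only: output dims taken verbatim from the sizes input.
std::vector<std::size_t> dimsFromSizes(std::size_t rank, const float* sizes) {
    std::vector<std::size_t> outDims(rank);
    for (std::size_t d = 0; d < rank; ++d) {
        outDims[d] = static_cast<std::size_t>(sizes[d]);
    }
    return outDims;
}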
@@ -102,4 +111,14 @@ bool Aidge::Resize_Op::forwardDims(bool /*allowDataDependency*/) {
}
return false;
}
+void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+SET_IMPL_MACRO(Resize_Op, *this, name);
+mOutputs[0]->setBackend(name, device);
+// By default, automatically set backend for roi, scales and sizes inputs
+getInput(1)->setBackend(name, device);
+getInput(2)->setBackend(name, device);
+getInput(3)->setBackend(name, device);
+}
\ No newline at end of file
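The new setBackend override propagates the chosen backend to the three optional inputs as well, so the empty producers attached by the Resize() factory end up on the same device as the operator. A small usage sketch, assuming a node built as in the factory example above; the "cpu" backend name and device index are illustrative:

#include <memory>
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Resize.hpp"

// Sketch only: setting the backend once on the operator now also covers
// the roi, scales and sizes inputs (inputs 1 to 3).
void configureResize(const std::shared_ptr<Aidge::Node>& resizeNode) {
    resizeNode->getOperator()->setBackend("cpu", 0);
}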