Commit 97ba9b77 authored by Michal Szczepanski

Add Resize operator with python binding

parent 97d2fef9
2 merge requests: !152 "Update Aidge export to take a graph view as an argument instead of a...", !125 "Operator resize"
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_Resize_H_
#define AIDGE_CORE_OPERATOR_Resize_H_
#include <cassert>
#include <memory>
#include <vector>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
class Resize_Op : public OperatorTensor,
public Registrable<Resize_Op, std::string, std::unique_ptr<OperatorImpl>(const Resize_Op&)>
{
public:
static const std::string Type;
// 4 inputs ("data_input", "roi", "scales", "sizes"), 0 attributes, 1 output
Resize_Op() : OperatorTensor(Type, 4, 0, 1) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Resize_Op(const Resize_Op& op)
: OperatorTensor(op)
{
// re-create the implementation for the same backend as the copied operator, if it has one
if (!op.backend().empty()) {
SET_IMPL_MACRO(Resize_Op, *this, op.backend());
}
else {
mImpl = nullptr;
}
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Resize_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Resize_Op>(*this);
}
// Infer the output dimensions from the inputs (implemented in the .cpp file below)
bool forwardDims(bool allowDataDependency = false) override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override final {
SET_IMPL_MACRO(Resize_Op, *this, name);
mOutputs[0]->setBackend(name, device);
}
static const std::vector<std::string> getInputsName(){
return {"data_input", "roi ", "scales", "sizes"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
inline std::shared_ptr<Node> Resize(const std::string &name = "") {
return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
}
} // namespace Aidge
#endif /* AIDGE_CORE_OPERATOR_Resize_H_ */
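For reference, a minimal sketch of using the factory declared above; it only relies on Resize, Resize_Op and getInputsName() from this header, everything else (the main function, the node name) is illustrative:

#include <memory>
#include <string>
#include <fmt/core.h>
#include "aidge/operator/Resize.hpp"

int main() {
    // Create a Resize node through the factory defined in the header.
    std::shared_ptr<Aidge::Node> resize = Aidge::Resize("resize0");
    // The operator expects its four inputs in this order.
    for (const std::string& name : Aidge::Resize_Op::getInputsName()) {
        fmt::print("{}\n", name); // data_input, roi, scales, sizes
    }
    return 0;
}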
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/operator/Resize.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Resize(py::module& m) {
py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(m, "ResizeOp", py::multiple_inheritance())
.def("get_inputs_name", &Resize_Op::getInputsName)
.def("get_outputs_name", &Resize_Op::getOutputsName);
m.def("Resize", &Resize, py::arg("name") = "");
}
} // namespace Aidge
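For context, a sketch of how this initializer would typically be hooked into the pybind11 module; the module name aidge_core and the dedicated entry point are assumptions here, mirroring how the project's other init_* functions are registered:

#include <pybind11/pybind11.h>

namespace py = pybind11;
namespace Aidge {
void init_Resize(py::module& m); // defined in the binding above
}

// Hypothetical module definition: in the real project all operator initializers
// are gathered in a single shared module entry point rather than re-declared like this.
PYBIND11_MODULE(aidge_core, m) {
    Aidge::init_Resize(m);
}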
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/operator/Resize.hpp"
#include <cstddef> // std::size_t
#include <cstdint> // std::int64_t
#include <stdexcept> // std::runtime_error
#include <string>
#include <vector>
#include <fmt/core.h>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
const std::string Aidge::Resize_Op::Type = "Resize";
bool Aidge::Resize_Op::forwardDims(bool /*allowDataDependency*/) {
// check that every input ("data_input", "roi", "scales", "sizes") has been associated
for (size_t i = 0; i < 4; ++i) {
if (!getInput(i)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} not provided", type(), i);
}
}
// roi input (#1) is not implemented yet
// Case 1: "data_input" and "scales" are provided ("roi" and "sizes" are empty)
if (!getInput(0)->empty() && getInput(1)->empty() && !getInput(2)->empty() && getInput(3)->empty()) {
std::vector<DimSize_t> outDims = getInput(0)->dims();
// TODO: cast according to the actual data type of the "scales" tensor
const float* scales = static_cast<float*>(getInput(2)->getImpl()->rawPtr());
// each output dimension is the corresponding input dimension multiplied by its scale factor
for (std::size_t dim = 0; dim < getInput(2)->size(); ++dim) {
// TODO: check that the batch and channel scales are equal to 1
outDims[dim] = static_cast<DimSize_t>(outDims[dim] * scales[dim]);
}
mOutputs[0]->resize(outDims);
return true;
}
// Case 2: "data_input" and "sizes" are provided ("roi" and "scales" are empty)
if (!getInput(0)->empty() && getInput(1)->empty() && getInput(2)->empty() && !getInput(3)->empty()) {
std::vector<DimSize_t> outDims = getInput(0)->dims();
// TODO: cast according to the actual data type of the "sizes" tensor (int64 in the ONNX specification)
const std::int64_t* sizes = static_cast<std::int64_t*>(getInput(3)->getImpl()->rawPtr());
// each output dimension is given directly by the corresponding entry of "sizes"
for (std::size_t dim = 0; dim < getInput(3)->size(); ++dim) {
// TODO: check that the batch and channel sizes match the input tensor
outDims[dim] = static_cast<DimSize_t>(sizes[dim]);
}
mOutputs[0]->resize(outDims);
return true;
}
// dimensions cannot be inferred yet (unsupported input combination or inputs not ready)
return false;
}
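As a standalone illustration of the scale-based branch above (plain C++, no Aidge API): an input of dims {1, 3, 32, 32} with scales {1, 1, 2, 0.5} yields output dims {1, 3, 64, 16}.

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
    std::vector<std::size_t> inDims = {1, 3, 32, 32}; // e.g. an NCHW input tensor
    const float scales[] = {1.f, 1.f, 2.f, 0.5f};     // one scale factor per dimension
    std::vector<std::size_t> outDims = inDims;
    for (std::size_t dim = 0; dim < outDims.size(); ++dim) {
        // same computation as in the "scales" branch of forwardDims()
        outDims[dim] = static_cast<std::size_t>(outDims[dim] * scales[dim]);
    }
    for (std::size_t d : outDims) {
        std::printf("%zu ", d); // prints: 1 3 64 16
    }
    std::printf("\n");
    return 0;
}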