Skip to content
Snippets Groups Projects
Commit 13b1b261 authored by Maxence Naud's avatar Maxence Naud
Browse files

Merge remote-tracking branch 'origin/main' into fix/bundle

parents 15421bc8 f504d1ee
No related branches found
No related tags found
No related merge requests found
......@@ -15,7 +15,7 @@
#include <cstring>
#include <functional> // std::multiplies
#include <memory>
#include <numeric>    // std::accumulate
#include <set>
#include <string>
#include <vector>
......@@ -327,11 +327,11 @@ class Tensor : public Data,
/**
* @brief Change the dimensions of the Tensor object according to the given argument.
* If the overall size is not changed (meaning we actually only performed a
* reshape), data is guaranteed to remain valid.
* Otherwise, no guarantee is provided regarding the validity of previous data
* (unlike std::vector). If the new overall size is larger than the previous
* one, all previous data is invalidated. Otherwise, previous data may or may
* not remain valid, depending on the backend implementation.
* @tparam DIM Number of dimensions.
* @param dims New dimensions
......@@ -343,11 +343,11 @@ class Tensor : public Data,
/**
* @brief Change the dimensions of the Tensor object according to the given argument.
* If the overall size is not changed (meaning we actually only performed a
* reshape), data is guaranteed to remain valid.
* Otherwise, no guarantee is provided regarding the validity of previous data
* (unlike std::vector). If the new overall size is larger than the previous
* one, all previous data is invalidated. Otherwise, previous data may or may
* not remain valid, depending on the backend implementation.
* @param dims New dimensions
*/
......@@ -424,7 +424,7 @@ class Tensor : public Data,
return std::string("?"); // To make Clang happy
};
if (dims().empty()) { return "{}"; }
if (dims().empty()) { return ptrToString(mDataType, mImpl->hostPtr(), 0); }
std::string res;
std::size_t dim = 0;
std::size_t counter = 0;
......@@ -546,22 +546,22 @@ class Tensor : public Data,
/**
* Copy-cast data from a Tensor.
* @param src Source tensor to copy-cast from.
* @param movedSrc shared_ptr to an intermediate Tensor that will
* contain the moved data if a device change should occur AND a type
* conversion is necessary (otherwise it remains unused).
* Any data already present will be overwritten. No new memory allocation
* will occur if movedSrc has already been allocated with the right
* type/size/device.
* If required, memory is always allocated on current (destination)
* Tensor's device.
*/
void copyCastFrom(const Tensor& src, std::shared_ptr<Tensor>& movedSrc);
/**
* Copy-cast data from a Tensor.
* In case of both a device change AND a data type conversion, an
* intermediate buffer will be allocated and deallocated each time.
* If required, buffer's memory is always allocated on current (destination)
* Tensor's device.
* @param src Source tensor to copy-cast from.
*/
......@@ -579,7 +579,7 @@ class Tensor : public Data,
* The backend stays the same.
* @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
* The shared_ptr does not need to be initialized. No new memory allocation
* will occur if fallback has already been allocated with the right
* type/size/device.
* @param dt The desired data type.
* @return Reference to either itself or to fallback.
......@@ -594,7 +594,7 @@ class Tensor : public Data,
* The data type stays the same.
* @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
* The shared_ptr does not need to be initialized. No new memory allocation
* will occur if fallback has already been allocated with the right
* type/size/device.
* @param backend The desired backend.
* @param device The desired device.
......@@ -607,11 +607,11 @@ class Tensor : public Data,
* Return a reference to a Tensor on desired data type and backend/device:
* - itself, if already with the right characteristics;
* - the provided Tensor, overwritten with the copy-casted data.
* If required, fallback is always allocated on desired (destination)
* device.
* @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
* The shared_ptr does not need to be initialized. No new memory allocation
* will occur if fallback has already been allocated with the right
* type/size/device.
* @param dt The desired data type.
* @param backend The desired backend.
......@@ -628,11 +628,11 @@ class Tensor : public Data,
* (data type, backend/device) as targetReqs Tensor:
* - itself, if already with the right characteristics;
* - the provided Tensor, overwritten with the copy-casted data.
* If required, fallback is always allocated on current (destination)
* Tensor's device.
* @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
* The shared_ptr does not need to be initialized. No new memory allocation
* will occur if fallback has already been allocated with the right
* type/size/device.
* @param targetReqs Tensor with the desired target characteristics.
* @return Reference to either itself or to fallback.
......@@ -644,15 +644,8 @@ class Tensor : public Data,
private:
///\bug not protected against overflow
std::size_t computeSize() {
if (mDims.empty()) {
mSize = DimSize_t(0);
}
else {
mSize = std::accumulate(mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
}
return mSize;
void computeSize() {
mSize = std::accumulate(mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
}
};
} // namespace Aidge
......
......@@ -40,7 +40,7 @@ public:
static const std::string Type;
Identity_Op()
: OperatorTensor(Type, 1, 0, 0)
: OperatorTensor(Type, 1, 0, 1)
{
mImpl = std::make_shared<OperatorImpl>(*this);
}
......@@ -101,7 +101,10 @@ public:
if (outputIdx >= nbInputs()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbInputs());
}
return mInputs[outputIdx];
if (mInputs[outputIdx] == nullptr){
return mOutputs[outputIdx]; // Input is not initialized with empty tensor
}
return mInputs[outputIdx]; // Identity, so Output is Input
}
void setBackend(const std::string& /*name*/, DeviceIdx_t /*device*/ = 0) override final {
// setBackend do nothing, Identity node has no backend it just pass the same Tensor
......
......@@ -24,22 +24,32 @@
namespace Aidge {
enum class ProdAttr { Constant };
class Producer_Op
: public OperatorTensor,
public Registrable<Producer_Op, std::string, std::unique_ptr<OperatorImpl>(
const Producer_Op &)> {
const Producer_Op &)>,
public StaticAttributes<ProdAttr, bool> {
public:
static const std::string Type;
using Attributes_ = StaticAttributes<ProdAttr, bool>;
template <ProdAttr e>
using attr = typename Attributes_::template attr<e>;
template <std::size_t DIM>
Producer_Op(const std::array<DimSize_t, DIM>& dims)
: OperatorTensor(Type, 0, 0, 1)
Producer_Op(const std::array<DimSize_t, DIM>& dims,
bool constant = false)
: OperatorTensor(Type, 0, 0, 1),
Attributes_(attr<ProdAttr::Constant>(constant))
{
mOutputs[0]->resize(dims);
}
Producer_Op(const std::shared_ptr<Tensor> tensor)
: OperatorTensor(Type, 0, 0, 1)
Producer_Op(const std::shared_ptr<Tensor> tensor, bool constant = false)
: OperatorTensor(Type, 0, 0, 1),
Attributes_(attr<ProdAttr::Constant>(constant))
{
mOutputs[0] = tensor; // copy the pointer of the Tensor
}
......@@ -49,7 +59,8 @@ public:
* @param op OperatorTensor to copy.
*/
Producer_Op(const Producer_Op& op)
: OperatorTensor(op)
: OperatorTensor(op),
Attributes_(op)
{
for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
mOutputs[i] = std::make_shared<Tensor>(*(op.getOutput(i)));
......@@ -89,28 +100,41 @@ public:
}
public:
void forward() override final {
printf("Basic Producer forward() function.\n");
}
void backward() override final {
printf("Basic Producer backward() function.\n");
}
void forward() override final {
printf("Basic Producer forward() function.\n");
}
void backward() override final {
printf("Basic Producer backward() function.\n");
}
void setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) override {
if (getAttr<ProdAttr::Constant>()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
}
OperatorTensor::setOutput(outputIdx, std::move(data));
}
void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) override {
if (getAttr<ProdAttr::Constant>()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
}
OperatorTensor::setOutput(outputIdx, data);
}
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "") {
inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "", bool constant = false) {
static_assert(DIM<=MaxDim,"Too many tensor dimensions required by Producer, not supported");
return std::make_shared<Node>(std::make_shared<Producer_Op>(dims), name);
return std::make_shared<Node>(std::make_shared<Producer_Op>(dims, constant), name);
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <std::size_t DIM>
inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const std::string& name = "") {
return Producer(to_array(dims), name);
inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const std::string& name = "", bool constant = false) {
return Producer(to_array(dims), name, constant);
}
inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor), name);
inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "", bool constant = false) {
return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor, constant), name);
}
template <std::array<DimSize_t, 1>::size_type DIM>
......@@ -130,4 +154,10 @@ void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, Dim
}
} // namespace Aidge
#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
\ No newline at end of file
namespace {
template <>
const char *const EnumStrings<Aidge::ProdAttr>::data[] = {
"Constant"
};
}
#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
......@@ -42,7 +42,7 @@ void addCtor(py::class_<Tensor,
std::set<std::string> availableBackends = Tensor::getAvailableBackends();
if (availableBackends.find("cpu") != availableBackends.end()){
newTensor->setBackend("cpu");
newTensor->getImpl()->setRawPtr(static_cast<T*>(info.ptr), newTensor->size());
newTensor->getImpl()->copyFromHost(static_cast<T*>(info.ptr), newTensor->size());
}else{
printf("Warning : Could not use aidge_cpu backend, verify you have `import aidge_cpu`\n");
}
......@@ -95,7 +95,9 @@ void init_Tensor(py::module& m){
case DataType::Float32:
return py::cast(b.get<float>(idx));
case DataType::Int32:
return py::cast(b.get<int>(idx));
return py::cast(b.get<std::int32_t>(idx));
case DataType::Int64:
return py::cast(b.get<std::int64_t>(idx));
default:
return py::none();
}
......@@ -108,7 +110,9 @@ void init_Tensor(py::module& m){
case DataType::Float32:
return py::cast(b.get<float>(coordIdx));
case DataType::Int32:
return py::cast(b.get<int>(coordIdx));
return py::cast(b.get<std::int32_t>(coordIdx));
case DataType::Int64:
return py::cast(b.get<std::int64_t>(coordIdx));
default:
return py::none();
}
......@@ -137,7 +141,10 @@ void init_Tensor(py::module& m){
dataFormatDescriptor = py::format_descriptor<float>::format();
break;
case DataType::Int32:
dataFormatDescriptor = py::format_descriptor<int>::format();
dataFormatDescriptor = py::format_descriptor<std::int32_t>::format();
break;
case DataType::Int64:
dataFormatDescriptor = py::format_descriptor<std::int64_t>::format();
break;
default:
throw py::value_error("Unsupported data format");
......@@ -155,7 +162,8 @@ void init_Tensor(py::module& m){
// TODO : If the ctor with the right data type does not exist, pybind will always convert the data to INT !
// Need to find a way to avoid this !
addCtor<int>(pyClassTensor);
addCtor<std::int32_t>(pyClassTensor);
addCtor<std::int64_t>(pyClassTensor);
addCtor<float>(pyClassTensor);
// #if SIZE_MAX != 0xFFFFFFFF
addCtor<double>(pyClassTensor);
......
......@@ -122,7 +122,7 @@ void init_MetaOperatorDefs(py::module &m) {
declare_PaddedMaxPoolingOp<2>(m);
declare_PaddedMaxPoolingOp<3>(m);
py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, Operator>(m, "MetaOperator_Op", py::multiple_inheritance())
py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperator_Op", py::multiple_inheritance())
.def("get_micro_graph", &MetaOperator_Op::getMicroGraph);
m.def("meta_operator", &MetaOperator,
......
......@@ -21,6 +21,9 @@ void init_OperatorTensor(py::module& m){
py::class_<OperatorTensor, std::shared_ptr<OperatorTensor>, Operator>(m, "OperatorTensor")
.def("get_output", &OperatorTensor::getOutput, py::arg("outputIdx"))
.def("get_input", &OperatorTensor::getInput, py::arg("inputIdx"))
.def("set_output", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setOutput, py::arg("outputIdx"), py::arg("data"))
.def("set_input", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setInput, py::arg("outputIdx"), py::arg("data"))
.def("output_dims_forwarded", &OperatorTensor::outputDimsForwarded)
;
}
......
......@@ -24,20 +24,20 @@ namespace Aidge {
template <DimIdx_t DIM>
void declare_Producer(py::module &m) {
// m.def(("Producer_" + std::to_string(DIM)+"D").c_str(), py::overload_cast<shared_ptr<Node>&>(&Producer<DIM>), py::arg("dims"), py::arg("name"));
m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const std::string&)>(&Producer), py::arg("dims"), py::arg("name") = "");
m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const std::string&, bool)>(&Producer), py::arg("dims"), py::arg("name") = "", py::arg("constant") = false);
}
void init_Producer(py::module &m) {
py::class_<Producer_Op, std::shared_ptr<Producer_Op>, OperatorTensor>(
py::class_<Producer_Op, std::shared_ptr<Producer_Op>, OperatorTensor, Attributes>(
m,
"ProducerOp",
py::multiple_inheritance())
.def("dims", &Producer_Op::dims)
.def("get_inputs_name", &Producer_Op::getInputsName)
.def("get_outputs_name", &Producer_Op::getOutputsName);
m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&)>(&Producer), py::arg("tensor"), py::arg("name") = "");
m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&, bool)>(&Producer), py::arg("tensor"), py::arg("name") = "", py::arg("constant") = false);
declare_Producer<1>(m);
declare_Producer<2>(m);
......
......@@ -13,4 +13,4 @@
#include "aidge/operator/Producer.hpp"
const std::string Aidge::Producer_Op::Type = "Producer";
\ No newline at end of file
const std::string Aidge::Producer_Op::Type = "Producer";
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment