Commit f504d1ee authored by Maxence Naud

Merge branch 'dev' into 'main'

dev

See merge request !67
parents 6c460029 cd269563
Pipeline #36929 passed
@@ -15,7 +15,7 @@
 #include <cstring>
 #include <set>
 #include <memory>
-#include <numeric>
+#include <numeric>    // std::accumulate
 #include <string>
 #include <vector>
@@ -327,11 +327,11 @@ class Tensor : public Data,
     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
      * If the overall size is not changed (meaning we actually only performed a
      * reshape), data is guaranteed to remain valid.
      * Otherwise, no guarantee is provided regarding the validity of previous data
      * (unlike std::vector). If the new overall size is larger than the previous
      * one, all previous data is invalidated. Otherwise, previous data may or may
      * not remain valid, depending on the backend implementation.
      * @tparam DIM Number of dimensions.
      * @param dims New dimensions
@@ -343,11 +343,11 @@ class Tensor : public Data,
     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
      * If the overall size is not changed (meaning we actually only performed a
      * reshape), data is guaranteed to remain valid.
      * Otherwise, no guarantee is provided regarding the validity of previous data
      * (unlike std::vector). If the new overall size is larger than the previous
      * one, all previous data is invalidated. Otherwise, previous data may or may
      * not remain valid, depending on the backend implementation.
      * @param dims New dimensions
      */
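The resize() contract above, as a minimal sketch (the helper function and the starting dims are illustrative assumptions, not part of the commit):

    #include "aidge/data/Tensor.hpp"

    // Hypothetical helper: t is assumed to hold valid data with dims {2, 3}.
    void resizeContract(Aidge::Tensor& t) {
        t.resize({3, 2});   // same overall size (6): a pure reshape, data stays valid
        t.resize({4, 4});   // larger overall size (16): all previous data is invalidated
        t.resize({2, 2});   // smaller overall size (4): validity is backend-dependent
    }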
@@ -424,7 +424,7 @@ class Tensor : public Data,
             return std::string("?"); // To make Clang happy
         };
-        if (dims().empty()) { return "{}"; }
+        if (dims().empty()) { return ptrToString(mDataType, mImpl->hostPtr(), 0); }
         std::string res;
         std::size_t dim = 0;
         std::size_t counter = 0;
@@ -546,22 +546,22 @@ class Tensor : public Data,
     /**
      * Copy-cast data from a Tensor.
      * @param src Source tensor to copy-cast from.
      * @param movedSrc shared_ptr to an intermediate Tensor that will
      * contain the moved data if a device change should occur AND a type
      * conversion is necessary (otherwise it remains unused).
      * Any data already present will be overwritten. No new memory allocation
      * will occur if movedSrc has already been allocated with the right
      * type/size/device.
      * If required, memory is always allocated on the current (destination)
      * Tensor's device.
      */
     void copyCastFrom(const Tensor& src, std::shared_ptr<Tensor>& movedSrc);

     /**
      * Copy-cast data from a Tensor.
      * In case of both a device change AND a data type conversion, an
      * intermediate buffer will be allocated and deallocated each time.
      * If required, the buffer's memory is always allocated on the current
      * (destination) Tensor's device.
      * @param src Source tensor to copy-cast from.
      */
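A sketch of how the two copyCastFrom() overloads above are meant to be used (the tensor names and the loop are illustrative):

    #include <memory>
    #include "aidge/data/Tensor.hpp"

    void copyCastSketch(Aidge::Tensor& dst, const Aidge::Tensor& src) {
        // One-shot overload: if a device change AND a type conversion are both
        // needed, an intermediate buffer is allocated and freed internally.
        dst.copyCastFrom(src);

        // Loop-friendly overload: a persistent movedSrc is allocated on the
        // first call and then reused, since it already has the right
        // type/size/device.
        std::shared_ptr<Aidge::Tensor> movedSrc;
        for (int step = 0; step < 10; ++step) {
            dst.copyCastFrom(src, movedSrc);
        }
    }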
@@ -579,7 +579,7 @@ class Tensor : public Data,
      * The backend stays the same.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
      * will occur if fallback has already been allocated with the right
      * type/size/device.
      * @param dt The desired data type.
      * @return Reference to either itself or to fallback.
@@ -594,7 +594,7 @@ class Tensor : public Data,
      * The data type stays the same.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
      * will occur if fallback has already been allocated with the right
      * type/size/device.
      * @param backend The desired backend.
      * @param device The desired device.
@@ -607,11 +607,11 @@ class Tensor : public Data,
      * Return a reference to a Tensor on the desired data type and backend/device:
      * - itself, if already with the right characteristics;
      * - the provided Tensor, overwritten with the copy-casted data.
      * If required, fallback is always allocated on the desired (destination)
      * device.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
      * will occur if fallback has already been allocated with the right
      * type/size/device.
      * @param dt The desired data type.
      * @param backend The desired backend.
@@ -628,11 +628,11 @@ class Tensor : public Data,
      * (data type, backend/device) as the targetReqs Tensor:
      * - itself, if already with the right characteristics;
      * - the provided Tensor, overwritten with the copy-casted data.
      * If required, fallback is always allocated on the current (destination)
      * Tensor's device.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
      * will occur if fallback has already been allocated with the right
      * type/size/device.
      * @param targetReqs Tensor with the desired target characteristics.
      * @return Reference to either itself or to fallback.
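The ref* family documented above enables a zero-copy fast path. A hedged sketch follows; the method name refCastFrom and its parameter order are inferred from the @param lists above and may differ from the actual header:

    #include <memory>
    #include "aidge/data/Tensor.hpp"

    const Aidge::Tensor& asFloat32OnCpu(const Aidge::Tensor& src,
                                        std::shared_ptr<Aidge::Tensor>& fallback) {
        // Returns src itself if it is already float32 on the "cpu" backend
        // (no copy); otherwise fallback is (re)used for the copy-casted data.
        return src.refCastFrom(fallback, Aidge::DataType::Float32, "cpu");
    }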
@@ -644,15 +644,8 @@ class Tensor : public Data,
 private:
     ///\bug not protected against overflow
-    std::size_t computeSize() {
-        if (mDims.empty()) {
-            mSize = DimSize_t(0);
-        }
-        else {
-            mSize = std::accumulate(mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
-        }
-        return mSize;
-    }
+    void computeSize() {
+        mSize = std::accumulate(mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
+    }
 };
 } // namespace Aidge
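The simplified computeSize() leans on a property of std::accumulate worth spelling out: over an empty range it returns its initial value, so a rank-0 tensor now has size 1 (scalar semantics) rather than 0, which is also what makes the new empty-dims branch of toString() above well defined. A standalone check:

    #include <cstddef>
    #include <cstdio>
    #include <functional>
    #include <numeric>
    #include <vector>

    int main() {
        using DimSize_t = std::size_t;  // assumption: mirrors Aidge's DimSize_t
        const std::vector<DimSize_t> scalar{};     // rank-0 tensor: empty dims
        const std::vector<DimSize_t> matrix{3, 4};
        const auto size = [](const std::vector<DimSize_t>& dims) {
            return std::accumulate(dims.cbegin(), dims.cend(), DimSize_t(1),
                                   std::multiplies<DimSize_t>());
        };
        std::printf("%zu %zu\n", size(scalar), size(matrix));  // prints: 1 12
        return 0;
    }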
...
@@ -40,7 +40,7 @@ public:
     static const std::string Type;
     Identity_Op()
-        : OperatorTensor(Type, 1, 0, 0)
+        : OperatorTensor(Type, 1, 0, 1)
     {
         mImpl = std::make_shared<OperatorImpl>(*this);
     }
@@ -101,7 +101,10 @@ public:
         if (outputIdx >= nbInputs()) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbInputs());
         }
-        return mInputs[outputIdx];
+        if (mInputs[outputIdx] == nullptr) {
+            return mOutputs[outputIdx]; // input not yet initialized: return the empty output tensor
+        }
+        return mInputs[outputIdx]; // Identity: the output is the input
     }
     void setBackend(const std::string& /*name*/, DeviceIdx_t /*device*/ = 0) override final {
         // setBackend does nothing: an Identity node has no backend, it just passes the same Tensor through
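The effect of the two Identity changes (one output instead of zero, plus the nullptr guard) in a hedged sketch, assuming the usual Producer/Identity factory helpers from these headers:

    #include <memory>
    #include "aidge/operator/Identity.hpp"
    #include "aidge/operator/Producer.hpp"

    void identitySketch(const std::shared_ptr<Aidge::Tensor>& someTensor) {
        auto id = Aidge::Identity("id");
        // Before any input is connected, getOutput(0) returns the empty
        // placeholder tensor instead of a null pointer.
        auto src = Aidge::Producer(someTensor, "src");
        src->addChild(id, 0, 0);
        // Once connected, the output aliases the producer's tensor: no copy.
    }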
...
@@ -24,22 +24,32 @@
 namespace Aidge {
+enum class ProdAttr { Constant };

 class Producer_Op
     : public OperatorTensor,
       public Registrable<Producer_Op, std::string, std::unique_ptr<OperatorImpl>(
-          const Producer_Op &)> {
+          const Producer_Op &)>,
+      public StaticAttributes<ProdAttr, bool> {
 public:
     static const std::string Type;

+    using Attributes_ = StaticAttributes<ProdAttr, bool>;
+    template <ProdAttr e>
+    using attr = typename Attributes_::template attr<e>;
+
     template <std::size_t DIM>
-    Producer_Op(const std::array<DimSize_t, DIM>& dims)
-        : OperatorTensor(Type, 0, 0, 1)
+    Producer_Op(const std::array<DimSize_t, DIM>& dims,
+                bool constant = false)
+        : OperatorTensor(Type, 0, 0, 1),
+          Attributes_(attr<ProdAttr::Constant>(constant))
     {
         mOutputs[0]->resize(dims);
     }

-    Producer_Op(const std::shared_ptr<Tensor> tensor)
-        : OperatorTensor(Type, 0, 0, 1)
+    Producer_Op(const std::shared_ptr<Tensor> tensor, bool constant = false)
+        : OperatorTensor(Type, 0, 0, 1),
+          Attributes_(attr<ProdAttr::Constant>(constant))
     {
         mOutputs[0] = tensor; // copy the pointer of the Tensor
     }
@@ -49,7 +59,8 @@ public:
      * @param op OperatorTensor to copy.
      */
     Producer_Op(const Producer_Op& op)
-        : OperatorTensor(op)
+        : OperatorTensor(op),
+          Attributes_(op)
     {
         for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
             mOutputs[i] = std::make_shared<Tensor>(*(op.getOutput(i)));
@@ -89,28 +100,41 @@ public:
     }

 public:
     void forward() override final {
         printf("Basic Producer forward() function.\n");
     }
     void backward() override final {
         printf("Basic Producer backward() function.\n");
     }

+    void setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) override {
+        if (getAttr<ProdAttr::Constant>()) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
+        }
+        OperatorTensor::setOutput(outputIdx, std::move(data));
+    }
+
+    void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) override {
+        if (getAttr<ProdAttr::Constant>()) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
+        }
+        OperatorTensor::setOutput(outputIdx, data);
+    }
 };
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "") {
+inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "", bool constant = false) {
     static_assert(DIM<=MaxDim, "Too many tensor dimensions required by Producer, not supported");
-    return std::make_shared<Node>(std::make_shared<Producer_Op>(dims), name);
+    return std::make_shared<Node>(std::make_shared<Producer_Op>(dims, constant), name);
 }

 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <std::size_t DIM>
-inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const std::string& name = "") {
-    return Producer(to_array(dims), name);
+inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const std::string& name = "", bool constant = false) {
+    return Producer(to_array(dims), name, constant);
 }

-inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor), name);
+inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "", bool constant = false) {
+    return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor, constant), name);
 }

 template <std::array<DimSize_t, 1>::size_type DIM>
@@ -130,4 +154,10 @@ void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, Dim
 }
 } // namespace Aidge

-#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
\ No newline at end of file
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ProdAttr>::data[] = {
+    "Constant"
+};
+}
+#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
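A hedged usage sketch of the new constant flag (the tensor setup is illustrative; the names follow the header above):

    #include <memory>
    #include "aidge/operator/Producer.hpp"

    void constantProducerSketch(const std::shared_ptr<Aidge::Tensor>& weights) {
        // constant = true marks the output as immutable (e.g. frozen weights).
        auto w = Aidge::Producer(weights, "w", /*constant=*/true);
        auto op = std::static_pointer_cast<Aidge::Producer_Op>(w->getOperator());
        // Any later op->setOutput(0, ...) now throws:
        // "Producer is constant, cannot update output."
    }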
@@ -42,7 +42,7 @@ void addCtor(py::class_<Tensor,
         std::set<std::string> availableBackends = Tensor::getAvailableBackends();
         if (availableBackends.find("cpu") != availableBackends.end()) {
             newTensor->setBackend("cpu");
-            newTensor->getImpl()->setRawPtr(static_cast<T*>(info.ptr), newTensor->size());
+            newTensor->getImpl()->copyFromHost(static_cast<T*>(info.ptr), newTensor->size());
         } else {
             printf("Warning: could not use the aidge_cpu backend; verify that you have run `import aidge_cpu`\n");
         }
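The setRawPtr -> copyFromHost switch is the substantive fix in this hunk: setRawPtr made the new tensor alias the numpy buffer behind info.ptr, whose lifetime is owned by Python, so freeing or garbage-collecting the array left the tensor pointing at dead memory. copyFromHost instead copies the elements into storage owned by the backend implementation, so the tensor's data outlives the Python buffer (at the cost of one copy at construction).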
@@ -95,7 +95,9 @@ void init_Tensor(py::module& m){
             case DataType::Float32:
                 return py::cast(b.get<float>(idx));
             case DataType::Int32:
-                return py::cast(b.get<int>(idx));
+                return py::cast(b.get<std::int32_t>(idx));
+            case DataType::Int64:
+                return py::cast(b.get<std::int64_t>(idx));
             default:
                 return py::none();
             }
@@ -108,7 +110,9 @@ void init_Tensor(py::module& m){
             case DataType::Float32:
                 return py::cast(b.get<float>(coordIdx));
             case DataType::Int32:
-                return py::cast(b.get<int>(coordIdx));
+                return py::cast(b.get<std::int32_t>(coordIdx));
+            case DataType::Int64:
+                return py::cast(b.get<std::int64_t>(coordIdx));
             default:
                 return py::none();
             }
@@ -137,7 +141,10 @@ void init_Tensor(py::module& m){
                 dataFormatDescriptor = py::format_descriptor<float>::format();
                 break;
             case DataType::Int32:
-                dataFormatDescriptor = py::format_descriptor<int>::format();
+                dataFormatDescriptor = py::format_descriptor<std::int32_t>::format();
+                break;
+            case DataType::Int64:
+                dataFormatDescriptor = py::format_descriptor<std::int64_t>::format();
                 break;
             default:
                 throw py::value_error("Unsupported data format");
@@ -155,7 +162,8 @@ void init_Tensor(py::module& m){
     // TODO: if the ctor with the right data type does not exist, pybind will always convert the data to INT!
     // Need to find a way to avoid this!
-    addCtor<int>(pyClassTensor);
+    addCtor<std::int32_t>(pyClassTensor);
+    addCtor<std::int64_t>(pyClassTensor);
     addCtor<float>(pyClassTensor);
 // #if SIZE_MAX != 0xFFFFFFFF
     addCtor<double>(pyClassTensor);
...
@@ -122,7 +122,7 @@ void init_MetaOperatorDefs(py::module &m) {
   declare_PaddedMaxPoolingOp<2>(m);
   declare_PaddedMaxPoolingOp<3>(m);
-  py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, Operator>(m, "MetaOperator_Op", py::multiple_inheritance())
+  py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperator_Op", py::multiple_inheritance())
   .def("get_micro_graph", &MetaOperator_Op::getMicroGraph);
   m.def("meta_operator", &MetaOperator,
...
@@ -21,6 +21,9 @@ void init_OperatorTensor(py::module& m){
     py::class_<OperatorTensor, std::shared_ptr<OperatorTensor>, Operator>(m, "OperatorTensor")
     .def("get_output", &OperatorTensor::getOutput, py::arg("outputIdx"))
     .def("get_input", &OperatorTensor::getInput, py::arg("inputIdx"))
+    .def("set_output", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setOutput, py::arg("outputIdx"), py::arg("data"))
+    .def("set_input", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setInput, py::arg("inputIdx"), py::arg("data"))
     .def("output_dims_forwarded", &OperatorTensor::outputDimsForwarded)
     ;
 }
...
@@ -24,20 +24,20 @@ namespace Aidge {
 template <DimIdx_t DIM>
 void declare_Producer(py::module &m) {
     // m.def(("Producer_" + std::to_string(DIM)+"D").c_str(), py::overload_cast<shared_ptr<Node>&>(&Producer<DIM>), py::arg("dims"), py::arg("name"));
-    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const std::string&)>(&Producer), py::arg("dims"), py::arg("name") = "");
+    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const std::string&, bool)>(&Producer), py::arg("dims"), py::arg("name") = "", py::arg("constant") = false);
 }

 void init_Producer(py::module &m) {
-    py::class_<Producer_Op, std::shared_ptr<Producer_Op>, OperatorTensor>(
+    py::class_<Producer_Op, std::shared_ptr<Producer_Op>, OperatorTensor, Attributes>(
         m,
         "ProducerOp",
         py::multiple_inheritance())
     .def("dims", &Producer_Op::dims)
     .def("get_inputs_name", &Producer_Op::getInputsName)
     .def("get_outputs_name", &Producer_Op::getOutputsName);
-    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&)>(&Producer), py::arg("tensor"), py::arg("name") = "");
+    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&, bool)>(&Producer), py::arg("tensor"), py::arg("name") = "", py::arg("constant") = false);

     declare_Producer<1>(m);
     declare_Producer<2>(m);
...
@@ -13,4 +13,4 @@
 #include "aidge/operator/Producer.hpp"

-const std::string Aidge::Producer_Op::Type = "Producer";
\ No newline at end of file
+const std::string Aidge::Producer_Op::Type = "Producer";