Skip to content
Snippets Groups Projects
Commit 879b09e8 authored by Cyril Moineau's avatar Cyril Moineau
Browse files

Merge branch 'dev' into 'main'

0.2.1

See merge request !119
parents ade77684 1891d576
No related branches found
No related tags found
1 merge request!1190.2.1
Pipeline #45652 canceled
Showing
with 148 additions and 44 deletions
......@@ -8,4 +8,5 @@ http://www.eclipse.org/legal/epl-2.0.
SPDX-License-Identifier: EPL-2.0
"""
from aidge_core.aidge_core import * # import so generated by PyBind
from aidge_core.export import ExportNode
from aidge_core.export import ExportNode, generate_file, generate_str
import aidge_core.utils
from .node_export import *
from .code_generation import *
import os
from jinja2 import Environment, FileSystemLoader
def generate_file(file_path: str, template_path: str, **kwargs) -> None:
    """Generate a file at ``file_path`` using the jinja template located at
    ``template_path``.

    kwargs are used to fill the template.

    :param file_path: path where to generate the file
    :type file_path: str
    :param template_path: Path to the template to use for code generation
    :type template_path: str
    """
    # Create the output directory if needed. ``exist_ok=True`` removes the
    # check-then-create race, and the ``if dirname`` guard handles files
    # generated in the current directory: os.path.dirname returns "" in that
    # case, and os.makedirs("") raises FileNotFoundError.
    dirname = os.path.dirname(file_path)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    # Split the template path into the jinja search directory and the
    # template file name.
    template_dir = os.path.dirname(template_path)
    template_name = os.path.basename(template_path)
    # Load the template and render it with the provided keyword arguments.
    template = Environment(loader=FileSystemLoader(
        template_dir)).get_template(template_name)
    content = template.render(kwargs)
    with open(file_path, mode="w", encoding="utf-8") as message:
        message.write(content)
def generate_str(template_path: str, **kwargs) -> str:
    """Generate a string using the jinja template located at ``template_path``.

    kwargs are used to fill the template.

    :param template_path: Path to the template to use for code generation
    :type template_path: str
    :return: A string of the interpreted template
    :rtype: str
    """
    # The template's directory is used as the jinja search path; the file
    # name selects the template inside it.
    dirname = os.path.dirname(template_path)
    filename = os.path.basename(template_path)
    template = Environment(loader=FileSystemLoader(dirname)).get_template(filename)
    return template.render(kwargs)
......@@ -39,7 +39,6 @@ class ExportNode(ABC):
if parent_node is not None:
self.inputs_dims.append(self.operator.get_input(idx).dims())
else:
print(self.operator.get_input(idx))
if self.operator.get_input(idx) is not None:
self.inputs_dims.append(self.operator.get_input(idx).dims())
else:
......
......@@ -39,7 +39,7 @@ class test_OperatorImpl(unittest.TestCase):
global GLOBAL_CPT
matmul = aidge_core.GenericOperator("MatMul", 1, 0, 1, name="MatMul0")
generic_matmul_op = matmul.get_operator()
generic_matmul_op.set_compute_output_dims(lambda x: x)
generic_matmul_op.set_forward_dims(lambda x: x)
generic_matmul_op.set_impl(testImpl(generic_matmul_op))
generic_matmul_op.forward()
self.assertEqual(GLOBAL_CPT, 1)
......@@ -52,6 +52,7 @@ class test_OperatorImpl(unittest.TestCase):
self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
conv.get_operator().set_backend("cpu")
conv.get_operator().set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
conv.get_operator().forward()
self.assertEqual(GLOBAL_CPT, 1)
......@@ -65,6 +66,7 @@ class test_OperatorImpl(unittest.TestCase):
conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
model = aidge_core.sequential([conv])
model.set_backend("cpu")
conv.get_operator().set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
conv.get_operator().forward()
self.assertEqual(GLOBAL_CPT, 1)
......
......@@ -92,14 +92,14 @@ class test_operator_binding(unittest.TestCase):
attrs.set_attr("d", 23.89)
self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
def test_compute_output_dims(self):
def test_forward_dims(self):
in_dims=[25, 25]
input = aidge_core.Producer(in_dims, name="In")
genOp = aidge_core.GenericOperator("genOp", 1, 0, 1, name="genOp")
_ = aidge_core.sequential([input, genOp])
self.assertListEqual(genOp.get_operator().get_output(0).dims(), [])
genOp.get_operator().set_compute_output_dims(lambda x:x)
genOp.get_operator().compute_output_dims()
genOp.get_operator().set_forward_dims(lambda x:x)
genOp.get_operator().forward_dims()
self.assertListEqual(genOp.get_operator().get_output(0).dims(), in_dims)
def test_set_impl(self):
......
def template_docstring(template_keyword, text_to_replace):
    """Decorator factory that substitutes a placeholder in a function docstring.

    In the decorated function's documentation the templated word must be
    written between ``{`` and ``}``.

    :param template_keyword: Template keyword to replace.
    :type template_keyword: str
    :param text_to_replace: Text to replace your template with.
    :type text_to_replace: str
    :raises RuntimeError: if the decorated function has no docstring or its
        docstring does not contain ``{template_keyword}``.
    """
    def dec(func):
        # Guard against functions with no docstring: ``in None`` would raise
        # a confusing TypeError otherwise.
        # Bug fix: the error message previously referenced ``function``,
        # which is undefined here (NameError); the parameter is ``func``.
        if func.__doc__ is None or "{"+template_keyword+"}" not in func.__doc__:
            raise RuntimeError(
                f"The function {func.__name__} docstring does not contain the template keyword: {template_keyword}.")
        func.__doc__ = func.__doc__.replace(
            "{"+template_keyword+"}", text_to_replace)
        return func
    return dec
......@@ -23,7 +23,7 @@ class Operator;
class OperatorImpl {
public:
OperatorImpl(const Operator& op, const std::string& backend);
OperatorImpl(const Operator& op, const std::string& backend = "");
virtual void forward();
virtual void backward();
......
......@@ -23,6 +23,8 @@ namespace Aidge {
template <class T>
class TensorImpl_cpu : public TensorImpl {
static_assert(std::is_trivially_copyable<T>::value, "TensorImpl type should be trivially copyable");
private:
/// Pointer to the data and its capacity
future_std::span<T> mData;
......
......@@ -251,7 +251,6 @@ class Tensor : public Data,
auto add_ = Add_Op(2);
add_.associateInput(0, std::make_shared<Tensor>(*this));
add_.associateInput(1, std::make_shared<Tensor>(other));
add_.computeOutputDims();
add_.setDataType(dataType());
add_.setBackend(mImpl->backend());
add_.forward();
......@@ -275,7 +274,6 @@ class Tensor : public Data,
auto sub_ = Sub_Op();
sub_.associateInput(0, std::make_shared<Tensor>(*this));
sub_.associateInput(1, std::make_shared<Tensor>(other));
sub_.computeOutputDims();
sub_.setDataType(dataType());
sub_.setBackend(mImpl->backend());
sub_.forward();
......@@ -299,7 +297,6 @@ class Tensor : public Data,
auto mul_ = Mul_Op();
mul_.associateInput(0, std::make_shared<Tensor>(*this));
mul_.associateInput(1, std::make_shared<Tensor>(other));
mul_.computeOutputDims();
mul_.setDataType(dataType());
mul_.setBackend(mImpl->backend());
mul_.forward();
......@@ -323,7 +320,6 @@ class Tensor : public Data,
auto div_ = Div_Op();
div_.associateInput(0, std::make_shared<Tensor>(*this));
div_.associateInput(1, std::make_shared<Tensor>(other));
div_.computeOutputDims();
div_.setDataType(dataType());
div_.setBackend(mImpl->backend());
div_.forward();
......@@ -529,6 +525,7 @@ public:
template <typename expectedType>
const expectedType& get(std::size_t idx) const {
AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "get() can only be used for backends providing a valid host pointer");
AIDGE_ASSERT(idx < mSize, "idx out of range");
return *reinterpret_cast<expectedType *>(mImpl->hostPtr(mImplOffset + idx));
}
......@@ -541,6 +538,7 @@ public:
template <typename expectedType>
void set(std::size_t idx, expectedType value){
AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "get() can only be used for backends providing a valid host pointer");
AIDGE_ASSERT(idx < mSize, "idx out of range");
expectedType* dataPtr = static_cast<expectedType*>(mImpl->hostPtr(mImplOffset + idx));
*dataPtr = value;
......@@ -556,16 +554,11 @@ public:
inline void print() const { fmt::print("{}\n", toString()); }
std::shared_ptr<Tensor> grad() {
// if (!mGrad && mImpl) {
// mGrad = std::make_shared<Tensor>(mDims);
// mGrad->setDataType(mDataType);
// mGrad->setBackend(mImpl->backend());
// // if (mImpl) mGrad->setBackend(mImpl->backend());
// }
return mGrad;
}
void setGrad(std::shared_ptr<Tensor> newGrad) {
mGrad = newGrad;
}
/**
* @brief Associate the gradient with a Tensor instance and set its implementation
......@@ -576,7 +569,7 @@ public:
* @note If Tensor instance and implementation already existed for the gradient
* nothing is done.
*/
void initGradient() {
void initGrad() {
if (!mGrad) {
mGrad = std::make_shared<Tensor>(mDims);
}
......
......@@ -160,7 +160,7 @@ public:
/**
* @brief List outside input connections of the GraphView. The vector
* size is garanteed to match the number of outside inputs of the GraphView. If there is
* size is guaranteed to match the number of outside inputs of the GraphView. If there is
* no external connection to a given input, a pair of nullptr and gk_IODefaultIndex is returned.
* @return std::vector<std::pair<NodePtr, IOIndex_t>>
*/
......@@ -210,7 +210,7 @@ public:
* @brief Compute dimensions of input/output Tensors for each Operator of the
* GraphView object's Nodes.
*/
void forwardDims(const std::vector<std::vector<DimSize_t>> dims = {});
bool forwardDims(const std::vector<std::vector<DimSize_t>>& dims = {}, bool allowDataDependency = false);
/** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const;
......@@ -376,6 +376,12 @@ public:
addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor);
}
inline void updateNodeName(const std::string& oldName, const std::string& newName){
AIDGE_ASSERT(mNodeRegistry.find(oldName) != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted !", oldName, name());
mNodeRegistry[newName] = mNodeRegistry[oldName];
mNodeRegistry.erase(oldName);
}
/**
* @brief Include a GraphView content in the current GraphView and link
* the two sets by linking one Node from each GraphView.
......@@ -480,6 +486,14 @@ public:
*/
IOIndex_t getNbFreeDataInputs() const;
/**
* @brief Force update of GraphView inputs/outputs.
* It may be necessary to force the update of GraphView inputs/outputs when
* connections are added or removed inside the GraphView **after** the nodes
* were added.
*/
void updateInputsOutputs();
private:
///////////////////////////////////////////////////////
// TENSOR MANAGEMENT
......
......@@ -235,8 +235,8 @@ public:
///////////////////////////////////////////////////////
/**
* @brief Vector of pointers to each GraphView containing the object
* @return std::vector<GraphView>
* @brief Set of pointers to each GraphView containing this Node
* @return std::set<GraphView>
*/
inline std::set<std::shared_ptr<GraphView>> views() const noexcept {
std::set<std::shared_ptr<GraphView>> res;
......@@ -460,10 +460,10 @@ private:
// OPERATOR FUNCTIONNAL but commented out to avoid iostream inclusion
// /**
// * @brief operator<< overload to ease print & debug of nodes
// * @param[inout] ostream to print to
// * @param[inout] ostream to print to
// * @param[in] n node to print
// */
// friend std::ostream& operator << (std::ostream& os, Node& n);
// friend std::ostream& operator << (std::ostream& os, Node& n);
};
} // namespace Aidge
......
......@@ -60,7 +60,7 @@ public:
// }
void computeOutputDims() override final;
bool forwardDims(bool allowDataDependency = false) override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
......
......@@ -65,7 +65,7 @@ public:
}
void computeOutputDims() override final;
bool forwardDims(bool /*allowDataDependency*/ = false) override final;
std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
......
......@@ -68,7 +68,7 @@ public:
// }
void computeOutputDims() override final;
bool forwardDims(bool /*allowDataDependency*/ = false) override final;
void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
......
......@@ -24,13 +24,20 @@
#include "aidge/utils/Types.h"
namespace Aidge {
// Default implementation of the Cast operator.
// NOTE(review): the empty ``backend`` default appears to mark this as the
// backend-agnostic fallback used when no registered backend is selected —
// confirm against OperatorImpl's contract.
class Cast_OpImpl : public OperatorImpl {
public:
Cast_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
// Forward computation of the cast; defined out of line.
void forward() override;
};
class Cast_Op : public OperatorTensor,
public Registrable<Cast_Op, std::string, std::unique_ptr<OperatorImpl>(const Cast_Op&)> {
public:
static const std::string Type;
Cast_Op() : OperatorTensor(Type, 1, 0, 1) {}
Cast_Op() : OperatorTensor(Type, 1, 0, 1) {
mImpl = std::make_shared<Cast_OpImpl>(*this);
}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
......@@ -39,10 +46,11 @@ public:
Cast_Op(const Cast_Op& op)
: OperatorTensor(op)
{
if (op.mImpl) {
if (!op.backend().empty()) {
SET_IMPL_MACRO(Cast_Op, *this, op.backend());
} else {
mImpl = nullptr;
}
else {
mImpl = std::make_shared<Cast_OpImpl>(*this);
}
}
......@@ -56,8 +64,6 @@ public:
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
void forward() override;
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
......
......@@ -26,6 +26,12 @@
#include "aidge/utils/Types.h"
namespace Aidge {
// Default implementation of the Concat operator.
// NOTE(review): the empty ``backend`` default appears to mark this as the
// backend-agnostic fallback used when no registered backend is selected —
// confirm against OperatorImpl's contract.
class Concat_OpImpl : public OperatorImpl {
public:
Concat_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
// Forward computation of the concatenation; defined out of line.
void forward() override;
};
enum class ConcatAttr { Axis };
class Concat_Op : public OperatorTensor,
......@@ -45,6 +51,7 @@ public:
if (nbIn == 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
}
mImpl = std::make_shared<Concat_OpImpl>(*this);
}
/**
......@@ -55,10 +62,11 @@ public:
: OperatorTensor(op),
Attributes_(op)
{
if (op.mImpl){
if (!op.backend().empty()) {
SET_IMPL_MACRO(Concat_Op, *this, op.backend());
}else{
mImpl = nullptr;
}
else {
mImpl = std::make_shared<Concat_OpImpl>(*this);
}
}
......@@ -70,7 +78,7 @@ public:
return std::make_shared<Concat_Op>(*this);
}
void computeOutputDims() override final;
bool forwardDims(bool allowDataDependency = false) override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
......
......@@ -108,7 +108,7 @@ public:
// }
void computeOutputDims() override final {
bool forwardDims(bool /*allowDataDependency*/ = false) override final {
// check inputs have been associated
bool associated = true;
for (IOIndex_t i = 0; i < 3; ++i) {
......@@ -118,6 +118,17 @@ public:
associated &= !(getInput(i)->empty());
}
if (associated) {
AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
(getInput(0)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()),
"Wrong input size for Conv operator.");
AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)) &&
(getInput(1)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()) &&
(getInput(1)->template dims<DIM+2>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
"Wrong weight size for Conv operator.");
if(!this->template getAttr<ConvAttr::NoBias>())
AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
(getInput(2)->template dims<1>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
"Wrong bias size for Conv operator.");
std::array<DimSize_t, DIM + 2> outputDims{};
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
......@@ -135,6 +146,8 @@ public:
outputDims[0] = inputDims[0];
mOutputs[0]->resize(outputDims);
}
return associated;
}
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
......@@ -147,7 +160,7 @@ public:
if (firstEltDims.size() != outputDims.size()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
}
if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
// Offset
auto inputIdxDims = firstEltDims; // batch idx is the same
inputIdxDims[1] = 0; // each channel is used so start with the first one
......
......@@ -90,7 +90,7 @@ public:
}
void computeOutputDims() override final {
bool forwardDims(bool /*allowDataDependency*/ = false) override final {
// check inputs have been associated
// TODO : add a check of inputs dimensions ?
bool associated = true;
......@@ -124,6 +124,8 @@ public:
outputDims[0] = inputDims[0];
mOutputs[0]->resize(outputDims);
}
return associated;
}
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
......@@ -133,7 +135,7 @@ public:
if (firstEltDims.size() != outputDims.size()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
}
if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
// Offset
auto inputIdxDims = firstEltDims; // batch idx is the same
......
......@@ -54,7 +54,7 @@ public:
return std::make_shared<Div_Op>(*this);
}
void computeOutputDims() override final;
bool forwardDims(bool allowDataDependency = false) override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment