diff --git a/CHANGELOG b/CHANGELOG
index dc073620be324b16d904e0b05aec38f128c9b2e9..0031beb91337e681884cd5a1d8c420a099a27861 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,14 @@
+# Version 0.2.1 (May 14, 2024)
+
+* rework export mechanism
+* replace `Operator::computeOutputDims()` with `Operator::forwardDims()`
+* automatic docstring decorators for python
+* add implementation of Operators only performing data/format manipulation
+* add many assertions
+* bind Database and Scaling_Op
+
+# Version 0.2.0 (April 11, 2024)
+
 # Version 0.1.1 (January 29, 2024)
 
 [Add] Support of a negative value for Reshape Operator shape attribute.
diff --git a/aidge_core/export/node_export.py b/aidge_core/export/node_export.py
index 477989b037da6f6229bd275ff22974d9ef307848..7262e9a837424158b8896f305894dcc57769520c 100644
--- a/aidge_core/export/node_export.py
+++ b/aidge_core/export/node_export.py
@@ -39,7 +39,6 @@ class ExportNode(ABC):
             if parent_node is not None:
                 self.inputs_dims.append(self.operator.get_input(idx).dims())
             else:
-                print(self.operator.get_input(idx))
                 if self.operator.get_input(idx) is not None:
                     self.inputs_dims.append(self.operator.get_input(idx).dims())
                 else:
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 49ddfc4d76a0602c58c0c768b04ed4b4202f028d..aa1f4f697c1383d43ad17148170e68274bb13005 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -45,7 +45,7 @@ public:
     using attr = typename Attributes_::template attr<e>;
 
     Reshape_Op(const std::vector<std::int64_t>& shape)
-        : OperatorTensor(Type, 1, 0, 1),
+        : OperatorTensor(Type, 2, 0, 1),
           Attributes_(attr<ReshapeAttr::Shape>(shape))
     {
         mImpl = std::make_shared<Reshape_OpImpl>(*this);
@@ -87,7 +87,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape,
+inline std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
                                    		const std::string &name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     return std::make_shared<Node>(std::make_shared<Reshape_Op>(shape), name);
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index 0e336db28ddba4629e61d30e026befe4240c40b6..abda87e6249fb783830eacd3ac655299128a42ae 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -23,6 +23,6 @@ void init_Reshape(py::module& m) {
     .def("get_inputs_name", &Reshape_Op::getInputsName)
     .def("get_outputs_name", &Reshape_Op::getOutputsName);
     declare_registrable<Reshape_Op>(m, "ReshapeOp");
-    m.def("Reshape", &Reshape, py::arg("shape"), py::arg("name") = "");
+    m.def("Reshape", &Reshape, py::arg("shape") = std::vector<std::int64_t>(), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 55fe69678d7d6582f13c48a285fb4f7bfa2a1419..0bc918995c55a914b29987506578491e2c86fae5 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -83,7 +83,6 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
     }
 
     fmt::print(fp.get(),
-                "```mermaid\n"
                 "%%{{init: {{'flowchart': {{ 'curve': 'monotoneY'}}, "
                 "'fontFamily': 'Verdana' }} }}%%\nflowchart TB\n\n");
 
@@ -205,7 +204,6 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
     fmt::print(fp.get(), "classDef producerCls_rootCls stroke:#f00,fill:#ccf\n");
     fmt::print(fp.get(), "classDef genericCls_rootCls stroke:#f00,fill:#f9f9ff,stroke-width:1px,stroke-dasharray: 5 5\n");
     fmt::print(fp.get(), "classDef metaCls_rootCls stroke:#f00,stroke-width:5px\n");
-    fmt::print(fp.get(), "```\n");
     fmt::print(fp.get(), "\n");
 }
 
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index ab53c094dac09879c1bec86509463aab2280ca92..0cce7a5b94692ba99923c1a866f88d9b5faee8a1 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -33,9 +33,8 @@ const std::string Aidge::Reshape_Op::Type = "Reshape";
 bool Aidge::Reshape_Op::forwardDims(bool /*allowDataDependency*/) {
     // check input has been associated
     if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
     }
-
     if (!getInput(0)->empty()) {
         std::vector<DimSize_t> outDims;
         // variables to handle a negative dimension
@@ -43,6 +42,45 @@ bool Aidge::Reshape_Op::forwardDims(bool /*allowDataDependency*/) {
         std::size_t outSize = 1;
         DimIdx_t negativeIndex = 0;
 
+        // Fill shape attr if empty
+        if (this->template getAttr<ReshapeAttr::Shape>().empty()) {
+            if (!getInput(1)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #1 should be associated with a Tensor", type());
+            }
+            if(!getInput(1)->empty()) {
+                this->template getAttr<ReshapeAttr::Shape>().clear(); // If both are provided, the input overrides the attribute
+                this->template getAttr<ReshapeAttr::Shape>().reserve(getInput(1)->size());
+                switch (mInputs[1]->dataType()) {
+                    case DataType::Float64:
+                        std::copy_n(static_cast<double*>(mInputs[1]->getImpl()->rawPtr()),
+                                    getInput(1)->size(),
+                                    std::back_inserter(this->template getAttr<ReshapeAttr::Shape>()));
+                        break;
+                    case DataType::Float32:
+                        std::copy_n(static_cast<float*>(mInputs[1]->getImpl()->rawPtr()),
+                                    getInput(1)->size(),
+                                    std::back_inserter(this->template getAttr<ReshapeAttr::Shape>()));
+                        break;
+                    case DataType::Int64:
+                        std::copy_n(static_cast<std::int64_t*>(mInputs[1]->getImpl()->rawPtr()),
+                                    getInput(1)->size(),
+                                    std::back_inserter(this->template getAttr<ReshapeAttr::Shape>()));
+                        break;
+                    case DataType::Int32:
+                        std::copy_n(static_cast<std::int32_t*>(mInputs[1]->getImpl()->rawPtr()),
+                                    getInput(1)->size(),
+                                    std::back_inserter(this->template getAttr<ReshapeAttr::Shape>()));
+                        break;
+                    default:
+                        AIDGE_THROW_OR_ABORT(std::runtime_error, "Shape input DataType is not supported.");
+                        break;
+                }
+            }
+            else {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Shape attribute or Input is needed");
+            }
+        }
+
         for(std::size_t i = 0; i < this->template getAttr<ReshapeAttr::Shape>().size(); ++i)
         {
             std::int64_t dimSize = this->template getAttr<ReshapeAttr::Shape>()[i];
@@ -54,6 +92,10 @@ bool Aidge::Reshape_Op::forwardDims(bool /*allowDataDependency*/) {
                 dimSize = 1;
                 negativeIndex = static_cast<DimIdx_t>(i);
             }
+            else if (dimSize == 0)
+            {
+                dimSize = getInput(0) -> dims()[i];
+            }
             outDims.push_back(static_cast<DimSize_t>(dimSize));
             outSize *= static_cast<DimSize_t>(dimSize);
         }
diff --git a/src/recipes/FuseMulAdd.cpp b/src/recipes/FuseMulAdd.cpp
index 9a89f8af583fa8567f62ed7c8f42d160d80a1e99..6c849c54a916af10d4d926e7e0d0c339e757e01b 100644
--- a/src/recipes/FuseMulAdd.cpp
+++ b/src/recipes/FuseMulAdd.cpp
@@ -38,11 +38,11 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
     // Fetch the output dimension throught the bias size
     std::shared_ptr<Node> bias = nullptr;
     if (addNode->getParent(0) == matmulNode) {
-        AIDGE_ASSERT(matmulNode->getParent(1), "No bias detected to produce the fuseMulAdd recipe.");
+        AIDGE_ASSERT(addNode->getParent(1), "No bias detected to produce the fuseMulAdd recipe.");
         bias = addNode->getParent(1);
     }
     else if (addNode->getParent(1) == matmulNode) {
-        AIDGE_ASSERT(matmulNode->getParent(0), "No bias detected to produce the fuseMulAdd recipe.");
+        AIDGE_ASSERT(addNode->getParent(0), "No bias detected to produce the fuseMulAdd recipe.");
         bias = addNode->getParent(0);
     }
 
diff --git a/unit_tests/operator/Test_ReshapeImpl.cpp b/unit_tests/operator/Test_ReshapeImpl.cpp
index 5d28005eb40534742aae495948e5269373b81ad1..2685518b5c49f13ff4aee4202163e3aa3267aa5f 100644
--- a/unit_tests/operator/Test_ReshapeImpl.cpp
+++ b/unit_tests/operator/Test_ReshapeImpl.cpp
@@ -20,48 +20,105 @@ using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Reshape(forward)") {
     SECTION("1D Tensor") {
-        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array1D<float,6> {
-            {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,3> {
-            {
-                {1.0, 2.0, 3.0},
-                {4.0, 5.0, 6.0}
-            }
-        });
+        SECTION("Shape As Input") {
+            std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array1D<float,6> {
+                {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}
+            });
+            std::shared_ptr<Tensor> shape = std::make_shared<Tensor>(Array1D<float,2> {
+                {2, 3}
+            });
+            std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,3> {
+                {
+                    {1.0, 2.0, 3.0},
+                    {4.0, 5.0, 6.0}
+                }
+            });
 
-        std::shared_ptr<Node> myReshape = Reshape({2, 3});
-        auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
-        op->associateInput(0, input);
-        op->setDataType(DataType::Float32);
-        op->setBackend("cpu");
-        myReshape->forward();
+            std::shared_ptr<Node> myReshape = Reshape();
+            auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
+            op->associateInput(0, input);
+            op->associateInput(1, shape);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            myReshape->forward();
 
-        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+            REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        }
+        SECTION("Shape As Attribute") {
+            std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array1D<float,6> {
+                {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}
+            });
+            std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,3> {
+                {
+                    {1.0, 2.0, 3.0},
+                    {4.0, 5.0, 6.0}
+                }
+            });
+
+            std::shared_ptr<Node> myReshape = Reshape({2, 3});
+            auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
+            op->associateInput(0, input);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            myReshape->forward();
+
+            REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        }
     }
-    SECTION("2D Tensor") {
-        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<float,2,3> {
-            {
-                {1.0, 2.0, 3.0},
-                {4.0, 5.0, 6.0}
-            }
+    SECTION("2D Tensor") {
+        SECTION("Shape As Input") {
+            std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<float,2,3> {
+                {
+                    {1.0, 2.0, 3.0},
+                    {4.0, 5.0, 6.0}
+                }
+
+            });
+            std::shared_ptr<Tensor> shape = std::make_shared<Tensor>(Array1D<float,2> {
+                {3, 2}
+            });
+            std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,3,2> {
+                {
+                    {1.0, 2.0},
+                    {3.0, 4.0},
+                    {5.0, 6.0}
+                }
+            });
+
+            std::shared_ptr<Node> myReshape = Reshape();
+            auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
+            op->associateInput(0, input);
+            op->associateInput(1, shape);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            myReshape->forward();
+
+            REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        }
+        SECTION("Shape As Attribute") {
+            std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<float,2,3> {
+                {
+                    {1.0, 2.0, 3.0},
+                    {4.0, 5.0, 6.0}
+                }
 
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,3,2> {
-            {
-                {1.0, 2.0},
-                {3.0, 4.0},
-                {5.0, 6.0}
-            }
-        });
+            });
+            std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,3,2> {
+                {
+                    {1.0, 2.0},
+                    {3.0, 4.0},
+                    {5.0, 6.0}
+                }
+            });
 
-        std::shared_ptr<Node> myReshape = Reshape({3, 2});
-        auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
-        op->associateInput(0, input);
-        op->setDataType(DataType::Float32);
-        op->setBackend("cpu");
-        myReshape->forward();
+            std::shared_ptr<Node> myReshape = Reshape({3, 2});
+            auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
+            op->associateInput(0, input);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            myReshape->forward();
 
-        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+            REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        }
     }
 }
\ No newline at end of file
diff --git a/version.txt b/version.txt
index 0ea3a944b399d25f7e1b8fe684d754eb8da9fe7f..0c62199f16ac1e2d7f7ae75b420c1231325dff4e 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.2.0
+0.2.1