From 2b6d2d786f695b9ff97454fce1915cf643b4a88b Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 11 Apr 2024 17:19:14 +0200
Subject: [PATCH] Renamed computeOutputDims() with forwardDims()

---
 aidge_core/unit_tests/test_impl.py | 2 +-
 .../unit_tests/test_operator_binding.py | 6 +--
 include/aidge/data/Tensor.hpp | 4 --
 include/aidge/operator/Add.hpp | 2 +-
 include/aidge/operator/AvgPooling.hpp | 23 +---------
 include/aidge/operator/BatchNorm.hpp | 20 +-------
 include/aidge/operator/Concat.hpp | 2 +-
 include/aidge/operator/Conv.hpp | 4 +-
 include/aidge/operator/ConvDepthWise.hpp | 4 +-
 include/aidge/operator/Div.hpp | 2 +-
 include/aidge/operator/FC.hpp | 2 +-
 include/aidge/operator/Gather.hpp | 2 +-
 include/aidge/operator/GenericOperator.hpp | 12 +++---
 .../aidge/operator/GlobalAveragePooling.hpp | 2 +-
 include/aidge/operator/Identity.hpp | 4 +-
 include/aidge/operator/MatMul.hpp | 2 +-
 include/aidge/operator/MaxPooling.hpp | 2 +-
 include/aidge/operator/Memorize.hpp | 4 +-
 include/aidge/operator/MetaOperator.hpp | 2 +-
 include/aidge/operator/Mul.hpp | 2 +-
 include/aidge/operator/OperatorTensor.hpp | 4 +-
 include/aidge/operator/Pad.hpp | 2 +-
 include/aidge/operator/Pop.hpp | 2 +-
 include/aidge/operator/Pow.hpp | 2 +-
 include/aidge/operator/Producer.hpp | 4 +-
 include/aidge/operator/ReduceMean.hpp | 2 +-
 include/aidge/operator/Reshape.hpp | 2 +-
 include/aidge/operator/Slice.hpp | 2 +-
 include/aidge/operator/Sub.hpp | 2 +-
 include/aidge/operator/Transpose.hpp | 2 +-
 .../operator/pybind_GenericOperator.cpp | 2 +-
 .../operator/pybind_OperatorTensor.cpp | 4 +-
 src/graph/GraphView.cpp | 4 +-
 src/operator/Add.cpp | 2 +-
 src/operator/AvgPooling.cpp | 4 +-
 src/operator/BatchNorm.cpp | 2 +-
 src/operator/Concat.cpp | 2 +-
 src/operator/Div.cpp | 2 +-
 src/operator/FC.cpp | 2 +-
 src/operator/Gather.cpp | 2 +-
 src/operator/GenericOperator.cpp | 10 ++---
 src/operator/GlobalAveragePooling.cpp | 2 +-
 src/operator/MatMul.cpp | 2 +-
 src/operator/Memorize.cpp | 4 +-
 src/operator/Mul.cpp | 2 +-
 src/operator/OperatorTensor.cpp | 10 ++---
 src/operator/Pop.cpp | 2 +-
 src/operator/Pow.cpp | 2 +-
 src/operator/ReduceMean.cpp | 2 +-
 src/operator/Reshape.cpp | 2 +-
 src/operator/Slice.cpp | 2 +-
 src/operator/Sub.cpp | 2 +-
 src/recipes/HorizontalTiling.cpp | 2 +-
 unit_tests/operator/Test_Div_Op.cpp | 16 +++----
 .../operator/Test_GlobalAveragePooling_Op.cpp | 12 +++---
 unit_tests/operator/Test_MatMul_Op.cpp | 38 ++++++++---------
 unit_tests/operator/Test_MetaOperator.cpp | 8 ++--
 unit_tests/operator/Test_MetaOperator.py | 42 +++++++++++++++++++
 unit_tests/operator/Test_Mul_Op.cpp | 16 +++----
 unit_tests/operator/Test_Pow_Op.cpp | 16 +++----
 unit_tests/operator/Test_Sub_Op.cpp | 16 +++----
 unit_tests/scheduler/Test_Scheduler.cpp | 4 +-
 62 files changed, 182 insertions(+), 183 deletions(-)
 create mode 100644 unit_tests/operator/Test_MetaOperator.py

diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py
index 6b83b048c..1a723a04a 100644
--- a/aidge_core/unit_tests/test_impl.py
+++ b/aidge_core/unit_tests/test_impl.py
@@ -39,7 +39,7 @@ class test_OperatorImpl(unittest.TestCase):
         global GLOBAL_CPT
         matmul = aidge_core.GenericOperator("MatMul", 1, 0, 1, name="MatMul0")
         generic_matmul_op = matmul.get_operator()
-        generic_matmul_op.set_compute_output_dims(lambda x: x)
+        generic_matmul_op.set_forward_dims(lambda x: x)
         generic_matmul_op.set_impl(testImpl(generic_matmul_op))
         generic_matmul_op.forward()
         self.assertEqual(GLOBAL_CPT, 1)
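The hunk above is the Python-facing half of the rename. For readers following the API change, here is a minimal sketch of the renamed Python entry points, assembled from the updated tests in this patch (the Producer/sequential setup mirrors test_operator_binding.py below; it is illustrative, not part of the diff):

    import aidge_core

    # One data input, no parameter inputs, one output.
    inp = aidge_core.Producer([25, 25], name="In")
    gen = aidge_core.GenericOperator("genOp", 1, 0, 1, name="genOp")
    aidge_core.sequential([inp, gen])

    op = gen.get_operator()
    op.set_forward_dims(lambda dims: dims)   # was set_compute_output_dims()
    op.forward_dims()                        # was compute_output_dims()
    assert op.dims_forwarded()               # was output_dims_forwarded()
    assert op.get_output(0).dims() == [25, 25]

diff --git 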
a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py index c94960733..164aee726 100644 --- a/aidge_core/unit_tests/test_operator_binding.py +++ b/aidge_core/unit_tests/test_operator_binding.py @@ -92,14 +92,14 @@ class test_operator_binding(unittest.TestCase): attrs.set_attr("d", 23.89) self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89) - def test_compute_output_dims(self): + def test_forward_dims(self): in_dims=[25, 25] input = aidge_core.Producer(in_dims, name="In") genOp = aidge_core.GenericOperator("genOp", 1, 0, 1, name="genOp") _ = aidge_core.sequential([input, genOp]) self.assertListEqual(genOp.get_operator().get_output(0).dims(), []) - genOp.get_operator().set_compute_output_dims(lambda x:x) - genOp.get_operator().compute_output_dims() + genOp.get_operator().set_forward_dims(lambda x:x) + genOp.get_operator().forward_dims() self.assertListEqual(genOp.get_operator().get_output(0).dims(), in_dims) def test_set_impl(self): diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp index e686737a4..ead6c19fa 100644 --- a/include/aidge/data/Tensor.hpp +++ b/include/aidge/data/Tensor.hpp @@ -251,7 +251,6 @@ class Tensor : public Data, auto add_ = Add_Op(2); add_.associateInput(0, std::make_shared<Tensor>(*this)); add_.associateInput(1, std::make_shared<Tensor>(other)); - add_.computeOutputDims(); add_.setDataType(dataType()); add_.setBackend(mImpl->backend()); add_.forward(); @@ -275,7 +274,6 @@ class Tensor : public Data, auto sub_ = Sub_Op(); sub_.associateInput(0, std::make_shared<Tensor>(*this)); sub_.associateInput(1, std::make_shared<Tensor>(other)); - sub_.computeOutputDims(); sub_.setDataType(dataType()); sub_.setBackend(mImpl->backend()); sub_.forward(); @@ -299,7 +297,6 @@ class Tensor : public Data, auto mul_ = Mul_Op(); mul_.associateInput(0, std::make_shared<Tensor>(*this)); mul_.associateInput(1, std::make_shared<Tensor>(other)); - mul_.computeOutputDims(); mul_.setDataType(dataType()); mul_.setBackend(mImpl->backend()); mul_.forward(); @@ -323,7 +320,6 @@ class Tensor : public Data, auto div_ = Div_Op(); div_.associateInput(0, std::make_shared<Tensor>(*this)); div_.associateInput(1, std::make_shared<Tensor>(other)); - div_.computeOutputDims(); div_.setDataType(dataType()); div_.setBackend(mImpl->backend()); div_.forward(); diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp index 249303620..4ac14bdae 100644 --- a/include/aidge/operator/Add.hpp +++ b/include/aidge/operator/Add.hpp @@ -60,7 +60,7 @@ public: // } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override; diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp index fafd3b2d0..af2993d67 100644 --- a/include/aidge/operator/AvgPooling.hpp +++ b/include/aidge/operator/AvgPooling.hpp @@ -65,28 +65,7 @@ public: } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { - // check inputs have been associated - if (!getInput(0)) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type()); - } - if (!(getInput(0)->empty())) { - std::array<DimSize_t, DIM + 2> outputDims; - const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>()); - outputDims[0] = inputDims[0]; - outputDims[1] = inputDims[1]; - - for (std::size_t dim = 0; 
dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) { - outputDims[dim+2] = 1 + static_cast<DimSize_t>( - std::floor(static_cast<float>(inputDims[dim+2] - - this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) / - static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim]))); - } - getOutput(0)->resize(outputDims); - return true; - } - return false; - } + bool forwardDims(bool /*allowDataDependency*/ = false) override final; std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>> diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp index 7f3b60a9f..aa53f8c43 100644 --- a/include/aidge/operator/BatchNorm.hpp +++ b/include/aidge/operator/BatchNorm.hpp @@ -68,25 +68,7 @@ public: // } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { - // check inputs have been associated - bool associated = true; - for (IOIndex_t i = 0; i < nbInputs(); ++i) { - associated &= !(getInput(i)->empty()); - } - if (associated) { - const DimSize_t nbFeatures = getInput(0)->dims()[1]; - for (std::size_t i = nbData(); i < nbInputs(); ++i) { - if(getInput(i)->size() != nbFeatures) { - // /!\ Input size should be handled BEFORE calling this function - // This should raise an error - getInput(i)->resize({getInput(0)->dims()[1]}); - } - } - mOutputs[0]->resize(getInput(0)->dims()); - } - return associated; - } + bool forwardDims(bool /*allowDataDependency*/ = false) override final; void setBackend(const std::string &name, DeviceIdx_t device = 0) override final; diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp index 32a519dbc..a9a4c9253 100644 --- a/include/aidge/operator/Concat.hpp +++ b/include/aidge/operator/Concat.hpp @@ -78,7 +78,7 @@ public: return std::make_shared<Concat_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override; diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp index f27c93422..d03bcda4e 100644 --- a/include/aidge/operator/Conv.hpp +++ b/include/aidge/operator/Conv.hpp @@ -108,7 +108,7 @@ public: // } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { + bool forwardDims(bool /*allowDataDependency*/ = false) override final { // check inputs have been associated bool associated = true; for (IOIndex_t i = 0; i < 3; ++i) { @@ -149,7 +149,7 @@ public: if (firstEltDims.size() != outputDims.size()) { AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions."); } - if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) { + if ((outputDims.size() == (DIM+2)) && dimsForwarded()) { // Offset auto inputIdxDims = firstEltDims; // batch idx is the same inputIdxDims[1] = 0; // each channel is used so start with the first one diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp index 8ffe18c04..2337ff66f 100644 --- a/include/aidge/operator/ConvDepthWise.hpp +++ b/include/aidge/operator/ConvDepthWise.hpp @@ -90,7 +90,7 @@ public: } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { + bool forwardDims(bool /*allowDataDependency*/ = false) override final { // check inputs have been associated // TODO : add a check of inputs dimensions ? 
bool associated = true; @@ -135,7 +135,7 @@ public: if (firstEltDims.size() != outputDims.size()) { AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions."); } - if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) { + if ((outputDims.size() == (DIM+2)) && dimsForwarded()) { // Offset auto inputIdxDims = firstEltDims; // batch idx is the same diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp index 043422ae2..566f4a6ae 100644 --- a/include/aidge/operator/Div.hpp +++ b/include/aidge/operator/Div.hpp @@ -54,7 +54,7 @@ public: return std::make_shared<Div_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override; diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp index 6a562c59e..b97874f4e 100644 --- a/include/aidge/operator/FC.hpp +++ b/include/aidge/operator/FC.hpp @@ -71,7 +71,7 @@ public: void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final; - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override; diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp index feb2474b0..7534b6695 100644 --- a/include/aidge/operator/Gather.hpp +++ b/include/aidge/operator/Gather.hpp @@ -80,7 +80,7 @@ public: return std::make_shared<Gather_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override; diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp index 49885f9fd..f0b7e92d7 100644 --- a/include/aidge/operator/GenericOperator.hpp +++ b/include/aidge/operator/GenericOperator.hpp @@ -31,7 +31,7 @@ class GenericOperator_Op private: using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>; - ComputeDimsFunc mComputeOutputDims; + ComputeDimsFunc mForwardDims; public: GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut) @@ -61,18 +61,18 @@ public: } public: - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; - bool outputDimsForwarded() const override final; + bool dimsForwarded() const override final; void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); } void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); } - // Helper functions that can be used with setComputeOutputDims(): + // Helper functions that can be used with setForwardDims(): static const ComputeDimsFunc Identity; static const ComputeDimsFunc InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs); - inline void setComputeOutputDims(ComputeDimsFunc func) { - mComputeOutputDims = func; + inline void setForwardDims(ComputeDimsFunc func) { + mForwardDims = func; } }; diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp index 
1552d0e08..74529a0ba 100644 --- a/include/aidge/operator/GlobalAveragePooling.hpp +++ b/include/aidge/operator/GlobalAveragePooling.hpp @@ -52,7 +52,7 @@ public: return std::make_shared<GlobalAveragePooling_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string &name, DeviceIdx_t device = 0) override final; diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp index f49711837..367aa4e2d 100644 --- a/include/aidge/operator/Identity.hpp +++ b/include/aidge/operator/Identity.hpp @@ -63,7 +63,7 @@ public: return std::make_shared<Identity_Op>(*this); } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing + bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing /** * @brief Check if output dimensions have been computed. @@ -73,7 +73,7 @@ public: * @return true Input has dimensions. * @return false Input has no dimensions or is a nullptr. */ - bool outputDimsForwarded() const override final { + bool dimsForwarded() const override final { return mInputs[0] ? !mInputs[0]->empty() : false; } diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp index 6f7ac2348..580d720e6 100644 --- a/include/aidge/operator/MatMul.hpp +++ b/include/aidge/operator/MatMul.hpp @@ -64,7 +64,7 @@ public: * @note - Second input is 1-D: it is promoted to a matrix by appending a 1 to its * dimensions (D) -> (D,1). The appended 1 is removed after computation. */ - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp index 54eeccef7..8aff15826 100644 --- a/include/aidge/operator/MaxPooling.hpp +++ b/include/aidge/operator/MaxPooling.hpp @@ -84,7 +84,7 @@ public: } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { + bool forwardDims(bool /*allowDataDependency*/ = false) override final { if (!getInput(0)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type()); } diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp index 6f668a942..6b0ace2eb 100644 --- a/include/aidge/operator/Memorize.hpp +++ b/include/aidge/operator/Memorize.hpp @@ -82,8 +82,8 @@ public: void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; - bool computeOutputDims(bool allowDataDependency = false) override final; - bool outputDimsForwarded() const override; + bool forwardDims(bool allowDataDependency = false) override final; + bool dimsForwarded() const override; void updateConsummerProducer() override; void forward() override; diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp index 44c52d9eb..c677da0f2 100644 --- a/include/aidge/operator/MetaOperator.hpp +++ b/include/aidge/operator/MetaOperator.hpp @@ -81,7 +81,7 @@ public: mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data); } - bool computeOutputDims(bool allowDataDependency = false) override final { + bool forwardDims(bool allowDataDependency = false) override final { // Check first that all required inputs are available, otherwise // mGraph->forwardDims() will fail! 
bool forwarded = true; diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp index 1ba0f5405..f53a38a82 100644 --- a/include/aidge/operator/Mul.hpp +++ b/include/aidge/operator/Mul.hpp @@ -57,7 +57,7 @@ public: return std::make_shared<Mul_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override; diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp index c09b58939..6086c5145 100644 --- a/include/aidge/operator/OperatorTensor.hpp +++ b/include/aidge/operator/OperatorTensor.hpp @@ -80,8 +80,8 @@ public: * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area. */ virtual std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const; - virtual bool computeOutputDims(bool allowDataDependency = false); - virtual bool outputDimsForwarded() const; + virtual bool forwardDims(bool allowDataDependency = false); + virtual bool dimsForwarded() const; /////////////////////////////////////////////////// virtual void setDataType(const DataType& dataType) const override; diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp index e13face55..a4e4ebdce 100644 --- a/include/aidge/operator/Pad.hpp +++ b/include/aidge/operator/Pad.hpp @@ -74,7 +74,7 @@ public: } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { + bool forwardDims(bool /*allowDataDependency*/ = false) override final { bool associated = true; for (IOIndex_t i = 0; i < nbInputs(); ++i) { if (!getInput(i)) { diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp index 372faff6a..2219f30ec 100644 --- a/include/aidge/operator/Pop.hpp +++ b/include/aidge/operator/Pop.hpp @@ -76,7 +76,7 @@ public: void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void updateConsummerProducer() override; void forward() override; diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp index b83cf15d6..08c4de2a2 100644 --- a/include/aidge/operator/Pow.hpp +++ b/include/aidge/operator/Pow.hpp @@ -53,7 +53,7 @@ public: return std::make_shared<Pow_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp index e21aa9aea..7e9072857 100644 --- a/include/aidge/operator/Producer.hpp +++ b/include/aidge/operator/Producer.hpp @@ -86,9 +86,9 @@ public: AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input."); } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { return true; } + bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } - inline bool outputDimsForwarded() const noexcept override final { return true; } + inline bool dimsForwarded() const noexcept override final { return true; } inline const std::vector<DimSize_t> dims() const 
noexcept { return mOutputs[0]->dims(); } diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp index 25fba5e79..ff8d8b069 100644 --- a/include/aidge/operator/ReduceMean.hpp +++ b/include/aidge/operator/ReduceMean.hpp @@ -69,7 +69,7 @@ class ReduceMean_Op : public OperatorTensor, return std::make_shared<ReduceMean_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string &name, DeviceIdx_t device = 0) override final; diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp index bf0f7ee34..49ddfc4d7 100644 --- a/include/aidge/operator/Reshape.hpp +++ b/include/aidge/operator/Reshape.hpp @@ -75,7 +75,7 @@ public: return std::make_shared<Reshape_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp index 69278c59b..7db5867fe 100644 --- a/include/aidge/operator/Slice.hpp +++ b/include/aidge/operator/Slice.hpp @@ -69,7 +69,7 @@ public: */ std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string &name, DeviceIdx_t device = 0) override { SET_IMPL_MACRO(Slice_Op, *this, name); diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp index 6969a6d83..e5d844285 100644 --- a/include/aidge/operator/Sub.hpp +++ b/include/aidge/operator/Sub.hpp @@ -57,7 +57,7 @@ public: return std::make_shared<Sub_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp index 2bb18b019..db432f2da 100644 --- a/include/aidge/operator/Transpose.hpp +++ b/include/aidge/operator/Transpose.hpp @@ -71,7 +71,7 @@ class Transpose_Op : public OperatorTensor, return std::make_shared<Transpose_Op<DIM>>(*this); } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { + bool forwardDims(bool /*allowDataDependency*/ = false) override final { if (!getInput(0)->empty()) { auto attr = (this)->getStaticAttributes(); const std::array<DimSize_t, DIM>& outDimsOrder = static_cast<const std::array<DimSize_t, DIM>&>(std::get<0>(attr)); diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp index 31ee946fc..897cd359a 100644 --- a/python_binding/operator/pybind_GenericOperator.cpp +++ b/python_binding/operator/pybind_GenericOperator.cpp @@ -25,7 +25,7 @@ void init_GenericOperator(py::module& m) { py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, DynamicAttributes, OperatorTensor>(m, "GenericOperatorOp", py::multiple_inheritance()) .def_readonly_static("identity", &GenericOperator_Op::Identity) - .def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function")); + .def("set_forward_dims", &GenericOperator_Op::setForwardDims, 
py::arg("computation_function")); // &GenericOperator m.def("GenericOperator", diff --git a/python_binding/operator/pybind_OperatorTensor.cpp b/python_binding/operator/pybind_OperatorTensor.cpp index abf3f7e8f..4d4541ab3 100644 --- a/python_binding/operator/pybind_OperatorTensor.cpp +++ b/python_binding/operator/pybind_OperatorTensor.cpp @@ -30,8 +30,8 @@ void init_OperatorTensor(py::module& m){ .def("set_output", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setOutput, py::arg("outputIdx"), py::arg("data")) .def("set_input", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setInput, py::arg("outputIdx"), py::arg("data")) - .def("compute_output_dims", &OperatorTensor::computeOutputDims, py::arg("allow_data_dependency") = false) - .def("output_dims_forwarded", &OperatorTensor::outputDimsForwarded) + .def("forward_dims", &OperatorTensor::forwardDims, py::arg("allow_data_dependency") = false) + .def("dims_forwarded", &OperatorTensor::dimsForwarded) ; } } diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp index 88c7383a9..2ed5a02a4 100644 --- a/src/graph/GraphView.cpp +++ b/src/graph/GraphView.cpp @@ -431,8 +431,8 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_ const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator()); // Recompute everytime, even if it was already computed in a // previous call of forwardDims(), as the graph may have changed! - op->computeOutputDims(allowDataDependency); - if (!op->outputDimsForwarded()) { + op->forwardDims(allowDataDependency); + if (!op->dimsForwarded()) { nextList.insert(nodePtr); } } diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp index 9f9ad681c..8fbb4cdf7 100644 --- a/src/operator/Add.cpp +++ b/src/operator/Add.cpp @@ -32,7 +32,7 @@ Aidge::Add_Op::Add_Op(const Add_Op& op) } } -bool Aidge::Add_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated bool associated = (nbInputs() > 0); // do not compute anything if no input for (IOIndex_t i = 0; i < nbInputs(); ++i) { diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp index acb097668..825fa5649 100644 --- a/src/operator/AvgPooling.cpp +++ b/src/operator/AvgPooling.cpp @@ -36,7 +36,7 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op): Operator } template <Aidge::DimIdx_t DIM> -void Aidge::AvgPooling_Op<DIM>::computeOutputDims() { +bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated if (!getInput(0)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type()); @@ -69,7 +69,7 @@ Aidge::AvgPooling_Op<DIM>::computeReceptiveField(const std::vector<Aidge::DimSiz if (firstEltDims.size() != outputDims.size()) { AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions."); } - if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) { + if ((outputDims.size() == (DIM+2)) && dimsForwarded()) { // Offset std::vector<DimSize_t> inputIdxDims = firstEltDims; diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp index b14f02388..488a77b8f 100644 --- a/src/operator/BatchNorm.cpp +++ b/src/operator/BatchNorm.cpp @@ -36,7 +36,7 @@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op): OperatorTen } template <Aidge::DimIdx_t 
DIM> -void Aidge::BatchNorm_Op<DIM>::computeOutputDims() { +bool Aidge::BatchNorm_Op<DIM>::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated bool associated = true; for (IOIndex_t i = 0; i < nbInputs(); ++i) { diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp index 929000a5f..68f37bc54 100644 --- a/src/operator/Concat.cpp +++ b/src/operator/Concat.cpp @@ -59,7 +59,7 @@ void Aidge::Concat_OpImpl::forward() { const std::string Aidge::Concat_Op::Type = "Concat"; -bool Aidge::Concat_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) { // Every input is non-empty with the same number of dimensions bool associated = (getInput(0) != nullptr); associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp index 0c43d7a3a..f22a93f80 100644 --- a/src/operator/Div.cpp +++ b/src/operator/Div.cpp @@ -22,7 +22,7 @@ const std::string Aidge::Div_Op::Type = "Div"; -bool Aidge::Div_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Div_Op::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated if (!getInput(0) || !getInput(1)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected"); diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp index acb1896ff..ba7e29e7b 100644 --- a/src/operator/FC.cpp +++ b/src/operator/FC.cpp @@ -36,7 +36,7 @@ void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::sh mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()}); } -bool Aidge::FC_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) { bool associated = true; for (IOIndex_t i = 0; i < nbInputs(); ++i) { if (!getInput(i)) { diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp index 3b53aa5a2..7b0945271 100644 --- a/src/operator/Gather.cpp +++ b/src/operator/Gather.cpp @@ -52,7 +52,7 @@ void Aidge::Gather_OpImpl::forward() { const std::string Aidge::Gather_Op::Type = "Gather"; -bool Aidge::Gather_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Gather_Op::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated if (!getInput(0)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected"); diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp index 0472a67cb..fdf3036fe 100644 --- a/src/operator/GenericOperator.cpp +++ b/src/operator/GenericOperator.cpp @@ -25,8 +25,8 @@ const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Inpu return [nbOutputs, inputIdx](const std::vector<std::vector<std::size_t>>& inputsDims) { return std::vector<std::vector<std::size_t>>(nbOutputs, inputsDims[inputIdx]); }; } -bool Aidge::GenericOperator_Op::computeOutputDims(bool /*allowDataDependency*/) { - if (mComputeOutputDims) { +bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) { + if (mForwardDims) { std::vector<std::vector<std::size_t>> inputsDims(nbInputs(), std::vector<std::size_t>()); for (std::size_t i = 0; i < nbInputs(); ++i) { if (getInput(i)) { @@ -34,7 +34,7 @@ bool Aidge::GenericOperator_Op::computeOutputDims(bool /*allowDataDependency*/) } } - const auto& outputsDims = mComputeOutputDims(inputsDims); + const auto& outputsDims = mForwardDims(inputsDims); AIDGE_ASSERT((outputsDims.size() == nbOutputs()), "The 
provided ComputeDimsFunc function returns the wrong number of outputs"); for (std::size_t i = 0; i < nbOutputs(); ++i) { mOutputs[i]->resize(outputsDims[i]); @@ -47,8 +47,8 @@ bool Aidge::GenericOperator_Op::computeOutputDims(bool /*allowDataDependency*/) } } -bool Aidge::GenericOperator_Op::outputDimsForwarded() const { - if (mComputeOutputDims) { +bool Aidge::GenericOperator_Op::dimsForwarded() const { + if (mForwardDims) { return !(mOutputs[0]->empty()); } else { diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp index a851faee8..b09426f8f 100644 --- a/src/operator/GlobalAveragePooling.cpp +++ b/src/operator/GlobalAveragePooling.cpp @@ -21,7 +21,7 @@ const std::string Aidge::GlobalAveragePooling_Op::Type = "GlobalAveragePooling"; -bool Aidge::GlobalAveragePooling_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) { // error checking if (!getInput(0)) { AIDGE_THROW_OR_ABORT(std::runtime_error, diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp index 223aeb93c..8f7548155 100644 --- a/src/operator/MatMul.cpp +++ b/src/operator/MatMul.cpp @@ -20,7 +20,7 @@ const std::string Aidge::MatMul_Op::Type = "MatMul"; -bool Aidge::MatMul_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) { if (!getInput(0) || !getInput(1)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input. Cannot compute output dimensions for MatMul Operator."); } diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp index 4e802816a..e08b5f105 100644 --- a/src/operator/Memorize.cpp +++ b/src/operator/Memorize.cpp @@ -87,7 +87,7 @@ void Aidge::Memorize_Op::updateConsummerProducer() { this->template getAttr<MemorizeAttr::ForwardStep>() = 0; } -bool Aidge::Memorize_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Memorize_Op::forwardDims(bool /*allowDataDependency*/) { for (size_t i = 0; i < 2; ++i) { if (!getInput(i)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i); @@ -110,7 +110,7 @@ bool Aidge::Memorize_Op::computeOutputDims(bool /*allowDataDependency*/) { return false; } -bool Aidge::Memorize_Op::outputDimsForwarded() const { +bool Aidge::Memorize_Op::dimsForwarded() const { // Only check the output dims bool forwarded = true; // check outputs have been filled diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp index 253c1ba2f..d4bfdc66b 100644 --- a/src/operator/Mul.cpp +++ b/src/operator/Mul.cpp @@ -23,7 +23,7 @@ const std::string Aidge::Mul_Op::Type = "Mul"; -bool Aidge::Mul_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated if (!getInput(0) || !getInput(1)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected"); diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp index 8390ee406..2a60f580f 100644 --- a/src/operator/OperatorTensor.cpp +++ b/src/operator/OperatorTensor.cpp @@ -119,7 +119,7 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_ if (nbInputs() != nbData()) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. 
Must be handled in an overrided function."); } - if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) { + if (!dimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); } for (DimIdx_t i = 0; i < outputDims.size(); ++i) { @@ -131,7 +131,7 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_ return std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>(nbData(),std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>(firstEltDims, outputDims)); } -bool Aidge::OperatorTensor::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated bool associated = (nbInputs() > 0); // do not compute anything if no input for (IOIndex_t i = 0; i < nbInputs(); ++i) { @@ -155,7 +155,7 @@ bool Aidge::OperatorTensor::computeOutputDims(bool /*allowDataDependency*/) { return associated; } -bool Aidge::OperatorTensor::outputDimsForwarded() const { +bool Aidge::OperatorTensor::dimsForwarded() const { bool forwarded = true; // check both inputs and outputs have been filled for (IOIndex_t i = 0; i < nbInputs(); ++i) { @@ -181,8 +181,8 @@ void Aidge::OperatorTensor::setDataType(const DataType& dataType) const { } void Aidge::OperatorTensor::forward() { - if (!outputDimsForwarded()) { - computeOutputDims(); + if (!dimsForwarded()) { + forwardDims(); } Operator::forward(); diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp index 6f09d402a..18325d80a 100644 --- a/src/operator/Pop.cpp +++ b/src/operator/Pop.cpp @@ -37,7 +37,7 @@ void Aidge::Pop_OpImpl::forward() { const std::string Aidge::Pop_Op::Type = "Pop"; -bool Aidge::Pop_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated if (!getInput(0)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type()); diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp index 32194498b..6bdfb48d1 100644 --- a/src/operator/Pow.cpp +++ b/src/operator/Pow.cpp @@ -22,7 +22,7 @@ const std::string Aidge::Pow_Op::Type = "Pow"; -bool Aidge::Pow_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated if (!getInput(0) || !getInput(1)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected"); diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp index f00ea98a9..28e39b6d3 100644 --- a/src/operator/ReduceMean.cpp +++ b/src/operator/ReduceMean.cpp @@ -26,7 +26,7 @@ const std::string Aidge::ReduceMean_Op::Type = "ReduceMean"; -bool Aidge::ReduceMean_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) { if (!getInput(0)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor"); } diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp index 8431971da..ab53c094d 100644 --- a/src/operator/Reshape.cpp +++ b/src/operator/Reshape.cpp @@ -30,7 +30,7 @@ void Aidge::Reshape_OpImpl::forward() { const std::string Aidge::Reshape_Op::Type = "Reshape"; -bool Aidge::Reshape_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Reshape_Op::forwardDims(bool /*allowDataDependency*/) { 
// check input has been associated if (!getInput(0)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected"); diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp index 161f1d336..38316718c 100644 --- a/src/operator/Slice.cpp +++ b/src/operator/Slice.cpp @@ -24,7 +24,7 @@ const std::string Aidge::Slice_Op::Type = "Slice"; -bool Aidge::Slice_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Slice_Op::forwardDims(bool /*allowDataDependency*/) { // check input have been associated if (!getInput(0) || (getInput(0)->empty())) { AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type()); diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp index 82b99b876..b3d8351de 100644 --- a/src/operator/Sub.cpp +++ b/src/operator/Sub.cpp @@ -24,7 +24,7 @@ const std::string Aidge::Sub_Op::Type = "Sub"; -bool Aidge::Sub_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated if (!getInput(0) || !getInput(1)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected"); diff --git a/src/recipes/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp index 8e27fea58..7959e1b70 100644 --- a/src/recipes/HorizontalTiling.cpp +++ b/src/recipes/HorizontalTiling.cpp @@ -41,7 +41,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std: if (op->nbOutputs() != 1 || op->nbData() > 1) { AIDGE_INTERNAL_ASSERT("Only slice Operators with one output and at most one input for now."); } - if (!op->outputDimsForwarded()) { + if (!op->dimsForwarded()) { AIDGE_INTERNAL_ASSERT("Dimensions must be forwarded before any tiling"); } // start by doing a tiling with strict dimensions division diff --git a/unit_tests/operator/Test_Div_Op.cpp b/unit_tests/operator/Test_Div_Op.cpp index e659742c0..d11f72474 100644 --- a/unit_tests/operator/Test_Div_Op.cpp +++ b/unit_tests/operator/Test_Div_Op.cpp @@ -20,7 +20,7 @@ #include "aidge/operator/OperatorTensor.hpp" namespace Aidge { -TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]") { +TEST_CASE("[core/operator] Div_Op(forwardDims)", "[Div][forwardDims]") { constexpr std::uint16_t NBTRIALS = 10; // Create a random number generator @@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims] /** * @todo Special case: scalar not handled yet by - * ``OperatorTensor::computeOutputDims()`` + * ``OperatorTensor::forwardDims()`` */ // SECTION("Scalar / Scalar") { // // input_0 @@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims] // // input_1 // T1->resize({}); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>())); // } // SECTION("Scalar / +1-D") { @@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims] // } // T1->resize(dims); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims] // } // T0->resize(dims); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -103,7 +103,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", 
"[Div][computeOutputDims] T0->resize(dims0); T1->resize(dims0); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dims0); } @@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims] T0->resize(dims0); T1->resize(dims1); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dimsOut); // input_0 - wrong @@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims] } T1->resize(dims1_wrong); REQUIRE(dims0 != dims1_wrong); - REQUIRE_THROWS(op->computeOutputDims()); + REQUIRE_THROWS(op->forwardDims()); } } } diff --git a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp index fcd848914..d20f689ab 100644 --- a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp +++ b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp @@ -21,8 +21,8 @@ #include "aidge/utils/Types.h" namespace Aidge { -TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)", - "[GlobalAveragePooling][computeOutputDims]") { +TEST_CASE("[core/operator] GlobalAveragePooling_Op(forwardDims)", + "[GlobalAveragePooling][forwardDims]") { constexpr std::uint16_t NB_TRIALS = 10; // Create a random number generator std::random_device rd; @@ -39,7 +39,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)", // input_0 std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>(); SECTION("Un-connected input leads to failure.") { - REQUIRE_THROWS(op->computeOutputDims()); + REQUIRE_THROWS(op->forwardDims()); } op->associateInput(0, input_T); @@ -49,7 +49,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)", const std::size_t nb_dims = 0; std::vector<std::size_t> dims(nb_dims); input_T->resize(dims); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); } } SECTION("Full tensor") { @@ -61,7 +61,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)", dims[i] = dimsDist(gen); } input_T->resize(dims); - REQUIRE_THROWS(op->computeOutputDims()); + REQUIRE_THROWS(op->forwardDims()); } } SECTION("nbDim > 3") { @@ -74,7 +74,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)", std::vector<DimSize_t> dims_out{dims[0], dims[1]}; input_T->resize(dims); op->setInput(0, input_T); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE(op->getOutput(0)->dims() == dims_out); REQUIRE((op->getOutput(0)->dims().size()) == static_cast<size_t>(2)); } diff --git a/unit_tests/operator/Test_MatMul_Op.cpp b/unit_tests/operator/Test_MatMul_Op.cpp index 6c810e675..bdd1de87c 100644 --- a/unit_tests/operator/Test_MatMul_Op.cpp +++ b/unit_tests/operator/Test_MatMul_Op.cpp @@ -20,7 +20,7 @@ #include "aidge/operator/OperatorTensor.hpp" namespace Aidge { -TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutputDims]") { +TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") { // Create a random number generator std::random_device rd; std::mt19937 gen(rd()); @@ -43,13 +43,13 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu // T1->resize({}); // op -> associateInput(1,T1); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()).empty()); // // input_1 - wrong // T1->resize({dist(gen)}); - // 
REQUIRE_THROWS(op->computeOutputDims()); + // REQUIRE_THROWS(op->forwardDims()); // } SECTION("1-D / N-D") { @@ -66,26 +66,26 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu // input_1 - right T1->resize({dim0}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE((op->getOutput(0)->dims()).empty()); // input_1 - wrong T1->resize({dim0+1}); - REQUIRE_THROWS(op -> computeOutputDims()); + REQUIRE_THROWS(op -> forwardDims()); } SECTION("1-D / 2-D") { // input_1 - right const std::size_t dim1 = dist(gen); T1->resize({dim0,dim1}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim1})); // input_1 - wrong T1->resize({dim0+1,dim1}); - REQUIRE_THROWS(op -> computeOutputDims()); + REQUIRE_THROWS(op -> forwardDims()); } SECTION("1-D / +2-D") { // input_1 - right @@ -94,7 +94,7 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu const std::size_t dim3 = dist(gen); T1->resize({dim1,dim2,dim0,dim3}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim1,dim2,dim3})); } } @@ -114,26 +114,26 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu // input_1 - right T1->resize({dim1}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0})); // input_1 - wrong T1->resize({dim1+1}); - REQUIRE_THROWS(op -> computeOutputDims()); + REQUIRE_THROWS(op -> forwardDims()); } SECTION("2-D / 2-D") { // input_1 - right const std::size_t dim2 = dist(gen); T1->resize({dim1, dim2}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim2})); // input_1 - wrong T1->resize({dim1+1,dim2}); - REQUIRE_THROWS(op -> computeOutputDims()); + REQUIRE_THROWS(op -> forwardDims()); } SECTION("2-D / +2-D") { // input_1 - right @@ -142,13 +142,13 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu const std::size_t dim4 = dist(gen); T1->resize({dim3,dim4,dim1, dim2}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim3,dim4,dim0,dim2})); // input_1 - wrong T1->resize({dim3,dim4,dim1+1,dim2}); - REQUIRE_THROWS(op -> computeOutputDims()); + REQUIRE_THROWS(op -> forwardDims()); } } SECTION("+2-D / +2-D") { @@ -169,28 +169,28 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu // 1 const std::size_t dim5 = dist(gen); T1->resize({dim0,dim1,dim3,dim5}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1,dim2,dim5})); // 2 - input_1 broadcast T1->resize({1,dim1,dim3,dim5}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1,dim2,dim5})); // 3 - input_0 broadcast const std::size_t dim1_bigger = dist(gen) + 1; T1->resize({dim0,dim1_bigger,dim3,dim5}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1_bigger,dim2,dim5})); // 4 - 
input_0+input_1 broadcast T1->resize({1,dim1_bigger,dim3,dim5}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1_bigger,dim2,dim5})); // input_1 - wrong T1->resize({dim0+1,dim1,dim3,dim5}); - REQUIRE_THROWS(op -> computeOutputDims()); + REQUIRE_THROWS(op -> forwardDims()); } } } // namespace Aidge \ No newline at end of file diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp index cd42791e0..b15074d1b 100644 --- a/unit_tests/operator/Test_MetaOperator.cpp +++ b/unit_tests/operator/Test_MetaOperator.cpp @@ -41,9 +41,9 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") { myInput->resize({2,3,5,5}); std::shared_ptr<OperatorTensor> opTensor = std::static_pointer_cast<OperatorTensor>(op->getOperator()); opTensor->associateInput(0,myInput); - opTensor->computeOutputDims(); + opTensor->forwardDims(); - REQUIRE(opTensor->outputDimsForwarded()); + REQUIRE(opTensor->dimsForwarded()); REQUIRE(std::static_pointer_cast<Tensor>(opTensor->getRawOutput(0))->dims() == std::vector<size_t>({2,3,5,5})); REQUIRE(std::static_pointer_cast<Tensor>(opTensor->getRawInput(0)) == myInput); REQUIRE(microGraph->getOrderedInputs()[0].first->getOperator()->getRawInput(0) == myInput); @@ -74,9 +74,9 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") { op->associateInput(17, myInit); op->associateInput(18, myInit); - op->computeOutputDims(); + op->forwardDims(); microGraph->save("lstm_dims", true, true); - REQUIRE(op->outputDimsForwarded()); + REQUIRE(op->dimsForwarded()); //op->updateConsummerProducer(); // require implementation //auto microGraphScheduler = std::dynamic_pointer_cast<MetaOperator_Op>(op)->getMicroGraphScheduler(); diff --git a/unit_tests/operator/Test_MetaOperator.py b/unit_tests/operator/Test_MetaOperator.py new file mode 100644 index 000000000..a525c94b9 --- /dev/null +++ b/unit_tests/operator/Test_MetaOperator.py @@ -0,0 +1,42 @@ +import onnx +from onnx.backend.test.case.node.lstm import LSTMHelper +from onnx.backend.test.case.node import expect +import numpy as np + +input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[2.0, 3.0], [4.0, 5.0], [6.0, 7.0]]]).astype(np.float32) +print(input.shape) +input_size = 2 +hidden_size = 3 +weight_scale = 0.1 +number_of_gates = 4 + +node = onnx.helper.make_node( + "LSTM", inputs=["X", "W", "R"], outputs=["", "Y_h"], hidden_size=hidden_size +) + +W = weight_scale * np.ones( + (1, number_of_gates * hidden_size, input_size) +).astype(np.float32) +R = weight_scale * np.ones( + (1, number_of_gates * hidden_size, hidden_size) +).astype(np.float32) + +lstm = LSTMHelper(X=input, W=W, R=R) +_, Y_h = lstm.step() + +print(lstm.C_0 ) + +seq_length = input.shape[0] +batch_size = input.shape[1] + +print(seq_length) +print(np.split(input, input.shape[0], axis=0)) + +expect( + node, + inputs=[input, W, R], + outputs=[Y_h.astype(np.float32)], + name="test_lstm_defaults", +) + +print(Y_h) \ No newline at end of file diff --git a/unit_tests/operator/Test_Mul_Op.cpp b/unit_tests/operator/Test_Mul_Op.cpp index d3e0c5e08..f3f8fb952 100644 --- a/unit_tests/operator/Test_Mul_Op.cpp +++ b/unit_tests/operator/Test_Mul_Op.cpp @@ -20,7 +20,7 @@ #include "aidge/operator/OperatorTensor.hpp" namespace Aidge { -TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]") { +TEST_CASE("[core/operator] Mul_Op(forwardDims)", "[Mul][forwardDims]") { constexpr std::uint16_t 
NBTRIALS = 10; // Create a random number generator @@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims] /** * @todo Special case: scalar not handled yet by - * ``OperatorTensor::computeOutputDims()`` + * ``OperatorTensor::forwardDims()`` */ // SECTION("Scalar / Scalar") { // // input_0 @@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims] // // input_1 // T1->resize({}); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>())); // } // SECTION("Scalar / +1-D") { @@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims] // } // T1->resize(dims); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims] // } // T0->resize(dims); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -103,7 +103,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims] T0->resize(dims0); T1->resize(dims0); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dims0); } @@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims] T0->resize(dims0); T1->resize(dims1); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dimsOut); // input_0 - wrong @@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims] } T1->resize(dims1_wrong); REQUIRE(dims0 != dims1_wrong); - REQUIRE_THROWS(op->computeOutputDims()); + REQUIRE_THROWS(op->forwardDims()); } } } diff --git a/unit_tests/operator/Test_Pow_Op.cpp b/unit_tests/operator/Test_Pow_Op.cpp index c77615c11..4a8d242a3 100644 --- a/unit_tests/operator/Test_Pow_Op.cpp +++ b/unit_tests/operator/Test_Pow_Op.cpp @@ -20,7 +20,7 @@ #include "aidge/operator/OperatorTensor.hpp" namespace Aidge { -TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]") { +TEST_CASE("[core/operator] Pow_Op(forwardDims)", "[Pow][forwardDims]") { constexpr std::uint16_t NBTRIALS = 10; // Create a random number generator @@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims] /** * @todo Special case: scalar not handled yet by - * ``OperatorTensor::computeOutputDims()`` + * ``OperatorTensor::forwardDims()`` */ // SECTION("Scalar / Scalar") { // // input_0 @@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims] // // input_1 // T1->resize({}); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>())); // } // SECTION("Scalar / +1-D") { @@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims] // } // T1->resize(dims); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims] // } // T0->resize(dims); - // 
REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -103,7 +103,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims] T0->resize(dims0); T1->resize(dims0); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dims0); } @@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims] T0->resize(dims0); T1->resize(dims1); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dimsOut); // input_0 - wrong @@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims] } T1->resize(dims1_wrong); REQUIRE(dims0 != dims1_wrong); - REQUIRE_THROWS(op->computeOutputDims()); + REQUIRE_THROWS(op->forwardDims()); } } } diff --git a/unit_tests/operator/Test_Sub_Op.cpp b/unit_tests/operator/Test_Sub_Op.cpp index b7b744410..329f3da79 100644 --- a/unit_tests/operator/Test_Sub_Op.cpp +++ b/unit_tests/operator/Test_Sub_Op.cpp @@ -20,7 +20,7 @@ #include "aidge/operator/OperatorTensor.hpp" namespace Aidge { -TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]") { +TEST_CASE("[core/operator] Sub_Op(forwardDims)", "[Sub][forwardDims]") { constexpr std::uint16_t NBTRIALS = 10; // Create a random number generator @@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims] /** * @todo Special case: scalar not handled yet by - * ``OperatorTensor::computeOutputDims()`` + * ``OperatorTensor::forwardDims()`` */ // SECTION("Scalar / Scalar") { // // input_0 @@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims] // // input_1 // T1->resize({}); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>())); // } // SECTION("Scalar / +1-D") { @@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims] // } // T1->resize(dims); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims] // } // T0->resize(dims); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -103,7 +103,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims] T0->resize(dims0); T1->resize(dims0); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dims0); } @@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims] T0->resize(dims0); T1->resize(dims1); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dimsOut); // input_0 - wrong @@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims] } T1->resize(dims1_wrong); REQUIRE(dims0 != dims1_wrong); - REQUIRE_THROWS(op->computeOutputDims()); + REQUIRE_THROWS(op->forwardDims()); } } } diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp index e2c1a8fcb..ceaa5e301 100644 --- 
a/unit_tests/scheduler/Test_Scheduler.cpp +++ b/unit_tests/scheduler/Test_Scheduler.cpp @@ -54,7 +54,7 @@ TEST_CASE("randomScheduling", "[Scheduler][randomGen]") { if (unicity1) { for (auto &node : g1->getNodes()) { std::static_pointer_cast<GenericOperator_Op>(node->getOperator()) - ->setComputeOutputDims( + ->setForwardDims( GenericOperator_Op::InputIdentity(0, node->nbOutputs())); } @@ -97,7 +97,7 @@ TEST_CASE("randomScheduling", "[Scheduler][randomGen]") { // if (unicity1) { // for (auto &node : g1->getNodes()) { // std::static_pointer_cast<GenericOperator_Op>(node->getOperator()) - // ->setComputeOutputDims( + // ->setForwardDims( // GenericOperator_Op::InputIdentity(0, node->nbOutputs())); // } -- GitLab
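
A note on the callback that setForwardDims() / set_forward_dims() expects: per ComputeDimsFunc in GenericOperator.hpp, it receives one dims vector per input and must return one dims vector per output (the AIDGE_ASSERT in GenericOperator.cpp enforces the output count). Below is a sketch with a hypothetical concat-style callback; the operator name and axis handling are illustrative only, not from the patch:

    import aidge_core

    # Hypothetical callback: takes a list of per-input dims, returns a list
    # of per-output dims (exactly one entry here, since nbOut == 1).
    def concat_dims(inputs_dims):
        out_dims = list(inputs_dims[0])
        out_dims[0] = sum(dims[0] for dims in inputs_dims)  # join on axis 0
        return [out_dims]

    cat = aidge_core.GenericOperator("MyConcat", 2, 0, 1, name="myConcat")
    cat.get_operator().set_forward_dims(concat_dims)
    # Once producers are connected to both inputs, forward_dims() will
    # resize the output using this callback.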
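The bool parameter added to every forwardDims(bool allowDataDependency = false) override, bound above as allow_data_dependency, is presumably there for operators whose output shape depends on input values rather than on input shapes alone (Slice, Gather and Reshape take the flag in this patch even where their bodies do not yet use it). A hedged usage sketch, continuing the first example:

    # Default: propagate output shapes from input shapes only.
    op.forward_dims()
    # Assumption about intended use: permit value-dependent shape inference,
    # e.g. when indices or target shapes are read from input tensors.
    op.forward_dims(allow_data_dependency=True)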
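Finally, the dropped computeOutputDims() calls in Tensor.hpp's operator+, -, * and / are consistent with the updated OperatorTensor::forward(), which now resolves dims lazily. In Python terms (a paraphrase of the C++ above, not a new API):

    # Paraphrase of OperatorTensor::forward() in this patch: callers no
    # longer need an explicit dims pass before running the operator.
    if not op.dims_forwarded():
        op.forward_dims()
    op.forward()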