diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py index 6b83b048c15353c8e1ceb1032883cc791f4fad00..1a723a04a9f823c87338d479ae260d495db935a1 100644 --- a/aidge_core/unit_tests/test_impl.py +++ b/aidge_core/unit_tests/test_impl.py @@ -39,7 +39,7 @@ class test_OperatorImpl(unittest.TestCase): global GLOBAL_CPT matmul = aidge_core.GenericOperator("MatMul", 1, 0, 1, name="MatMul0") generic_matmul_op = matmul.get_operator() - generic_matmul_op.set_compute_output_dims(lambda x: x) + generic_matmul_op.set_forward_dims(lambda x: x) generic_matmul_op.set_impl(testImpl(generic_matmul_op)) generic_matmul_op.forward() self.assertEqual(GLOBAL_CPT, 1) diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py index c94960733b24444218b1209463adbda11b89f6e8..164aee726255e0478b629ee853d9a1f619945f3a 100644 --- a/aidge_core/unit_tests/test_operator_binding.py +++ b/aidge_core/unit_tests/test_operator_binding.py @@ -92,14 +92,14 @@ class test_operator_binding(unittest.TestCase): attrs.set_attr("d", 23.89) self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89) - def test_compute_output_dims(self): + def test_forward_dims(self): in_dims=[25, 25] input = aidge_core.Producer(in_dims, name="In") genOp = aidge_core.GenericOperator("genOp", 1, 0, 1, name="genOp") _ = aidge_core.sequential([input, genOp]) self.assertListEqual(genOp.get_operator().get_output(0).dims(), []) - genOp.get_operator().set_compute_output_dims(lambda x:x) - genOp.get_operator().compute_output_dims() + genOp.get_operator().set_forward_dims(lambda x:x) + genOp.get_operator().forward_dims() self.assertListEqual(genOp.get_operator().get_output(0).dims(), in_dims) def test_set_impl(self): diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp index e686737a44928886f97ce636df8be6c883404e56..ead6c19fa5fe1e91ec1c24cf8dfee6146390477f 100644 --- a/include/aidge/data/Tensor.hpp +++ b/include/aidge/data/Tensor.hpp @@ -251,7 +251,6 @@ class Tensor : public Data, auto add_ = Add_Op(2); add_.associateInput(0, std::make_shared<Tensor>(*this)); add_.associateInput(1, std::make_shared<Tensor>(other)); - add_.computeOutputDims(); add_.setDataType(dataType()); add_.setBackend(mImpl->backend()); add_.forward(); @@ -275,7 +274,6 @@ class Tensor : public Data, auto sub_ = Sub_Op(); sub_.associateInput(0, std::make_shared<Tensor>(*this)); sub_.associateInput(1, std::make_shared<Tensor>(other)); - sub_.computeOutputDims(); sub_.setDataType(dataType()); sub_.setBackend(mImpl->backend()); sub_.forward(); @@ -299,7 +297,6 @@ class Tensor : public Data, auto mul_ = Mul_Op(); mul_.associateInput(0, std::make_shared<Tensor>(*this)); mul_.associateInput(1, std::make_shared<Tensor>(other)); - mul_.computeOutputDims(); mul_.setDataType(dataType()); mul_.setBackend(mImpl->backend()); mul_.forward(); @@ -323,7 +320,6 @@ class Tensor : public Data, auto div_ = Div_Op(); div_.associateInput(0, std::make_shared<Tensor>(*this)); div_.associateInput(1, std::make_shared<Tensor>(other)); - div_.computeOutputDims(); div_.setDataType(dataType()); div_.setBackend(mImpl->backend()); div_.forward(); diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp index 249303620c3f2c4683956c99862861bea127f6a8..4ac14bdaecd16e90586d14699f3b6f1bd6d88cab 100644 --- a/include/aidge/operator/Add.hpp +++ b/include/aidge/operator/Add.hpp @@ -60,7 +60,7 @@ public: // } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool 
forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override; diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp index fafd3b2d0686913c22548e54c44b0e9a24c91d83..af2993d67f16df498f13a0489a3837a8f9fc4a75 100644 --- a/include/aidge/operator/AvgPooling.hpp +++ b/include/aidge/operator/AvgPooling.hpp @@ -65,28 +65,7 @@ public: } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { - // check inputs have been associated - if (!getInput(0)) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type()); - } - if (!(getInput(0)->empty())) { - std::array<DimSize_t, DIM + 2> outputDims; - const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>()); - outputDims[0] = inputDims[0]; - outputDims[1] = inputDims[1]; - - for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) { - outputDims[dim+2] = 1 + static_cast<DimSize_t>( - std::floor(static_cast<float>(inputDims[dim+2] - - this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) / - static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim]))); - } - getOutput(0)->resize(outputDims); - return true; - } - return false; - } + bool forwardDims(bool /*allowDataDependency*/ = false) override final; std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>> diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp index 7f3b60a9f871d790d4345e6197fdc17e7f1e7b62..aa53f8c43f0be2a0e094946d66fd263bc19e39f5 100644 --- a/include/aidge/operator/BatchNorm.hpp +++ b/include/aidge/operator/BatchNorm.hpp @@ -68,25 +68,7 @@ public: // } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { - // check inputs have been associated - bool associated = true; - for (IOIndex_t i = 0; i < nbInputs(); ++i) { - associated &= !(getInput(i)->empty()); - } - if (associated) { - const DimSize_t nbFeatures = getInput(0)->dims()[1]; - for (std::size_t i = nbData(); i < nbInputs(); ++i) { - if(getInput(i)->size() != nbFeatures) { - // /!\ Input size should be handled BEFORE calling this function - // This should raise an error - getInput(i)->resize({getInput(0)->dims()[1]}); - } - } - mOutputs[0]->resize(getInput(0)->dims()); - } - return associated; - } + bool forwardDims(bool /*allowDataDependency*/ = false) override final; void setBackend(const std::string &name, DeviceIdx_t device = 0) override final; diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp index 32a519dbc750361c7ad1b6686d37a0766faf696e..a9a4c9253f3af9f9cd82390256ec70d066017cc5 100644 --- a/include/aidge/operator/Concat.hpp +++ b/include/aidge/operator/Concat.hpp @@ -78,7 +78,7 @@ public: return std::make_shared<Concat_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override; diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp index f27c93422c30c15515698345628982d4ab64f59f..d03bcda4e4109def191456f744c47f11d39511ba 100644 --- a/include/aidge/operator/Conv.hpp +++ b/include/aidge/operator/Conv.hpp @@ -108,7 +108,7 @@ public: // } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { + bool forwardDims(bool /*allowDataDependency*/ = 
false) override final { // check inputs have been associated bool associated = true; for (IOIndex_t i = 0; i < 3; ++i) { @@ -149,7 +149,7 @@ public: if (firstEltDims.size() != outputDims.size()) { AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions."); } - if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) { + if ((outputDims.size() == (DIM+2)) && dimsForwarded()) { // Offset auto inputIdxDims = firstEltDims; // batch idx is the same inputIdxDims[1] = 0; // each channel is used so start with the first one diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp index 8ffe18c0499edcf12ee940374d349874e1c415ac..2337ff66f00b932a190d5b1735d53df3da8ffdbf 100644 --- a/include/aidge/operator/ConvDepthWise.hpp +++ b/include/aidge/operator/ConvDepthWise.hpp @@ -90,7 +90,7 @@ public: } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { + bool forwardDims(bool /*allowDataDependency*/ = false) override final { // check inputs have been associated // TODO : add a check of inputs dimensions ? bool associated = true; @@ -135,7 +135,7 @@ public: if (firstEltDims.size() != outputDims.size()) { AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions."); } - if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) { + if ((outputDims.size() == (DIM+2)) && dimsForwarded()) { // Offset auto inputIdxDims = firstEltDims; // batch idx is the same diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp index 043422ae20aa34f2380c1dce1b6fbc4308f99b30..566f4a6ae69b090b3a035b034406d463eeb77317 100644 --- a/include/aidge/operator/Div.hpp +++ b/include/aidge/operator/Div.hpp @@ -54,7 +54,7 @@ public: return std::make_shared<Div_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override; diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp index 6a562c59ee88aed5d03db3662aee92ed7bfc21de..b97874f4e0deafd685453b3ce9865e65fafe7561 100644 --- a/include/aidge/operator/FC.hpp +++ b/include/aidge/operator/FC.hpp @@ -71,7 +71,7 @@ public: void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final; - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override; diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp index feb2474b030281771e8608497169100950161d28..7534b66951cc9d8074d0af7742ba5165013431f5 100644 --- a/include/aidge/operator/Gather.hpp +++ b/include/aidge/operator/Gather.hpp @@ -80,7 +80,7 @@ public: return std::make_shared<Gather_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override; diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp index 49885f9fdae05a55552869a6543ef1810aa1dfae..f0b7e92d708dfef65eea0ec7649ccc8716533679 100644 --- a/include/aidge/operator/GenericOperator.hpp +++ b/include/aidge/operator/GenericOperator.hpp @@ -31,7 +31,7 @@ class 
GenericOperator_Op private: using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>; - ComputeDimsFunc mComputeOutputDims; + ComputeDimsFunc mForwardDims; public: GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut) @@ -61,18 +61,18 @@ public: } public: - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; - bool outputDimsForwarded() const override final; + bool dimsForwarded() const override final; void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); } void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); } - // Helper functions that can be used with setComputeOutputDims(): + // Helper functions that can be used with setForwardDims(): static const ComputeDimsFunc Identity; static const ComputeDimsFunc InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs); - inline void setComputeOutputDims(ComputeDimsFunc func) { - mComputeOutputDims = func; + inline void setForwardDims(ComputeDimsFunc func) { + mForwardDims = func; } }; diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp index 1552d0e0889352c6cebc9d806fb4c33cb9092442..74529a0ba9481bf6280df8d3ce496f67635a5aef 100644 --- a/include/aidge/operator/GlobalAveragePooling.hpp +++ b/include/aidge/operator/GlobalAveragePooling.hpp @@ -52,7 +52,7 @@ public: return std::make_shared<GlobalAveragePooling_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string &name, DeviceIdx_t device = 0) override final; diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp index f49711837b9b5c5126dce18a1864b2ed156af6f4..367aa4e2d68fb1095b1e3b3be76f6ab59439e47f 100644 --- a/include/aidge/operator/Identity.hpp +++ b/include/aidge/operator/Identity.hpp @@ -63,7 +63,7 @@ public: return std::make_shared<Identity_Op>(*this); } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing + bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing /** * @brief Check if output dimensions have been computed. @@ -73,7 +73,7 @@ public: * @return true Input has dimensions. * @return false Input has no dimensions or is a nullptr. */ - bool outputDimsForwarded() const override final { + bool dimsForwarded() const override final { return mInputs[0] ? !mInputs[0]->empty() : false; } diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp index 6f7ac2348ee775a4832edad499b1e47bb1a90b09..580d720e617e5b20c0acc7ce5e7f200fe5b25606 100644 --- a/include/aidge/operator/MatMul.hpp +++ b/include/aidge/operator/MatMul.hpp @@ -64,7 +64,7 @@ public: * @note - Second input is 1-D: it is promoted to a matrix by appending a 1 to its * dimensions (D) -> (D,1). The appended 1 is removed after computation. 
*/ - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp index 54eeccef79564a1d5e57fef1ac7d9b52a2499c82..8aff1582604a9e23e248e7c01521567483c793ad 100644 --- a/include/aidge/operator/MaxPooling.hpp +++ b/include/aidge/operator/MaxPooling.hpp @@ -84,7 +84,7 @@ public: } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { + bool forwardDims(bool /*allowDataDependency*/ = false) override final { if (!getInput(0)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type()); } diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp index 6f668a94238cf7ba62b3cf2776729ff9f41e5b1a..6b0ace2eb09fde069f8b9b104f92fc33811c25aa 100644 --- a/include/aidge/operator/Memorize.hpp +++ b/include/aidge/operator/Memorize.hpp @@ -82,8 +82,8 @@ public: void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; - bool computeOutputDims(bool allowDataDependency = false) override final; - bool outputDimsForwarded() const override; + bool forwardDims(bool allowDataDependency = false) override final; + bool dimsForwarded() const override; void updateConsummerProducer() override; void forward() override; diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp index 44c52d9eb32613e39844f1d29a6ee7cda6c21043..c677da0f2e34a299ddec6ee85f5a84616206193d 100644 --- a/include/aidge/operator/MetaOperator.hpp +++ b/include/aidge/operator/MetaOperator.hpp @@ -81,7 +81,7 @@ public: mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data); } - bool computeOutputDims(bool allowDataDependency = false) override final { + bool forwardDims(bool allowDataDependency = false) override final { // Check first that all required inputs are available, otherwise // mGraph->forwardDims() will fail! bool forwarded = true; diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp index 1ba0f5405d26d7a3ae9d2bcd7b6f154027820751..f53a38a82a6771e416435222137e72366f5f69f3 100644 --- a/include/aidge/operator/Mul.hpp +++ b/include/aidge/operator/Mul.hpp @@ -57,7 +57,7 @@ public: return std::make_shared<Mul_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override; diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp index c09b589399070549c81bbcb1e84d6c7585703afe..6086c5145eb39cee081468ba91473dc983cfa35f 100644 --- a/include/aidge/operator/OperatorTensor.hpp +++ b/include/aidge/operator/OperatorTensor.hpp @@ -80,8 +80,8 @@ public: * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area. 
*/ virtual std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const; - virtual bool computeOutputDims(bool allowDataDependency = false); - virtual bool outputDimsForwarded() const; + virtual bool forwardDims(bool allowDataDependency = false); + virtual bool dimsForwarded() const; /////////////////////////////////////////////////// virtual void setDataType(const DataType& dataType) const override; diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp index e13face551c49eff927dca4a564731ea1d4ad927..a4e4ebdce801971de118ca8a263999046a13777d 100644 --- a/include/aidge/operator/Pad.hpp +++ b/include/aidge/operator/Pad.hpp @@ -74,7 +74,7 @@ public: } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { + bool forwardDims(bool /*allowDataDependency*/ = false) override final { bool associated = true; for (IOIndex_t i = 0; i < nbInputs(); ++i) { if (!getInput(i)) { diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp index 372faff6a89e364e71f77df3bd4573705ab86fed..2219f30ec9db7acf55491882a78e7a1ed2931cf0 100644 --- a/include/aidge/operator/Pop.hpp +++ b/include/aidge/operator/Pop.hpp @@ -76,7 +76,7 @@ public: void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void updateConsummerProducer() override; void forward() override; diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp index b83cf15d6c05f9b202f40a3d51d9663b3222f5e0..08c4de2a254dd267eda4040b54108f93a0c2d922 100644 --- a/include/aidge/operator/Pow.hpp +++ b/include/aidge/operator/Pow.hpp @@ -53,7 +53,7 @@ public: return std::make_shared<Pow_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp index e21aa9aea936568dfb5d5ddd40779dc0acc06160..7e9072857dae8fa3137065e5c47cc11d88d37efe 100644 --- a/include/aidge/operator/Producer.hpp +++ b/include/aidge/operator/Producer.hpp @@ -86,9 +86,9 @@ public: AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input."); } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { return true; } + bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } - inline bool outputDimsForwarded() const noexcept override final { return true; } + inline bool dimsForwarded() const noexcept override final { return true; } inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); } diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp index 25fba5e79f3b58d3d4a34dcc5ad3f0a6e8424d74..ff8d8b0696aafdab48cd37d049fa0473078d7ea6 100644 --- a/include/aidge/operator/ReduceMean.hpp +++ b/include/aidge/operator/ReduceMean.hpp @@ -69,7 +69,7 @@ class ReduceMean_Op : public OperatorTensor, return std::make_shared<ReduceMean_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const 
std::string &name, DeviceIdx_t device = 0) override final; diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp index bf0f7ee3492cf4e52b903401af57c701d24f9190..49ddfc4d76a0602c58c0c768b04ed4b4202f028d 100644 --- a/include/aidge/operator/Reshape.hpp +++ b/include/aidge/operator/Reshape.hpp @@ -75,7 +75,7 @@ public: return std::make_shared<Reshape_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp index 69278c59b306e95b014043f009dc57ce46e3e41e..7db5867fe1ac512898356c21ff5cce685364f0f5 100644 --- a/include/aidge/operator/Slice.hpp +++ b/include/aidge/operator/Slice.hpp @@ -69,7 +69,7 @@ public: */ std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string &name, DeviceIdx_t device = 0) override { SET_IMPL_MACRO(Slice_Op, *this, name); diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp index 6969a6d837e7288fcd20545837cd362c8d0f1027..e5d8442851c35e9232fdd77d862fb48b71c76f1f 100644 --- a/include/aidge/operator/Sub.hpp +++ b/include/aidge/operator/Sub.hpp @@ -57,7 +57,7 @@ public: return std::make_shared<Sub_Op>(*this); } - bool computeOutputDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = false) override final; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp index 2bb18b0198cb69134636756169f7c53df778e94e..db432f2daa0232a52a012e80032beaca3ee9e5a1 100644 --- a/include/aidge/operator/Transpose.hpp +++ b/include/aidge/operator/Transpose.hpp @@ -71,7 +71,7 @@ class Transpose_Op : public OperatorTensor, return std::make_shared<Transpose_Op<DIM>>(*this); } - bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { + bool forwardDims(bool /*allowDataDependency*/ = false) override final { if (!getInput(0)->empty()) { auto attr = (this)->getStaticAttributes(); const std::array<DimSize_t, DIM>& outDimsOrder = static_cast<const std::array<DimSize_t, DIM>&>(std::get<0>(attr)); diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp index 31ee946fc99df40133ff04965c762f9ddae0d131..897cd359a4b368dc599f37136ade3508b5ec5a76 100644 --- a/python_binding/operator/pybind_GenericOperator.cpp +++ b/python_binding/operator/pybind_GenericOperator.cpp @@ -25,7 +25,7 @@ void init_GenericOperator(py::module& m) { py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, DynamicAttributes, OperatorTensor>(m, "GenericOperatorOp", py::multiple_inheritance()) .def_readonly_static("identity", &GenericOperator_Op::Identity) - .def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function")); + .def("set_forward_dims", &GenericOperator_Op::setForwardDims, py::arg("computation_function")); // &GenericOperator m.def("GenericOperator", diff --git a/python_binding/operator/pybind_OperatorTensor.cpp b/python_binding/operator/pybind_OperatorTensor.cpp index 
abf3f7e8f1a917d99649600a615357be7b7bb272..4d4541ab36468bc6b531e0242888dd70c5afc71f 100644 --- a/python_binding/operator/pybind_OperatorTensor.cpp +++ b/python_binding/operator/pybind_OperatorTensor.cpp @@ -30,8 +30,8 @@ void init_OperatorTensor(py::module& m){ .def("set_output", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setOutput, py::arg("outputIdx"), py::arg("data")) .def("set_input", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setInput, py::arg("outputIdx"), py::arg("data")) - .def("compute_output_dims", &OperatorTensor::computeOutputDims, py::arg("allow_data_dependency") = false) - .def("output_dims_forwarded", &OperatorTensor::outputDimsForwarded) + .def("forward_dims", &OperatorTensor::forwardDims, py::arg("allow_data_dependency") = false) + .def("dims_forwarded", &OperatorTensor::dimsForwarded) ; } } diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp index 88c7383a91c4927e984117f0dcc96a6b9c6d0bc9..2ed5a02a470e376da324e678e9875ac8bbe7b71f 100644 --- a/src/graph/GraphView.cpp +++ b/src/graph/GraphView.cpp @@ -431,8 +431,8 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_ const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator()); // Recompute everytime, even if it was already computed in a // previous call of forwardDims(), as the graph may have changed! - op->computeOutputDims(allowDataDependency); - if (!op->outputDimsForwarded()) { + op->forwardDims(allowDataDependency); + if (!op->dimsForwarded()) { nextList.insert(nodePtr); } } diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp index 9f9ad681cf929435113541eaa18cfef403868d6c..8fbb4cdf77db1c14be05845bb33bb0fecf0fb049 100644 --- a/src/operator/Add.cpp +++ b/src/operator/Add.cpp @@ -32,7 +32,7 @@ Aidge::Add_Op::Add_Op(const Add_Op& op) } } -bool Aidge::Add_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated bool associated = (nbInputs() > 0); // do not compute anything if no input for (IOIndex_t i = 0; i < nbInputs(); ++i) { diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp index acb097668bce0ff6f335f577faed503e086db79f..825fa56498486802d9a52eaf68d60922380f6220 100644 --- a/src/operator/AvgPooling.cpp +++ b/src/operator/AvgPooling.cpp @@ -36,7 +36,7 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op): Operator } template <Aidge::DimIdx_t DIM> -void Aidge::AvgPooling_Op<DIM>::computeOutputDims() { +bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated if (!getInput(0)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type()); @@ -69,7 +69,7 @@ Aidge::AvgPooling_Op<DIM>::computeReceptiveField(const std::vector<Aidge::DimSiz if (firstEltDims.size() != outputDims.size()) { AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions."); } - if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) { + if ((outputDims.size() == (DIM+2)) && dimsForwarded()) { // Offset std::vector<DimSize_t> inputIdxDims = firstEltDims; diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp index b14f0238809b9ec9b6b186d093ecf3b1554865cb..488a77b8ff20d56dbe2b14e24c9c28bf09ba1e0e 100644 --- a/src/operator/BatchNorm.cpp +++ b/src/operator/BatchNorm.cpp @@ -36,7 +36,7 
@@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op): OperatorTen } template <Aidge::DimIdx_t DIM> -void Aidge::BatchNorm_Op<DIM>::computeOutputDims() { +bool Aidge::BatchNorm_Op<DIM>::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated bool associated = true; for (IOIndex_t i = 0; i < nbInputs(); ++i) { diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp index 929000a5f4ceeb4c073b8edd919ac976fc651ae2..68f37bc54261d711bd821890a98dc269d45e24ae 100644 --- a/src/operator/Concat.cpp +++ b/src/operator/Concat.cpp @@ -59,7 +59,7 @@ void Aidge::Concat_OpImpl::forward() { const std::string Aidge::Concat_Op::Type = "Concat"; -bool Aidge::Concat_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) { // Every input is non-empty with the same number of dimensions bool associated = (getInput(0) != nullptr); associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp index 0c43d7a3a8a0cc969bd42ab02775727e00e0721a..f22a93f803417c028ba3860b154fb4b5182ba1d0 100644 --- a/src/operator/Div.cpp +++ b/src/operator/Div.cpp @@ -22,7 +22,7 @@ const std::string Aidge::Div_Op::Type = "Div"; -bool Aidge::Div_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Div_Op::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated if (!getInput(0) || !getInput(1)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected"); diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp index acb1896ffe58557828d37484a56b8a21c37150dc..ba7e29e7b6543a570ceede6158bd306286037c10 100644 --- a/src/operator/FC.cpp +++ b/src/operator/FC.cpp @@ -36,7 +36,7 @@ void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::sh mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()}); } -bool Aidge::FC_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) { bool associated = true; for (IOIndex_t i = 0; i < nbInputs(); ++i) { if (!getInput(i)) { diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp index 3b53aa5a279d415df7bf686de41d0408e3fb7e0c..7b0945271660be8f309024f46c258e6a7e2193e5 100644 --- a/src/operator/Gather.cpp +++ b/src/operator/Gather.cpp @@ -52,7 +52,7 @@ void Aidge::Gather_OpImpl::forward() { const std::string Aidge::Gather_Op::Type = "Gather"; -bool Aidge::Gather_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Gather_Op::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated if (!getInput(0)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected"); diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp index 0472a67cb6110800c5390658248875398d171506..fdf3036fe7eeccb2dfd9e21faf834e27854e45f3 100644 --- a/src/operator/GenericOperator.cpp +++ b/src/operator/GenericOperator.cpp @@ -25,8 +25,8 @@ const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Inpu return [nbOutputs, inputIdx](const std::vector<std::vector<std::size_t>>& inputsDims) { return std::vector<std::vector<std::size_t>>(nbOutputs, inputsDims[inputIdx]); }; } -bool Aidge::GenericOperator_Op::computeOutputDims(bool /*allowDataDependency*/) { - if (mComputeOutputDims) { +bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) { + if (mForwardDims) { 
std::vector<std::vector<std::size_t>> inputsDims(nbInputs(), std::vector<std::size_t>()); for (std::size_t i = 0; i < nbInputs(); ++i) { if (getInput(i)) { @@ -34,7 +34,7 @@ bool Aidge::GenericOperator_Op::computeOutputDims(bool /*allowDataDependency*/) } } - const auto& outputsDims = mComputeOutputDims(inputsDims); + const auto& outputsDims = mForwardDims(inputsDims); AIDGE_ASSERT((outputsDims.size() == nbOutputs()), "The provided ComputeDimsFunc function returns the wrong number of outputs"); for (std::size_t i = 0; i < nbOutputs(); ++i) { mOutputs[i]->resize(outputsDims[i]); @@ -47,8 +47,8 @@ bool Aidge::GenericOperator_Op::computeOutputDims(bool /*allowDataDependency*/) } } -bool Aidge::GenericOperator_Op::outputDimsForwarded() const { - if (mComputeOutputDims) { +bool Aidge::GenericOperator_Op::dimsForwarded() const { + if (mForwardDims) { return !(mOutputs[0]->empty()); } else { diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp index a851faee81367648b1cc1956ee03dd9d7b4f859f..b09426f8f835eda5600b630488ef18c5b08ba32a 100644 --- a/src/operator/GlobalAveragePooling.cpp +++ b/src/operator/GlobalAveragePooling.cpp @@ -21,7 +21,7 @@ const std::string Aidge::GlobalAveragePooling_Op::Type = "GlobalAveragePooling"; -bool Aidge::GlobalAveragePooling_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) { // error checking if (!getInput(0)) { AIDGE_THROW_OR_ABORT(std::runtime_error, diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp index 223aeb93ca565a3bf38518a6cd87fd0a32db26e0..8f7548155cde4c7187f7a7fe96a44c4accd2c302 100644 --- a/src/operator/MatMul.cpp +++ b/src/operator/MatMul.cpp @@ -20,7 +20,7 @@ const std::string Aidge::MatMul_Op::Type = "MatMul"; -bool Aidge::MatMul_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) { if (!getInput(0) || !getInput(1)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input. 
Cannot compute output dimensions for MatMul Operator."); } diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp index 4e802816a13fcfb0fbfa266ca79baac3e6423a3b..e08b5f1054f07a9dcc1722d219ebce022f994d61 100644 --- a/src/operator/Memorize.cpp +++ b/src/operator/Memorize.cpp @@ -87,7 +87,7 @@ void Aidge::Memorize_Op::updateConsummerProducer() { this->template getAttr<MemorizeAttr::ForwardStep>() = 0; } -bool Aidge::Memorize_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Memorize_Op::forwardDims(bool /*allowDataDependency*/) { for (size_t i = 0; i < 2; ++i) { if (!getInput(i)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i); @@ -110,7 +110,7 @@ bool Aidge::Memorize_Op::computeOutputDims(bool /*allowDataDependency*/) { return false; } -bool Aidge::Memorize_Op::outputDimsForwarded() const { +bool Aidge::Memorize_Op::dimsForwarded() const { // Only check the output dims bool forwarded = true; // check outputs have been filled diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp index 253c1ba2f2dbb7913352d388423e71013b6c0661..d4bfdc66b8c1071ba7d291e9d633e954516b18d0 100644 --- a/src/operator/Mul.cpp +++ b/src/operator/Mul.cpp @@ -23,7 +23,7 @@ const std::string Aidge::Mul_Op::Type = "Mul"; -bool Aidge::Mul_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated if (!getInput(0) || !getInput(1)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected"); diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp index 8390ee406f766c8c2ea59de6bf5161c6e4f893bf..2a60f580f3279170a0f1ff417cea96ae7cfa981f 100644 --- a/src/operator/OperatorTensor.cpp +++ b/src/operator/OperatorTensor.cpp @@ -119,7 +119,7 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_ if (nbInputs() != nbData()) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. 
Must be handled in an overrided function."); } - if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) { + if (!dimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); } for (DimIdx_t i = 0; i < outputDims.size(); ++i) { @@ -131,7 +131,7 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_ return std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>(nbData(),std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>(firstEltDims, outputDims)); } -bool Aidge::OperatorTensor::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated bool associated = (nbInputs() > 0); // do not compute anything if no input for (IOIndex_t i = 0; i < nbInputs(); ++i) { @@ -155,7 +155,7 @@ bool Aidge::OperatorTensor::computeOutputDims(bool /*allowDataDependency*/) { return associated; } -bool Aidge::OperatorTensor::outputDimsForwarded() const { +bool Aidge::OperatorTensor::dimsForwarded() const { bool forwarded = true; // check both inputs and outputs have been filled for (IOIndex_t i = 0; i < nbInputs(); ++i) { @@ -181,8 +181,8 @@ void Aidge::OperatorTensor::setDataType(const DataType& dataType) const { } void Aidge::OperatorTensor::forward() { - if (!outputDimsForwarded()) { - computeOutputDims(); + if (!dimsForwarded()) { + forwardDims(); } Operator::forward(); diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp index 6f09d402af8c6416f3f0b444cd48328b4f5a2031..18325d80a94f35878ededca839ec809000527c39 100644 --- a/src/operator/Pop.cpp +++ b/src/operator/Pop.cpp @@ -37,7 +37,7 @@ void Aidge::Pop_OpImpl::forward() { const std::string Aidge::Pop_Op::Type = "Pop"; -bool Aidge::Pop_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated if (!getInput(0)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type()); diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp index 32194498b9316c8be08a04d30df5457f5f47427a..6bdfb48d19dfc91ba449f87eaa3da2e03f7c30e6 100644 --- a/src/operator/Pow.cpp +++ b/src/operator/Pow.cpp @@ -22,7 +22,7 @@ const std::string Aidge::Pow_Op::Type = "Pow"; -bool Aidge::Pow_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated if (!getInput(0) || !getInput(1)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected"); diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp index f00ea98a91e31e6c04d8854e6317fa1509431abf..28e39b6d3387a0371c0505dc0a7b350e83a2bbaf 100644 --- a/src/operator/ReduceMean.cpp +++ b/src/operator/ReduceMean.cpp @@ -26,7 +26,7 @@ const std::string Aidge::ReduceMean_Op::Type = "ReduceMean"; -bool Aidge::ReduceMean_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) { if (!getInput(0)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor"); } diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp index 8431971dadf896add25eb04d1e66e25f0ad3e953..ab53c094dac09879c1bec86509463aab2280ca92 100644 --- a/src/operator/Reshape.cpp +++ b/src/operator/Reshape.cpp @@ -30,7 
+30,7 @@ void Aidge::Reshape_OpImpl::forward() { const std::string Aidge::Reshape_Op::Type = "Reshape"; -bool Aidge::Reshape_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Reshape_Op::forwardDims(bool /*allowDataDependency*/) { // check input has been associated if (!getInput(0)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected"); diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp index 161f1d33635a5504aaa5897ea0f9a66aabc8ec2c..38316718cf5078b4deb44961594cc91956980e62 100644 --- a/src/operator/Slice.cpp +++ b/src/operator/Slice.cpp @@ -24,7 +24,7 @@ const std::string Aidge::Slice_Op::Type = "Slice"; -bool Aidge::Slice_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Slice_Op::forwardDims(bool /*allowDataDependency*/) { // check input have been associated if (!getInput(0) || (getInput(0)->empty())) { AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type()); diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp index 82b99b876959f00ec9443715265f047ca1e08f30..b3d8351de3114ffae0e7cf54dd61559acdda56a6 100644 --- a/src/operator/Sub.cpp +++ b/src/operator/Sub.cpp @@ -24,7 +24,7 @@ const std::string Aidge::Sub_Op::Type = "Sub"; -bool Aidge::Sub_Op::computeOutputDims(bool /*allowDataDependency*/) { +bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) { // check inputs have been associated if (!getInput(0) || !getInput(1)) { AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected"); diff --git a/src/recipes/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp index 8e27fea58014b4ec16729f3593dd656026e16826..7959e1b70acab617b9c6f92160c6d501712f5945 100644 --- a/src/recipes/HorizontalTiling.cpp +++ b/src/recipes/HorizontalTiling.cpp @@ -41,7 +41,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std: if (op->nbOutputs() != 1 || op->nbData() > 1) { AIDGE_INTERNAL_ASSERT("Only slice Operators with one output and at most one input for now."); } - if (!op->outputDimsForwarded()) { + if (!op->dimsForwarded()) { AIDGE_INTERNAL_ASSERT("Dimensions must be forwarded before any tiling"); } // start by doing a tiling with strict dimensions division diff --git a/unit_tests/operator/Test_Div_Op.cpp b/unit_tests/operator/Test_Div_Op.cpp index e659742c0bd200fa33b598f581cfef7b2f1e432e..d11f72474b0b70bf335dfee95d13a9b41cfe6efb 100644 --- a/unit_tests/operator/Test_Div_Op.cpp +++ b/unit_tests/operator/Test_Div_Op.cpp @@ -20,7 +20,7 @@ #include "aidge/operator/OperatorTensor.hpp" namespace Aidge { -TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]") { +TEST_CASE("[core/operator] Div_Op(forwardDims)", "[Div][forwardDims]") { constexpr std::uint16_t NBTRIALS = 10; // Create a random number generator @@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims] /** * @todo Special case: scalar not handled yet by - * ``OperatorTensor::computeOutputDims()`` + * ``OperatorTensor::forwardDims()`` */ // SECTION("Scalar / Scalar") { // // input_0 @@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims] // // input_1 // T1->resize({}); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>())); // } // SECTION("Scalar / +1-D") { @@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims] // } // T1->resize(dims); 
- // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims] // } // T0->resize(dims); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -103,7 +103,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims] T0->resize(dims0); T1->resize(dims0); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dims0); } @@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims] T0->resize(dims0); T1->resize(dims1); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dimsOut); // input_0 - wrong @@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims] } T1->resize(dims1_wrong); REQUIRE(dims0 != dims1_wrong); - REQUIRE_THROWS(op->computeOutputDims()); + REQUIRE_THROWS(op->forwardDims()); } } } diff --git a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp index fcd8489144be121633f2b0a9601dee171e2bdb5e..d20f689aba55d8cbaef553388d4666fd6c1d7172 100644 --- a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp +++ b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp @@ -21,8 +21,8 @@ #include "aidge/utils/Types.h" namespace Aidge { -TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)", - "[GlobalAveragePooling][computeOutputDims]") { +TEST_CASE("[core/operator] GlobalAveragePooling_Op(forwardDims)", + "[GlobalAveragePooling][forwardDims]") { constexpr std::uint16_t NB_TRIALS = 10; // Create a random number generator std::random_device rd; @@ -39,7 +39,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)", // input_0 std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>(); SECTION("Un-connected input leads to failure.") { - REQUIRE_THROWS(op->computeOutputDims()); + REQUIRE_THROWS(op->forwardDims()); } op->associateInput(0, input_T); @@ -49,7 +49,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)", const std::size_t nb_dims = 0; std::vector<std::size_t> dims(nb_dims); input_T->resize(dims); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); } } SECTION("Full tensor") { @@ -61,7 +61,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)", dims[i] = dimsDist(gen); } input_T->resize(dims); - REQUIRE_THROWS(op->computeOutputDims()); + REQUIRE_THROWS(op->forwardDims()); } } SECTION("nbDim > 3") { @@ -74,7 +74,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)", std::vector<DimSize_t> dims_out{dims[0], dims[1]}; input_T->resize(dims); op->setInput(0, input_T); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE(op->getOutput(0)->dims() == dims_out); REQUIRE((op->getOutput(0)->dims().size()) == static_cast<size_t>(2)); } diff --git a/unit_tests/operator/Test_MatMul_Op.cpp b/unit_tests/operator/Test_MatMul_Op.cpp index 6c810e675ad46cc5580bd24e57f7e7dbb84db38f..bdd1de87c27351e943c59fa616c40dc4a0001abc 100644 --- a/unit_tests/operator/Test_MatMul_Op.cpp +++ b/unit_tests/operator/Test_MatMul_Op.cpp @@ -20,7 +20,7 @@ #include "aidge/operator/OperatorTensor.hpp" 
namespace Aidge { -TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutputDims]") { +TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") { // Create a random number generator std::random_device rd; std::mt19937 gen(rd()); @@ -43,13 +43,13 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu // T1->resize({}); // op -> associateInput(1,T1); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()).empty()); // // input_1 - wrong // T1->resize({dist(gen)}); - // REQUIRE_THROWS(op->computeOutputDims()); + // REQUIRE_THROWS(op->forwardDims()); // } SECTION("1-D / N-D") { @@ -66,26 +66,26 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu // input_1 - right T1->resize({dim0}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE((op->getOutput(0)->dims()).empty()); // input_1 - wrong T1->resize({dim0+1}); - REQUIRE_THROWS(op -> computeOutputDims()); + REQUIRE_THROWS(op -> forwardDims()); } SECTION("1-D / 2-D") { // input_1 - right const std::size_t dim1 = dist(gen); T1->resize({dim0,dim1}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim1})); // input_1 - wrong T1->resize({dim0+1,dim1}); - REQUIRE_THROWS(op -> computeOutputDims()); + REQUIRE_THROWS(op -> forwardDims()); } SECTION("1-D / +2-D") { // input_1 - right @@ -94,7 +94,7 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu const std::size_t dim3 = dist(gen); T1->resize({dim1,dim2,dim0,dim3}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim1,dim2,dim3})); } } @@ -114,26 +114,26 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu // input_1 - right T1->resize({dim1}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0})); // input_1 - wrong T1->resize({dim1+1}); - REQUIRE_THROWS(op -> computeOutputDims()); + REQUIRE_THROWS(op -> forwardDims()); } SECTION("2-D / 2-D") { // input_1 - right const std::size_t dim2 = dist(gen); T1->resize({dim1, dim2}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim2})); // input_1 - wrong T1->resize({dim1+1,dim2}); - REQUIRE_THROWS(op -> computeOutputDims()); + REQUIRE_THROWS(op -> forwardDims()); } SECTION("2-D / +2-D") { // input_1 - right @@ -142,13 +142,13 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu const std::size_t dim4 = dist(gen); T1->resize({dim3,dim4,dim1, dim2}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim3,dim4,dim0,dim2})); // input_1 - wrong T1->resize({dim3,dim4,dim1+1,dim2}); - REQUIRE_THROWS(op -> computeOutputDims()); + REQUIRE_THROWS(op -> forwardDims()); } } SECTION("+2-D / +2-D") { @@ -169,28 +169,28 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu // 1 const std::size_t dim5 = dist(gen); T1->resize({dim0,dim1,dim3,dim5}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); 
REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1,dim2,dim5})); // 2 - input_1 broadcast T1->resize({1,dim1,dim3,dim5}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1,dim2,dim5})); // 3 - input_0 broadcast const std::size_t dim1_bigger = dist(gen) + 1; T1->resize({dim0,dim1_bigger,dim3,dim5}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1_bigger,dim2,dim5})); // 4 - input_0+input_1 broadcast T1->resize({1,dim1_bigger,dim3,dim5}); - REQUIRE_NOTHROW(op -> computeOutputDims()); + REQUIRE_NOTHROW(op -> forwardDims()); REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1_bigger,dim2,dim5})); // input_1 - wrong T1->resize({dim0+1,dim1,dim3,dim5}); - REQUIRE_THROWS(op -> computeOutputDims()); + REQUIRE_THROWS(op -> forwardDims()); } } } // namespace Aidge \ No newline at end of file diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp index cd42791e0db1d95469bdd414cab94f1c6e8fea17..b15074d1b5eb4a2fba1a19985a9c8c4e681af3e4 100644 --- a/unit_tests/operator/Test_MetaOperator.cpp +++ b/unit_tests/operator/Test_MetaOperator.cpp @@ -41,9 +41,9 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") { myInput->resize({2,3,5,5}); std::shared_ptr<OperatorTensor> opTensor = std::static_pointer_cast<OperatorTensor>(op->getOperator()); opTensor->associateInput(0,myInput); - opTensor->computeOutputDims(); + opTensor->forwardDims(); - REQUIRE(opTensor->outputDimsForwarded()); + REQUIRE(opTensor->dimsForwarded()); REQUIRE(std::static_pointer_cast<Tensor>(opTensor->getRawOutput(0))->dims() == std::vector<size_t>({2,3,5,5})); REQUIRE(std::static_pointer_cast<Tensor>(opTensor->getRawInput(0)) == myInput); REQUIRE(microGraph->getOrderedInputs()[0].first->getOperator()->getRawInput(0) == myInput); @@ -74,9 +74,9 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") { op->associateInput(17, myInit); op->associateInput(18, myInit); - op->computeOutputDims(); + op->forwardDims(); microGraph->save("lstm_dims", true, true); - REQUIRE(op->outputDimsForwarded()); + REQUIRE(op->dimsForwarded()); //op->updateConsummerProducer(); // require implementation //auto microGraphScheduler = std::dynamic_pointer_cast<MetaOperator_Op>(op)->getMicroGraphScheduler(); diff --git a/unit_tests/operator/Test_MetaOperator.py b/unit_tests/operator/Test_MetaOperator.py new file mode 100644 index 0000000000000000000000000000000000000000..a525c94b95f5646831d6288feb634b563d37b30e --- /dev/null +++ b/unit_tests/operator/Test_MetaOperator.py @@ -0,0 +1,42 @@ +import onnx +from onnx.backend.test.case.node.lstm import LSTM_Helper +from onnx.backend.test.case.node import expect +import numpy as np + +input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[2.0, 3.0], [4.0, 5.0], [6.0, 7.0]]]).astype(np.float32) +print(input.shape) +input_size = 2 +hidden_size = 3 +weight_scale = 0.1 +number_of_gates = 4 + +node = onnx.helper.make_node( + "LSTM", inputs=["X", "W", "R"], outputs=["", "Y_h"], hidden_size=hidden_size +) + +W = weight_scale * np.ones( + (1, number_of_gates * hidden_size, input_size) +).astype(np.float32) +R = weight_scale * np.ones( + (1, number_of_gates * hidden_size, hidden_size) +).astype(np.float32) + +lstm = LSTM_Helper(X=input, W=W, R=R) +_, Y_h = lstm.step() + +print(lstm.C_0) + +seq_length 
= input.shape[0] +batch_size = input.shape[1] + +print(seq_length) +print(np.split(input, input.shape[0], axis=0)) + +expect( + node, + inputs=[input, W, R], + outputs=[Y_h.astype(np.float32)], + name="test_lstm_defaults", +) + +print(Y_h) \ No newline at end of file diff --git a/unit_tests/operator/Test_Mul_Op.cpp b/unit_tests/operator/Test_Mul_Op.cpp index d3e0c5e086fac9d31db817d628214e95d4e41a32..f3f8fb9522943d0a9574cb80cfc228135a973890 100644 --- a/unit_tests/operator/Test_Mul_Op.cpp +++ b/unit_tests/operator/Test_Mul_Op.cpp @@ -20,7 +20,7 @@ #include "aidge/operator/OperatorTensor.hpp" namespace Aidge { -TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]") { +TEST_CASE("[core/operator] Mul_Op(forwardDims)", "[Mul][forwardDims]") { constexpr std::uint16_t NBTRIALS = 10; // Create a random number generator @@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims] /** * @todo Special case: scalar not handled yet by - * ``OperatorTensor::computeOutputDims()`` + * ``OperatorTensor::forwardDims()`` */ // SECTION("Scalar / Scalar") { // // input_0 @@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims] // // input_1 // T1->resize({}); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>())); // } // SECTION("Scalar / +1-D") { @@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims] // } // T1->resize(dims); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims] // } // T0->resize(dims); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -103,7 +103,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims] T0->resize(dims0); T1->resize(dims0); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dims0); } @@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims] T0->resize(dims0); T1->resize(dims1); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dimsOut); // input_0 - wrong @@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims] } T1->resize(dims1_wrong); REQUIRE(dims0 != dims1_wrong); - REQUIRE_THROWS(op->computeOutputDims()); + REQUIRE_THROWS(op->forwardDims()); } } } diff --git a/unit_tests/operator/Test_Pow_Op.cpp b/unit_tests/operator/Test_Pow_Op.cpp index c77615c11e99c174707df21560044fdd3b6a3c42..4a8d242a355cda58c7b36914efdb1304220f713a 100644 --- a/unit_tests/operator/Test_Pow_Op.cpp +++ b/unit_tests/operator/Test_Pow_Op.cpp @@ -20,7 +20,7 @@ #include "aidge/operator/OperatorTensor.hpp" namespace Aidge { -TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]") { +TEST_CASE("[core/operator] Pow_Op(forwardDims)", "[Pow][forwardDims]") { constexpr std::uint16_t NBTRIALS = 10; // Create a random number generator @@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims] /** * @todo Special case: scalar not handled yet by - * 
``OperatorTensor::computeOutputDims()`` + * ``OperatorTensor::forwardDims()`` */ // SECTION("Scalar / Scalar") { // // input_0 @@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims] // // input_1 // T1->resize({}); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>())); // } // SECTION("Scalar / +1-D") { @@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims] // } // T1->resize(dims); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims] // } // T0->resize(dims); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -103,7 +103,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims] T0->resize(dims0); T1->resize(dims0); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dims0); } @@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims] T0->resize(dims0); T1->resize(dims1); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dimsOut); // input_0 - wrong @@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims] } T1->resize(dims1_wrong); REQUIRE(dims0 != dims1_wrong); - REQUIRE_THROWS(op->computeOutputDims()); + REQUIRE_THROWS(op->forwardDims()); } } } diff --git a/unit_tests/operator/Test_Sub_Op.cpp b/unit_tests/operator/Test_Sub_Op.cpp index b7b744410d31ea32dea5a15cc7a29da093488d14..329f3da798854ddff3d1c1393d60c57ef180c70a 100644 --- a/unit_tests/operator/Test_Sub_Op.cpp +++ b/unit_tests/operator/Test_Sub_Op.cpp @@ -20,7 +20,7 @@ #include "aidge/operator/OperatorTensor.hpp" namespace Aidge { -TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]") { +TEST_CASE("[core/operator] Sub_Op(forwardDims)", "[Sub][forwardDims]") { constexpr std::uint16_t NBTRIALS = 10; // Create a random number generator @@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims] /** * @todo Special case: scalar not handled yet by - * ``OperatorTensor::computeOutputDims()`` + * ``OperatorTensor::forwardDims()`` */ // SECTION("Scalar / Scalar") { // // input_0 @@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims] // // input_1 // T1->resize({}); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>())); // } // SECTION("Scalar / +1-D") { @@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims] // } // T1->resize(dims); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims] // } // T0->resize(dims); - // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE_NOTHROW(op->forwardDims()); // REQUIRE((op->getOutput(0)->dims()) == dims); // } // } @@ -103,7 +103,7 @@ 
TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims] T0->resize(dims0); T1->resize(dims0); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dims0); } @@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims] T0->resize(dims0); T1->resize(dims1); - REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE_NOTHROW(op->forwardDims()); REQUIRE((op->getOutput(0)->dims()) == dimsOut); // input_0 - wrong @@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims] } T1->resize(dims1_wrong); REQUIRE(dims0 != dims1_wrong); - REQUIRE_THROWS(op->computeOutputDims()); + REQUIRE_THROWS(op->forwardDims()); } } } diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp index e2c1a8fcb96256fa8c3f26a3495913bd987de2d4..ceaa5e301c820ef54970a0e76004ad3467ae66da 100644 --- a/unit_tests/scheduler/Test_Scheduler.cpp +++ b/unit_tests/scheduler/Test_Scheduler.cpp @@ -54,7 +54,7 @@ TEST_CASE("randomScheduling", "[Scheduler][randomGen]") { if (unicity1) { for (auto &node : g1->getNodes()) { std::static_pointer_cast<GenericOperator_Op>(node->getOperator()) - ->setComputeOutputDims( + ->setForwardDims( GenericOperator_Op::InputIdentity(0, node->nbOutputs())); } @@ -97,7 +97,7 @@ TEST_CASE("randomScheduling", "[Scheduler][randomGen]") { // if (unicity1) { // for (auto &node : g1->getNodes()) { // std::static_pointer_cast<GenericOperator_Op>(node->getOperator()) - // ->setComputeOutputDims( + // ->setForwardDims( // GenericOperator_Op::InputIdentity(0, node->nbOutputs())); // }