diff --git a/src/recipes/ExplicitCastMove.cpp b/src/recipes/ExplicitCastMove.cpp
index e6427b8472f2ae38d97adf5dc0908c0739bdccf5..7d836c3acc835c5ed3fe014db6787029dc318afd 100644
--- a/src/recipes/ExplicitCastMove.cpp
+++ b/src/recipes/ExplicitCastMove.cpp
@@ -20,6 +20,7 @@ void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {
     for (auto node : nodes) {
         // TODO: currently, Operator data type is only reflected in its output tensor data type.
         // But an Operator might have multiple outputs of different data type(?)
+        AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
         const auto& output = std::static_pointer_cast<OperatorTensor>(node->getOperator())->getOutput(0);
         if (output->getImpl() == nullptr) {
             continue;
@@ -32,6 +33,7 @@ void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {
            const auto parent = node->inputs()[0];
            // Check parent is not nullptr, as this Operator may be an entry point of the graph without parent
            if (parent.first != nullptr) {
+               AIDGE_ASSERT(parent.first->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
                const auto& input = std::static_pointer_cast<OperatorTensor>(parent.first->getOperator())->getOutput(parent.second);
                if ((node->type() == Cast_Op::Type && input->dataType() == output->dataType())
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 40a1b5952220c57140d15adc6ee42a83a43eb6ef..ac1fc8d7922827217d31385395666db53c401306 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -53,6 +53,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
     DimSize_t convNbOutChannels;
     DimSize_t channelsSize;
     std::array<DimSize_t, 2> kernelDims;
+    AIDGE_ASSERT(convNode->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
     std::shared_ptr<OperatorTensor> convOp = std::static_pointer_cast<OperatorTensor>(convNode->getOperator());
     if (convNode->type() == Conv_Op<2>::Type) {
         const std::shared_ptr<Conv_Op<2>> convOpPtr =
diff --git a/src/recipes/FuseMulAdd.cpp b/src/recipes/FuseMulAdd.cpp
index 65837d02301f17db95d414c26148b3420f2b191a..a09c27c2be574adbccde8a1392a90a4c50a727a1 100644
--- a/src/recipes/FuseMulAdd.cpp
+++ b/src/recipes/FuseMulAdd.cpp
@@ -44,7 +44,8 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
     // TODO: find another way to get OutChannels for FC operator.
     // This poor fix supposes that one of Add inputs is a const and has the same outChannels as the output
     DimSize_t outSize = 0;
-    const auto& op = std::dynamic_pointer_cast<OperatorTensor>(addNode->getOperator());
+    AIDGE_ASSERT(addNode->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
+    const auto& op = std::static_pointer_cast<OperatorTensor>(addNode->getOperator());
     for (size_t i = 0; i < op->nbInputs(); i++) {
         const auto& inTensor = op->getInput(i);
diff --git a/src/recipes/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp
index 4d18e012e2d0d817e3317601d188d0cab64b05b5..8e27fea58014b4ec16729f3593dd656026e16826 100644
--- a/src/recipes/HorizontalTiling.cpp
+++ b/src/recipes/HorizontalTiling.cpp
@@ -36,7 +36,8 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
     if (node->getOperator()->type() != "Conv") {
         AIDGE_INTERNAL_ASSERT("Operator should be a Convolution.");
     }
-    const auto& op = std::dynamic_pointer_cast<OperatorTensor>(node->getOperator());
+    AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
+    const auto& op = std::static_pointer_cast<OperatorTensor>(node->getOperator());
     if (op->nbOutputs() != 1 || op->nbData() > 1) {
         AIDGE_INTERNAL_ASSERT("Only slice Operators with one output and at most one input for now.");
     }
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index d45bc5f8eb1ac4b76bef3dd5c8e596efd933033b..6c827f236167c8bce4fd5a39c392f00ac8fe6649 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -325,6 +325,7 @@ Aidge::MemoryManager Aidge::SequentialScheduler::generateMemory(bool incProducer
        }

        const auto childs = node->getChildren();
+       AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
        const auto op = std::static_pointer_cast<OperatorTensor>(node->getOperator());

        std::vector<const MemoryManager::MemoryPlane*> wrapAroundMemPlane;