From 30a052a3a4b4ad9b0039e912fa9c0880d9880d26 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Mon, 16 Sep 2024 11:40:12 +0200
Subject: [PATCH] Improved error messages

---
 python_binding/scheduler/pybind_Scheduler.cpp | 1 +
 src/operator/Conv.cpp                         | 7 ++++---
 src/operator/ConvDepthWise.cpp                | 7 ++++---
 3 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index ac35ce0a6..472af2a94 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -25,6 +25,7 @@ void init_Scheduler(py::module& m){
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
     .def("graph_view", &Scheduler::graphView)
     .def("save_scheduling_diagram", &Scheduler::saveSchedulingDiagram, py::arg("file_name"))
+    .def("save_static_scheduling_diagram", &Scheduler::saveStaticSchedulingDiagram, py::arg("file_name"))
     .def("resetScheduling", &Scheduler::resetScheduling)
     .def("generate_scheduling", &Scheduler::generateScheduling)
     .def("get_static_scheduling", &Scheduler::getStaticScheduling, py::arg("step") = 0)
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 92f4ec593..583599675 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -43,16 +43,17 @@ bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         // first check weight since it defines inChannels and outChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
-                    "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
+                    "Wrong weight Tensor dimension: {} for Conv{}D operator. Expected number of dimensions is {}.", getInput(1)->nbDims(), DIM, DIM+2);
         // check data
         AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
                     (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
-                    "Wrong input size for Conv operator.");
+                    "Wrong input size ({}) for Conv operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), inChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
         // check optional bias
         if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == outChannels()),
-                    "Wrong bias size for Conv operator.");
+                    "Wrong bias size ({}) for Conv operator. Expected dims are [{}].", getInput(2)->dims(), outChannels());
+
         std::array<DimSize_t, DIM + 2> outputDims{};
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());

diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index 9e95e78ea..c9fef2188 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -44,16 +44,17 @@ bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         // first check weight since it defines nbChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
-                    "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
+                    "Wrong weight Tensor dimension: {} for ConvDepthWise{}D operator. Expected number of dimensions is {}.", getInput(1)->nbDims(), DIM, DIM+2);
         // check data
         AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
                     (getInput(0)->template dims<DIM+2>()[1] == nbChannels()),
-                    "Wrong input size for Conv operator.");
+                    "Wrong input size ({}) for ConvDepthWise operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), nbChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
         // check optional bias
         if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == nbChannels()),
-                    "Wrong bias size for Conv operator.");
+                    "Wrong bias size ({}) for ConvDepthWise operator. Expected dims are [{}].", getInput(2)->dims(), nbChannels());
+
         std::array<DimSize_t, DIM + 2> outputDims = {};
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
--
GitLab
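
Note (not part of the patch): the new messages rely on {fmt} formatting, with fmt::join emitting one "x" placeholder per spatial dimension so the expected shape is spelled out next to the actual dims. A minimal standalone sketch of how the data-check message renders; DIM, the input dims and the channel count below are hypothetical values chosen for illustration:

// Minimal sketch, assuming the {fmt} library is available.
#include <fmt/format.h>
#include <fmt/ranges.h>   // needed to format std::vector and for fmt::join

#include <cstddef>
#include <string>
#include <vector>

int main() {
    constexpr std::size_t DIM = 2;                           // e.g. a Conv2D check
    const std::vector<std::size_t> inputDims{1, 4, 24, 24};  // mismatched input dims
    const std::size_t inChannels = 3;                        // expected channel count

    // Same format string as the new data check added in Conv.cpp:
    fmt::print("Wrong input size ({}) for Conv operator. Expected dims are [x, {}, {}].\n",
               inputDims, inChannels,
               fmt::join(std::vector<std::string>(DIM, "x"), ", "));
    // Prints: Wrong input size ([1, 4, 24, 24]) for Conv operator.
    //         Expected dims are [x, 3, x, x].
    return 0;
}

With DIM = 2 the join expands to "x, x", so the expected shape reads [x, 3, x, x]: batch, then the channel count the assert compares against dims[1], then one spatial entry per dimension.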