Commit 47aa7d13 authored by Olivier BICHLER

Improved error messages

parent cc0419eb
Showing with 33 additions and 20 deletions
@@ -75,7 +75,7 @@ public:
     void computeOutputDims() override final {
         // check inputs have been associated
         if (!getInput(0)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
         }
         if (!(getInput(0)->empty())) {
             std::array<DimSize_t, DIM + 2> outputDims;
@@ -84,7 +84,7 @@ public:
         const auto firstInputNbDims = getInput(0) -> nbDims();
         for (IOIndex_t i = 1; i < nbInputs(); ++i) {
             if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
             }
             if (getInput(i)->nbDims() == firstInputNbDims) {
@@ -94,7 +94,7 @@ public:
         bool associated = true;
         for (IOIndex_t i = 0; i < 3; ++i) {
             if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
             }
             associated &= !(getInput(i)->empty());
         }
@@ -85,7 +85,7 @@ public:
         bool associated = true;
         for (IOIndex_t i = 0; i < 3; ++i) {
             if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
             }
             associated &= !(getInput(i)->empty());
         }
@@ -84,7 +84,7 @@ public:
         bool associated = true;
         for (IOIndex_t i = 0; i < nbInputs(); ++i) {
             if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
             }
             associated &= !(getInput(i)->empty());
         }
@@ -72,7 +72,7 @@ public:
         bool associated = true;
         for (IOIndex_t i = 0; i < nbInputs(); ++i) {
             if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
             }
             associated &= !(getInput(i)->empty());
         }
@@ -78,7 +78,7 @@ public:
     void computeOutputDims() override final {
         if (!getInput(0)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
         }
         if (!(getInput(0)->empty())) {
             std::array<DimSize_t, DIM + 2> outputDims{};
@@ -56,6 +56,7 @@ public:
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+        AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bound for MetaOperator", inputIdx);
         const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
         inputOp.first->getOperator()->associateInput(inputOp.second, data);
@@ -78,7 +78,7 @@ public:
         bool associated = true;
         for (IOIndex_t i = 0; i < nbInputs(); ++i) {
             if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
             }
             associated &= !(getInput(i)->empty());
         }
@@ -16,6 +16,7 @@
 #include <memory>
 #include <fmt/format.h>
+#include <fmt/ranges.h>
 #ifdef NO_EXCEPTION
 #define AIDGE_THROW_OR_ABORT(ex, ...) \
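Several of the updated messages in this commit interpolate tensor dimensions (the std::vector returned by dims()) straight into the format string; <fmt/format.h> alone cannot format standard containers, which is what the newly included <fmt/ranges.h> provides. Below is a self-contained sketch of that pattern; it is NOT Aidge's actual macro definition, and the operator type and dimension values are hypothetical:

    // Approximation of the fmt-based throw-or-abort pattern used in this commit.
    #include <cstddef>
    #include <stdexcept>
    #include <string>
    #include <vector>
    #include <fmt/format.h>
    #include <fmt/ranges.h>  // lets "{}" format std::vector (e.g. tensor dims)

    // Simplified stand-in for AIDGE_THROW_OR_ABORT, for illustration only.
    #define DEMO_THROW_OR_ABORT(ex, ...) throw ex(fmt::format(__VA_ARGS__))

    int main() {
        const std::string opType = "Conv";                         // hypothetical operator type
        const std::vector<std::size_t> expected{1, 3, 224, 224};   // hypothetical dims
        const std::vector<std::size_t> given{1, 3, 112, 112};      // hypothetical dims
        try {
            DEMO_THROW_OR_ABORT(std::runtime_error,
                "{} operator's inputs should have the same dimensions: expected {} (input #0), given {} (input #1)",
                opType, expected, given);
        } catch (const std::runtime_error& e) {
            // Prints: Conv operator's inputs should have the same dimensions:
            //         expected [1, 3, 224, 224] (input #0), given [1, 3, 112, 112] (input #1)
            fmt::print("{}\n", e.what());
        }
        return 0;
    }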
@@ -78,7 +78,13 @@ void Aidge::GraphView::save(std::string path, bool verbose, bool showProducers)
                 givenName.c_str());
         }
         else {
-            if ((node_ptr->type() != "Producer") || showProducers) {
+            if (node_ptr->type() == "Producer") {
+                if (showProducers) {
+                    std::fprintf(fp, "%s_%s(%s):::producerCls\n", node_ptr->type().c_str(), namePtrTable[node_ptr].c_str(),
+                        givenName.c_str());
+                }
+            }
+            else {
                 std::fprintf(fp, "%s_%s(%s)\n", node_ptr->type().c_str(), namePtrTable[node_ptr].c_str(),
                     givenName.c_str());
             }
@@ -148,6 +154,7 @@ void Aidge::GraphView::save(std::string path, bool verbose, bool showProducers)
     std::fprintf(fp, "classDef outputCls fill:#ffa\n");
     std::fprintf(fp, "classDef externalCls fill:#ccc\n");
     std::fprintf(fp, "classDef rootCls stroke:#f00\n");
+    std::fprintf(fp, "classDef producerCls fill:#cbf\n");
     std::fprintf(fp, "\n");
     std::fclose(fp);
 }
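Net effect of the two GraphView::save() hunks above: producer nodes are now emitted with a dedicated Mermaid style class instead of being drawn like regular nodes. Going by the fprintf format strings, a producer rendered with showProducers enabled would come out roughly as Producer_conv1_w(conv1_w):::producerCls (node and tensor names hypothetical), and the style-sheet section gains the matching classDef producerCls fill:#cbf line, so producers stand out in the rendered graph.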
@@ -15,8 +15,10 @@
 const std::string Aidge::Memorize_Op::Type = "Memorize";
 void Aidge::Memorize_Op::computeOutputDims() {
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+    for (size_t i = 0; i < 2; ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+        }
     }
     // Only require one of the input to have dims defined
@@ -35,6 +35,6 @@ void Aidge::Mul_Op::computeOutputDims() {
         mOutputs[0]->resize(getInput(0)->dims());
     }
     else if (!getInput(0)->empty() && !getInput(1)->empty()) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible input dimensions for Operator Mul");
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible input dimensions for Operator Mul: {} and {}", getInput(0)->dims(), getInput(1)->dims());
     }
 }
\ No newline at end of file
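With <fmt/ranges.h> available, the Mul_Op message now reports both offending shapes, e.g. (hypothetical values): "Incompatible input dimensions for Operator Mul: [2, 3, 4] and [2, 1, 4]".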
@@ -32,21 +32,21 @@ Aidge::Operator::~Operator() noexcept = default;
 ///////////////////////////////////////////////////////
 Aidge::NbElts_t Aidge::Operator::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    AIDGE_ASSERT(mImpl != nullptr, "getNbRequiredData(): an implementation is required!");
+    AIDGE_ASSERT(mImpl != nullptr, "getNbRequiredData(): an implementation is required for {}!", type());
     return mImpl->getNbRequiredData(inputIdx);
 }
 Aidge::NbElts_t Aidge::Operator::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
-    AIDGE_ASSERT(mImpl != nullptr, "getNbConsumedData(): an implementation is required!");
+    AIDGE_ASSERT(mImpl != nullptr, "getNbConsumedData(): an implementation is required for {}!", type());
     return mImpl->getNbConsumedData(inputIdx);
 }
 Aidge::NbElts_t Aidge::Operator::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
-    AIDGE_ASSERT(mImpl != nullptr, "getNbProducedData(): an implementation is required!");
+    AIDGE_ASSERT(mImpl != nullptr, "getNbProducedData(): an implementation is required for {}!", type());
     return mImpl->getNbProducedData(outputIdx);
 }
 void Aidge::Operator::updateConsummerProducer(){
-    AIDGE_ASSERT(mImpl != nullptr, "updateConsummerProducer(): an implementation is required!");
+    AIDGE_ASSERT(mImpl != nullptr, "updateConsummerProducer(): an implementation is required for {}!", type());
     mImpl->updateConsummerProducer();
 }
@@ -56,7 +56,7 @@ void Aidge::Operator::runHooks() const {
     }
 }
 void Aidge::Operator::forward() {
-    AIDGE_ASSERT(mImpl != nullptr, "forward(): an implementation is required!");
+    AIDGE_ASSERT(mImpl != nullptr, "forward(): an implementation is required for {}!", type());
     mImpl->forward();
     runHooks();
 }
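As elsewhere in Operator.cpp, these assertions now name the concrete operator type, so a missing backend implementation reports, e.g. (hypothetical type): "forward(): an implementation is required for Conv!".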
@@ -117,7 +117,7 @@ void Aidge::OperatorTensor::computeOutputDims() {
     bool associated = (nbInputs() > 0); // do not compute anything if no input
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
         if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
         }
         associated &= !(getInput(i)->empty());
     }
@@ -125,7 +125,9 @@ void Aidge::OperatorTensor::computeOutputDims() {
         const auto expectedDims = getInput(0)->dims();
         for (std::size_t i = 1; i < nbInputs(); ++i) {
             if (expectedDims != getInput(i)->dims()) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator's inputs should have the same dimensions");
+                AIDGE_THROW_OR_ABORT(std::runtime_error,
+                    "{} operator's inputs should have the same dimensions: expected {} (input #0), given {} (input #{})",
+                    type(), expectedDims, getInput(i)->dims(), i);
             }
         }
         mOutputs[0]->resize(expectedDims);
@@ -27,7 +27,7 @@ const std::string Aidge::Slice_Op::Type = "Slice";
 void Aidge::Slice_Op::computeOutputDims() {
     // check input have been associated
     if (!getInput(0) || (getInput(0)->empty())) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
     }
     DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size();