Commit 7db6e417 authored by Jerome Hue

Clean-up debug prints and remove commented code

parent b4760e97
Pipeline #69091 failed
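The clean-up revolves around two logging styles that alternate through the hunks below: a verbose pattern that pairs a Log::info label with a full tensor print(), and a compact one-liner that embeds the result of toString() directly in the message. What follows is a minimal, self-contained sketch of the two styles; the Log and Tensor structs are simplified stand-ins for illustration, not the actual Aidge classes (the real Log::info takes fmt-style "{}" placeholders, as the diff shows).

// Sketch only: Log and Tensor are stand-ins for the Aidge classes referenced
// in the diff (Log::info, Tensor::print, Tensor::toString).
#include <iostream>
#include <string>

struct Log {
    static void info(const std::string& msg) { std::cout << "[info] " << msg << '\n'; }
};

struct Tensor {
    // Placeholder contents; the real class serializes its actual data.
    std::string toString() const { return "[[0.1, 0.2], [0.3, 0.4]]"; }
    void print() const { std::cout << toString() << '\n'; }
};

int main() {
    Tensor out0grad;

    // Verbose debug style targeted by the clean-up: a label line followed by
    // a raw dump of the whole tensor.
    Log::info("AddImpl_cpu::backward() : Gradient of output 0");
    out0grad.print();

    // Compact style seen in the same hunks: the tensor is serialized into a
    // single formatted message.
    Log::info("(AddImpl.cpp) Gradient of output 0 : " + out0grad.toString());
    return 0;
}

The compact form keeps one grep-able line per tensor instead of interleaving label lines with multi-line dumps, which makes it easier to silence or strip out later.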
@@ -126,7 +126,6 @@ void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
void* weightGrad_,
void* biasesGrad_)
{
Log::info("\t\t Call to FCImpl_cpu_backward_kernel");
// FIXME: missing FC attributes as arguments
const I* input = static_cast<const I*>(input_);
const I* originalInput = static_cast<const I*>(originalInput_);
@@ -171,8 +170,6 @@ void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
output[b*inputFeatureSize + c] = sum;
}
}
Log::info("\t\t Call to FCImpl_cpu_backward_kernel - Done");
}
// Kernels registration to implementation entry point
@@ -194,7 +194,6 @@ void MulImpl_cpu_backward_kernel(const std::size_t input0Length,
auto idx0 = getFlattenedIndex(broadcastedDims0, idxInput0);
auto idx1 = getFlattenedIndex(broadcastedDims1, idxInput1);
Log::info("Multiplication by {}", input1[idx1]);
grad_input_0[idx0] += static_cast<I1>(grad_output[i] * input1[idx1]);
grad_input_1[idx1] += static_cast<I2>(grad_output[i] * input0[idx0]);
}
@@ -77,10 +77,8 @@ void Aidge::AddImpl_cpu::backward() {
getCPUPtr(in0grad),
getCPUPtr(in1grad));
Log::info("AddImpl_cpu::backward() : Gradient of output 0");
out0grad->print();
Log::info("AddImpl_cpu::backward() : Gradient of input 0");
in0grad->print();
Log::info("AddImpl_cpu::backward() : Gradient of input 1");
in1grad->print();
Log::info(" ");
Log::info("(AddImpl.cpp) Gradient of output 0 : {}", out0grad->toString());
Log::info("(AddImpl.cpp) Gradient of input 0 : {}", in0grad->toString());
Log::info("(AddImpl.cpp) Gradient of input 1 : {}", in1grad->toString());
}
@@ -55,7 +55,6 @@ void Aidge::FCImpl_cpu::forward()
template <>
void Aidge::FCImpl_cpu::backward()
{
Log::info("\t Call to Aidge::FCIMpl_cpu::backward()");
const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
const auto& fc_grad = op_.getOutput(0)->grad();
AIDGE_ASSERT(fc_grad, "missing ouput #0 gradient");
@@ -84,6 +83,4 @@ void Aidge::FCImpl_cpu::backward()
input0grad.getImpl()->rawPtr(),
input1grad.getImpl()->rawPtr(),
(op_.getInput(2)) ? input2grad.getImpl()->rawPtr() : nullptr);
Log::notice("\t Call to Aidge::FCIMpl_cpu::backward() - Done");
}
@@ -41,7 +41,6 @@ void Aidge::MulImpl_cpu::forward() {
template <>
void Aidge::MulImpl_cpu::backward() {
Log::info("MulImpl_cpu::backward()");
const Mul_Op& op_ = dynamic_cast<const Mul_Op&>(mOp);
auto in0 = op_.getInput(0);
@@ -52,10 +51,6 @@ void Aidge::MulImpl_cpu::backward() {
// Find the correct kernel type
const auto impl = Registrar<MulImpl_cpu>::create(getBestMatch(getRequiredSpec()));
Log::info("Debug printing in 1 : ");
in1->print();
Log::info("Debug printing in 0 : ");
in0->print();
// Call kernel
impl.backward(/* input0Length */ in0grad->size(),
@@ -70,10 +65,8 @@ void Aidge::MulImpl_cpu::backward() {
getCPUPtr(in0grad),
getCPUPtr(in1grad));
Log::info("MulImpl_cpu::backward() : Gradient of output 0");
out0grad->print();
Log::info("MulImpl_cpu::backward() : Gradient of input 0");
in0grad->print();
Log::info("MulImpl_cpu::backward() : Gradient of input 1");
in1grad->print();
Log::info(" ");
Log::info("(MulImpl_cpu::backward()) After Mul Backward pass.");
Log::info("(MulImpl_cpu::backward()) Gradient of input 0 : {}", in0grad->toString());
Log::info("(MulImpl_cpu::backward()) Gradient of input 1 : {}", in1grad->toString());
}
@@ -43,7 +43,6 @@ template <>
void Aidge::SubImpl_cpu::backward() {
const Sub_Op& op_ = dynamic_cast<const Sub_Op&>(mOp);
//Log::info("SubImpl_cpu::backward() : Node {}", op_.name());
auto in0 = op_.getInput(0);
auto in1 = op_.getInput(1);
@@ -56,11 +55,10 @@ void Aidge::SubImpl_cpu::backward() {
const auto impl = Registrar<SubImpl_cpu>::create(getBestMatch(getRequiredSpec()));
in0grad->print();
in1grad->print();
out0grad->print();
Log::info("Sub Impl, pointer of out0 : {}" , static_cast<void*>(out0.get()));
out0->print();
Log::info(" ");
Log::info("(SubImpl.cpp) Gradient of input 0 : {}", in0grad->toString());
Log::info("(SubImpl.cpp) Gradient of input 1 : {}", in1grad->toString());
Log::info("(SubImpl.cpp) Gradient of output 0 : {}", out0grad->toString());
// Call kernel
@@ -75,10 +73,7 @@ void Aidge::SubImpl_cpu::backward() {
/* gradInput1 */ getCPUPtr(in1grad));
Log::info("SubImpl_cpu::backward() : Gradient of output 0");
out0grad->print();
Log::info("SubImpl_cpu::backward() : Gradient of input 0");
in0grad->print();
Log::info("SubImpl_cpu::backward() : Gradient of input 1");
in1grad->print();
Log::info("(SubImpl.cpp) After backward.");
Log::info("(SubImpl.cpp) Gradient of input 0 : {}", in0grad->toString());
Log::info("(SubImpl.cpp) Gradient of input 1 : {}", in1grad->toString());
}
@@ -929,6 +929,9 @@ TEST_CASE("[cpu/operator] MetaOperator", "[Leaky][CPU][Simple]") {
REQUIRE_NOTHROW(scheduler.generateScheduling());
REQUIRE_NOTHROW(scheduler.forward(true));
// Modify graph here -- COuld also be checked by type
auto mGraph = graph->getNode("leaky");
// Print output
//Log::notice("FC1 op output 0");
//fc1Op->getOutput(0)->print();