Commit 064fef3b authored by Benjamin Halimi

remove commented code

parent 7307439f
Related merge requests: !54 Update 0.3.1 -> 0.4.0, !36 Global Quantization Improvements
@@ -52,19 +52,6 @@ void Aidge::LSQImpl_cuda::backward() {
     std::shared_ptr<Tensor> gra_int1 = op_.getInput(1)->grad();
     std::shared_ptr<Tensor> gra_out0 = op_.getOutput(0)->grad();
-    // XXX
-    /*
-    size_t tmp;
-    cudaDeviceSetLimit(cudaLimitStackSize, 2048);
-    cudaDeviceGetLimit(&tmp, cudaLimitStackSize);
-    printf(" stack limit = %ld \n", tmp);
-    cudaDeviceSetLimit(cudaLimitMallocHeapSize, 100000000);
-    cudaDeviceGetLimit(&tmp, cudaLimitMallocHeapSize);
-    printf(" heap limit = %ld \n", tmp);
-    */
     if (gra_int0->size() > mWorkspaceSize) {
-        // std::cout << " reallocation " << sizeof(gra_int0) << " " << gra_int0->size() << std::endl;
         if (mWorkspace != nullptr) {
@@ -87,12 +74,7 @@ void Aidge::LSQImpl_cuda::backward() {
         gra_int0->getImpl()->rawPtr(),
         gra_int1->getImpl()->rawPtr(),
         mWorkspace);
-    /*
-    gra_int1->setBackend("cpu");
-    float *castedTensor = static_cast<float *>(gra_int1->getImpl()->rawPtr());
-    std::cout << castedTensor[0] << std::endl;
-    gra_int1->setBackend("cuda");
-    */
 }
 Aidge::LSQImpl_cuda::~LSQImpl_cuda() {
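
For reference, the first removed block was leftover debug scaffolding that raised the CUDA device stack and malloc-heap limits and printed them back. A minimal self-contained sketch of that pattern (a reconstruction for illustration, not part of this commit):

    #include <cstdio>
    #include <cuda_runtime.h>

    int main() {
        size_t limit = 0;

        // Raise the per-thread device stack size (in bytes), then read it back.
        cudaDeviceSetLimit(cudaLimitStackSize, 2048);
        cudaDeviceGetLimit(&limit, cudaLimitStackSize);
        std::printf("stack limit = %zu\n", limit);

        // Raise the device-side malloc() heap size, then read it back.
        cudaDeviceSetLimit(cudaLimitMallocHeapSize, 100000000);
        cudaDeviceGetLimit(&limit, cudaLimitMallocHeapSize);
        std::printf("heap limit = %zu\n", limit);

        return 0;
    }

These limits only matter for kernels that recurse deeply or allocate with device-side malloc(), which is presumably why the block was dead weight here.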
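The second removed block followed the same throwaway debug pattern: migrate the gradient tensor to the CPU backend, peek at its raw buffer, then move it back to the device. A hedged sketch using only the Aidge calls visible in the diff (the helper name and header path are assumptions):

    #include <iostream>
    #include <memory>
    #include <aidge/data/Tensor.hpp>  // assumed aidge_core header path

    // Hypothetical debug helper; mirrors the removed block.
    void printFirstElement(std::shared_ptr<Aidge::Tensor> tensor) {
        tensor->setBackend("cpu");   // copy the data back to the host
        const float *data = static_cast<const float *>(tensor->getImpl()->rawPtr());
        std::cout << data[0] << std::endl;
        tensor->setBackend("cuda");  // return the data to the device
    }

The round trip forces a device-to-host copy on every call, so it is strictly a debugging aid, consistent with the commit dropping it.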