From 286015c65139779d959bf7dda7e52ab7c6e906aa Mon Sep 17 00:00:00 2001
From: Hashim Sharif <hsharif3@miranda.cs.illinois.edu>
Date: Fri, 26 Mar 2021 13:46:15 -0500
Subject: [PATCH] Removing erroneous prints from tensor-rt

---
 .../tensor_runtime/src/wrapper_runtime.cu             | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu
index b9f52c0c8d..c2e116a56f 100644
--- a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu
+++ b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu
@@ -333,12 +333,10 @@ void *wrapper_ConvLayer2(
     // NOTE: out_min, out_max are only relevant for ClippedRelu
     float out_min, float out_max) {
 
-  INFO("*** ------Conv Layer \n");
+  //INFO("*** ------Conv Layer \n");
 
   NodeConfiguration *NodeConf = RC->getNodeConfiguration(hpvm_node_id);
- INFO("HERE\n");
   if (NodeConf->isGPUNodeConfiguration()) {
-    INFO("GPU Configuration for ConvLayer\n");
     // Mapped to GPU - get a GPU node configuration
     GPUNodeConfiguration *GPUConf = (GPUNodeConfiguration *)NodeConf;
 
@@ -461,8 +459,7 @@ void *wrapper_ConvLayer2(
     }
     return pool_out;
   } else if (NodeConf->isCPUNodeConfiguration()) {
-      INFO("CPU Configuration for ConvLayer\n");
-      // Mapped to CPU - get a CPU node configuration
+      // Mapped to CPU - get a CPU node configuration
       CPUNodeConfiguration *CPUConf = (CPUNodeConfiguration *)NodeConf;
 
       std::vector< std::pair< CPUNodeConfiguration::TENSOR_OP,
@@ -495,7 +492,6 @@ void *wrapper_ConvLayer2(
       switch (activation_id) {
       case -1:
        { // No activation
-         INFO("No activation Function\n");
          activation_out = add_out;
        }
        break;
@@ -603,8 +599,6 @@ wrapper_FCLayer(const char *hpvm_node_id, void *input, void *weights,
                 // NOTE: out_min and out_max are only relevant for ClippedRelu
                 float out_min, float out_max) {
 
-  INFO("*** Dense Layer \n");
-
   NodeConfiguration *NodeConf = RC->getNodeConfiguration(hpvm_node_id);
   if (NodeConf->isGPUNodeConfiguration()) {
     DEBUG("GPU Configuration for FCLayer\n");
@@ -696,7 +690,6 @@ wrapper_FCLayer(const char *hpvm_node_id, void *input, void *weights,
     	  { // No activation
     	    CUSTOM_ASSERT((ApproxChoices.size() == 2) &&
     			  "Incorrect number of operations in provided FC layer configuration");
-    	    INFO("No activation Function\n");
     	    activation_out = add_out;
     	  }
     	  break;
-- 
GitLab