diff --git a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu
index b9f52c0c8dddb8e7a4aa37abec5ea0d9dfa7164b..c2e116a56fbf038628396eeb611711295a4a9170 100644
--- a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu
+++ b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu
@@ -333,12 +333,9 @@ void *wrapper_ConvLayer2(
     // NOTE: out_min, out_max are only relevant for ClippedRelu
     float out_min, float out_max) {
 
-  INFO("*** ------Conv Layer \n");
+  //INFO("*** ------Conv Layer \n");
 
   NodeConfiguration *NodeConf = RC->getNodeConfiguration(hpvm_node_id);
- INFO("HERE\n");
   if (NodeConf->isGPUNodeConfiguration()) {
-    INFO("GPU Configuration for ConvLayer\n");
     // Mapped to GPU - get a GPU node configuration
     GPUNodeConfiguration *GPUConf = (GPUNodeConfiguration *)NodeConf;
 
@@ -461,8 +459,7 @@ void *wrapper_ConvLayer2(
     }
     return pool_out;
   } else if (NodeConf->isCPUNodeConfiguration()) {
-      INFO("CPU Configuration for ConvLayer\n");
-      // Mapped to CPU - get a CPU node configuration
+     // Mapped to CPU - get a CPU node configuration
       CPUNodeConfiguration *CPUConf = (CPUNodeConfiguration *)NodeConf;
 
       std::vector< std::pair< CPUNodeConfiguration::TENSOR_OP,
@@ -495,7 +492,6 @@ void *wrapper_ConvLayer2(
       switch (activation_id) {
       case -1:
        { // No activation
-         INFO("No activation Function\n");
          activation_out = add_out;
        }
        break;
@@ -603,8 +599,6 @@ wrapper_FCLayer(const char *hpvm_node_id, void *input, void *weights,
                 // NOTE: out_min and out_max are only relevant for ClippedRelu
                 float out_min, float out_max) {
 
-  INFO("*** Dense Layer \n");
-
   NodeConfiguration *NodeConf = RC->getNodeConfiguration(hpvm_node_id);
   if (NodeConf->isGPUNodeConfiguration()) {
     DEBUG("GPU Configuration for FCLayer\n");
@@ -696,7 +690,6 @@ wrapper_FCLayer(const char *hpvm_node_id, void *input, void *weights,
     	  { // No activation
     	    CUSTOM_ASSERT((ApproxChoices.size() == 2) &&
     			  "Incorrect number of operations in provided FC layer configuration");
-    	    INFO("No activation Function\n");
     	    activation_out = add_out;
     	  }
     	  break;
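
The hunks above uniformly strip per-layer INFO tracing from the hot wrapper paths. A minimal sketch of an alternative, assuming nothing about HPVM's actual debug.h: a level-gated logging macro lets the same call sites stay in place and go silent by default, so tracing can be re-enabled without re-editing each wrapper. The `LOG_LEVEL` environment variable and `log_level()` helper below are illustrative names, not HPVM APIs.

```cpp
// Minimal sketch, not HPVM's debug.h: level-gated logging so per-layer
// tracing can be toggled at run time instead of deleted per call site.
#include <cstdio>
#include <cstdlib>

enum LogLevel { LOG_NONE = 0, LOG_INFO = 1, LOG_DEBUG = 2 };

// Reads the (hypothetical) LOG_LEVEL environment variable once.
static inline int log_level() {
  static const int level = [] {
    const char *env = std::getenv("LOG_LEVEL");
    return env ? std::atoi(env) : LOG_NONE;
  }();
  return level;
}

#define LOG_AT(lvl, ...)                                                       \
  do {                                                                         \
    if (log_level() >= (lvl))                                                  \
      std::fprintf(stderr, __VA_ARGS__);                                       \
  } while (0)

#define INFO(...) LOG_AT(LOG_INFO, __VA_ARGS__)
#define DEBUG(...) LOG_AT(LOG_DEBUG, __VA_ARGS__)
```

With `LOG_LEVEL` unset the wrappers print nothing, matching the intent of this patch, while running with `LOG_LEVEL=1` would restore the removed layer traces without touching the source.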