diff --git a/llvm/projects/keras/frontend/approxhpvm_translator.py b/llvm/projects/keras/frontend/approxhpvm_translator.py
index 2c964db34b60f7bb67ceda8d9cba464d7856ef82..e60d6adb994ee4da56765a1fa365b14a792863d2 100644
--- a/llvm/projects/keras/frontend/approxhpvm_translator.py
+++ b/llvm/projects/keras/frontend/approxhpvm_translator.py
@@ -21,6 +21,7 @@ class DFG:
 
 
   def hasSingleInput(self, layer):
+
     layer_name = layer.__class__.__name__
     
     singleInLayers = {}
@@ -57,12 +58,12 @@ class DFG:
     inbound_node_name = inbound_node_name.split("/")[0]
     if inbound_node_name in self.node_map:
       inbound_node = self.node_map[inbound_node_name]
-      print (inbound_node_name, " found!")
+      DEBUG (inbound_node_name, " found!")
       inbound_node.add_output(dfg_node)
       dfg_node.add_input(inbound_node)
       
     else:
-      print ("--inbound node NOT FOUND!")
+      DEBUG ("--inbound node NOT FOUND!")
 
       
 
@@ -75,12 +76,12 @@ class DFG:
 
     if self.hasMultipleInputs(layer):  
       for j in range(len(layer.input)):
-        print(type(layer.input[j]))
-        print(layer.input[j].op.name)        
+        DEBUG (type(layer.input[j]))
+        DEBUG (layer.input[j].op.name)        
         self.add_dfg_edge(layer.input[j].op.name, dfg_node)
 
     else:
-      print (layer.input.name)        
+      DEBUG (layer.input.name)        
       self.add_dfg_edge(layer.input.name, dfg_node)
 
     # Adding DFG node to name mapping
@@ -104,8 +105,8 @@ class DFG:
       return
       
     if self.predVisited(cur_node, visited_nodes):
-      print(cur_node.layer_type)
-      print(cur_node.layer_name)
+      DEBUG (cur_node.layer_type)
+      DEBUG (cur_node.layer_name)
       visited_nodes[cur_node.layer_name] = True
 
       # Invoking traversal on outbound nodes
@@ -119,7 +120,7 @@ class DFG:
         
   #Build and  Print the DFG in reverse postorder
   def buildDFG(self):
-    print ("\n\n ****** Traversing and Printing DFG ******* \n\n")
+    DEBUG ("\n\n ****** Traversing and Printing DFG ******* \n\n")
     visited_nodes = {}
     # Starting traversal at the DFG root node
     self.traverseNode(self.root_node, visited_nodes)
@@ -144,42 +145,42 @@ class DFGNode:
       layer_type = layer.__class__.__name__
       self.layer_type = layer_type # layer type e.g., conv2d, add, dense
       self.layer_name = layer.name  # unique layer identifier
-      print (self.layer_name)
+      DEBUG (self.layer_name)
 
       if layer_type == "Conv2D" or layer_type == "DepthwiseConv2D" or  layer_type == "Dense":
         self.weights = layer.get_weights()[0]
-        print("\t", self.weights.shape)
+        DEBUG ("\t", self.weights.shape)
         self.use_bias = layer.use_bias
         
         if layer.use_bias:
           self.use_bias = layer.use_bias
           self.bias_weights = layer.get_weights()[1]
-          print("\t", self.bias_weights.shape)
+          DEBUG ("\t", self.bias_weights.shape)
         
           
       if layer_type == "Conv2D" or layer_type == "DepthwiseConv2D":
         self.padding = layer.padding
         self.strides = layer.strides
-        print("\t", self.strides)
-        print("\tPadding = ", self.padding)
+        DEBUG ("\t", self.strides)
+        DEBUG ("\tPadding = ", self.padding)
 
         
       if layer_type == "MaxPooling2D" or layer_type == "AveragePooling2D":
         self.pool_size = layer.pool_size
         self.strides = layer.strides
-        print("\t pool_size = ", self.pool_size)
-        print("\t strides = ", self.strides)
+        DEBUG ("\t pool_size = ", self.pool_size)
+        DEBUG ("\t strides = ", self.strides)
 
         
       if layerHasActivationAttr(self):
         self.activation_type = layer.activation.__name__
-        print ("\t Activation = ", self.activation_type)
+        DEBUG ("\t Activation = ", self.activation_type)
   
 
       if layer_type == "ZeroPadding2D":
-        print ("***ZeroPaddding \n");
+        DEBUG ("***ZeroPaddding \n");
         self.padding = layer.padding
-        print ("padding = ", self.padding);
+        DEBUG ("padding = ", self.padding);
         
       if layer_type == "BatchNormalization":
         self.epsilon = layer.epsilon
@@ -264,12 +265,12 @@ class TensorRtTranslator:
   
   def getSingleInputName(self, cur_node):
 
-    print (cur_node.layer_name)
+    DEBUG (cur_node.layer_name)
     # Assumption: If no inputs, the previous layer must be input layer
     if len(cur_node.inputs) == 0:
       return "input"
 
-    print ("Input_type = ", cur_node.inputs[0].layer_type)
+    DEBUG ("Input_type = ", cur_node.inputs[0].layer_type)
 
     # NOTE: Assuming the 'inference' phase - hence skipping Dropout
     pred_layer_type = cur_node.inputs[0].layer_type
@@ -295,12 +296,12 @@ class TensorRtTranslator:
 
   def getPrevLayerPadding(self, cur_node):
 
-    print (cur_node.layer_name)
+    DEBUG (cur_node.layer_name)
     # Assumption: If no inputs, the previous layer must be input layer
     if len(cur_node.inputs) == 0:
       return None
 
-    print ("Input_type = ", cur_node.inputs[0].layer_type)
+    DEBUG ("Input_type = ", cur_node.inputs[0].layer_type)
     if cur_node.inputs[0].layer_type == "ZeroPadding2D": 
       pred_padding = cur_node.inputs[0].padding
       return pred_padding
@@ -483,9 +484,7 @@ class TensorRtTranslator:
       self.program_str += inst_str
 
       
-    
-      
-      
+            
           
      
   def codegenNode(self, dfg, cur_node, visited_nodes):
@@ -494,12 +493,11 @@ class TensorRtTranslator:
     if cur_node.layer_name in visited_nodes:
       return
 
-    print ("-visiting = ", cur_node.layer_name, "\n")
+    DEBUG ("-visiting = ", cur_node.layer_name, "\n")
     
     if dfg.predVisited(cur_node, visited_nodes):
       
       visited_nodes[cur_node.layer_name] = True
-
       self.genNodeCalls(cur_node)
 
       # Invoking traversal on outbound nodes
@@ -510,11 +508,13 @@ class TensorRtTranslator:
   # Print the DFG in reverse postorder
   def codegen(self, dfg):
 
-    print ("\n\n ****** Codegen for HPVM Tensor Rt ******* \n\n")
+    print ("\n *** Starting Codegen for HPVM Tensor Rt *** \n")
     visited_nodes = {}
     # Starting traversal at the DFG root node
     self.codegenNode(dfg, dfg.root_node, visited_nodes)
 
+    print ("\n\n --- Codegen Completed --- \n\n")
+
 
     
     
@@ -531,7 +531,7 @@ class TensorRtTranslator:
         w_name = layer_name + "_w"
         
         self.filter_names[w_name] = 1
-        print (weights.shape, w_name)
+        DEBUG (weights.shape, w_name)
 
         N = weights.shape[3]
         C = weights.shape[2]
@@ -563,7 +563,7 @@ class TensorRtTranslator:
           b_name = layer_name + "_b"
 
           self.filter_names[b_name] = 1
-          print (bias_weights.shape, b_name)
+          DEBUG (bias_weights.shape, b_name)
 
           unique_file_name = b_name + ".bin"
           dumpFcBias(prefix + unique_file_name, bias_weights, bias_weights.shape[0])
@@ -584,7 +584,7 @@ class TensorRtTranslator:
         w_name = layer_name + "_w"
 
         self.filter_names[w_name] = 1
-        print (weights.shape, w_name)
+        DEBUG (weights.shape, w_name)
 
         H = weights.shape[0]
         W = weights.shape[1]
@@ -606,7 +606,7 @@ class TensorRtTranslator:
           b_name = layer_name + "_b"
 
           self.filter_names[b_name] = 1
-          print (bias_weights.shape, b_name)
+          DEBUG (bias_weights.shape, b_name)
 
           unique_file_name = b_name + ".bin"
           dumpFcBias(prefix + unique_file_name, bias_weights, bias_weights.shape[0])
@@ -862,7 +862,7 @@ def reloadModelParams(model, reload_dir, x_test, y_test):
   for i in range(len(model.layers)):
     layer = model.layers[i]
     layer_name = layer.name
-    print ("*layer_name = ", layer_name)
+    DEBUG ("*layer_name = ", layer_name)
 
     if "conv" not in layer_name and "dense" not in layer_name:
       continue
@@ -870,9 +870,6 @@ def reloadModelParams(model, reload_dir, x_test, y_test):
     w_path = reload_dir + layer_name + "_w.bin"
     b_path = reload_dir + layer_name + "_b.bin"
    
-    print ("** w_path = ", w_path)
-    print ("** b_path = ", b_path)
-
     w_arr = np.fromfile(w_path, dtype='float32')
     b_arr = np.fromfile(b_path, dtype='float32')
 
@@ -885,7 +882,7 @@ def reloadModelParams(model, reload_dir, x_test, y_test):
       b_arr = np.reshape(b_arr, b_shape)
     
       w_arr = np.transpose(w_arr, (2,3,1,0))
-      print ("old_shape = ", w_shape, " new_shape = ", w_arr.shape)
+      DEBUG ("old_shape = ", w_shape, " new_shape = ", w_arr.shape)
 
     if "dense" in layer_name:      
       w_arr = np.reshape(w_arr, w_shape)
@@ -921,7 +918,6 @@ def getUniquePath(weights_dir):
 
     weights_dir = getUniquePath(weights_dir)
       
-  #print (weights_dir)
   
   return weights_dir
   
@@ -950,8 +946,8 @@ def translate_to_approxhpvm(model, weights_dir, test_data=None, test_labels=None
   dfg.buildDFG()
 
 
-  print ("test_data.shape = ", test_data.shape, "\n")
-  print ("test_labels.shape = ", test_labels.shape, "\n")
+  DEBUG ("test_data.shape = ", test_data.shape, "\n")
+  DEBUG ("test_labels.shape = ", test_labels.shape, "\n")
 
   tensorRtTranslator = TensorRtTranslator(dfg)    
   tensorRtTranslator.translate(model, weights_dir, test_data, test_labels)
@@ -965,7 +961,11 @@ def translate_to_approxhpvm(model, weights_dir, test_data=None, test_labels=None
   filter_names = tensorRtTranslator.getFilterNames()
   hpvmTranslator = HPVMTranslator(dfg, weight_str, input_str, filter_names)    
   hpvmTranslator.translate(model, weights_dir, test_data)
-  
+
+  print ("-- Weight Files Under : ", weights_dir)
+  print ("-- TensorRT src : ", weights_dir + "/src.cc")
+  print ("-- ApproxHPVM src  : ", weights_dir + "approxhpvm_src.cc")
+
   
   return weights_dir
 
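For context, a minimal sketch of how the updated entry point can be driven from a Keras script. The toy model, random test data, output directory, and the frontend import path are assumptions for illustration only; the five-argument call mirrors the invocation in Benchmark.py further below.

import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, Flatten, Dense

# Assumed import path for the frontend package (not specified in this patch)
from frontend.approxhpvm_translator import translate_to_approxhpvm

# Placeholder model and random test data, only to exercise the translator
model = Sequential([
    Conv2D(4, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    Flatten(),
    Dense(10, activation='softmax'),
])
X_test = np.random.rand(16, 32, 32, 3).astype('float32')
Y_test = np.random.randint(0, 10, size=(16,))

# Returns the (possibly uniquified) weights directory; with this patch the
# translator also prints the src.cc and approxhpvm_src.cc locations on exit.
out_dir = translate_to_approxhpvm(model, "./toy_model_hpvm/", X_test, Y_test, 10)
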
diff --git a/llvm/projects/keras/frontend/hpvm_dfg_translator.py b/llvm/projects/keras/frontend/hpvm_dfg_translator.py
index eda8ed9da8ab9e34f77a6165ca79d2e0a5e4e4f8..65574a98881f010bf7cd67df344517803de8c67c 100644
--- a/llvm/projects/keras/frontend/hpvm_dfg_translator.py
+++ b/llvm/projects/keras/frontend/hpvm_dfg_translator.py
@@ -59,12 +59,12 @@ class HPVMTranslator:
     
       
   def getSingleInputName(self, cur_node):
-    print (cur_node.layer_name)
+    DEBUG (cur_node.layer_name)
     # Assumption: If no inputs, the previous layer must be input layer
     if len(cur_node.inputs) == 0:
       return "input"
 
-    print ("Input_type = ", cur_node.inputs[0].layer_type)
+    DEBUG ("Input_type = ", cur_node.inputs[0].layer_type)
 
     pred_layer_type = cur_node.inputs[0].layer_type
     # NOTE: Assuming the 'inference' phase - hence skipping Dropout
@@ -90,12 +90,12 @@ class HPVMTranslator:
 
 
   def getPrevLayerPadding(self, cur_node):
-    print (cur_node.layer_name)
+    DEBUG (cur_node.layer_name)
     # Assumption: If no inputs, the previous layer must be input layer
     if len(cur_node.inputs) == 0:
       return None
 
-    print ("Input_type = ", cur_node.inputs[0].layer_type)
+    DEBUG ("Input_type = ", cur_node.inputs[0].layer_type)
     if cur_node.inputs[0].layer_type == "ZeroPadding2D": 
       pred_padding = cur_node.inputs[0].padding
       return pred_padding
@@ -215,8 +215,8 @@ class HPVMTranslator:
   # Fix: replace deprecated  genHpvmNodeEdges with  genHpvmEdges
   def genHpvmNodeEdges(self, out_var_name, input_var_name, input_var_name2):
 
-    print ("input_var_name2 = ", input_var_name2)
-    print ("input_var_name = ", input_var_name)
+    DEBUG ("input_var_name2 = ", input_var_name2)
+    DEBUG ("input_var_name = ", input_var_name)
     
     hpvm_edge_str = "\n  void* " + out_var_name + " = "
     hpvm_edge_str += "__visc__createNodeND(0, " + out_var_name + "_node); \n\n"
@@ -531,11 +531,13 @@ class HPVMTranslator:
         
   # Print the DFG in reverse postorder
   def codegen(self, dfg):
-    print ("\n\n ****** Codegen for ApproxHPVM DFG Representation ******* \n\n")
+    print ("\n *** Starting Codegen for ApproxHPVM DFG Representation *** \n")
     visited_nodes = {}
     # Starting traversal at the DFG root node
     self.codegenNode(dfg, dfg.root_node, visited_nodes)
 
+    print ("\n --- Codegen Completed --- \n")
+
 
       
 
@@ -648,7 +650,7 @@ class HPVMTranslator:
     program_str = self.file_header_str + self.node_str + self.root_str
     program_str += self.root_struct_str + self.main_func_str
 
-    print (program_str)
+    DEBUG (program_str)
     
     f = open(dir_prefix + "/approxhpvm_src.cc", "w+")
     f.write(program_str)
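
As a sanity check of what genHpvmNodeEdges begins emitting, the two string concatenations visible in the hunk above produce a C declaration like the following. The node name var_2 is illustrative, and the remaining __visc__ bind calls generated by the rest of that function are not shown in this diff.

out_var_name = "var_2"  # illustrative node name

hpvm_edge_str  = "\n  void* " + out_var_name + " = "
hpvm_edge_str += "__visc__createNodeND(0, " + out_var_name + "_node); \n\n"

print(hpvm_edge_str)
# Emits:
#   void* var_2 = __visc__createNodeND(0, var_2_node);
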
diff --git a/llvm/projects/keras/frontend/utils.py b/llvm/projects/keras/frontend/utils.py
index ffc338c19ea60df7c53430ac38b613c2daef402e..9a2c13715ee351b61f8d7aaffc148a90a45ee233 100644
--- a/llvm/projects/keras/frontend/utils.py
+++ b/llvm/projects/keras/frontend/utils.py
@@ -1,6 +1,13 @@
 
 
 
+def DEBUG(msg, *args):
+
+  # Debug prints are disabled by default; set debug to True to re-enable them
+  debug = False
+  if debug:
+    print (msg, *args)
+
 
 def nodeHasBias(cur_node):
     
@@ -45,7 +52,6 @@ def genActivationCallStr(input_var, output_var, activation_type):
   inst_str = "void* " + output_var + " = "
   inst_str += "tensor" + func_name + "(" + input_var + "); \n"
 
-  print ("***** inst_str = ", inst_str, "\n")
     
   return inst_str
 
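The new DEBUG helper above gates its output on a flag that is hardcoded to False, so re-enabling the trace output means editing utils.py. A sketch of an alternative that keeps the same call sites but reads the flag from the environment; the variable name HPVM_FRONTEND_DEBUG is an assumption, not part of this patch.

import os

# Hypothetical variant: run with HPVM_FRONTEND_DEBUG=1 to see the trace output
DEBUG_ENABLED = os.environ.get("HPVM_FRONTEND_DEBUG", "0") == "1"

def DEBUG(msg, *args):
  if DEBUG_ENABLED:
    print (msg, *args)
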
diff --git a/llvm/projects/keras/frontend/weight_utils.py b/llvm/projects/keras/frontend/weight_utils.py
index 4a9e3dbe0a1947fadda46704c984bbb087ce1545..dd22765386e2172572ad0feec201c7dec407a909 100644
--- a/llvm/projects/keras/frontend/weight_utils.py
+++ b/llvm/projects/keras/frontend/weight_utils.py
@@ -6,9 +6,10 @@ from keras.optimizers import Adam
 
 
 def dumpLabels(file_name, Y_test):
+
+    print ("Dumping Labels File = ", file_name)
     
-    f = open(file_name, "wb")
-    
+    f = open(file_name, "wb")    
     labels_map = {}    
     for label in Y_test:
         label_val = 0
@@ -54,14 +55,12 @@ def dumpData(file_name, X_test):
 
 def dumpData(file_name, X_test):
 
-    print ("*DumpData")
+    print ("*Dumping Input File = ", file_name)
     #print("-min_val = ", np.amin(X_test))
     #print("-max_val = ", np.amax(X_test))
 
     f = open(file_name, "wb")
 
-    print ("X.shape = ", X_test.shape)
-    print ("X_test.size", len(X_test))
     X_test = X_test.flatten()
     X_test = X_test.astype(np.float32)
     X_test.tofile(f)    
@@ -94,18 +93,13 @@ def dumpConvWeights(file_name, weights, N, C, H, W):
 
 def dumpConvWeights(file_name, X_test, N, C, H, W):
 
-    print ("*DumpConvWeights")
+    print ("*Dumping Conv Weights to file = ", file_name)
     #print("-min_val = ", np.amin(X_test))
     #print("-max_val = ", np.amax(X_test))
 
     f = open(file_name, "wb")
 
-    print ("X_test.shape", X_test.shape)
-    print ("X_test.size", len(X_test))
-
     X_test = np.transpose(X_test, (3, 2, 0, 1))
-
-    print ("X_test.shape", X_test.shape)
     X_test = X_test.flatten()
     X_test = X_test.astype(np.float32)
     X_test.tofile(f)    
@@ -117,11 +111,10 @@ def dumpConvWeights(file_name, X_test, N, C, H, W):
     
 def dumpFcWeights(file_name, weights, H, W):
 
-    print (weights.shape)
-    print ("*DumpFcWeights")
-    print("-min_val = ", np.amin(weights))
-    print("-max_val = ", np.amax(weights))
-
+    print ("*Dumping FC weights to = ", file_name)
+    
+    #print("-min_val = ", np.amin(weights))
+    #print("-max_val = ", np.amax(weights))
 
     f = open(file_name, "wb")
     for i in range(H):
@@ -134,11 +127,7 @@ def dumpFcWeights(file_name, weights, H, W):
     
 def dumpFcBias(file_name, bias, W):
 
-    print (bias.shape)
-    print ("*DumpFcBias")
-    print("-min_val = ", np.amin(bias))
-    print("-max_val = ", np.amax(bias))
-
+    print ("*Dump Bias Weights = ", file_name)
 
     f = open(file_name, "wb")
     for i in range(W):
@@ -189,12 +178,12 @@ def reloadHPVMWeights(model, reload_dir, output_model, X_test, Y_test):
   for i in range(len(model.layers)):
     layer = model.layers[i]
     layer_name = layer.name
-    print ("*layer_name = ", layer_name)
+    #-- print ("*layer_name = ", layer_name)
     if "conv" not in layer_name and "dense" not in layer_name:
       continue
     
     w_path = reload_dir + layer_name + "_w.bin"
-    print ("** w_path = ", w_path)    
+    #-- print ("** w_path = ", w_path)    
     w_arr = np.fromfile(w_path, dtype='float32')
     
     b_path = reload_dir + layer_name + "_b.bin"
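
For reference, dumpConvWeights above takes the Keras Conv2D kernel layout (H, W, C_in, C_out) and writes it in NCHW order via the (3, 2, 0, 1) transpose before flattening to float32. A small round-trip sketch, with illustrative shapes and file name:

import numpy as np

# Keras Conv2D kernel: (H, W, C_in, C_out), e.g. a 3x3 conv mapping 3 -> 64 channels
w = np.random.rand(3, 3, 3, 64).astype(np.float32)

# Same transpose dumpConvWeights applies: result is (C_out, C_in, H, W), i.e. NCHW
w_nchw = np.transpose(w, (3, 2, 0, 1))
assert w_nchw.shape == (64, 3, 3, 3)

w_nchw.flatten().astype(np.float32).tofile("conv1_w.bin")  # illustrative file name

# Reading it back the way reloadHPVMWeights does recovers the same buffer
w_back = np.fromfile("conv1_w.bin", dtype='float32').reshape(64, 3, 3, 3)
assert np.allclose(w_back, w_nchw)
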
diff --git a/llvm/projects/keras/src/Benchmark.py b/llvm/projects/keras/src/Benchmark.py
index a275d103e253e842057dd0de9b98b1fecb625aeb..e83a78f4ea5d776a21ed6a6b47ccdb840f42c129 100644
--- a/llvm/projects/keras/src/Benchmark.py
+++ b/llvm/projects/keras/src/Benchmark.py
@@ -91,7 +91,7 @@ class Benchmark:
 
         # Main call to ApproxHPVM-Keras Frontend
         working_dir = translate_to_approxhpvm(model, self.hpvm_dir, X_test, Y_test, self.num_classes)
-        print ("*** working_dir = ", working_dir)
+        #-- print ("*** working_dir = ", working_dir)
         
         if len(argv) > 3 and argv[3] == "compile":
           self.compileSource(working_dir)