diff --git a/llvm/projects/soc_simulator/src/driver.py b/llvm/projects/soc_simulator/src/driver.py
index c747f47d998edaef603a4fc4f38fccb9a4207ea6..d52711072e33d47bbeb123c72d0cce46a6d2467e 100644
--- a/llvm/projects/soc_simulator/src/driver.py
+++ b/llvm/projects/soc_simulator/src/driver.py
@@ -19,11 +19,6 @@ def is_fc(operation_name):
     return operation_name.startswith("FC")
 
 def parse_tensor_layer_file(layer_filename): 
-    '''
-    Convs: Layer name, N, Cin, H, W, Cout, Kh, Kw, Sh, Sw
-    FCs: Layer name, Rows_A, Cols_A, Rows_B, Cols_B
-    NMLs (No Man Lands):  NML<number> (edited) 
-    '''
     if not os.path.isfile(layer_filename):
         print("ERROR: %s was not found." % layer_filename)
         exit(1)
@@ -50,10 +45,9 @@ def parse_tensor_layer_file(layer_filename):
             tensor_layers[layer_name]["RB"] = layer_data[3]
             tensor_layers[layer_name]["CB"] = layer_data[4]
 
-		elif not is_nml(layer_name): # TODO should we store data for NMLs?
-			print("ERROR: Invalid layer name %s" % layer_name)
-			exit(1)
+        elif not is_nml(layer_name): # TODO should we store data for NMLs?
+            print("ERROR: Invalid layer name %s" % layer_name)
+            exit(1)
-
     layer_file.close()
 
 # should this be a nested dict of dicts?
@@ -64,39 +58,39 @@ def parse_tensor_table(table_filename):
     if not os.path.isfile(table_filename):
         print("ERROR: %s was not found." % table_filename)
         exit(1)
-
     table_file = open(table_filename, "r")
+    line = table_file.readline().strip()
+    print(line)
 
-	line = table_file.readline().strip()
-
-	while line:
-		# Line here MUST be a header or there's a bug 
-		# Get the description of the layer 
-		assert(line.startswith("**"))
-		header_contents = line.split(' ')[1:] 
-		layer_name = header_contents[0]
-		num_ops = int(header_contents[1])
-		col_names = header_contents[2:]
+    while line:
+        # Line here MUST be a header or there's a bug 
+        # Get the description of the layer 
+        assert(line.startswith("**"))
 
-		# Go through all operations in the layer
-		for op_count in range(num_ops):
-			line = table_file.readline().strip()
-			op_data = line.split(' ')
-			op_name = op_data[0]
+        header_contents = line.split(' ')[1:] 
+        layer_name = header_contents[0]
+        num_ops = int(header_contents[1])
+        col_names = header_contents[2:]
 
-			# Number of data items (#s) needs to match up with the # of cols 
-			assert(len(op_data) - 1 == len(col_names)) 
+        # Go through all operations in the layer
+        for op_count in range(num_ops):
+            line = table_file.readline().strip()
+            op_data = line.split(' ')
+            op_name = op_data[0]
 
-			# Go through all data items (each col element) per operation 
-			for i in range(len(col_names)):
-				tensor_table[layer_name][op_name][col_names[i]] = op_data[i + 1]
+            # Number of data items (#s) needs to match up with the # of cols 
+            assert(len(op_data) - 1 == len(col_names)) 
 
-    	line = table_file.readline().strip()
-
-	table_file.close()
+            # Go through all data items (each col element) per operation 
+            for i in range(len(col_names)):
+                tensor_table[layer_name][op_name][col_names[i]] = op_data[i + 1]
 
+        line = table_file.readline().strip()
+    table_file.close()
+
 
 def run_simulations():
+    pass
     # open configuration file
     # open results file
     # read through each line in the configuration file
@@ -120,7 +114,10 @@ def run_simulations():
 # stores the layer name, then
 
 if __name__ == "__main__":
+    '''
     if len(sys.argv) != 4):
         print("Usage: python driver.py <layer info> <tensor info> <configurations> <results file>")
         exit(1)
-
+    '''
+    #parse_tensor_layer_file("/home/nvidia/Gitlab/hpvm/llvm/projects/hpvm-tensor-rt/build_mobilenet/mobilenet_layers.txt")
+    parse_tensor_table("/home/nvidia/Gitlab/hpvm/llvm/projects/hpvm-tensor-rt/build_pldi/mobilenet_results/mobilenet_tensors.txt")