diff --git a/llvm/projects/soc_simulator/src/driver_new_config_fp16_repl.py b/llvm/projects/soc_simulator/src/driver_new_config_fp16_repl.py
index f53573f7cde9420400194827d55d84d69e2ace5b..d2d175725397994c8e76dd33bf79d77fe652aa3e 100644
--- a/llvm/projects/soc_simulator/src/driver_new_config_fp16_repl.py
+++ b/llvm/projects/soc_simulator/src/driver_new_config_fp16_repl.py
@@ -189,7 +189,6 @@ class Driver:
                     curr_conf_results.append((layer_as_lst[1], layer_results))
                     line = config_file.readline().strip()
                     continue
-                
                 layer_ind = int(layer_as_lst[0]) - 1
                 layer_table_data = self.__tensor_layers[layer_ind]
                 layer_name = layer_table_data["Name"]
@@ -233,7 +232,6 @@ class Driver:
                             elif precision_type == "samp": 
                                 approx_type = Driver.ApproxTypes.SAMP
                             curr_layer = Driver.PrecisionTypes.FP16
-                        print(curr_layer, prev_layer)
                         quant_time, quant_energy = self.__quantize(precision_type, op_number, curr_layer, prev_layer, tensor_count, layer_table_data)
                         if quant_time != 0:
                             assert i == 2 #and layer_ind == 0
@@ -256,9 +254,8 @@ class Driver:
 
                 has_quantized = False
                 for layer_ind, (hardware, layer) in enumerate(curr_conf_results):
-                    if len(layer) == 1 and layer[0][2].find("softmax") != -1: continue
+                    if layer[0][2].find("softmax") != -1: continue
                     fp16_layer = []
-                    #print(layer_ind, hardware, layer)
                     layer_table_data = self.__tensor_layers[layer_ind]
                     layer_name = layer_table_data["Name"]
 
@@ -302,7 +299,6 @@ class Driver:
         else:
             lookup_key = "_" + precision_type + str(op_number) + "_"
 
-        print(curr_layer)
         if curr_layer == Driver.PrecisionTypes.FP32:
             time_key = "h2f%stime" % lookup_key
             energy_key = "h2f%senergy" % lookup_key
@@ -311,8 +307,6 @@ class Driver:
             energy_key = "f2h%senergy" % lookup_key
         time = tensor_op_row[time_key]
         energy = tensor_op_row[energy_key]
-        print(time_key, energy_key)
-        print("Quantization: (%f, %f)" % (time, energy))
         return (time, energy)
 
 
@@ -330,7 +324,7 @@ class Driver:
         elif Driver.is_fc(layer_name):
             rows_a = layer_data["RA"] 
             cols_a = layer_data["CA"]
-            rows_b = cols_
+            rows_b = layer_data["RB"] 
             cols_b = layer_data["CB"]
         else:
             print("PROMISE can't run whatever this layer is.")
@@ -349,9 +343,6 @@ class Driver:
     def __run_gpu_simulation(self, curr_layer, layer_name, tensor_ind, \
                     approx_type = None, knob_number = None):
         tensor_info = self.__tensor_table[layer_name][tensor_ind]
-        #print(tensor_info)
-        #print(layer_name)
-        #print(tensor_ind)
         time_key = None
         energy_key = None
 
@@ -427,7 +418,7 @@ class Driver:
 
         def get_final_times_energies_conf(curr_conf, curr_conf_name):
             final_time = final_energy = 0
-           
+
             final_conf = [] # List (conf) of lists (layers) of tuples (operation data)
 
             #for hardware, layer in self.fp16_baseline:
@@ -440,24 +431,38 @@
                         final_conf_layer.append((None, None, tensor_op))
                         continue
                     # layer name, operation name, val name
-                    baseline_time = self.fp16_baseline[layer_ind][1][tensor_ind][0]
-                    baseline_energy = self.fp16_baseline[layer_ind][1][tensor_ind][1]
-                    baseline_op = self.fp16_baseline[layer_ind][1][tensor_ind][2]
-                    #print(baseline_time, baseline_energy, baseline_op)
-                    #print(op_time, tensor_op)
+                    if tensor_op.find("promise") != -1: # compute sum of entire fp16 baseline layer
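+                        # A PROMISE result covers the whole layer, so its baseline is the
+                        # layer-wide fp16 sum rather than a single tensor op's numbers.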
+                        baseline_time = 0
+                        baseline_energy = 0
+                        baseline_op = []
+                        baseline_layer = self.fp16_baseline[layer_ind][1]
+
+                        for base_time, base_energy, base_op in baseline_layer:
+                            baseline_time += base_time
+                            baseline_energy += base_energy
+                            baseline_op.append(base_op)
+                    else: # look at the individual tensor operation as before
+                        baseline_time = self.fp16_baseline[layer_ind][1][tensor_ind][0]
+                        baseline_energy = self.fp16_baseline[layer_ind][1][tensor_ind][1]
+                        baseline_op = self.fp16_baseline[layer_ind][1][tensor_ind][2]
+
                     final_tensor_op = tensor_op
-                    #print(op_time > baseline_time)
                     if op_time > baseline_time:
-                        #print("**************** BIGGER ******************")
-                        #print(curr_conf_name)
-                        #print(baseline_time, baseline_energy, baseline_op, layer_ind)
-                        #print(op_time, tensor_op, layer_ind)
+                        print("**************** BIGGER ******************")
+                        if tensor_op.find("promise") != -1:
+                            print("WARNING: PROMISE")
+                        print(curr_conf_name)
+                        print(baseline_time, baseline_energy, baseline_op, layer_ind)
+                        print(op_time, tensor_op, layer_ind)
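+                        # Slower than the fp16 baseline: charge the baseline time and
+                        # energy, and emit the baseline op in the final configuration.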
                         final_time += baseline_time
                         final_energy += baseline_energy
                         final_tensor_op = baseline_op
                     else:
                         final_time += op_time
                         final_energy += op_energy
+
                     final_conf_layer.append((None, None, final_tensor_op)) # Don't care about the times and energies when writing
                 final_conf.append((hardware, final_conf_layer))
             #print("\n")