diff --git a/llvm/projects/soc_simulator/src/driver.py b/llvm/projects/soc_simulator/src/driver.py
index 50f80ba15d624cd0c543e9d7328bfa98bd86b4db..4018477ac755ff839ed4509a4a78341ce30251e1 100644
--- a/llvm/projects/soc_simulator/src/driver.py
+++ b/llvm/projects/soc_simulator/src/driver.py
@@ -35,21 +35,21 @@ def parse_tensor_layer_file(layer_filename):
         tensor_layer["Name"] = layer_name
 
         if is_conv(layer_name):
-            tensor_layer["N"] = int(layer_data[1])
-            tensor_layer["Cin"] = int(layer_data[2])
-            tensor_layer["H"] = int(layer_data[3])
-            tensor_layer["W"] = int(layer_data[4])
-            tensor_layer["Cout"] = int(layer_data[5])
-            tensor_layer["Kh"] = int(layer_data[6])
-            tensor_layer["Kw"] = int(layer_data[7])
-            tensor_layer["Sh"] = int(layer_data[8])
-            tensor_layer["Sw"] = int(layer_data[9])
+            tensor_layer["N"] = float(layer_data[1])
+            tensor_layer["Cin"] = float(layer_data[2])
+            tensor_layer["H"] = float(layer_data[3])
+            tensor_layer["W"] = float(layer_data[4])
+            tensor_layer["Cout"] = float(layer_data[5])
+            tensor_layer["Kh"] = float(layer_data[6])
+            tensor_layer["Kw"] = float(layer_data[7])
+            tensor_layer["Sh"] = float(layer_data[8])
+            tensor_layer["Sw"] = float(layer_data[9])
 
         elif is_fc(layer_name):
-            tensor_layer["RA"] = int(layer_data[1])
-            tensor_layer["CA"] = int(layer_data[2])
-            tensor_layer["RB"] = int(layer_data[3])
-            tensor_layer["CB"] = int(layer_data[4])
+            tensor_layer["RA"] = float(layer_data[1])
+            tensor_layer["CA"] = float(layer_data[2])
+            tensor_layer["RB"] = float(layer_data[3])
+            tensor_layer["CB"] = float(layer_data[4])
 
         elif not is_nml(layer_name): # TODO should we store data for NMLs?
 			print("ERROR: Invalid layer name %s" % layer_name)
@@ -95,7 +95,7 @@ def parse_tensor_table(table_filename):
 
             # Go through all data items (each col element) per operation 
             for i in range(len(col_names)):
-                operation_data[col_names[i]] = op_data[i + 1]
+                operation_data[col_names[i]] = float(op_data[i + 1])
 
             layer_operations.append(operation_data)
 
@@ -113,7 +113,7 @@ class ApproxTypes:
 
 def is_promise(config_layer):
     # TODO overhead in call to split?
-    return int(config_layer.split(' ')[0]) < fp16_swing
+    return float(config_layer.split(' ')[0]) < fp16_swing
 
 # NOTE smart_dma is always true
 def quantize(curr_layer, prev_layer, h2f_f2h_operation_ind, layer_data):
@@ -177,6 +177,20 @@ def run_promise_simulation(swing, layer_data):
     return total_time_energy[0], total_time_energy[1]
 
 
+def run_gpu_simulation(curr_layer, layer_name, tensor_ind):
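+    '''Return the simulated (time, energy) of one tensor op, read from the
+    tensor table's fp32 columns when curr_layer is FP32, else its fp16 columns.'''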
+    print(layer_name, tensor_ind)
+    tensor_info = tensor_table[layer_name][tensor_ind]
+    if curr_layer == ApproxTypes.FP32:
+        conversion_time = tensor_info["fp32_time"]
+        conversion_energy = tensor_info["fp32_energy"]
+    else:
+        conversion_time = tensor_info["fp16_time"]
+        conversion_energy = tensor_info["fp16_energy"]
+    print("GPU: (%f, %f)" % (conversion_time, conversion_energy))
+    return (conversion_time, conversion_energy)
+
 # Default dict of default dicts 
 # [Time/Energy][number corresponding to order the layer config was read in] = time/energy
 aggregate_results = defaultdict(lambda: defaultdict(float))
@@ -193,23 +205,50 @@ def run_simulations(config_filename, results_filename):
         config_layers = config.strip().split(',')
         prev_layer = ApproxTypes.FP32
         curr_layer = None
-        
+
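+        # Accumulate this config's time and energy across all of its layers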
+        aggregate_results["Time"][config_ind] = 0
+        aggregate_results["Energy"][config_ind] = 0
+
         for layer_ind, config_layer in enumerate(config_layers): # level
             layer_data = tensor_layers[layer_ind]  # layer
+            layer_name = layer_data["Name"]
+
             if is_promise(config_layer):
                 print("IS PROMISE")
-                print("Running layer %s on PROMISE" % layer_data["Name"])
+                print("Running layer %s on PROMISE" % layer_name)
                 curr_layer = ApproxTypes.PROMISE
                 quant_time, quant_energy = quantize(curr_layer, prev_layer, 0, layer_data)
                 
                 # Compute 
                 time, energy = run_promise_simulation(config_layer, layer_data)
-                aggregate_results["Time"][config_ind] = time
-                aggregate_results["Energy"][config_ind] = energy 
+                aggregate_results["Time"][config_ind] += time
+                aggregate_results["Energy"][config_ind] += energy 
 
             else:
-                print("Not promise")
-                pass
+                print("Running layer %s on GPU" % layer_name)
+                tensor_ops = config_layer.split(' ')
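+                # config_layer holds one swing value per tensor op of this
+                # layer; an op matching fp16_swing runs at FP16, others at FP32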
+
+                total_time = 0
+                total_energy = 0
+                print(tensor_ops)
+                for tensor_ind, tensor_op in enumerate(tensor_ops): # sublevel
+                    if float(tensor_op) == fp16_swing:
+                        curr_layer = ApproxTypes.FP16
+                    else:
+                        curr_layer = ApproxTypes.FP32
+                    quant_time, quant_energy = quantize(curr_layer, prev_layer, tensor_ind, \
+                                layer_data)
+                    conv_time, conv_energy = run_gpu_simulation(curr_layer, layer_name, tensor_ind)
+                    total_time += conv_time
+                    total_energy += conv_energy
+
+                aggregate_results["Time"][config_ind] += total_time
+                aggregate_results["Energy"][config_ind] += total_energy 
+            print("DONE WITH LAYER")
+            prev_layer = curr_layer
 
 if __name__ == "__main__":
     '''