diff --git a/llvm/projects/soc_simulator/src/driver.py b/llvm/projects/soc_simulator/src/driver.py index d76dbb5150a0fa1585426e8ab818cdc3d1e00024..6f224c6c62aaed338afe9732c39484f98957a6a2 100644 --- a/llvm/projects/soc_simulator/src/driver.py +++ b/llvm/projects/soc_simulator/src/driver.py @@ -188,24 +188,31 @@ def run_gpu_simulation(curr_layer, layer_name, tensor_ind): return (conversion_time, conversion_energy) # Default dict of default dicts +results_time_key = "Time" +results_energy_key = "Energy" # [Time/Energy][number corresponding to order the layer config was read in] = time/energy aggregate_results = defaultdict(lambda: defaultdict(float)) +config_count = 0 + +def run_simulations(config_filename): + global config_count + + if not os.path.isfile(config_filename): + print("ERROR: %s was not found" % config_filename) + exit(1) -def run_simulations(config_filename, results_filename): config_file = open(config_filename, "r") - results_file = open(results_filename, "w") # each line = indepedent configuration # layers are separated by commas # tensor ops are separated by spaces - - for config_ind, config in enumerate(config_file): + for config in config_file: config_layers = config.strip().split(',') prev_layer = ApproxTypes.FP32 curr_layer = None - - aggregate_results["Time"][config_ind] = 0 - aggregate_results["Energy"][config_ind] = 0 + + aggregate_results[results_time_key][config_count] = 0 + aggregate_results[results_energy_key][config_count] = 0 for layer_ind, config_layer in enumerate(config_layers): # level layer_data = tensor_layers[layer_ind] # layer @@ -219,8 +226,8 @@ def run_simulations(config_filename, results_filename): # Compute time, energy = run_promise_simulation(config_layer, layer_data) - aggregate_results["Time"][config_ind] += time - aggregate_results["Energy"][config_ind] += energy + aggregate_results[results_time_key][config_count] += time + aggregate_results[results_energy_key][config_count] += energy else: print("Running layer %s on 
GPU" % layer_name) @@ -240,10 +247,44 @@ def run_simulations(config_filename, results_filename): total_time += conv_time total_time += conv_energy - aggregate_results["Time"][config_ind] += total_time - aggregate_results["Energy"][config_ind] += total_energy + aggregate_results[results_time_key][config_count] += total_time + aggregate_results[results_energy_key][config_count] += total_energy print("DONE WITH LAYER") prev_layer = curr_layer + config_count += 1 + + # config_count already equals the number of configurations processed + config_file.close() + + + def display_results(results_filename): + results_file = open(results_filename, "w") + attributes_to_print = [results_time_key, results_energy_key] + + for attribute in attributes_to_print: + attribute_data = [] # Store as list and then write to file once bc syscalls are slow + attribute_data.append(attribute) + + attribute_data.append("Configuration,Total,Improvement") # header + + baseline_val = aggregate_results[attribute][0] + best_config = None + best_result = None + + for config_ind in range(config_count): + config_data = ["c%d" % config_ind] + time_or_energy_val = aggregate_results[attribute][config_ind] + config_data.append(str(time_or_energy_val)) + config_data.append(str(baseline_val / (time_or_energy_val + 0.0001))) + attribute_data.append(','.join(config_data)) + + if best_result is None or time_or_energy_val < best_result: + best_result = time_or_energy_val + best_config = config_ind + attribute_data.append("c%d,%s" % (best_config, aggregate_results[attribute][best_config])) + attribute_data.append("") # To add an additional new line + results_file.write('\n'.join(attribute_data)) + results_file.close() if __name__ == "__main__": ''' @@ -253,4 +294,5 @@ if __name__ == "__main__": ''' parse_tensor_layer_file("/home/nvidia/Gitlab/hpvm/llvm/projects/hpvm-tensor-rt/build_mobilenet/mobilenet_layers.txt") 
parse_tensor_table("/home/nvidia/Gitlab/hpvm/llvm/projects/hpvm-tensor-rt/build_pldi/mobilenet_results/mobilenet_tensors.txt") - run_simulations("/home/nvidia/Gitlab/hpvm/llvm/projects/hpvm-tensor-rt/build_mobilenet/mobilenet_conf2.txt", "blah") + run_simulations("/home/nvidia/Gitlab/hpvm/llvm/projects/hpvm-tensor-rt/build_mobilenet/mobilenet_conf2.txt") + display_results("blah.txt")