From e52cd1663c2109cc0229a2bfb03da864ef4c4c47 Mon Sep 17 00:00:00 2001
From: Elizabeth <hashim.sharif91@gmail.com>
Date: Sat, 16 Nov 2019 13:41:10 -0600
Subject: [PATCH] Fixed bug in generating output files

---
 .../src/driver_new_config_fp16_repl.py        | 22 +++++++++----------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/llvm/projects/soc_simulator/src/driver_new_config_fp16_repl.py b/llvm/projects/soc_simulator/src/driver_new_config_fp16_repl.py
index a7fca62564..f53573f7cd 100644
--- a/llvm/projects/soc_simulator/src/driver_new_config_fp16_repl.py
+++ b/llvm/projects/soc_simulator/src/driver_new_config_fp16_repl.py
@@ -227,16 +227,13 @@ class Driver:
                             curr_layer = Driver.PrecisionTypes.FP16
                         elif line.find("fp32") != -1:
                             curr_layer = Driver.PrecisionTypes.FP32
-
                         if precision_type == "perf" or precision_type == "samp": # Handle approx type
                             if precision_type == "perf": 
                                 approx_type = Driver.ApproxTypes.PERF
                             elif precision_type == "samp": 
                                 approx_type = Driver.ApproxTypes.SAMP
-                            if line.find("fp16") != -1:
-                                curr_layer = Driver.PrecisionTypes.FP16
-                            elif line.find("fp32") != -1:
-                                curr_layer = Driver.PrecisionTypes.FP32
+                            curr_layer = Driver.PrecisionTypes.FP16
+                        print(curr_layer, prev_layer)
                         quant_time, quant_energy = self.__quantize(precision_type, op_number, curr_layer, prev_layer, tensor_count, layer_table_data)
                         if quant_time != 0:
                             assert i == 2 #and layer_ind == 0
@@ -280,7 +277,6 @@ class Driver:
 
                     prev_layer = curr_layer
                     self.fp16_baseline.append((hardware, fp16_layer))
-                #print(self.fp16_baseline)
             self.__conf_results.append( (first_line, curr_conf_results) )
             line = config_file.readline().strip()
         config_file.close()
@@ -306,6 +302,7 @@ class Driver:
         else:
             lookup_key = "_" + precision_type + str(op_number) + "_"
 
+        print(curr_layer)
         if curr_layer == Driver.PrecisionTypes.FP32:
             time_key = "h2f%stime" % lookup_key
             energy_key = "h2f%senergy" % lookup_key
@@ -314,8 +311,8 @@ class Driver:
             energy_key = "f2h%senergy" % lookup_key
         time = tensor_op_row[time_key]
         energy = tensor_op_row[energy_key]
-        #print(time_key, energy_key)
-        #print("Quantization: (%f, %f)" % (time, energy))
+        print(time_key, energy_key)
+        print("Quantization: (%f, %f)" % (time, energy))
         return (time, energy)
 
 
@@ -440,6 +437,7 @@ class Driver:
 
                 for tensor_ind, (op_time, op_energy, tensor_op) in enumerate(layer):
                     if tensor_op.find("softmax") != -1:
+                        final_conf_layer.append((None, None, tensor_op))
                         continue
                     # layer name, operation name, val name
                     baseline_time = self.fp16_baseline[layer_ind][1][tensor_ind][0]
@@ -450,10 +448,10 @@ class Driver:
                     final_tensor_op = tensor_op
                     #print(op_time > baseline_time)
                     if op_time > baseline_time:
-                        print("**************** BIGGER ******************")
-                        print(curr_conf_name)
-                        print(baseline_time, baseline_energy, baseline_op, layer_ind)
-                        print(op_time, tensor_op, layer_ind)
+                        #print("**************** BIGGER ******************")
+                        #print(curr_conf_name)
+                        #print(baseline_time, baseline_energy, baseline_op, layer_ind)
+                        #print(op_time, tensor_op, layer_ind)
                         final_time += baseline_time
                         final_energy += baseline_energy
                         final_tensor_op = baseline_op
-- 
GitLab