From 830e83d01354ad52f01a1514d73cafadaf95bf80 Mon Sep 17 00:00:00 2001
From: Hashim Sharif <hsharif3@tyler.cs.illinois.edu>
Date: Sat, 12 Oct 2019 18:27:30 -0500
Subject: [PATCH] Using 2 runs in the tuning phase and min_accuracy as
 threshold

---
 .../autotuner/measure_confidence2.py          | 18 ++++++++++++-
 .../opentuner/autotuner/pareto_curve.py       | 19 +++++++++-----
 .../opentuner/autotuner/promise_tuner3.py     | 26 +++++++++++++------
 3 files changed, 47 insertions(+), 16 deletions(-)

diff --git a/llvm/projects/hpvm-tensor-rt/opentuner/autotuner/measure_confidence2.py b/llvm/projects/hpvm-tensor-rt/opentuner/autotuner/measure_confidence2.py
index b38efa9c82..9ff74128f4 100644
--- a/llvm/projects/hpvm-tensor-rt/opentuner/autotuner/measure_confidence2.py
+++ b/llvm/projects/hpvm-tensor-rt/opentuner/autotuner/measure_confidence2.py
@@ -83,7 +83,23 @@ def getConfidence(accuracy_outfile, acc_threshold):
   
   return conf, avg_acc
 
-  
+
+
+def getMinAccuracy(accuracy_outfile):
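+  """Return the minimum accuracy across the runs recorded in accuracy_outfile."""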
+
+  f = open(accuracy_outfile, "r")
+
+  acc_list = []
+  for x in f:
+    acc = float(x.strip())
+    acc_list.append(acc)
+
+  f.close()
+
+  return min(acc_list)
+
   
 # NOTE: invokes the binary with the number of runs
 def do_multiple_runs2(binary_name, accuracy_threshold, confidence_threshold):
diff --git a/llvm/projects/hpvm-tensor-rt/opentuner/autotuner/pareto_curve.py b/llvm/projects/hpvm-tensor-rt/opentuner/autotuner/pareto_curve.py
index 0fda8f742c..db8233994b 100644
--- a/llvm/projects/hpvm-tensor-rt/opentuner/autotuner/pareto_curve.py
+++ b/llvm/projects/hpvm-tensor-rt/opentuner/autotuner/pareto_curve.py
@@ -5,6 +5,9 @@ import shutil
 from measure_confidence2 import getConfigCost
 
 
+AL_THRESHOLD = 0.1
+  
+
 class Config:
   def __init__(self):
     self.avg_accuracy = 0
@@ -69,10 +72,6 @@ def loadConfigData(result_dir, layer_costs, baseline_accuracy):
 
     
 
-AL_THRESHOLD = 0.1
-SPEEDUP_BAND_SIZE = 0.3
-ENERGY_BAND_SIZE = 10
-
 
 class Configuration:
     def __init__(self, name, speedup, energy, accuracy, accuracy_loss):
@@ -223,11 +222,17 @@ def findParetoConfigs(base_dir, layer_costs, accuracy):
     config = Configuration(config.fname , config.speedup, 100, config.avg_accuracy, config.avg_loss)
     config_list.append(config)
 
+  
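+  # Band sizes (margins) passed to compute_pareto_points_with_margin below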
+  SPEEDUP_BAND_SIZE = 1.0
+  ENERGY_BAND_SIZE = 10
 
-  if len(config_list) < 30:
-    SPEEDUP_BAND_SIZE = 1.2
-    
+  # Skip Pareto selection when there are fewer than 50 configurations
+  if len(config_list) < 50:
+    SPEEDUP_BAND_SIZE = 100 # Include all in Pareto Frontier
     
+
+  print ("*SPEEDUP_BAND_SIZE = ", SPEEDUP_BAND_SIZE)
+  
   ASC, AEC = compute_pareto_points_with_margin(config_list, SPEEDUP_BAND_SIZE, ENERGY_BAND_SIZE)
 
   
diff --git a/llvm/projects/hpvm-tensor-rt/opentuner/autotuner/promise_tuner3.py b/llvm/projects/hpvm-tensor-rt/opentuner/autotuner/promise_tuner3.py
index 87ed35bbc4..04ce0d6158 100644
--- a/llvm/projects/hpvm-tensor-rt/opentuner/autotuner/promise_tuner3.py
+++ b/llvm/projects/hpvm-tensor-rt/opentuner/autotuner/promise_tuner3.py
@@ -22,6 +22,7 @@ import threading
 import psutil
 
 from measure_confidence2 import dump_promise_confidence_files3
+from measure_confidence2 import getConfidence, getMinAccuracy
 from select_top_results import select_top_results
 from time import sleep
 from pareto_curve import findParetoConfigs
@@ -169,25 +170,36 @@ class ClangFlagsTuner(MeasurementInterface):
     createFlagsFile("promise_flags", cfg)
     
     run_cmd = binary_name
-    print "binary_name = ", run_cmd
+    print "\nbinary_name = ", run_cmd
     #run_result_call_program = self.call_program(run_cmd)
-    #print "returned \n\n"
 
+
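+    # Invoke the binary with the run count; it is expected to write each run's accuracy to run_accuracies.txt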
+    total_runs = 2
     FNULL = open(os.devnull, 'wb')
-    p = subprocess.Popen(run_cmd, stdout = FNULL)
+    #p = subprocess.Popen(run_cmd, stdout = FNULL)
+    p = subprocess.Popen([run_cmd, str(total_runs)], stdout = FNULL)
     p.wait()
 
        
     accuracy = getAccuracy("final_accuracy")
+
+    # Get confidence and average accuracy across the multiple runs
+    conf, avg_acc = getConfidence("run_accuracies.txt", accuracy_threshold)  
+    
     # getConfigCost returns the cost associated with the selected configuration
     total_comps = getConfigCost(cfg)
    
     
     Result = opentuner.resultsdb.models.Result()
     Result.time = total_comps
-    Result.accuracy = accuracy
-
-    if accuracy > accuracy_threshold:
+    #Result.accuracy = accuracy
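+    # Use the minimum accuracy across runs (instead of the single final_accuracy value)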
+    min_accuracy = getMinAccuracy("run_accuracies.txt")
+    print ("min_accuracy = ", min_accuracy)
+    Result.accuracy = min_accuracy
+    
+    # Only accept the configuration when conf == 100
+    if min_accuracy > accuracy_threshold and conf == 100:
+      print ("conf = ", conf, " avg_acc = ", avg_acc)
       #if accuracy not in evaluated_configs:
       config_tuple = (total_comps, accuracy, cfg)
       self.configs_list.append(config_tuple)
@@ -199,8 +211,6 @@ class ClangFlagsTuner(MeasurementInterface):
       f_acc.close()
                    
       
-    print "done with one run"
-
     test_id += 1
     
     return Result
-- 
GitLab