From cafb70bf743cd165f26682068ace6c1b9ab9432c Mon Sep 17 00:00:00 2001 From: Hashim Sharif <hsharif3@miranda.cs.illinois.edu> Date: Fri, 12 Mar 2021 22:58:53 -0600 Subject: [PATCH] Adding successful automated tests for HPVM build --- .../keras/frontend/promise_translator.py | 2 +- .../projects/keras/scripts/test_benchmarks.py | 92 ++++++++++++++++--- hpvm/projects/keras/src/Benchmark.py | 5 + 3 files changed, 86 insertions(+), 13 deletions(-) diff --git a/hpvm/projects/keras/frontend/promise_translator.py b/hpvm/projects/keras/frontend/promise_translator.py index ee33b1c18a..015c1c562a 100644 --- a/hpvm/projects/keras/frontend/promise_translator.py +++ b/hpvm/projects/keras/frontend/promise_translator.py @@ -1157,7 +1157,7 @@ class PromiseRtTranslator: def translate(self, model, weights_dir, x_test): - print ("\n\n\n **** PromiseRT Translator ****** \n\n\n") + #print ("\n\n\n **** PromiseRT Translator ****** \n\n\n") root_node = self.dfg.root_node state = State() diff --git a/hpvm/projects/keras/scripts/test_benchmarks.py b/hpvm/projects/keras/scripts/test_benchmarks.py index d68a61264a..201dc5db47 100644 --- a/hpvm/projects/keras/scripts/test_benchmarks.py +++ b/hpvm/projects/keras/scripts/test_benchmarks.py @@ -1,6 +1,6 @@ - +import os import subprocess class Benchmark: @@ -16,14 +16,14 @@ class Benchmark: return self.binary_path - def readAccuracy(self): + def readAccuracy(self, accuracy_file): - f = open("final_accuracy", "r") # File with final benchmark accuracy + f = open(accuracy_file, "r") # File with final benchmark accuracy acc_str = f.read() return float(acc_str) - def run(self): + def runKeras(self): # Test Bechmark accuracy with pretrained weights (hpvm_relaod) run_cmd = "python " + self.binary_path + " hpvm_reload " @@ -32,7 +32,7 @@ class Benchmark: except: return False - accuracy = self.readAccuracy() + accuracy = self.readAccuracy("final_accuracy") print ("accuracy = ", accuracy, " test_accuracy = ", self.test_accuracy) @@ -45,7 +45,43 @@ class 
Benchmark: test_success = False return test_success - + + + def runHPVM(self): + + # Test Benchmark accuracy with pretrained weights (hpvm_reload) + run_cmd = "python " + self.binary_path + " hpvm_reload frontend compile" + try: + subprocess.call(run_cmd, shell=True) + except: + return False + + working_dir = open("working_dir.txt").read() + cur_dir = os.getcwd() + + os.chdir(working_dir) + binary_path = "./HPVM_binary" + + try: + subprocess.call(binary_path, shell=True) + except: + return False + + accuracy = self.readAccuracy("final_accuracy") + print ("accuracy = ", accuracy, " test_accuracy = ", self.test_accuracy) + + test_success = False + if (abs(self.test_accuracy - accuracy) < self.epsilon): + print ("Test for " + self. binary_path + " Passed ") + test_success = True + else: + print ("Test Failed for " + self.binary_path) + test_success = False + + os.chdir(cur_dir) # Change back to original working directory + + return test_success + class BenchmarkTests: @@ -55,6 +91,8 @@ class BenchmarkTests: self.benchmarks = [] self.passed_tests = [] self.failed_tests = [] + self.passed_hpvm_tests = [] + self.failed_hpvm_tests = [] def addBenchmark(self, benchmark): @@ -62,17 +100,29 @@ self.benchmarks.append(benchmark) - def runTests(self): + def runKerasTests(self): for benchmark in self.benchmarks: - test_success = benchmark.run() + test_success = benchmark.runKeras() if not test_success: self.failed_tests.append(benchmark.getPath()) else: self.passed_tests.append(benchmark.getPath()) - def printSummary(self): + + def runHPVMTests(self): + + for benchmark in self.benchmarks: + test_success = benchmark.runHPVM() + + if not test_success: + self.failed_hpvm_tests.append(benchmark.getPath()) + else: + self.passed_hpvm_tests.append(benchmark.getPath()) + + + def printKerasSummary(self): failed_test_count = len(self.failed_tests) passed_test_count = len(self.passed_tests) @@ -88,6 +138,22 @@ class BenchmarkTests: print ("Failed: " + failed_test) + 
def printHPVMSummary(self): + + failed_test_count = len(self.failed_hpvm_tests) + passed_test_count = len(self.passed_hpvm_tests) + + print (" Tests Passed = " + str(passed_test_count) + " / " + str(len(self.benchmarks))) + print ("******* Passed Tests ** \n") + for passed_test in self.passed_hpvm_tests: + print ("Passed: " + passed_test) + + print (" Tests Failed = " + str(failed_test_count) + " / " + str(len(self.benchmarks))) + print ("****** Failed Tests *** \n") + for failed_test in self.failed_hpvm_tests: + print ("Failed: " + failed_test) + + if __name__ == "__main__": @@ -109,7 +175,9 @@ if __name__ == "__main__": testMgr.addBenchmark(VGG16_cifar10) testMgr.addBenchmark(VGG16_cifar100) - testMgr.runTests() - testMgr.printSummary() - + testMgr.runKerasTests() + testMgr.printKerasSummary() + testMgr.runHPVMTests() + testMgr.printHPVMSummary() + diff --git a/hpvm/projects/keras/src/Benchmark.py b/hpvm/projects/keras/src/Benchmark.py index b3b85a4277..aaa7bdacc4 100644 --- a/hpvm/projects/keras/src/Benchmark.py +++ b/hpvm/projects/keras/src/Benchmark.py @@ -69,6 +69,11 @@ class Benchmark: print ("\n\n ERROR: HPVM Compilation Failed!! \n\n") sys.exit(1) + f = open("working_dir.txt", "w+") + f.write(working_dir) + f.close() + + def printUsage(self): -- GitLab