diff --git a/llvm/projects/hpvm-tensor-rt/code_autogenerators/benchmark_testing_automator.py b/llvm/projects/hpvm-tensor-rt/code_autogenerators/benchmark_testing_automator.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f5c28032d721dcf1e77ab52407a165c0251deb2
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/code_autogenerators/benchmark_testing_automator.py
@@ -0,0 +1,67 @@
+# Automates online benchmark testing at different clock speeds.
+# Scans a builds directory for profiling binaries, runs each one,
+# and records the average time reported by each binary.
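+#
+# Example invocation (paths are hypothetical):
+#   python benchmark_testing_automator.py ./builds profiling_results.txt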
+import os
+import sys
+
+from subprocess import Popen, PIPE
+
+def run_benchmark(bin_path, should_print_bin_output):
+    print("RUNNING %s" % bin_path)
+    # Run the benchmark binary and capture its stdout
+    proc = Popen([bin_path], stdout = PIPE, universal_newlines = True)
+    proc_output = proc.communicate()[0]
+    assert proc.returncode == 0, "%s exited with code %d" % (bin_path, proc.returncode)
+
+    if should_print_bin_output:
+        print(proc_output)
+    print("FINISHED RUNNING %s" % bin_path)
+    return proc_output
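+
+# Example (binary name hypothetical):
+#   run_benchmark("./builds/lenet_profiling", True) -> full stdout of the binary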
+
+
+def parse_binary_output(proc_output):
+    # Extract the "Average time: ..." line from the benchmark's output
+    avg_time_key_ind = proc_output.find("Average time:")
+    assert avg_time_key_ind >= 0, "Output missing 'Average time:' line"
+    end_ind = proc_output.find("\n", avg_time_key_ind)
+    if end_ind == -1:
+        end_ind = len(proc_output)
+    avg_time = proc_output[avg_time_key_ind : end_ind]
+    print(avg_time)
+    return avg_time
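+
+# Example (assumed output format): if the binary prints a line
+# "Average time: 2.5", parse_binary_output returns "Average time: 2.5".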
+
+
+# Runs every binary in builds_dir whose name contains "profiling" and
+# writes one "<binary name>: <average time>" line per benchmark.
+# Could be changed to take a file listing the benchmarks to run.
+def run_benchmarks(builds_dir, output_filename, should_print_bin_output = True):
+    with open(output_filename, "w") as output_file:
+        for bin_name in os.listdir(builds_dir):
+            # Skip binaries that are not profiling builds
+            if bin_name.find("profiling") == -1:
+                continue
+            proc_output = run_benchmark(os.path.join(builds_dir, bin_name),
+                                        should_print_bin_output)
+            output_file.write("%s: %s\n" % (bin_name, parse_binary_output(proc_output)))
+            print(bin_name)
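+
+# Example output file contents (binary names hypothetical):
+#   lenet_profiling_full: Average time: 1.234
+#   lenet_profiling_half: Average time: 0.987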
+
+
+if __name__ == "__main__":
+    if len(sys.argv) != 3:
+        print("Usage: python %s <builds_dir> <output_file_name>" % sys.argv[0])
+        sys.exit(1)
+    print("Output file name: %s" % sys.argv[2])
+    run_benchmarks(sys.argv[1], sys.argv[2])