llvm / hpvm-release · Commits
Commit 4c81ec1b, authored 5 years ago by Elizabeth

    Added code to format outputs nicely

Parent: f76659c5
Changes: 1 changed file, 29 additions, 8 deletions

llvm/projects/hpvm-tensor-rt/code_autogenerators/benchmark_testing_automator.py (+29, −8)
@@ -27,17 +27,37 @@ def parse_binary_output(proc_output):
     return avg_time

-# Input: a list of tuples of benchmark names
-# Can change to input a file containing benchmarks to run
-def run_benchmarks(builds_dir, output_filename, should_print_bin_output = True):
-    output_file = open(output_filename, "w")
+def get_sorted_binaries(builds_dir):
+    # dict of network names to lists of binaries
+    # list of binaries should be in sorted order (can do that when we run the benchmarks)
+    network_bins = defaultdict(list)
     for bin_name in os.listdir(builds_dir):
         if bin_name.find("profiling") == -1:
             continue
-        output_file.write("%s: %s\n" % (bin_name, \
-            parse_binary_output(run_benchmark(os.path.join(builds_dir, bin_name), \
-            should_print_bin_output))))
-        print(bin_name)
+        network_name = bin_name[ : bin_name.rfind("_")]
+        network_bins[network_name].append(bin_name)
+    return network_bins
+
+# Input: a list of tuples of benchmark names
+# Can change to input a file containing benchmarks to run
+def run_benchmarks(sorted_bins, builds_dir, output_filename, should_print_bin_output = False):
+    def get_knob_id(bin_name):
+        return int(bin_name[bin_name.rfind("_") + 1 : ])
+
+    output_file = open(output_filename, "w", buffering = 0)
+    for network_name in sorted_bins:
+        # Sort the binaries in order by knob id
+        sorted_bins[network_name].sort(key = get_knob_id)
+        print("--------------------------------------")
+        print(network_name)
+
+        # Go through all binaries
+        for bin_name in sorted_bins[network_name]:
+            print(bin_name)
+            output_file.write("%s results\n" % bin_name)
+            '''
+            output_file.write("%s: %s\n" % (bin_name, \
+                parse_binary_output(run_benchmark(os.path.join(builds_dir, bin_name), \
+                should_print_bin_output))))
+            print(bin_name)
+            '''
+        print("--------------------------------------\n")
     output_file.close()

@@ -48,4 +68,5 @@ if __name__ == "__main__":
         print("Usage: python online_benchmark_testing_automator.py <builds dir> <outputs_file_name>")
         exit(1)
     print("Output file name: %s" % sys.argv[2])
-    run_benchmarks(sys.argv[1], sys.argv[2])
+    sorted_bins = get_sorted_binaries(sys.argv[1])
+    run_benchmarks(sorted_bins, sys.argv[1], sys.argv[2])