llvm / hpvm-release · Commit 516e468a

Reorganized code and added pydoc

Authored 5 years ago by Elizabeth
Parent: dbbe65ef

Showing 1 changed file: llvm/projects/hpvm-tensor-rt/build_pldi/table_generator.py (80 additions, 79 deletions)
@@ -48,24 +48,11 @@ class TableGenerator:
         3. Writes the internal table to <network_name>_tensors.txt file and uses the
         <network_name>_ops.txt file as a guideline in terms of row order
         '''
-        self.__run_inputted_binaries()
+        #self.__run_inputted_binaries()
         self.__build_internal_table()
         self.__output_table_to_file()
 
-    def __should_execute_file(self, file_path):
-        '''
-        Checks if the file at the given file path is a binary that should be run
-        by the profiler. Must exist, be a binary, and must start with the network
-        name as per our naming standards.
-
-        Args:
-            file_path: Path of the file to check
-        '''
-        return os.path.isfile(file_path) and os.access(file_path, os.X_OK) and \
-                file_path.find(self.__network_name) != -1
-
     def __run_inputted_binaries(self):
         '''
         Invokes the profiler to run all appropriate binaries (must start with the network
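For reference, the executable filter that this hunk relocates boils down to three checks: the path is a regular file, it is executable, and it contains the network name. A minimal standalone sketch of the same idea (the function name and the example path are invented for illustration):

    import os

    def is_profilable_binary(file_path, network_name):
        """Same three checks as __should_execute_file, written as a free function."""
        return (os.path.isfile(file_path)
                and os.access(file_path, os.X_OK)        # executable bit set
                and file_path.find(network_name) != -1)  # name follows the naming standard

    # e.g. is_profilable_binary("./binaries/mobilenet_fp16", "mobilenet")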
@@ -99,63 +86,6 @@ class TableGenerator:
                         output_file]).communicate()
 
-    def __get_approximation_type(self, results_filename):
-        '''
-        Parses a given results filename for the approximation type.
-        Format assumption: <network_name>_<approx_type>.txt
-
-        Args:
-            results_filename: Name of results file
-
-        Returns:
-            the approximation technique (ex: fp16)
-        '''
-        approx_type_start_ind = results_filename.find(self.__network_name) \
-                + len(self.__network_name) + 1 # + 1 to account for _ delimiter
-        approx_type_end_ind = results_filename.find(".txt")
-        return results_filename[approx_type_start_ind : approx_type_end_ind]
-
-    def __parse_tensor_operation_line(self, tensor_op_line):
-        '''
-        Parses a tensor operation line (within a output file from the offline
-        profiler for the operation name, the total time used, and the total
-        energy used
-
-        Args:
-            tensor_op_line: Tensor operation line from output file
-
-        Returns:
-            operation name
-            total time used
-            total energy used
-        '''
-        line_as_list = tensor_op_line.split(",")
-        return line_as_list[0], line_as_list[1], line_as_list[2]
-
-    def __build_nested_default_dict(self):
-        '''
-        Builds a nested default dictionary with an arbitrary number of levels
-        '''
-        return defaultdict(self.__build_nested_default_dict)
-
-    def __get_original_operation_name(self, op_name):
-        '''
-        Parses an operation name containing _<conversion type> for the original
-        operation name.
-        Format assumption: <original_op_name>_<conversion type>
-
-        Args:
-            op_name: Name of the operation
-
-        Returns:
-            the original operation name
-        '''
-        underscore_ind = op_name.find("_")
-        return op_name[ : underscore_ind], op_name[underscore_ind + 1 : ]
-
     def __build_internal_table(self):
         '''
         Iterates through each results file generated by the runs of the offline
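The four helpers removed here reappear near the end of the file (see the final hunk); they encode the project's naming conventions with plain string slicing and a recursive defaultdict. A standalone sketch of those conventions on invented inputs:

    from collections import defaultdict

    network_name = "mobilenet"                 # invented example name

    # <network_name>_<approx_type>.txt  ->  approximation type (cf. __get_approximation_type)
    fname = "mobilenet_fp16.txt"
    start = fname.find(network_name) + len(network_name) + 1   # +1 skips the '_' delimiter
    print(fname[start:fname.find(".txt")])                     # fp16

    # "<op_name>,<time>,<energy>"  ->  three strings (cf. __parse_tensor_operation_line)
    op_name, total_time, total_energy = "Conv1,12.5,3.1".split(",")

    # <original_op_name>_<conversion type>  ->  ("Conv1", "f2h") (cf. __get_original_operation_name)
    op = "Conv1_f2h"
    i = op.find("_")
    print(op[:i], op[i + 1:])

    # Arbitrarily nested dictionary, as built by __build_nested_default_dict
    def nested():
        return defaultdict(nested)

    table = nested()
    table["Conv1"]["fp16"]["time"] = total_time   # intermediate levels are created on demand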
@@ -180,13 +110,13 @@ class TableGenerator:
             # Get the original operation name (without the f2h/h2f) and the conversion type
             orig_op_name, conversion_type = self.__get_original_operation_name(op_name)
 
             if orig_op_name not in self.__table:
                 print("ERROR: Conversion found but original %s is not in the table" % orig_op_name)
                 exit(1)
 
             # Store f2h and h2f as columns in the row belonging to the original operation
             self.__table[orig_op_name][approx_type]["time"] = total_time
             self.__table[orig_op_name][approx_type]["energy"] = total_energy
 
         # Create a new row in the dictionary
         else:
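So a line for a converted operation does not get its own row; its time and energy land in the row of the original operation, keyed by the approximation type of the results file it came from. A toy illustration with made-up names and values:

    # Made-up values purely for illustration.
    op_name, total_time, total_energy = "Conv1_f2h", "0.8", "0.2"
    approx_type = "fp16"

    orig_op_name = op_name[:op_name.find("_")]      # "Conv1"
    table = {"Conv1": {"fp16": {}}}                 # row created earlier for the original op
    table[orig_op_name][approx_type]["time"] = total_time
    table[orig_op_name][approx_type]["energy"] = total_energy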
@@ -206,7 +136,7 @@ class TableGenerator:
         time and the energy
         '''
         table_file_path = os.path.join(self.__results_dir_path, self.__table_filename)
-        soc_operations_file_name = os.path.join("home", "nvidia", "soc_simulator", \
+        soc_operations_file_name = os.path.join("/", "home", "nvidia", "soc_simulator", \
                     "%s_cifar10" % self.__network_name, "%s_ops.txt" % self.__network_name)
         soc_operations_file = open(soc_operations_file_name, "r")
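The one-character change above matters because os.path.join only yields an absolute path if one of its components is absolute:

    import os

    print(os.path.join("home", "nvidia", "soc_simulator"))        # home/nvidia/soc_simulator (relative)
    print(os.path.join("/", "home", "nvidia", "soc_simulator"))   # /home/nvidia/soc_simulator (absolute)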
@@ -217,7 +147,7 @@ class TableGenerator:
         while curr_line:
             # First line is always the layers line (#layer_name,num_ops)
             layer_name, num_ops = self.__parse_layer_info_line(curr_line)
 
             # List of strings, where each string is a row corresponding to an operation
             # in the layer
             ops_in_layer = []
@@ -227,14 +157,14 @@ class TableGenerator:
             # CRITICAL ASSUMPTION: All operations within a layer have the same # columns
             # or everything breaks bc the header is per layer, not per operation
             header = ["**", layer_name, str(num_ops)]
 
             # Iterate through all operations within the layer
             for op_in_layer_count in range(num_ops):
                 # Contains the operation name
                 curr_line = soc_operations_file.readline().strip()
 
                 # Stores a list of elements that will be joined to make up a row
                 curr_op = [curr_line]
                 operation_data = self.__table[curr_line]
 
                 # Iterate through time/energy data for each approximation type corresponding
@@ -247,7 +177,7 @@ class TableGenerator:
                     curr_op.append(op_energy)
 
                     if op_in_layer_count == 0:
                         header.append("%s_time" % approx_type)
                         header.append("%s_energy" % approx_type)
 
                 ops_in_layer.append(' '.join(curr_op))
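Putting the loop above together: the first operation in a layer also extends the header with one <approx>_time / <approx>_energy column pair per approximation type, and every operation becomes a space-joined row. A small sketch with invented names and numbers:

    # Invented layer with one op and two approximation settings, for illustration only.
    layer_name, num_ops = "Conv1_layer", 1
    header = ["**", layer_name, str(num_ops)]

    curr_op = ["Conv1"]
    for approx_type, (op_time, op_energy) in [("fp16", ("0.80", "0.20")), ("fp32", ("1.60", "0.45"))]:
        curr_op.append(op_time)
        curr_op.append(op_energy)
        header.append("%s_time" % approx_type)
        header.append("%s_energy" % approx_type)

    print(' '.join(header))    # ** Conv1_layer 1 fp16_time fp16_energy fp32_time fp32_energy
    print(' '.join(curr_op))   # Conv1 0.80 0.20 1.60 0.45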
@@ -258,6 +188,77 @@ class TableGenerator:
             curr_line = soc_operations_file.readline().strip()
 
+    def __should_execute_file(self, file_path):
+        '''
+        Checks if the file at the given file path is a binary that should be run
+        by the profiler. Must exist, be a binary, and must start with the network
+        name as per our naming standards.
+
+        Args:
+            file_path: Path of the file to check
+        '''
+        return os.path.isfile(file_path) and os.access(file_path, os.X_OK) and \
+                file_path.find(self.__network_name) != -1
+
+    def __get_approximation_type(self, results_filename):
+        '''
+        Parses a given results filename for the approximation type.
+        Format assumption: <network_name>_<approx_type>.txt
+
+        Args:
+            results_filename: Name of results file
+
+        Returns:
+            the approximation technique (ex: fp16)
+        '''
+        approx_type_start_ind = results_filename.find(self.__network_name) \
+                + len(self.__network_name) + 1 # + 1 to account for _ delimiter
+        approx_type_end_ind = results_filename.find(".txt")
+        return results_filename[approx_type_start_ind : approx_type_end_ind]
+
+    def __parse_tensor_operation_line(self, tensor_op_line):
+        '''
+        Parses a tensor operation line (within a output file from the offline
+        profiler for the operation name, the total time used, and the total
+        energy used
+
+        Args:
+            tensor_op_line: Tensor operation line from output file
+
+        Returns:
+            operation name
+            total time used
+            total energy used
+        '''
+        line_as_list = tensor_op_line.split(",")
+        return line_as_list[0], line_as_list[1], line_as_list[2]
+
+    def __build_nested_default_dict(self):
+        '''
+        Builds a nested default dictionary with an arbitrary number of levels
+        '''
+        return defaultdict(self.__build_nested_default_dict)
+
+    def __get_original_operation_name(self, op_name):
+        '''
+        Parses an operation name containing _<conversion type> for the original
+        operation name.
+        Format assumption: <original_op_name>_<conversion type>
+
+        Args:
+            op_name: Name of the operation
+
+        Returns:
+            the original operation name
+        '''
+        underscore_ind = op_name.find("_")
+        return op_name[ : underscore_ind], op_name[underscore_ind + 1 : ]
+
     def __parse_layer_info_line(self, layer_info_line): #layer_name,num_ops
         '''
         Parses a layer header (from the original ops.txt file) into the layer name
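The body of __parse_layer_info_line is cut off by the diff, but its docstring and the inline #layer_name,num_ops note pin down the layer-header format in <network_name>_ops.txt. A plausible standalone sketch of parsing that format (an assumption for illustration, not the author's implementation, which is not visible here):

    # Hypothetical header line from a <network_name>_ops.txt file.
    line = "#Conv1_layer,2"
    layer_name, num_ops = line.lstrip("#").split(",")
    num_ops = int(num_ops)
    print(layer_name, num_ops)   # Conv1_layer 2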