llvm / hpvm-release · Commit 0b688f37

Starting on Json file generation

Authored 4 years ago by Hashim Sharif
Parent: 1c081820

Showing 2 changed files with 99 additions and 11 deletions:

  hpvm/projects/keras/frontend/approxhpvm_translator.py   61 additions, 11 deletions
  hpvm/projects/keras/frontend/knobs.py                    38 additions, 0 deletions
hpvm/projects/keras/frontend/approxhpvm_translator.py  (+61, −11)
@@ -5,6 +5,7 @@ from frontend.promise_translator import PromiseRtTranslator
 from frontend.hpvm_dfg_translator import HPVMTranslator
 from frontend.weight_utils import dumpLabels, dumpData, dumpConvWeights, dumpFcWeights, dumpFcBias
 from frontend.utils import *
+from frontend.knobs import *
 import keras
 import os
@@ -203,11 +204,15 @@ class TensorRtTranslator:
         self.weight_str = ""
         self.program_str = ""
         self.input_str = ""
-        self.json_str = ""      # Used for Json gen
-        self.cur_height = 32    # Used for Json gen
-        self.cur_width = 32     # Used for Json egen
-        self.op_count = 1       # Used for Json gen
         self.filter_names = {}
+        # Used for Json gen
+        self.json_str = ""
+        self.knobs_str = ""
+        self.cur_height = 32
+        self.cur_width = 32
+        self.op_count = 1
@@ -215,7 +220,7 @@ class TensorRtTranslator:
         self.cur_height = data.shape[2]
         self.cur_width = data.shape[3]
-        DEBUG ("cur_height = ", self.cur_height, " cur_width = ", self.cur_width, "\n")
+        DEBUG ("cur_height = ", self.cur_height, " cur_width = ", self.cur_width, ", \n")

     def addConvOverheads(self, weights, padding, strides):
@@ -228,7 +233,8 @@ class TensorRtTranslator:
         flops = H_d * W_d * K_d
         DEBUG ("conv_flops = ", flops)
-        self.json_str += "op" + str(self.op_count) + " : " + str(flops) + "\n"
+        self.json_str += "convolution_" + str(self.op_count) + " : " + str(flops) + ", \n"
+        self.knobs_str += "convolution_" + str(self.op_count) + " : [" + conv_knobs + "], \n"
         self.op_count += 1
         self.cur_height = self.cur_height / strides[0]
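To make the new bookkeeping concrete, here is a minimal sketch (not part of the commit) of the two strings this hunk appends for one convolution; the op count, FLOP value, and the truncated conv_knobs list are illustrative stand-ins:

    # Illustrative only: mimic the two appended entries for a single conv layer.
    conv_knobs = "12, 151, 152"          # truncated stand-in for the list in frontend/knobs.py
    op_count = 1
    flops = 884736                       # assumed value of H_d * W_d * K_d

    json_str = "convolution_" + str(op_count) + " : " + str(flops) + ", \n"
    knobs_str = "convolution_" + str(op_count) + " : [" + conv_knobs + "], \n"

    print(json_str)    # convolution_1 : 884736,
    print(knobs_str)   # convolution_1 : [12, 151, 152],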
@@ -242,7 +248,8 @@ class TensorRtTranslator:
         flops = weights.shape[0] * weights.shape[1]
         DEBUG ("dense_flops = ", flops)
-        self.json_str += "op" + str(self.op_count) + " : " + str(flops) + "\n"
+        self.json_str += "linear_" + str(self.op_count) + " : " + str(flops) + "\n"
+        self.knobs_str += "linear_" + str(self.op_count) + " : [" + baseline_knobs + "], \n"
         self.op_count += 1
         self.cur_height = 1
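The Dense path mirrors the convolution case but tags the entry as linear_ and attaches only baseline_knobs. A rough sketch with assumed weight dimensions:

    # Illustrative only: linear/dense bookkeeping with an assumed (4096, 10) weight matrix.
    baseline_knobs = "12"                # mirrors frontend/knobs.py
    op_count = 2
    weight_shape = (4096, 10)            # hypothetical Dense weight shape
    flops = weight_shape[0] * weight_shape[1]

    json_str = "linear_" + str(op_count) + " : " + str(flops) + "\n"
    knobs_str = "linear_" + str(op_count) + " : [" + baseline_knobs + "], \n"
    print(json_str)    # linear_2 : 40960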
@@ -258,6 +265,14 @@ class TensorRtTranslator:
         DEBUG ("cur_height = ", self.cur_height, " cur_width = ", self.cur_width, "\n")

+    def addBaselineKnob(self, op_name):
+        self.json_str += op_name + "_" + str(self.op_count) + " : 0, \n"
+        self.knobs_str += op_name + "_" + str(self.op_count) + " : [" + baseline_knobs + "], \n"
+        self.op_count += 1
+
     def getWeightStr(self):
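The new addBaselineKnob helper factors out the pattern the later hunks switch to: a zero-cost op entry plus the baseline knob list. A self-contained sketch of its behaviour, with the class stripped down to the relevant fields (names and values here are assumptions, not the commit's code):

    baseline_knobs = "12"   # as defined in the new frontend/knobs.py

    class SketchTranslator:
        def __init__(self):
            self.json_str = ""
            self.knobs_str = ""
            self.op_count = 1

        def addBaselineKnob(self, op_name):
            # Same bookkeeping as the added method: op entry with zero cost, plus its knob list.
            self.json_str += op_name + "_" + str(self.op_count) + " : 0, \n"
            self.knobs_str += op_name + "_" + str(self.op_count) + " : [" + baseline_knobs + "], \n"
            self.op_count += 1

    t = SketchTranslator()
    t.addBaselineKnob("batchnorm")
    t.addBaselineKnob("add")
    print(t.json_str)    # batchnorm_1 : 0,  then  add_2 : 0,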
@@ -451,7 +466,12 @@ class TensorRtTranslator:
         if layer_type == "Conv2D":
             self.addConvOverheads(weights, padding, strides)

+        elif layer_type == "DepthwiseConv2D":
+            #self.json_str += "depthwise_convolution_" + str(self.op_count) + " : 0, \n"
+            #self.op_count += 1
+            self.addBaselineKnob("depthwise_convolution")
+
         if layer_type == "Dense":
             input_var_name = self.getSingleInputName(cur_node)
@@ -481,13 +501,21 @@ class TensorRtTranslator:
             # NOTE: Changing output variable
             out_var_name1 = out_var_name2
+            #self.json_str += "add_" + str(self.op_count) + " : 0, \n"
+            # self.op_count += 1
+            self.addBaselineKnob("add")

         if layer_type == "Activation":
             input_var_name = self.getSingleInputName(cur_node)
             inst_str = genActivationCallStr(input_var_name, out_var_name1, cur_node.activation_type)
             self.program_str += inst_str
+            #self.json_str += cur_node.activation_type + "_" + str(self.op_count) + " : 0, \n"
+            #self.op_count += 1
+            self.addBaselineKnob(cur_node.activation_type)

         if self.hasActivation(cur_node) and layer_type != "Activation":
             activation_type = cur_node.activation_type
@@ -499,7 +527,11 @@ class TensorRtTranslator:
             if activation_type == "softmax":
                 print ("Softmax canNOT be part of Dense/Conv Op. Insert: Activation('softmax');")
                 sys.exit(0)
+            #self.json_str += activation_type + "_" + str(self.op_count) + " : 0, \n"
+            #self.op_count += 1
+            self.addBaselineKnob(activation_type)

         if layer_type == "BatchNormalization":
             input_var_name = self.getSingleInputName(cur_node)
@@ -514,6 +546,11 @@ class TensorRtTranslator:
             inst_str += "); \n"
             self.program_str += inst_str
+            #self.json_str += "batchnorm_" + str(self.op_count) + " : 0, \n"
+            #self.op_count += 1
+            self.addBaselineKnob("batchnorm")

         if layer_type == "Add":
@@ -523,6 +560,10 @@ class TensorRtTranslator:
             inst_str += "tensorAdd(" + input_vars[0] + ", " + input_vars[1] + "); \n"
             self.program_str += inst_str
+            #self.json_str += "add_" + str(self.op_count) + " : 0, \n"
+            #self.op_count += 1
+            self.addBaselineKnob("add")

         if layer_type == "MaxPooling2D" or layer_type == "AveragePooling2D":
             input_var_name = self.getSingleInputName(cur_node)
@@ -534,8 +575,16 @@ class TensorRtTranslator:
             pool_type = 0
             if layer_type == "MaxPooling2D":
                 pool_type = "0"
+                #self.json_str += "maxpool_" + str(self.op_count) + " : 0, \n"
+                #self.op_count += 1
+                self.addBaselineKnob("maxpool")
+
             if layer_type == "AveragePooling2D":
                 pool_type = "1"
+                #self.json_str += "avgpool_" + str(self.op_count) + " : 0, \n"
+                #self.op_count += 1
+                self.addBaselineKnob("avgpool")

             # tensorPooling(input, pool_type, pool_h, pool_w, v_pad, h_pad, v_stride, h_stride)
             inst_str = "void* " + out_var_name1 + " = "
@@ -901,6 +950,7 @@ class TensorRtTranslator:
         f = open(dir_prefix + "/tuner.json", "w+")
         f.write(self.json_str)
+        f.write(self.knobs_str)
         f.close()
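Since tuner.json is simply json_str followed by knobs_str, the written file for a small two-op model would contain lines roughly like the sketch below (operation names, counts, and the output path are illustrative; the format is still being fleshed out per the commit message):

    # Illustrative only: how the accumulated strings end up in the file.
    json_str  = "convolution_1 : 884736, \n" + "relu_2 : 0, \n"
    knobs_str = "convolution_1 : [12, 151, 152], \n" + "relu_2 : [12], \n"

    with open("/tmp/tuner.json", "w+") as f:   # the commit uses dir_prefix + "/tuner.json"
        f.write(json_str)
        f.write(knobs_str)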
hpvm/projects/keras/frontend/knobs.py  (new file, mode 100644, +38, −0)
+knobs_speedups = {}
+knobs_speedups[11] = 1
+knobs_speedups[12] = 1.5
+knobs_speedups[151] = 3
+knobs_speedups[152] = 3
+knobs_speedups[153] = 3
+knobs_speedups[154] = 3
+knobs_speedups[155] = 2.25
+knobs_speedups[156] = 2.25
+knobs_speedups[157] = 2.25
+knobs_speedups[158] = 2.25
+knobs_speedups[159] = 2.25
+knobs_speedups[160] = 2.25
+knobs_speedups[161] = 2
+knobs_speedups[162] = 2
+knobs_speedups[163] = 2
+knobs_speedups[164] = 2
+knobs_speedups[165] = 2
+knobs_speedups[166] = 2
+knobs_speedups[167] = 2
+knobs_speedups[168] = 2
+knobs_speedups[261] = 3
+knobs_speedups[262] = 3
+knobs_speedups[263] = 2.25
+knobs_speedups[264] = 2.25
+knobs_speedups[265] = 2.25
+knobs_speedups[266] = 2
+knobs_speedups[267] = 2
+knobs_speedups[268] = 2
+knobs_speedups[269] = 2
+
+conv_knobs = "12, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 261, 262, 263, 264, 265, 266, 267, 268, 269"
+
+baseline_knobs = "12"