llvm / predtuner · Commit 95f13753
Authored 4 years ago by Yifan Zhao
Fixed float64 and dump config bugs
Parent: 789d5da1
Showing 3 changed files with 11 additions and 11 deletions:

predtuner/approxapp.py    +5 −5
predtuner/modeledapp.py   +5 −5
predtuner/torchapp.py     +1 −1
predtuner/approxapp.py  (+5 −5)

@@ -158,7 +158,7 @@ class ApproxTuner(Generic[T]):
         taken_idx = is_pareto_efficient(points, take_n=n)
         return [configs[i] for i in taken_idx]

-    def write_configs_to_dir(self, directory: PathLike):
+    def dump_configs(self, filepath: PathLike):
         import os
         from jsonpickle import encode
@@ -167,9 +167,10 @@ class ApproxTuner(Generic[T]):
             raise RuntimeError(
                 f"No tuning session has been run; call self.tune() first."
             )
-        directory = Path(directory)
-        os.makedirs(directory, exist_ok=True)
-        encode(self.kept_configs, directory)
+        filepath = Path(filepath)
+        os.makedirs(filepath.parent, exist_ok=True)
+        with filepath.open("w") as f:
+            f.write(encode(self.kept_configs, indent=2))

     def plot_configs(self) -> plt.Figure:
         if not self.tuned:
@@ -273,7 +274,6 @@ class TunerInterface(MeasurementInterface):
         cfg = desired_result.configuration.data
         qos, perf = self.app.measure_qos_perf(cfg, False, **self.app_kwargs)
-        qos, perf = float(qos), float(perf)
         # Print a debug message for each config in tuning and keep threshold
         self.print_debug_config(qos, perf)
         self.pbar.update()
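The rename from write_configs_to_dir to dump_configs matches the new behavior: configurations are serialized to a single JSON file, whereas the old code appears to have passed the directory straight into jsonpickle.encode and never wrote the encoded string to disk. A minimal standalone sketch of the new behavior, with a hypothetical configs list standing in for self.kept_configs:

    # Standalone sketch; `configs` is a hypothetical stand-in for self.kept_configs.
    import os
    from pathlib import Path

    from jsonpickle import encode


    def dump_configs(configs, filepath: os.PathLike):
        filepath = Path(filepath)
        # Create the parent directory of the target file, not the file path itself.
        os.makedirs(filepath.parent, exist_ok=True)
        with filepath.open("w") as f:
            # jsonpickle.encode returns a JSON string; indent=2 pretty-prints it.
            f.write(encode(configs, indent=2))


    dump_configs([{"conv1": "fp16"}], "tuned_configs/configs.json")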
predtuner/modeledapp.py  (+5 −5)

@@ -86,7 +86,7 @@ class ModeledApp(ApproxApp, abc.ABC):
                 f"(choose from {list(self._perf_models.keys())})"
             )
         perf = self._perf_models[perf_model].measure_perf(with_approxes)
-        assert qos is not None and perf is not None
+        assert type(qos) is float and type(perf) is float
         return qos, perf

     def get_tuner(self) -> "ApproxModeledTuner":
@@ -163,9 +163,9 @@ class LinearPerfModel(IPerfModel):
     def measure_perf(self, with_approxes: KnobsT) -> float:
         """
         We implement this using a weighted linear performance model.
         """
-        return sum(
+        return float(sum(
             self.cost_df.loc[layer, knob] for layer, knob in with_approxes.items()
-        )
+        ))

 class QoSModelP1(IQoSModel):
@@ -208,7 +208,7 @@ class QoSModelP1(IQoSModel):
             [self.delta_tensors[op][knob] for op, knob in with_approxes.items()]
         )
         ret = delta_tensors.sum() + self.baseline_tensor
-        return self.qos_metric(ret)
+        return float(self.qos_metric(ret))

     def _init(self):
         dt = self.delta_tensors
@@ -277,7 +277,7 @@ class QoSModelP2(IQoSModel):
         ) - self.baseline_qos
         ret = delta_qoses.sum() + self.baseline_qos
         assert not np.isnan(ret)
-        return ret
+        return float(ret)

     def _init(self):
         if self.storage and self.storage.is_file():
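The new assertion type(qos) is float and type(perf) is float is an exact type check: numpy scalars such as np.float64 subclass the builtin float (so isinstance passes) but fail the strict check, which is why each model now casts its result with float(). A small illustration of the distinction, assuming only numpy:

    # Why the strict type check needs the float() casts: numpy reductions return
    # np.float64, which subclasses float but is not the builtin float type.
    import numpy as np

    qos = np.array([0.91, 0.88]).sum()   # np.float64
    print(isinstance(qos, float))        # True  (subclass of float)
    print(type(qos) is float)            # False (not the builtin type)
    print(type(float(qos)) is float)     # True  after the explicit cast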
predtuner/torchapp.py  (+1 −1)

@@ -136,7 +136,7 @@ class TorchApp(ModeledApp, abc.ABC):
                 outputs = approxed(inputs)
                 qoses.append(self.tensor_to_qos(outputs, targets))
         time_end = time_ns() / (10 ** 9)
-        qos = self.combine_qos(np.array(qoses))
+        qos = float(self.combine_qos(np.array(qoses)))
         return qos, time_end - time_begin

     def __repr__(self) -> str:
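The torchapp.py change applies the same cast at the point where per-batch QoS values are combined, so downstream checks and config dumps see a plain Python float rather than an np.float64. A rough sketch of that measurement pattern; run_model, tensor_to_qos, and combine_qos here are hypothetical stand-ins for the app's own hooks:

    # Measurement-loop sketch after the fix, using placeholder callables.
    from time import time_ns

    import numpy as np


    def measure(batches, run_model, tensor_to_qos, combine_qos):
        time_begin = time_ns() / (10 ** 9)
        qoses = []
        for inputs, targets in batches:
            outputs = run_model(inputs)
            qoses.append(tensor_to_qos(outputs, targets))
        time_end = time_ns() / (10 ** 9)
        # Cast the combined np.float64 down to a builtin float before returning.
        qos = float(combine_qos(np.array(qoses)))
        return qos, time_end - time_begin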