llvm / predtuner · Commits · 7e4a290a

Commit 7e4a290a, authored 4 years ago by Yifan Zhao
Added result calibration
Parent: 82ca1653
Showing 4 changed files with 68 additions and 26 deletions:

predtuner/approxapp.py   (+53 −16)
predtuner/modeledapp.py  (+7 −7)
predtuner/torchapp.py    (+2 −2)
test/test_torchapp.py    (+6 −1)
predtuner/approxapp.py (+53 −16)
 import abc
 import logging
 from pathlib import Path
-from typing import Dict, List, NamedTuple, Optional, Tuple, Union
+from typing import Dict, Generic, List, NamedTuple, Optional, Tuple, TypeVar, Union

 import numpy as np
 import matplotlib.pyplot as plt
 from opentuner.measurement.interface import MeasurementInterface
+from opentuner.resultsdb.models import Configuration, Result
 from opentuner.search.manipulator import ConfigurationManipulator, EnumParameter

 from ._logging import override_opentuner_config
@@ -41,7 +42,7 @@ class ApproxApp(abc.ABC):
     @abc.abstractmethod
     def measure_qos_perf(
-        self, with_approxes: KnobsT, is_testset: bool
+        self, with_approxes: KnobsT, is_calibration: bool
     ) -> Tuple[float, float]:
         pass
@@ -58,17 +59,28 @@ class ApproxApp(abc.ABC):
         return ""


-class Config(NamedTuple):
-    qos: float
-    perf: float
-    knobs: KnobsT
+class Config:
+    def __init__(
+        self, qos: float, calib_qos: Optional[float], perf: float, knobs: KnobsT
+    ) -> None:
+        self.qos = qos
+        self.calib_qos = calib_qos
+        self.perf = perf
+        self.knobs = knobs
+
+
+T = TypeVar("T", bound=Config)


-class ApproxTuner:
+# ApproxTuner is generic over the type of the config
+# So that the user can use custom Config inherited from Config
+# (in which case they need to override `get_all_configs_from_db`).
+class ApproxTuner(Generic[T]):
     def __init__(self, app: ApproxApp) -> None:
         self.app = app
         self.all_configs = []
         self.kept_configs = []
+        self.best_configs = []
         self.keep_threshold = None
         self._db = None
@@ -81,7 +93,9 @@ class ApproxTuner:
         max_iter: int,
         qos_tuner_threshold: float,
         qos_keep_threshold: Optional[float] = None,
-        accuracy_convention: str = "absolute"  # TODO: this
+        accuracy_convention: str = "absolute",  # TODO: this
+        take_best_n: Optional[int] = None,
+        calibrate: bool = True
         # TODO: more parameters + opentuner param forwarding
     ) -> List[Config]:
         """
         Generate an optimal set of approximation configurations for the model.
         """
@@ -104,18 +118,41 @@ class ApproxTuner:
         # Parse and store results
         self._db = opentuner_args.database
-        self.all_configs = [
-            Config(result.accuracy, result.time, configuration.data)
-            for result, configuration in read_opentuner_db(self._db)
-        ]
+        self.keep_threshold = qos_keep_threshold
+        self.all_configs = list(
+            self.get_all_configs_from_db(read_opentuner_db(self._db))
+        )
         self.kept_configs = [
             cfg for cfg in self.all_configs if cfg.qos > qos_keep_threshold
         ]
-        self.keep_threshold = qos_keep_threshold
-        return self.kept_configs
+        self.best_configs = self.take_best_configs(self.kept_configs, take_best_n)
+        if calibrate:
+            self.calibrate_configs_(self.best_configs)
+        return self.best_configs
+
+    @classmethod
+    def get_all_configs_from_db(
+        cls, results_configs: List[Tuple[Result, Configuration]]
+    ) -> Tuple[T]:
+        return tuple(
+            Config(result.accuracy, None, result.time, configuration.data)
+            for result, configuration in results_configs
+        )
+
+    def calibrate_configs_(self, configs: List[T]):
+        from tqdm import tqdm
+
+        for cfg in tqdm(configs, leave=False):
+            cfg: T
+            if cfg.calib_qos is not None:
+                continue
+            cfg.calib_qos, _ = self.app.measure_qos_perf(cfg.knobs, True)
+            msg_logger.debug(f"Calibration: {cfg.qos} (mean) -> {cfg.calib_qos} (mean)")

-    def take_best_configs(self, n: Optional[int] = None) -> List[Config]:
-        configs = self.kept_configs
+    @staticmethod
+    def take_best_configs(configs: List[Config], n: Optional[int] = None) -> List[Config]:
         points = np.array([[c.perf, c.qos] for c in configs])
         taken_idx = is_pareto_efficient(points, take_n=n)
         return [configs[i] for i in taken_idx]
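The new comment on ApproxTuner invites users to subclass Config when a configuration needs to carry extra fields, overriding get_all_configs_from_db so tuning results are wrapped in the subclass. A minimal sketch of what that could look like; the ValidatedConfig and MyTuner names, the extra test_qos field, and the import line are hypothetical and assume Config, ApproxTuner, and KnobsT are importable from predtuner.approxapp as defined above:

    from typing import Optional, Tuple
    from predtuner.approxapp import ApproxTuner, Config, KnobsT  # assumed import path

    class ValidatedConfig(Config):
        # Hypothetical subclass carrying one extra field beyond qos/calib_qos/perf/knobs.
        def __init__(
            self,
            qos: float,
            calib_qos: Optional[float],
            perf: float,
            knobs: KnobsT,
            test_qos: Optional[float] = None,
        ) -> None:
            super().__init__(qos, calib_qos, perf, knobs)
            self.test_qos = test_qos  # e.g., QoS re-measured later on a held-out test set

    class MyTuner(ApproxTuner[ValidatedConfig]):
        @classmethod
        def get_all_configs_from_db(cls, results_configs) -> Tuple[ValidatedConfig, ...]:
            # Wrap each OpenTuner (Result, Configuration) pair in the custom config type.
            return tuple(
                ValidatedConfig(result.accuracy, None, result.time, configuration.data)
                for result, configuration in results_configs
            )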
predtuner/modeledapp.py (+7 −7)
@@ -30,7 +30,7 @@ class ModeledApp(ApproxApp, abc.ABC):
         pass

     def empirical_measure_qos_perf(
-        self, with_approxes: KnobsT, is_testset: bool
+        self, with_approxes: KnobsT, is_calibration: bool
     ) -> Tuple[float, float]:
         """
         Measures QoS and performance by running the program with approximation.
@@ -41,7 +41,7 @@ class ModeledApp(ApproxApp, abc.ABC):
     def measure_qos_perf(
         self,
         with_approxes: KnobsT,
-        is_testset: bool,
+        is_calibration: bool,
         qos_model: str = "none",
         perf_model: str = "none",
     ) -> Tuple[float, float]:
@@ -51,12 +51,12 @@ class ModeledApp(ApproxApp, abc.ABC):
         is "none", otherwise only use model indicated by model name.
         """
         # Testset measurement is always empirical
-        if is_testset:
-            return self.empirical_measure_qos_perf(with_approxes, is_testset)
+        if is_calibration:
+            return self.empirical_measure_qos_perf(with_approxes, is_calibration)
         # Run empirical measurement once if either perf or qos needs it
         qos, perf = None, None
         if qos_model == "none" or perf_model == "none":
-            qos, perf = self.empirical_measure_qos_perf(with_approxes, is_testset)
+            qos, perf = self.empirical_measure_qos_perf(with_approxes, is_calibration)
         # If we're asked to use some qos_model, overwrite `qos` value
         # even if we already get it from empirical measure (i.e., even if perf_model == "none")
         if qos_model != "none":
@@ -65,7 +65,7 @@ class ModeledApp(ApproxApp, abc.ABC):
                     f'"{qos_model}" is an invalid value for qos_model '
                     f"(choose from {list(self._qos_models.keys())})"
                 )
-            qos = self._qos_models[qos_model].measure_qos(with_approxes, is_testset)
+            qos = self._qos_models[qos_model].measure_qos(with_approxes, is_calibration)
         # Same goes for perf
         if perf_model != "none":
             if perf_model not in self._perf_models:
@@ -73,7 +73,7 @@ class ModeledApp(ApproxApp, abc.ABC):
                     f'"{perf_model}" is an invalid value for perf_model '
                     f"(choose from {list(self._perf_models.keys())})"
                 )
-            perf = self._perf_models[perf_model].measure_perf(with_approxes, is_testset)
+            perf = self._perf_models[perf_model].measure_perf(with_approxes, is_calibration)
         assert qos is not None and perf is not None
         return qos, perf
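For orientation, the dispatch this file retargets from is_testset to is_calibration can be summarized in a short usage sketch. The app object and the model names "qos_model_a" / "perf_model_b" below are placeholders for whatever a concrete ModeledApp subclass actually registers:

    # Tuning-time estimate: use registered predictive models where given;
    # anything left as "none" falls back to a single empirical run.
    qos_est, perf_est = app.measure_qos_perf(
        knobs, False, qos_model="qos_model_a", perf_model="perf_model_b"
    )

    # Calibration: is_calibration=True short-circuits to an empirical measurement
    # on the calibration set, regardless of which models are registered.
    qos_real, perf_real = app.measure_qos_perf(knobs, True)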
predtuner/torchapp.py (+2 −2)
@@ -121,11 +121,11 @@ class TorchApp(ModeledApp, abc.ABC):
     @torch.no_grad()
     def empirical_measure_qos_perf(
-        self, with_approxes: KnobsT, is_testset: bool
+        self, with_approxes: KnobsT, is_calibration: bool
     ) -> Tuple[float, float]:
         from time import time_ns

-        dataloader = self.test_loader if is_testset else self.val_loader
+        dataloader = self.test_loader if is_calibration else self.val_loader
         approxed = self._apply_knobs(with_approxes)
         qoses = []
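The only semantic change in TorchApp is which DataLoader backs each kind of measurement: calibration runs now read self.test_loader, while ordinary tuning-time runs read self.val_loader. The surrounding measurement pattern (time the batch loop with time_ns, collect per-batch QoS into qoses) can be sketched standalone; the qos_fn argument and the mean aggregation below are assumptions for illustration, not the exact code in this file:

    from time import time_ns
    import numpy as np

    def empirical_measure_sketch(model, dataloader, qos_fn):
        qoses = []
        start = time_ns()
        for inputs, targets in dataloader:
            outputs = model(inputs)
            qoses.append(qos_fn(outputs, targets))  # e.g., batch accuracy
        perf = (time_ns() - start) / 1e9  # wall-clock seconds as the perf proxy
        return float(np.mean(qoses)), perf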
test/test_torchapp.py (+6 −1)
@@ -57,8 +57,13 @@ class TestTorchAppTuner(TestTorchAppInit):
             self.assertTrue(conf.qos > self.baseline - 3.0)

     def test_pareto(self):
-        configs = self.tuner.take_best_configs()
+        configs = self.tuner.best_configs
         for c1 in configs:
             self.assertFalse(
                 any(c2.qos > c1.qos and c2.perf > c1.perf for c2 in configs)
             )
+
+    def test_dummy_calib(self):
+        configs = self.tuner.best_configs
+        for c in configs:
+            self.assertAlmostEqual(c.calib_qos, c.qos)
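test_dummy_calib relies on calibration having run during tuning (calibrate defaults to True), so every Pareto-front config already has calib_qos populated; in this test the calibration measurement is expected to reproduce the tuning-time QoS, hence assertAlmostEqual. If a user defers calibration with calibrate=False, it can still be triggered manually; a brief sketch, assuming tuner is an already-tuned ApproxTuner:

    # Tuning was run with calibrate=False, so calib_qos is still None everywhere.
    tuner.calibrate_configs_(tuner.best_configs)
    for cfg in tuner.best_configs:
        assert cfg.calib_qos is not None  # filled in by the empirical re-measurement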