Commit 0a40ff88 authored by Yifan Zhao

Added support for approx configs with missing operators (default to baseline)

parent 544052ae
@@ -89,6 +89,13 @@ class ApproxApp(abc.ABC):
         knobs.append(baseline_knob)
         return baseline_knob

+    def add_baseline_to_knobs(self, approxes: KnobsT):
+        approxes = approxes.copy()
+        for op_name in self.ops:
+            if op_name not in approxes:
+                approxes[op_name] = self.baseline_knob.name
+        return approxes
+

 class BaselineKnob(ApproxKnob):
     def __init__(self, name: str = "__baseline__"):
...
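In plain terms, the new `add_baseline_to_knobs` helper copies the caller's configuration and assigns the baseline knob to every operator the configuration does not mention, so partially specified configs become valid for all downstream lookups. A minimal standalone sketch of that behavior (the `SimpleApp` class, the operator names, and the knob name `fp16` are made up for illustration and are not part of this commit):

# Standalone sketch of the defaulting behavior; all names here are hypothetical.
class SimpleApp:
    def __init__(self, ops, baseline_name="__baseline__"):
        self.ops = ops                      # operators the app can tune
        self.baseline_name = baseline_name  # knob meaning "no approximation"

    def add_baseline_to_knobs(self, approxes):
        approxes = dict(approxes)  # copy: never mutate the caller's config
        for op_name in self.ops:
            # Any operator missing from the config falls back to the baseline knob.
            approxes.setdefault(op_name, self.baseline_name)
        return approxes

app = SimpleApp(ops=["conv1", "conv2", "fc"])
print(app.add_baseline_to_knobs({"conv1": "fp16"}))
# -> {'conv1': 'fp16', 'conv2': '__baseline__', 'fc': '__baseline__'}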
@@ -144,12 +144,16 @@ class LinearPerfModel(IPerfModel):
     """Weighted linear performance predictor based on cost of each operator."""

     def __init__(
-        self, op_costs: Dict[str, float], knob_speedups: Dict[str, float]
+        self,
+        app: ModeledApp,
+        op_costs: Dict[str, float],
+        knob_speedups: Dict[str, float],
     ) -> None:
         import numpy as np
         import pandas as pd

         super().__init__()
+        self.app = app
         knob_cost_factor_v = 1 / np.array(list(knob_speedups.values()))
         layer_cost_v = np.array(list(op_costs.values()))
         costs = np.outer(layer_cost_v, knob_cost_factor_v)
@@ -163,6 +167,7 @@ class LinearPerfModel(IPerfModel):
     def measure_perf(self, with_approxes: KnobsT) -> float:
         """We implement this using a weighted linear performance model."""
+        with_approxes = self.app.add_baseline_to_knobs(with_approxes)
         return float(
             sum(self.cost_df.loc[layer, knob] for layer, knob in with_approxes.items())
         )
@@ -204,6 +209,7 @@ class QoSModelP1(IQoSModel):
     def measure_qos(self, with_approxes: KnobsT) -> float:
         """Implementation of model."""
         assert self.baseline_tensor is not None
+        with_approxes = self.app.add_baseline_to_knobs(with_approxes)
         delta_tensors = np.array(
             [self.delta_tensors[op][knob] for op, knob in with_approxes.items()]
         )
@@ -274,6 +280,7 @@ class QoSModelP2(IQoSModel):
     def measure_qos(self, with_approxes: KnobsT) -> float:
         assert self.baseline_qos is not None and self.qos_df is not None
+        with_approxes = self.app.add_baseline_to_knobs(with_approxes)
         delta_qoses = (
             np.array([self.qos_df.loc[kv] for kv in with_approxes.items()])
             - self.baseline_qos
...
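The reason each `measure_*` method now calls `add_baseline_to_knobs` first is that the cost and QoS tables are indexed by (operator, knob) pairs; a configuration that omits an operator would otherwise simply skip that operator in the sum instead of charging it at its baseline value. A rough, self-contained sketch of the perf-model lookup after this change (the toy cost table, operator names, and knob names are invented for illustration, not taken from the repository):

import pandas as pd

# Toy cost table: rows are operators, columns are knobs (values are hypothetical).
cost_df = pd.DataFrame(
    {"__baseline__": [1.0, 2.0], "fp16": [0.6, 1.1]},
    index=["conv1", "conv2"],
)

def add_baseline_to_knobs(config, ops, baseline="__baseline__"):
    config = dict(config)
    for op in ops:
        config.setdefault(op, baseline)
    return config

def measure_perf(config):
    # Mirror of the `self.app.add_baseline_to_knobs(with_approxes)` calls above:
    # fill in missing operators before the per-(operator, knob) table lookup.
    config = add_baseline_to_knobs(config, cost_df.index)
    return float(sum(cost_df.loc[op, knob] for op, knob in config.items()))

# conv2 is absent from the config, so it is costed at baseline: 0.6 + 2.0 = 2.6
print(measure_perf({"conv1": "fp16"}))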
 import abc
 from pathlib import Path
-from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
+from typing import Any, Callable, List, Optional, Set, Tuple, Union

 import numpy as np
 import torch
@@ -152,7 +152,7 @@ class TorchApp(ModeledApp, abc.ABC):
         p1_storage = self.model_storage / "p1.pkl" if self.model_storage else None
         p2_storage = self.model_storage / "p2.json" if self.model_storage else None
         return [
-            LinearPerfModel(self._op_costs, self._knob_speedups),
+            LinearPerfModel(self, self._op_costs, self._knob_speedups),
             QoSModelP1(
                 self, self._get_raw_output_valset, batched_valset_qos, p1_storage
             ),
...