From 0a40ff8870fb5a3e4ef0c1b27c7b0b5bcb6df9d4 Mon Sep 17 00:00:00 2001
From: Yifan Zhao <yifanz16@illinois.edu>
Date: Thu, 28 Jan 2021 17:56:58 -0600
Subject: [PATCH] Added support for approx config missing operators (default to baseline)

---
 predtuner/approxapp.py  | 7 +++++++
 predtuner/modeledapp.py | 9 ++++++++-
 predtuner/torchapp.py   | 4 ++--
 3 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/predtuner/approxapp.py b/predtuner/approxapp.py
index 61d203b..5cd82a1 100644
--- a/predtuner/approxapp.py
+++ b/predtuner/approxapp.py
@@ -89,6 +89,13 @@ class ApproxApp(abc.ABC):
         knobs.append(baseline_knob)
         return baseline_knob
 
+    def add_baseline_to_knobs(self, approxes: KnobsT):
+        approxes = approxes.copy()
+        for op_name in self.ops:
+            if op_name not in approxes:
+                approxes[op_name] = self.baseline_knob.name
+        return approxes
+
 
 class BaselineKnob(ApproxKnob):
     def __init__(self, name: str = "__baseline__"):
diff --git a/predtuner/modeledapp.py b/predtuner/modeledapp.py
index 6c5fe70..06efc8c 100644
--- a/predtuner/modeledapp.py
+++ b/predtuner/modeledapp.py
@@ -144,12 +144,16 @@ class LinearPerfModel(IPerfModel):
     """Weighted linear performance predictor based on cost of each operator."""
 
     def __init__(
-        self, op_costs: Dict[str, float], knob_speedups: Dict[str, float]
+        self,
+        app: ModeledApp,
+        op_costs: Dict[str, float],
+        knob_speedups: Dict[str, float],
     ) -> None:
         import numpy as np
         import pandas as pd
 
         super().__init__()
+        self.app = app
         knob_cost_factor_v = 1 / np.array(list(knob_speedups.values()))
         layer_cost_v = np.array(list(op_costs.values()))
         costs = np.outer(layer_cost_v, knob_cost_factor_v)
@@ -163,6 +167,7 @@ class LinearPerfModel(IPerfModel):
 
     def measure_perf(self, with_approxes: KnobsT) -> float:
         """We implement this using a weighted linear performance model."""
+        with_approxes = self.app.add_baseline_to_knobs(with_approxes)
         return float(
             sum(self.cost_df.loc[layer, knob] for layer, knob in with_approxes.items())
         )
@@ -204,6 +209,7 @@ class QoSModelP1(IQoSModel):
     def measure_qos(self, with_approxes: KnobsT) -> float:
         """Implementation of model."""
         assert self.baseline_tensor is not None
+        with_approxes = self.app.add_baseline_to_knobs(with_approxes)
         delta_tensors = np.array(
             [self.delta_tensors[op][knob] for op, knob in with_approxes.items()]
         )
@@ -274,6 +280,7 @@ class QoSModelP2(IQoSModel):
 
     def measure_qos(self, with_approxes: KnobsT) -> float:
         assert self.baseline_qos is not None and self.qos_df is not None
+        with_approxes = self.app.add_baseline_to_knobs(with_approxes)
        delta_qoses = (
             np.array([self.qos_df.loc[kv] for kv in with_approxes.items()])
             - self.baseline_qos
diff --git a/predtuner/torchapp.py b/predtuner/torchapp.py
index c93bd8a..0e6fc28 100644
--- a/predtuner/torchapp.py
+++ b/predtuner/torchapp.py
@@ -1,6 +1,6 @@
 import abc
 from pathlib import Path
-from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
+from typing import Any, Callable, List, Optional, Set, Tuple, Union
 
 import numpy as np
 import torch
@@ -152,7 +152,7 @@ class TorchApp(ModeledApp, abc.ABC):
         p1_storage = self.model_storage / "p1.pkl" if self.model_storage else None
         p2_storage = self.model_storage / "p2.json" if self.model_storage else None
         return [
-            LinearPerfModel(self._op_costs, self._knob_speedups),
+            LinearPerfModel(self, self._op_costs, self._knob_speedups),
             QoSModelP1(
                 self, self._get_raw_output_valset, batched_valset_qos, p1_storage
             ),
-- 
GitLab
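
For reference, below is a minimal standalone sketch (not part of the patch) of the behavior that `add_baseline_to_knobs` introduces: any operator missing from a partial approximation config is filled in with the baseline knob name before the performance/QoS models evaluate the config. The operator names and the `"fp16"` knob are hypothetical; `"__baseline__"` matches `BaselineKnob`'s default name in the patch.

```python
# Sketch of the fill-in-with-baseline behavior added by this patch.
# "__baseline__" is BaselineKnob's default name; "fp16" is a hypothetical knob.
from typing import Dict, List


def add_baseline_to_knobs(
    ops: List[str], approxes: Dict[str, str], baseline_name: str = "__baseline__"
) -> Dict[str, str]:
    approxes = dict(approxes)  # copy so the caller's config is not mutated
    for op_name in ops:
        if op_name not in approxes:
            approxes[op_name] = baseline_name
    return approxes


ops = ["conv1", "conv2", "fc1"]
partial_config = {"conv1": "fp16"}
print(add_baseline_to_knobs(ops, partial_config))
# {'conv1': 'fp16', 'conv2': '__baseline__', 'fc1': '__baseline__'}
```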