From 697e38d91f31cf47b51de81d5001a7e2c5ae8cdc Mon Sep 17 00:00:00 2001
From: Yifan Zhao <yifanz16@illinois.edu>
Date: Fri, 22 Jan 2021 14:43:43 -0600
Subject: [PATCH] Changing API level 2 to be more extensible to new models

---
 predtuner/apps/modeledapp.py | 174 ++++++++++++++++++++++++++---------
 1 file changed, 130 insertions(+), 44 deletions(-)

diff --git a/predtuner/apps/modeledapp.py b/predtuner/apps/modeledapp.py
index 699f7a9..0001b4f 100644
--- a/predtuner/apps/modeledapp.py
+++ b/predtuner/apps/modeledapp.py
@@ -1,9 +1,9 @@
 import abc
-from typing import Dict, Tuple
+from typing import Callable, Dict, List, Tuple, Union
 
 import torch
 
-from .approxapp import ApproxApp, ApproxKnob, KnobsT
+from .approxapp import ApproxApp, KnobsT
 
 
 class ModeledApp(ApproxApp, abc.ABC):
@@ -14,90 +14,176 @@ class ModeledApp(ApproxApp, abc.ABC):
     for non-modeling application, inherit from `ApproxApp` instead.
     """
 
-    @abc.abstractmethod
-    def measure_qos(self, with_approxes: KnobsT, is_testset: bool) -> float:
-        """User should fill in this hole if not using any QoS model.
-    Otherwise this function will not be called and can be empty."""
-        pass
+    def __init__(self) -> None:
+        super().__init__()
+        models = self.get_models()
+        self._perf_models = {
+            model.name: model for model in models if isinstance(model, IPerfModel)
+        }
+        self._qos_models = {
+            model.name: model for model in models if isinstance(model, IQoSModel)
+        }
 
     @abc.abstractmethod
-    def measure_perf(self, with_approxes: KnobsT, is_testset: bool) -> float:
-        """User should fill in this hole if not using any performance model.
-    Otherwise this function will not be called and can be empty."""
+    def get_models(self) -> List[Union["IPerfModel", "IQoSModel"]]:
+        """Get QoS/Performance prediction models for this application."""
         pass
 
+    def empirical_measure_qos_perf(
+        self, with_approxes: KnobsT, is_testset: bool
+    ) -> Tuple[float, float]:
+        """Measures QoS and performance by running the program with approximation.
+
+        An implementation is not necessary if empirical measurement is never intended.
+        """
+        raise NotImplementedError()
+
     def measure_qos_perf(
         self,
         with_approxes: KnobsT,
         is_testset: bool,
-        perf_model: str = "none",
         qos_model: str = "none",
+        perf_model: str = "none",
     ) -> Tuple[float, float]:
         """We provide this with the right qos and perf function.
 
-        Need to detect self capability using `isinstance(self, ...)`
-        and check input parameter, to decide which model to use.
-        The non-modeled part will be obtained by calling the respective
-        `measure_qos` and `measure_perf` function (a bit of dirty
-        dispatching work to do here).
+        Empirical measurement will be run once if either `perf_model` or `qos_model`
+        is "none"; otherwise only the models indicated by name are used.
         """
+        # Run empirical measurement once if either perf or qos needs it
+        qos, perf = None, None
+        if qos_model == "none" or perf_model == "none":
+            qos, perf = self.empirical_measure_qos_perf(with_approxes, is_testset)
+        # If a qos_model is requested, overwrite `qos` even if we already obtained it
+        # from the empirical measurement (i.e., even when perf_model == "none")
+        if qos_model != "none":
+            if qos_model not in self._qos_models:
+                raise ValueError(
+                    f'"{qos_model}" is an invalid value for qos_model '
+                    f"(choose from {list(self._qos_models.keys())})"
+                )
+            qos = self._qos_models[qos_model].measure_qos(with_approxes, is_testset)
+        # Same goes for perf
+        if perf_model != "none":
+            if perf_model not in self._perf_models:
+                raise ValueError(
+                    f'"{perf_model}" is an invalid value for perf_model '
+                    f"(choose from {list(self._perf_models.keys())})"
+                )
+            perf = self._perf_models[perf_model].measure_perf(with_approxes, is_testset)
+        assert qos is not None and perf is not None
+        return qos, perf
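+
+    # Usage sketch (illustrative only; `MyApp`, `my_op_costs`, `my_knob_speedups`,
+    # `run_and_score`, and `config` are hypothetical names, not part of this module):
+    #
+    #     class MyApp(ModeledApp):
+    #         def get_models(self):
+    #             return [LinearPerfModel(my_op_costs, my_knob_speedups)]
+    #
+    #         def empirical_measure_qos_perf(self, with_approxes, is_testset):
+    #             return run_and_score(with_approxes, is_testset)
+    #
+    #     app = MyApp()
+    #     # QoS comes from one empirical run; perf comes from the linear model:
+    #     qos, perf = app.measure_qos_perf(config, False, perf_model="perf_linear")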
+
+
+class IPerfModel(abc.ABC):
+    """Abstract base class for models that provide performance prediction."""
+
+    @property
+    @abc.abstractmethod
+    def name(self) -> str:
+        """Name of model."""
+        pass
+
+    @abc.abstractmethod
+    def measure_perf(self, with_approxes: KnobsT, is_testset: bool) -> float:
+        """We implement this using a weighted linear performance model."""
         pass
 
 
-class IPerfModeled(abc.ABC):
-    """Interface to be inherited by user App which allows performance to be model-derived."""
+class IQoSModel(abc.ABC):
+    """Abstract base class for models that provide QoS prediction."""
 
     @property
     @abc.abstractmethod
-    def op_knobs_cost(self) -> Dict[str, Dict[ApproxKnob, float]]:
-        """Get a scalar cost of each operator applied with each knob.
-        The ops and knobs listed here should be strictly equal to `ApproxApp.ops_knobs()`"""
+    def name(self) -> str:
+        """Name of model."""
         pass
 
-    def measure_perf(self, with_approxes: KnobsT, is_testset: bool) -> float:
+    @abc.abstractmethod
+    def measure_qos(self, with_approxes: KnobsT, is_testset: bool) -> float:
         """We implement this using a weighted linear performance model."""
         pass
 
 
-class IQoSModeledP1(abc.ABC):
-    """Interface that allows QoS model `P1` to be applied to user-defined App."""
+class LinearPerfModel(IPerfModel):
+    """Weighted linear performance predictor based on cost of each operator."""
 
-    @abc.abstractmethod
-    def get_tensor_output(
-        self, with_approxes: KnobsT, is_testset: bool
-    ) -> torch.Tensor:
-        """Run the tensor-based application with config `with_approxes` applied,
+    def __init__(
+        self, op_costs: Dict[str, float], knob_speedups: Dict[str, float]
+    ) -> None:
+        import numpy as np
+        import pandas as pd
+
+        super().__init__()
+        knob_cost_factor_v = 1 / np.array(list(knob_speedups.values()))
+        layer_cost_v = np.array(list(op_costs.values()))
+        costs = np.outer(layer_cost_v, knob_cost_factor_v)
+        self.cost_df = pd.DataFrame(
+            costs, index=op_costs.keys(), columns=knob_speedups.keys(), dtype=float
+        )
+
+    @property
+    def name(self) -> str:
+        return "perf_linear"
+
+    def measure_perf(self, with_approxes: KnobsT, is_testset: bool) -> float:
+        """We implement this using a weighted linear performance model."""
+        return sum(
+            self.cost_df.loc[layer, knob] for layer, knob in with_approxes.items()
+        )
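+
+    # Worked example (illustrative numbers only): with op_costs={"conv1": 10.0,
+    # "fc1": 2.0} and knob_speedups={"fp16": 2.0}, the table holds
+    # cost_df.loc["conv1", "fp16"] == 5.0 and cost_df.loc["fc1", "fp16"] == 1.0,
+    # so measure_perf({"conv1": "fp16", "fc1": "fp16"}, False) returns 6.0.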
+
+
+class QoSModelP1(IQoSModel):
+    """QoS model `P1` in ApproxTuner.
+    
+    tensor_output_getter: Run the tensor-based application with config `with_approxes` applied,
         and return a single tensor result.
 
         Note that while we require the return value to be a PyTorch tensor,
         user is free to implement this on non-PyTorch applications.
-        """
-        pass
+    
+    qos_metric: Compute a Quality of Service level from the tensor output of application
+    """
 
-    @abc.abstractmethod
-    def qos_from_output(self, tensor_output: torch.Tensor) -> float:
-        """Compute a Quality of Service level from the tensor output of application."""
-        pass
+    def __init__(
+        self,
+        tensor_output_getter: Callable[[KnobsT, bool], torch.Tensor],
+        qos_metric: Callable[[torch.Tensor], float],
+    ) -> None:
+        super().__init__()
+        self.output_f = tensor_output_getter
+        self.qos_metric = qos_metric
+
+    @property
+    def name(self) -> str:
+        return "qos_p1"
 
     def measure_qos(self, with_approxes: KnobsT, is_testset: bool) -> float:
-        """We implement this using a QoS model P1."""
+        """Implementation of model."""
         pass
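+
+# Construction sketch for QoSModelP1 (illustrative only; `run_app` and `accuracy`
+# are hypothetical user-provided callables):
+#
+#     p1 = QoSModelP1(
+#         tensor_output_getter=run_app,  # (KnobsT, bool) -> torch.Tensor
+#         qos_metric=accuracy,           # (torch.Tensor) -> float
+#     )
+#
+# The instance would then be returned from the app's `get_models()` and selected
+# with qos_model="qos_p1".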
 
 
-class IQoSModeledP2(abc.ABC):
-    """Interface that allows QoS model `P2` to be applied to user-defined App."""
+class QoSModelP2(IQoSModel):
+    """QoS model `P2` in ApproxTuner."""
 
-    @abc.abstractmethod
-    def _measure_qos(self, with_approxes: KnobsT, is_testset: bool) -> torch.Tensor:
-        """An internal QoS-measuring method that does the same thing as `measure_qos_p2`.
+    def __init__(self, app: ModeledApp) -> None:
+        super().__init__()
+        self.app = app
+
+    @property
+    def name(self) -> str:
+        return "qos_p2"
+
+    def _empirical_measure_qos(self, with_approxes: KnobsT, is_testset: bool) -> float:
+        """An internal QoS-measuring method.
 
         The point is P2 queries some QoS results and caches them before tuning starts,
         and then defines a `measure_qos` that doesn't run the application during tuning
         (to reduce overhead).
         """
-        pass
+        qos, _ = self.app.empirical_measure_qos_perf(with_approxes, is_testset)
+        return qos
 
     def measure_qos(self, with_approxes: KnobsT, is_testset: bool) -> float:
-        """We implement this using a QoS model P1."""
+        """Implementation of model."""
         pass
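+
+    # Construction sketch (illustrative): an application could expose this model
+    # by returning `QoSModelP2(self)` from its `get_models()`; the model can then
+    # fall back to the app's own `empirical_measure_qos_perf` through
+    # `_empirical_measure_qos` when ground-truth QoS values are needed.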
-
-- 
GitLab