diff --git a/predtuner/approxapp.py b/predtuner/approxapp.py
index 2d7406804e453d020d81db57bbbd79b00d4a796b..c1f86f8a45b9282ff23df07ecd8237bfa437d217 100644
--- a/predtuner/approxapp.py
+++ b/predtuner/approxapp.py
@@ -158,7 +158,7 @@ class ApproxTuner(Generic[T]):
         taken_idx = is_pareto_efficient(points, take_n=n)
         return [configs[i] for i in taken_idx]
 
-    def write_configs_to_dir(self, directory: PathLike):
+    def dump_configs(self, filepath: PathLike):
         import os
 
         from jsonpickle import encode
@@ -167,9 +167,10 @@ class ApproxTuner(Generic[T]):
             raise RuntimeError(
                 f"No tuning session has been run; call self.tune() first."
             )
-        directory = Path(directory)
-        os.makedirs(directory, exist_ok=True)
-        encode(self.kept_configs, directory)
+        filepath = Path(filepath)
+        os.makedirs(filepath.parent, exist_ok=True)
+        with filepath.open("w", encoding="utf-8") as f:
+            f.write(encode(self.kept_configs, indent=2))
 
     def plot_configs(self) -> plt.Figure:
         if not self.tuned:
@@ -273,7 +274,6 @@ class TunerInterface(MeasurementInterface):
 
         cfg = desired_result.configuration.data
         qos, perf = self.app.measure_qos_perf(cfg, False, **self.app_kwargs)
-        qos, perf = float(qos), float(perf)
         # Print a debug message for each config in tuning and keep threshold
         self.print_debug_config(qos, perf)
         self.pbar.update()
diff --git a/predtuner/modeledapp.py b/predtuner/modeledapp.py
index 618e2c9b9b6174c93e66ea04099496cfbc6ab5a3..b48633e99e02eac2b97afc3f88200fe5f3f2b9d3 100644
--- a/predtuner/modeledapp.py
+++ b/predtuner/modeledapp.py
@@ -86,7 +86,7 @@ class ModeledApp(ApproxApp, abc.ABC):
                     f"(choose from {list(self._perf_models.keys())})"
                 )
             perf = self._perf_models[perf_model].measure_perf(with_approxes)
-        assert qos is not None and perf is not None
+        assert type(qos) is float and type(perf) is float, f"Expected float, got qos={type(qos)}, perf={type(perf)}"
         return qos, perf
 
     def get_tuner(self) -> "ApproxModeledTuner":
@@ -163,9 +163,9 @@ class LinearPerfModel(IPerfModel):
 
     def measure_perf(self, with_approxes: KnobsT) -> float:
         """We implement this using a weighted linear performance model."""
-        return sum(
+        return float(sum(
             self.cost_df.loc[layer, knob] for layer, knob in with_approxes.items()
-        )
+        ))
 
 
 class QoSModelP1(IQoSModel):
@@ -208,7 +208,7 @@ class QoSModelP1(IQoSModel):
             [self.delta_tensors[op][knob] for op, knob in with_approxes.items()]
         )
         ret = delta_tensors.sum() + self.baseline_tensor
-        return self.qos_metric(ret)
+        return float(self.qos_metric(ret))
 
     def _init(self):
         dt = self.delta_tensors
@@ -277,7 +277,7 @@ class QoSModelP2(IQoSModel):
         ) - self.baseline_qos
         ret = delta_qoses.sum() + self.baseline_qos
         assert not np.isnan(ret)
-        return ret
+        return float(ret)
 
     def _init(self):
         if self.storage and self.storage.is_file():
diff --git a/predtuner/torchapp.py b/predtuner/torchapp.py
index b056c57e3cef9665ebaede8f226c1a878ef510b8..ae62d3b4d646e59004fa8e048dbc986048c848e0 100644
--- a/predtuner/torchapp.py
+++ b/predtuner/torchapp.py
@@ -136,7 +136,7 @@ class TorchApp(ModeledApp, abc.ABC):
             outputs = approxed(inputs)
             qoses.append(self.tensor_to_qos(outputs, targets))
         time_end = time_ns() / (10 ** 9)
-        qos = self.combine_qos(np.array(qoses))
+        qos = float(self.combine_qos(np.array(qoses)))
         return qos, time_end - time_begin
 
     def __repr__(self) -> str: