diff --git a/hpvm/projects/torch2hpvm/torch2hpvm/compile.py b/hpvm/projects/torch2hpvm/torch2hpvm/compile.py
index 52b6a5bc21dbbe8df16af6972df370321013aca5..ee65a539d3ac35f4e65c6414c4337a2066b2c692 100644
--- a/hpvm/projects/torch2hpvm/torch2hpvm/compile.py
+++ b/hpvm/projects/torch2hpvm/torch2hpvm/compile.py
@@ -154,8 +154,6 @@ class ModelExporter:
                     "knob_speedup": knob_speedup,
                     "op_knobs": op_knobs,
                     "baseline_knob": baseline_knob,
-                    "tune_args": "tune",
-                    "test_args": "test",
                 },
                 f,
                 indent=2,
diff --git a/hpvm/projects/torch2hpvm/torch2hpvm/template_hpvm_inspect.cpp.in b/hpvm/projects/torch2hpvm/torch2hpvm/template_hpvm_inspect.cpp.in
index 00a15dc9553fcfab7ecbcda1be61e31cfde9dfd9..f6024fe8641a40b4d61567ab74991a9f2a177be1 100644
--- a/hpvm/projects/torch2hpvm/torch2hpvm/template_hpvm_inspect.cpp.in
+++ b/hpvm/projects/torch2hpvm/torch2hpvm/template_hpvm_inspect.cpp.in
@@ -10,14 +10,17 @@
 #include <sys/stat.h>
 
 /**** Routines for Handling Piped Execution ***/
-bool fifo_wait(const std::string &filename) {
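+// Returns 1 to run the test set, 2 to run the tune set, 0 to stop; aborts on any other value.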
+int fifo_wait(const std::string &filename) {
   std::ifstream file{filename};
   std::string line;
   std::getline(file, line);
-  if (line == "next")
-    return true;
+  if (line == "test")
+    return 1;
+  if (line == "tune")
+    return 2;
   if (line == "stop")
-    return false;
+    return 0;
   std::cout << "Invalid fifo file content \"" << line << "\"\n";
   abort();
 }
@@ -33,15 +36,6 @@ void make_fifo(const std::string &filename) {
     file << "{{conf_path}}\n"; // Write path to config file in FIFO file
     return;
   }
-
-  if (errno == EEXIST) {
-    if (unlink(filename.c_str()) < 0) {
-      std::cout << "Error removing existing file: " << strerror(errno) << '\n';
-      abort();
-    }
-    make_fifo(filename);
-    return;
-  }
   std::cout << "Error making FIFO file: " << strerror(errno) << '\n';
   abort();
 }
@@ -105,20 +99,13 @@ typedef struct __attribute__((__packed__)) {
 
 const int batch_size = {{batch_size}}, input_size = {{input_size}}, batch_count = input_size / batch_size;
 
-int main(int argc, char *argv[]){
-  if (argc != 2) {
-    std::cout << "Usage: " << argv[0] << " {tune|test}\n";
-    return 1;
-  }
-  std::string arg1 = argv[1];
-  if (arg1 != "tune" && arg1 != "test") {
-    std::cout << "Usage: " << argv[0] << " {tune|test}\n";
-    return 1;
-  }
-
+int main(){
   std::string dir_prefix = "{{prefix}}/";
-  std::string input_path = dir_prefix + arg1 + "_input.bin";
-  std::string labels_path = dir_prefix + arg1 + "_labels.bin";
+  std::string test_input = dir_prefix + "test_input.bin";
+  std::string test_labels = dir_prefix + "test_labels.bin";
+  std::string tune_input = dir_prefix + "tune_input.bin";
+  std::string tune_labels = dir_prefix + "tune_labels.bin";
+
 {% for w in weights %}
   std::string {{w.name}}_path = dir_prefix + "{{w.filename}}";
   void* {{w.name}} = readTrainedWeights({{w.name}}_path.c_str(), 0, {{w.shape|join(', ')}});
@@ -132,19 +119,24 @@ int main(int argc, char *argv[]){
 {% endfor %}
 
   make_fifo("{{fifo_path}}");
-  while (fifo_wait("{{fifo_path}}")) {
+  int ret = 0;
+  while ((ret = fifo_wait("{{fifo_path}}"))) {
     __hpvm__init();
     startMemTracking();
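+    // fifo_wait() returned 1: run the test set; 2: run the tune set.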
+    const auto *input_pth = (ret == 1 ? test_input : tune_input).c_str();
+    const auto *labels_pth = (ret == 1 ? test_labels : tune_labels).c_str();
+
     for (int i = 0; i < batch_count; i++){
       int start = i * batch_size, end = start + batch_size;
-      copyInputBatch(input_path.c_str(), start, end, {{input_shape|join(', ')}}, {{input_name}});
+      copyInputBatch(input_pth, start, end, {{input_shape|join(', ')}}, {{input_name}});
 
       void* dfg = __hpvm__launch(0, root, (void*) args);
       __hpvm__wait(dfg);
       void *result = static_cast<RootIn*>(args)->r.tensor;
       hpvm_request_tensor(result, 0);
 
-      llvm_hpvm_invokeRtControl(result, labels_path.c_str(), start, end);
+      llvm_hpvm_invokeRtControl(result, labels_pth, start, end);
       freeBatchMemory();
     }
     __hpvm__cleanup();
diff --git a/hpvm/test/dnn_benchmarks/pytorch/test_tuning.py b/hpvm/test/dnn_benchmarks/pytorch/test_tuning.py
index 4da55559a2c2b5abe45d432aaac0930bada5faf5..34a179b6864e57a10fb9a3516d35e9566c24aefb 100644
--- a/hpvm/test/dnn_benchmarks/pytorch/test_tuning.py
+++ b/hpvm/test/dnn_benchmarks/pytorch/test_tuning.py
@@ -1,20 +1,11 @@
-import shutil
-from pathlib import Path
-from subprocess import run
-import torch
-
-from torch2hpvm import BinDataset, ModelExporter
-from torch.nn import Module
-from predtuner.pipedbin import PipedBinaryApp
-
-
 import os
 import shutil
 import site
 from pathlib import Path
-from subprocess import run
-import torch
+from subprocess import Popen
 
+import torch
+from predtuner.pipedbin import PipedBinaryApp
 from torch2hpvm import BinDataset, ModelExporter
 from torch.nn import Module
 
@@ -37,29 +28,30 @@ benchmarks = [
 self_folder = Path(__file__).parent
 model_cls, nch, img_size, batch_size, pathname = benchmarks[0]
 codegen_dir = Path(f"/tmp/{pathname}_tune")
-print(f"Generating {pathname} to {codegen_dir}")
-if codegen_dir.exists():
-    shutil.rmtree(codegen_dir)
-
-params = self_folder / "../model_params" / pathname
-dataset_shape = 5000, nch, img_size, img_size
-bin_tuneset = BinDataset(
-    params / "tune_input.bin", params / "tune_labels.bin", dataset_shape
-)
-bin_testset = BinDataset(
-    params / "test_input.bin", params / "test_labels.bin", dataset_shape
-)
-model: Module = model_cls()
-checkpoint = self_folder / "../model_params" / f"{pathname}.pth.tar"
-model.load_state_dict(torch.load(checkpoint.as_posix()))
-
 build_dir = codegen_dir / "build"
-target_binary = build_dir / pathname
-exporter = ModelExporter(
-    model, bin_tuneset, bin_testset, codegen_dir, target="hpvm_tensor_inspect"
-)
-exporter.generate(batch_size=batch_size).compile(target_binary, build_dir)
-run([str(target_binary), "test"], check=True)
-
-# build_dir = codegen_dir / "build"
-# print(PipedBinaryApp("test", codegen_dir / "ops.json", build_dir / "lenet_mnist", build_dir))
+metadata_file = codegen_dir / "ops.json"
+binary_file = build_dir / pathname
+conf_file = codegen_dir / ModelExporter.config_file_name
+if not binary_file.is_file() or not metadata_file.is_file():
+    print(f"Generating {pathname} to {codegen_dir}")
+    if codegen_dir.exists():
+        shutil.rmtree(codegen_dir)
+
+    params = self_folder / "../model_params" / pathname
+    dataset_shape = 5000, nch, img_size, img_size
+    bin_tuneset = BinDataset(
+        params / "tune_input.bin", params / "tune_labels.bin", dataset_shape
+    )
+    bin_testset = BinDataset(
+        params / "test_input.bin", params / "test_labels.bin", dataset_shape
+    )
+    model: Module = model_cls()
+    checkpoint = self_folder / "../model_params" / f"{pathname}.pth.tar"
+    model.load_state_dict(torch.load(checkpoint.as_posix()))
+
+    exporter = ModelExporter(
+        model, bin_tuneset, bin_testset, codegen_dir, target="hpvm_tensor_inspect"
+    )
+    exporter.generate(batch_size=batch_size).compile(binary_file, build_dir)
+app = PipedBinaryApp("test", codegen_dir, "ops.json", f"build/{pathname}", "hpvm_fifo")
+app.measure_qos_perf({}, False)