diff --git a/hpvm/projects/keras/README.md b/hpvm/projects/keras/README.md
index 4202165117685eb6dc8c9ba0faad18440d28e400..ef31139ee149cb1cd4218603a50c5e687f86158a 100644
--- a/hpvm/projects/keras/README.md
+++ b/hpvm/projects/keras/README.md
@@ -24,7 +24,7 @@ conda activate keras_python36
 At the root of this project (`/projects/keras/`) install the Keras frontend pip package as:
 
 ```bash
-pip install -e ./
+pip3 install -e ./
 ```
 
 **NOTE:** If you are using the conda environment, activate it prior to this step.
diff --git a/hpvm/projects/keras/keras_frontend/approxhpvm_translator.py b/hpvm/projects/keras/keras_frontend/approxhpvm_translator.py
index a9e17a4eb8dd62c66d1519672490ecf100e8a5b7..8f39d77987fe0a4b595f48276480b80187047ea2 100644
--- a/hpvm/projects/keras/keras_frontend/approxhpvm_translator.py
+++ b/hpvm/projects/keras/keras_frontend/approxhpvm_translator.py
@@ -459,7 +459,7 @@ class TensorRtTranslator:
 
       if strides[0] > 1 and cur_node.padding.strip() == "same":
         print ("!ERROR: Same Padding not supported for Conv with Stride > 1")
-        print ("Use: ZeroPadding2D(padding=(" + str(padding) + "," + str(padding) + "));\n");
+        print ("Use: ZeroPadding2D(padding=(" + str(padding) + "," + str(padding) + ")) before the Conv2D/DepthwiseConv2D Operator  \n");
         sys.exit(0)
 
       # NOTE: For Json (tuning config) file generation
@@ -467,8 +467,6 @@ class TensorRtTranslator:
         self.addConvOverheads(weights, padding, strides)
   
       elif layer_type == "DepthwiseConv2D":
-        #self.json_str += "depthwise_convolution_" + str(self.op_count) + " : 0, \n"
-        #self.op_count += 1
         self.addBaselineKnob("depthwise_convolution")
 
     
@@ -1084,14 +1082,16 @@ def getUniquePath(weights_dir):
 def createRecursiveDir(target_dir):
 
   if os.path.exists(target_dir):
-    print ("Directory = ", target_dir, " exists. Aborting....")
+    print ("Directory = ", target_dir, " exists ")
+    print ("Delete Directory or Give Different Path. Aborting....")
     sys.exit(1)
-  
+
   toks = target_dir.split("/")
   for i in range(len(toks)):
     path_str = "/".join(toks[0:i+1])
-    if not os.path.exists(path_str):
-      os.mkdir(path_str)
+    if path_str != "":
+      if not os.path.exists(path_str):
+        os.mkdir(path_str)
   
 
 
@@ -1146,7 +1146,7 @@ def translate_to_approxhpvm(model,
   if reload_weights:
     print ("NOTE: Using existing pretrained weights \n")
   else:
-    print ("NOTE: dumping new set of weights \n")
+    print ("NOTE: Translating Keras .h5 file to HPVM .bin files  \n")
     
   print ("-- Weight Files Under : ", weights_dir)
   print ("-- TensorRT src : ", src_dir + "/src.cc")
diff --git a/hpvm/projects/keras/keras_frontend/hpvm_dfg_translator.py b/hpvm/projects/keras/keras_frontend/hpvm_dfg_translator.py
index 53369478e39e058fd4e4e065d00fb55e0bdc2960..fbe2f41c84c2d018412a682b3c211ad9897dcd5f 100644
--- a/hpvm/projects/keras/keras_frontend/hpvm_dfg_translator.py
+++ b/hpvm/projects/keras/keras_frontend/hpvm_dfg_translator.py
@@ -682,7 +682,7 @@ class HPVMTranslator:
     input_str += "std::string input_path = test_input_path; \n"
     input_str += "std::string labels_path = test_labels_path; \n\n"
 
-    input_str += "if (argc >= 2 &&  std::string(argv[1]) ==  \"tune\"){ \n"
+    input_str += "if (runtype ==  \"tune\"){ \n"
     input_str += "  input = tune_input; \n"
     input_str += "  input_path = tune_input_path; \n"
     input_str += "  labels_path = tune_labels_path; \n\n"
@@ -695,9 +695,21 @@ class HPVMTranslator:
   def genMainFunction(self, test_data, batch_size):
 
      main_func_str = "int main(int argc, char* argv[]){ \n\n"
+
+     main_func_str += self.GetOptLoop()
+     
      main_func_str += self.weight_str
      main_func_str += self.input_str
      main_func_str += "\n" + HPVM_init + "(); \n"
+
+     main_func_str += """
+
+if(config_path != ""){
+  llvm_hpvm_initializeRuntimeController(config_path.c_str());
+} 
+
+ """
+     
      main_func_str += "RootIn* args = static_cast<RootIn*>(malloc(sizeof(RootIn))); \n\n"
 
      main_func_str += self.handleTuneTestData()  
@@ -733,6 +745,9 @@ class HPVMTranslator:
   def genTunerMainFunction(self, src_dir, test_data, batch_size):    
 
      tuner_main_func_str = "int main(int argc, char* argv[]){ \n\n"
+
+     tuner_main_func_str += self.GetOptLoop()
+     
      tuner_main_func_str += self.weight_str
      tuner_main_func_str += self.input_str
      tuner_main_func_str += "RootIn* args = static_cast<RootIn*>(malloc(sizeof(RootIn))); \n\n"
@@ -745,7 +760,16 @@ class HPVMTranslator:
 
      tuner_main_func_str += "\nint ret = 0; \n"
      tuner_main_func_str += "while ((ret = fifo_wait())) { \n"
-     tuner_main_func_str += "\n" + HPVM_init + "(); \n\n"
+     tuner_main_func_str += "\n" + HPVM_init + "(); \n"
+
+     tuner_main_func_str += """
+
+if(config_path != ""){
+  llvm_hpvm_initializeRuntimeController(config_path.c_str());
+} 
+ 
+"""
+     
      tuner_main_func_str += "std::string input_pth = (ret == 1 ? test_input_path : tune_input_path); \n"
      tuner_main_func_str += "std::string labels_pth = (ret == 1 ? test_labels_path : tune_labels_path); \n"
 
@@ -833,18 +857,61 @@ void write_accuracy(float accuracy) {
   fout << std::fixed << accuracy;
 }
 
+"""
+    
+    return FIFO_str
+ 
+    
+  def getUsageStr(self):
 
+    usage_str = """
 
-"""
+void printUsage(){
+  std::cerr << \"Usage: -d {test|tune} -c {config_file_path} \";
+  abort();
+}
 
-    return FIFO_str
+"""
+    return usage_str
   
 
 
+  def GetOptLoop(self):
+
+    getopt_str = """
+
+  std::string runtype;
+  std::string config_path = "";
+  int flag;
+  while ( (flag = getopt (argc, argv, "hd:c:")) != -1){
+    switch (flag)
+      {
+      case 'd':
+	runtype = std::string(optarg);
+	if (runtype != "test" && runtype != "tune")
+	  printUsage();
+	break;
+      case 'c':
+	config_path = std::string(optarg);
+	break;
+      case 'h':
+	printUsage();
+	break;
+      default:
+	printUsage(); 
+      }
+  }
+
+"""
+    
+    return getopt_str
+  
+  
+
   def generateTestProgram(self, dir_prefix):
     
     program_str = self.file_header_str + self.node_str + self.root_str
-    program_str += self.root_struct_str + self.main_func_str
+    program_str += self.root_struct_str + self.getUsageStr() +  self.main_func_str
 
     DEBUG (program_str)
     
@@ -856,8 +923,8 @@ void write_accuracy(float accuracy) {
 
   def generateTunerProgram(self, dir_prefix, FIFO_str):
     
-    program_str = self.file_header_str + FIFO_str + self.node_str + self.root_str
-    program_str += self.root_struct_str + self.tuner_main_func_str
+    program_str = self.file_header_str + FIFO_str + self.node_str + self.root_str 
+    program_str += self.root_struct_str + self.getUsageStr() + self.tuner_main_func_str
 
     DEBUG (program_str)
     
diff --git a/hpvm/projects/keras/setup.py b/hpvm/projects/keras/setup.py
index d3a3b295f6cf1ed98027f8e104082ff98856556b..0dffc1ccc23c1d9da8cba454c7f666afcacc0ad7 100644
--- a/hpvm/projects/keras/setup.py
+++ b/hpvm/projects/keras/setup.py
@@ -12,7 +12,8 @@ setup(
         "tensorflow==1.14",
         "tensorflow-gpu==1.14",
         "keras==2.1.6",
-        "scipy==1.1.0"
+        "scipy==1.1.0",
+        "h5py==2.10.0"
     ],
     python_requires="==3.6.*"
 )
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet2_cifar10/alexnet2_cifar10_cudnn.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet2_cifar10/alexnet2_cifar10_cudnn.cpp
index 569793db2a60af48327bf6a6328f64104b55a3e1..9c0c980d977138a628f1c0b76354d626066d77f9 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet2_cifar10/alexnet2_cifar10_cudnn.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet2_cifar10/alexnet2_cifar10_cudnn.cpp
@@ -491,7 +491,7 @@ int main() {
   args->dense_1_b_bytes = 0;
 
   int batch_size = 500;
-  int test_input_size = 10000;
+  int test_input_size = 5000;
   int batch_count = test_input_size / batch_size;
 
   std::string input_path = dir_prefix + std::string("tune_input.bin");
diff --git a/hpvm/test/dnn_benchmarks/keras/Benchmark.py b/hpvm/test/dnn_benchmarks/keras/Benchmark.py
index 11f762d563eba1c5060b8659c24c87d603436f43..c225ff97661aef8f4fceb123b79190cb7c819dd7 100644
--- a/hpvm/test/dnn_benchmarks/keras/Benchmark.py
+++ b/hpvm/test/dnn_benchmarks/keras/Benchmark.py
@@ -17,28 +17,31 @@ class Benchmark:
 
     def __init__(self, name, reload_dir, keras_model_file, data_dir, src_dir, num_classes, batch_size=500):
         self.name = name
-        self.reload_dir = reload_dir
+        self.reload_dir = reload_dir + "/"
         self.keras_model_file = keras_model_file
-        self.data_dir = data_dir
-        self.src_dir = src_dir
+        self.data_dir = data_dir + "/"
+        self.src_dir = src_dir + "/"
         self.num_classes = num_classes
         self.batch_size = batch_size
         
-        
+    # Override function in subclass    
     def buildModel(self):
         return
 
+    # Override function in subclass
     def data_preprocess(self):
         return
-    
+
+    # Override function in subclass
     def trainModel(self, X_train, y_train, X_test, y_test):
         return
 
+    # Override function in subclass
     def inference(self):
         return
 
 
-    # Compiles frontend generated sources
+    # Common Function - Do not override in Subclasses
     def compileSource(self, working_dir, src_name, binary_name):
               
         src_file = os.getcwd() + "/" + working_dir + "/" + src_name   #  approxhpvm_src.cc"
@@ -80,8 +83,8 @@ class Benchmark:
         print ("Usage: python ${benchmark.py} [hpvm_reload|train] [frontend] [compile]")
         sys.exit(0)
 
-        
-    def run(self, argv):
+    # Common Function for Exporting to HPVM Modules - Do not Override in Subclasses    
+    def exportToHPVM(self, argv):
 
       if len(argv) < 2:
           self.printUsage()
@@ -154,9 +157,6 @@ class Benchmark:
           model.save_weights(self.keras_model_file)
 
           
-      #elif len(argv) > 2:
-      #  self.printUsage()
-            
 
     
 
diff --git a/hpvm/test/dnn_benchmarks/keras/README.md b/hpvm/test/dnn_benchmarks/keras/README.md
index 34b7a7804d43f40295cbbe968a0fd4ef9e0682f7..f80ac8a387ecd3a537473a9797eaec190f3c9964 100644
--- a/hpvm/test/dnn_benchmarks/keras/README.md
+++ b/hpvm/test/dnn_benchmarks/keras/README.md
@@ -1,21 +1,60 @@
-# Keras Benchmarks
+# Keras Frontend 
+
+Install the Keras frontend from the directory `/hpvm/hpvm/projects/keras`.
+
+## Requirements 
+
+* python == 3.6.x
+* pip >= 18
+
+If your system uses a different Python version, we recommend using the conda environment defined in `keras_python36.yml`. Create it using:
+
+```
+conda env create -f keras_python36.yml --name keras_python36
+```
+
+Activate the conda environment before installing the pip package (below) using:
+
+```
+conda activate keras_python36
+```
+
+**NOTE:** This step must be repeated in each new shell session before using the frontend.
 
 
 ## Installing the Keras Frontend Package
 
-Instructions for Installing the Keras Frontend are [here](https://gitlab.engr.illinois.edu/llvm/hpvm/-/blob/approx_hpvm_reorg_keras/hpvm/projects/keras/README.md)
+At the root of this project (`/projects/keras/`) install the Keras frontend pip package as:
+
+```
+pip3 install -e ./
+```
+
+**NOTE:** If you are using the conda environment, activate it prior to this step.
+
+## Supported Operations
+
+The list of supported operations and known limitations is detailed in https://gitlab.engr.illinois.edu/llvm/hpvm/-/blob/approx_hpvm_reorg_keras/hpvm/projects/keras/docs/Support.md
+
+
 
 
+
+# Keras Benchmarks
+
+Run the Keras benchmarks from `hpvm/hpvm/test/dnn_benchmarks/keras`.
+
 ## Download CNN Model Files 
 
-The weight (model) and data files to use with the CNN benchmarks are hosted on Git LFS and need to separately downloaded. This can be done using:
+Prior to running the benchmarks, download the CNN model data (inputs and weights) if the automated build script has not already done so:
 
 ```
-git lfs fetch 
-git lfs checkout 
+wget https://databank.illinois.edu/datafiles/o3izd/download -O model_params.tar.gz
+tar -xf  model_params.tar.gz
 ```
 
-**NOTE:** Data donwload is necesary before running benchmarks
+Move the extracted `model_params` directory to `/test/dnn_benchmarks/model_params` (the benchmarks expect the data at this location).
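+
+For example, from the directory where the archive was extracted (`<hpvm-root>` below is only a placeholder for your HPVM checkout path):
+
+```
+mv model_params <hpvm-root>/test/dnn_benchmarks/model_params
+```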
+
 
-## Running Benchmaks 
+## Running Benchmarks
 
@@ -23,22 +62,22 @@ List of benchmarks and the expected accuracies:
 
 | Benchmark       | Accuracy    |
 | ----------- | ----------- |
-| AlexNet-CIFAR10      | 79.28       |
-| AlexNet2-CIFAR10   | 84.98        |
-| AlexNet-ImageNet | 56.30 |
-| LeNet-MNIST | 98.70 | 
-| MobileNet-CIFAR10 | 84.42 |
-| ResNet18-CIFAR10 | 89.56 |
-| ResNet50-ImageNet | 75.10 |
-| VGG16-CIFAR10 | 89.96 |
-| VGG16-CIFAR100 | 66.50 |
-| VGG16-ImageNet | 69.46 |
+| alexnet.py      | 79.28       |
+| alexnet2.py   | 84.98        |
+| alexnet_imagenet.py | 56.30 |
+| lenet.py | 98.70 | 
+| mobilenet_cifar10.py | 84.42 |
+| resnet18_cifar10.py | 89.56 |
+| resnet50_imagenet.py | 75.10 |
+| vgg16_cifar10.py | 89.96 |
+| vgg16_cifar100.py | 66.50 |
+| vgg16_imagenet.py | 69.46 |
 
 
 ### Synopsis
 
 ```
-python ${BENCH_NAME}.py  [hpvm_reload|keras_reload]  [frontend] [compile]
+python3 ${BENCH_NAME}.py  [hpvm_reload|keras_reload]  [frontend] [compile]
 
 ```
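+
+For example, to reload the pretrained Keras model for AlexNet, translate it through the frontend, and compile the generated HPVM sources (the individual modes are described below):
+
+```
+python3 alexnet.py keras_reload frontend compile
+```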
 
@@ -111,6 +150,32 @@ These are described here:
 Trains the Keras model constructed in `buildModel` and is expected to return the 
 trained keras model - training parameters should be tuned here.
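
+A minimal sketch of such an override (the subclass name and hyperparameters are hypothetical placeholders; `buildModel` is assumed to be overridden as described earlier, and labels are assumed to be one-hot encoded by `data_preprocess`):
+
+```python
+class MyNet_CIFAR10(Benchmark):  # Benchmark base class imported as in the existing benchmark scripts
+
+    def trainModel(self, X_train, y_train, X_test, y_test):
+        model = self.buildModel()   # buildModel() assumed to be overridden in this subclass
+        model.compile(loss='categorical_crossentropy', optimizer='adam',
+                      metrics=['accuracy'])
+        # Placeholder hyperparameters -- tune these per benchmark
+        model.fit(X_train, y_train, validation_data=(X_test, y_test),
+                  batch_size=128, epochs=10)
+        return model  # the trained Keras model is handed back to the export flow
+```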
 
+### Directly Using the Keras Frontend API
+
+As an alternative to extending the `Benchmark` class, users may invoke the Keras frontend API directly:
+
+```python
+
+from keras_frontend.approxhpvm_translator import translate_to_approxhpvm
+
+# Construct and train your Keras Model (or load pre-trained weights)
+
+translate_to_approxhpvm(model, data_dir, src_dir, test_data, test_labels, tune_data, tune_labels, batch_size, num_classes)
+
+```
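+
+A minimal end-to-end sketch (the toy model, random data, output directories, and channels-first layout below are illustrative assumptions; the argument order follows the call shown above):
+
+```python
+import numpy as np
+from keras.models import Sequential
+from keras.layers import Conv2D, Flatten, Dense, Activation
+from keras_frontend.approxhpvm_translator import translate_to_approxhpvm
+
+# Toy stand-ins for real test/tune data (500 samples, 10 classes)
+X_test = np.random.rand(500, 1, 28, 28).astype(np.float32)
+y_test = np.random.randint(0, 10, 500)
+X_tune = np.random.rand(500, 1, 28, 28).astype(np.float32)
+y_tune = np.random.randint(0, 10, 500)
+
+# Toy model purely for illustration -- see Support.md for the supported operators
+model = Sequential()
+model.add(Conv2D(16, (3, 3), padding='same', input_shape=(1, 28, 28)))
+model.add(Activation('relu'))
+model.add(Flatten())
+model.add(Dense(10))
+model.add(Activation('softmax'))
+
+# Normally model.fit(...) or model.load_weights(...) would be called here
+
+translate_to_approxhpvm(model, 'data/toy_model/', 'src/toy_model_src/',
+                        X_test, y_test, X_tune, y_tune,
+                        500, 10)   # batch_size, num_classes
+```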
+
+## Running the HPVM Binary
+
+Run the `HPVM_binary` generated under the directory specified by `src_dir` (described above). Usage: 
+
+```
+./HPVM_binary -d {test|tune} -c ${config_file_path}
+```
+
+`test|tune`: Runs on either the tune set (autotuning data) or the test set (for evaluation)
+
+`config_file_path`: Path to an HPVM tensor configuration file (includes approximation settings)
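+
+For example, to evaluate on the test set (the configuration file name here is only a placeholder):
+
+```
+./HPVM_binary -d test -c tuner_confs.txt
+```
+
+If `-c` is omitted, the binary runs without loading an approximation configuration.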
+
 
 ## Automated Tests 
 
@@ -121,9 +186,6 @@ python test_benchmarks.py
 ```
 
 
-## Suppported Operations
-
-List of supported operations and limitations detailed in https://gitlab.engr.illinois.edu/llvm/hpvm/-/blob/approx_hpvm_reorg_keras/hpvm/projects/keras/docs/Support.md
 
 
 
diff --git a/hpvm/test/dnn_benchmarks/keras/alexnet.py b/hpvm/test/dnn_benchmarks/keras/alexnet.py
index 0eefe1b3d3dfa28cd009d74806a9bff41f6d597b..d6e7f98c4375fcb603c1f9549ea931410d7423ad 100644
--- a/hpvm/test/dnn_benchmarks/keras/alexnet.py
+++ b/hpvm/test/dnn_benchmarks/keras/alexnet.py
@@ -143,10 +143,9 @@ if __name__ == '__main__':
     # *** Below are Parameters specific to each benchmark *****
     reload_dir = MODEL_PARAMS_DIR + '/alexnet_cifar10/'
     ## Either the HPVM weights are loaded (above) or the Keras Model from the path below 
-    keras_model_file = MODEL_PARAMS_DIR + '/alexnet_cifar10/weights.h5'
-    data_dir = ''   # if reloading weights, data_dir can be set to empty string (value is ignored)
- 
-    src_dir = 'data/alexnet_cifar10_src/'  # Directory where HPVM sources are downloaded
+    keras_model_file = MODEL_PARAMS_DIR + '/keras/alexnet_cifar10.h5'
+    data_dir = 'data/alexnet_cifar10_hpvm/'   # if reloading weights, data_dir can be set to empty string (value is ignored)
+    src_dir = 'src/alexnet_cifar10_src_hpvm/'  # Directory where HPVM sources are generated
     num_classes = 10  # Specify num out output classes - CIFAR10 has `10` classes
     batch_size = 500  # Batch Size set to 500 - Adjust this value based on your GPU memory 
 
@@ -154,6 +153,6 @@ if __name__ == '__main__':
     model = AlexNet_CIFAR10('AlexNet_CIFAR10', reload_dir, keras_model_file, data_dir, src_dir, num_classes, batch_size)
 
     # This invokes the common run function in src/Benchmark.py 
-    model.run(sys.argv)
+    model.exportToHPVM(sys.argv)
 
     
diff --git a/hpvm/test/dnn_benchmarks/keras/alexnet2.py b/hpvm/test/dnn_benchmarks/keras/alexnet2.py
index d2c7d566bb2793a848bdb88c19e2905e6030d588..22b499d577a3b42866465614cca4d2fd71e87942 100644
--- a/hpvm/test/dnn_benchmarks/keras/alexnet2.py
+++ b/hpvm/test/dnn_benchmarks/keras/alexnet2.py
@@ -136,12 +136,12 @@ if __name__ == '__main__':
 
     ### Parameters specific to each benchmark
     reload_dir = MODEL_PARAMS_DIR + '/alexnet2_cifar10/'
-    keras_model_file = MODEL_PARAMS_DIR + '/alexnet2_cifar10/weights.h5'
-    data_dir = '' 
-    src_dir = 'data/alexnet2_cifar10_src/'
+    keras_model_file = MODEL_PARAMS_DIR + '/keras/alexnet2_cifar10.h5'
+    data_dir = 'data/alexnet2_cifar10/' 
+    src_dir = 'src/alexnet2_cifar10_src/'
     num_classes = 10
     batch_size = 500
 
     model = AlexNet2_CIFAR10('AlexNet2_CIFAR10', reload_dir, keras_model_file, data_dir, src_dir, num_classes, batch_size)
     
-    model.run(sys.argv)
+    model.exportToHPVM(sys.argv)
diff --git a/hpvm/test/dnn_benchmarks/keras/alexnet_imagenet.py b/hpvm/test/dnn_benchmarks/keras/alexnet_imagenet.py
index 1cfe7a79c2a1350689d09d07fdc50f3ce998d8af..e23fef741715be9791c1aba0210a27ef5b947934 100644
--- a/hpvm/test/dnn_benchmarks/keras/alexnet_imagenet.py
+++ b/hpvm/test/dnn_benchmarks/keras/alexnet_imagenet.py
@@ -93,15 +93,15 @@ if __name__ == '__main__':
 
     ### Parameters specific to each benchmark
     reload_dir = MODEL_PARAMS_DIR + '/alexnet_imagenet/'
-    keras_model_file = MODEL_PARAMS_DIR + '/alexnet_imagenet/weights.h5'
-    data_dir = '' 
-    src_dir = 'data/alexnet_imagenet_src/'
+    keras_model_file = MODEL_PARAMS_DIR + '/keras/alexnet_imagenet.h5'
+    data_dir = 'data/alexnet_imagenet/' 
+    src_dir = 'src/alexnet_imagenet_src/'
     num_classes = 1000
     batch_size = 50
 
     model = AlexNet('AlexNet_Imagenet', reload_dir, keras_model_file, data_dir, src_dir, num_classes, batch_size)
     
-    model.run(sys.argv)
+    model.exportToHPVM(sys.argv)
 
 
     
diff --git a/hpvm/test/dnn_benchmarks/keras/lenet.py b/hpvm/test/dnn_benchmarks/keras/lenet.py
index 70dd73a66ad49cee83a0f061d1240522332c469c..83f76ef306c31c25d1f1e961ab1922e89c217b28 100644
--- a/hpvm/test/dnn_benchmarks/keras/lenet.py
+++ b/hpvm/test/dnn_benchmarks/keras/lenet.py
@@ -102,9 +102,9 @@ if __name__ == '__main__':
 
     ### Parameters specific to each benchmark
     reload_dir = MODEL_PARAMS_DIR + '/lenet_mnist/'
-    keras_model_file = MODEL_PARAMS_DIR + '/lenet_mnist/weights.h5'
-    data_dir = '' 
-    src_dir = 'data/lenet_mnist_src/'
+    keras_model_file = MODEL_PARAMS_DIR + '/keras/lenet_mnist.h5'
+    data_dir = 'data/lenet_mnist/' 
+    src_dir = 'src/lenet_mnist_src/'
     num_classes = 10
     batch_size = 500
     
@@ -112,4 +112,4 @@ if __name__ == '__main__':
 
     model = LeNet_MNIST('LeNet_MNIST', reload_dir, keras_model_file, data_dir, src_dir, num_classes, batch_size)
     
-    model.run(sys.argv)
+    model.exportToHPVM(sys.argv)
diff --git a/hpvm/test/dnn_benchmarks/keras/mobilenet_cifar10.py b/hpvm/test/dnn_benchmarks/keras/mobilenet_cifar10.py
index 34335b0f1a7e3e414f7915a5eb9305086b7344d8..a236e0305f6f09bfa5d3335f405e6d3fb337894d 100644
--- a/hpvm/test/dnn_benchmarks/keras/mobilenet_cifar10.py
+++ b/hpvm/test/dnn_benchmarks/keras/mobilenet_cifar10.py
@@ -182,13 +182,13 @@ if __name__ == '__main__':
 
     ### Parameters specific to each benchmark
     reload_dir = MODEL_PARAMS_DIR + '/mobilenet_cifar10/'
-    keras_model_file = MODEL_PARAMS_DIR + '/mobilenet_cifar10/weights.h5'
-    data_dir = '' 
-    src_dir = 'data/mobilenet_cifar10_src/'
+    keras_model_file = MODEL_PARAMS_DIR + '/keras/mobilenet_cifar10.h5'
+    data_dir = 'data/mobilenet_cifar10/' 
+    src_dir = 'src/mobilenet_cifar10_src/'
     num_classes = 10
     batch_size = 500
 
     model = MobileNet_CIFAR10('MobileNet_CIFAR10', reload_dir, keras_model_file, data_dir, src_dir, num_classes, batch_size)
     
-    model.run(sys.argv)
+    model.exportToHPVM(sys.argv)
 
diff --git a/hpvm/test/dnn_benchmarks/keras/resnet18_cifar10.py b/hpvm/test/dnn_benchmarks/keras/resnet18_cifar10.py
index 02753f9eac83a252e5b128f29981b39c14f35d2c..9e8c23cdc6df6bb168257fa69cfba6015e9569f2 100644
--- a/hpvm/test/dnn_benchmarks/keras/resnet18_cifar10.py
+++ b/hpvm/test/dnn_benchmarks/keras/resnet18_cifar10.py
@@ -554,13 +554,13 @@ if __name__ == '__main__':
 
     ### Parameters specific to each benchmark
     reload_dir = MODEL_PARAMS_DIR + '/resnet18_cifar10/'
-    keras_model_file = MODEL_PARAMS_DIR + '/resnet18_cifar10/weights.h5'
-    data_dir = '' 
-    src_dir = 'data/resnet18_cifar10_src/'
+    keras_model_file = MODEL_PARAMS_DIR + '/keras/resnet18_cifar10.h5'
+    data_dir = 'data/resnet18_cifar10/' 
+    src_dir = 'src/resnet18_cifar10_src/'
     num_classes = 10
     batch_size = 500
 
     model = ResNet18_CIFAR10('ResNet18_CIFAR10', reload_dir, keras_model_file, data_dir, src_dir, num_classes, batch_size)
     
-    model.run(sys.argv)
+    model.exportToHPVM(sys.argv)
     
diff --git a/hpvm/test/dnn_benchmarks/keras/resnet50_imagenet.py b/hpvm/test/dnn_benchmarks/keras/resnet50_imagenet.py
index de42ae48d834b6f55e7827138f60baeefe8fb897..9ac71337b94899002296b736469d480083f7b7ad 100644
--- a/hpvm/test/dnn_benchmarks/keras/resnet50_imagenet.py
+++ b/hpvm/test/dnn_benchmarks/keras/resnet50_imagenet.py
@@ -141,15 +141,15 @@ if __name__ == '__main__':
 
     ### Parameters specific to each benchmark
     reload_dir = MODEL_PARAMS_DIR + '/resnet50_imagenet/'
-    keras_model_file = MODEL_PARAMS_DIR + '/resnet50_imagenet/weights.h5'
-    data_dir = '' 
-    src_dir = 'data/resnet50_imagenet_src/'
+    keras_model_file = MODEL_PARAMS_DIR + '/keras/resnet50_imagenet.h5'
+    data_dir = 'data/resnet50_imagenet/' 
+    src_dir = 'src/resnet50_imagenet_src/'
     num_classes = 1000
     batch_size = 50
 
     model = ResNet50('ResNet50_imagenet', reload_dir, keras_model_file, data_dir, src_dir, num_classes, batch_size)
     
-    model.run(sys.argv)
+    model.exportToHPVM(sys.argv)
 
 
     
diff --git a/hpvm/test/dnn_benchmarks/keras/test_benchmarks.py b/hpvm/test/dnn_benchmarks/keras/test_benchmarks.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d4b8afab532d7632de9968236690bc63798fc1e
--- /dev/null
+++ b/hpvm/test/dnn_benchmarks/keras/test_benchmarks.py
@@ -0,0 +1,238 @@
+
+
+import os
+import sys
+import subprocess
+
+import site
+from pathlib import Path
+
+
+#site.addsitedir(Path(__file__).parent.parent.absolute().as_posix())
+#from predtuner import PipedBinaryApp, config_pylogger
+
+
+
+class Benchmark:
+
+    def __init__(self, binary_path, test_accuracy):
+
+        self.binary_path = binary_path
+        self.test_accuracy = test_accuracy
+        self.epsilon = 0.05 # Adding some slack for accuracy difference
+
+
+    def getPath(self):
+        return self.binary_path
+
+    
+    def readAccuracy(self, accuracy_file):
+
+        # File with final benchmark accuracy
+        with open(accuracy_file, "r") as f:
+            acc_str = f.read()
+        return float(acc_str)
+    
+        
+    def runKeras(self):
+
+        # Test benchmark accuracy with pretrained Keras weights (keras_reload)
+        run_cmd = "python3 " + self.binary_path + " keras_reload "
+        try:
+            subprocess.call(run_cmd, shell=True)
+        except:
+            return False
+
+        accuracy = self.readAccuracy("final_accuracy")
+
+        print ("accuracy = ", accuracy, " test_accuracy = ", self.test_accuracy) 
+
+        test_success = False
+        if (abs(self.test_accuracy - accuracy) < self.epsilon):
+            print ("Test for " + self. binary_path + " Passed ")
+            test_success = True
+        else:
+            print ("Test Failed for " + self.binary_path)
+            test_success = False
+
+        return test_success
+
+
+    def runHPVM(self):
+
+        # Generate and compile the HPVM binary from pretrained Keras weights, then check its accuracy
+        run_cmd = "python3 " + self.binary_path + " keras_reload frontend compile compile_tuner"
+        try:
+            subprocess.call(run_cmd, shell=True)
+        except:
+            return False
+
+        working_dir = open("working_dir.txt").read()
+        cur_dir = os.getcwd()
+        
+        os.chdir(working_dir)
+        binary_path =  "./HPVM_binary"
+        
+        try:
+            subprocess.call(binary_path, shell=True)
+        except:
+            return False
+        
+        accuracy = self.readAccuracy("final_accuracy")
+        print ("accuracy = ", accuracy, " test_accuracy = ", self.test_accuracy) 
+
+        test_success = False
+        if (abs(self.test_accuracy - accuracy) < self.epsilon):
+            print ("Test for " + self. binary_path + " Passed ")
+            test_success = True
+        else:
+            print ("Test Failed for " + self.binary_path)
+            test_success = False
+
+        os.chdir(cur_dir)  # Change back to original working directory
+        
+        return test_success
+
+
+"""    
+    def runApproxTuner(self):
+
+        working_dir = open("working_dir.txt").read()
+        cur_dir = os.getcwd()
+        
+        os.chdir(working_dir)
+        binary_path =  "./HPVM_tuner_binary"
+
+        full_binary_path = str(cur_dir) + "/" +  working_dir + "/" + binary_path
+        full_json_path = str(cur_dir) + "/" + working_dir + "/tuner.json"
+    
+        app = PipedBinaryApp("TestHPVMApp", full_binary_path, full_json_path)
+        # Tuning procedure is exactly the same as that for PyTorch DNN.
+        # Please refer to `./tune_vgg16_cifar10.py` for details.
+        tuner = app.get_tuner()
+        tuner.tune(5000, 3.0, 3.0, True, 50, cost_model="cost_linear", qos_model="qos_p1")
+
+        tuner.dump_configs("configs.json")
+        fig = tuner.plot_configs(show_qos_loss=True)
+        fig.savefig("configs.png", dpi=300)
+        app.dump_hpvm_configs(tuner.best_configs, "hpvm_confs.txt")
+
+        os.chdir(cur_dir)  # Change back to original working directory
+"""
+            
+        
+
+class BenchmarkTests:
+
+    def __init__(self):
+
+        self.benchmarks = []
+        self.passed_tests = []
+        self.failed_tests = []
+        self.passed_hpvm_tests = []
+        self.failed_hpvm_tests = []
+
+
+    def addBenchmark(self, benchmark):
+
+        self.benchmarks.append(benchmark)
+
+
+    def runKerasTests(self):
+
+        for benchmark in self.benchmarks:
+            test_success = benchmark.runKeras()
+
+            if not test_success:
+                self.failed_tests.append(benchmark.getPath())
+            else:
+                self.passed_tests.append(benchmark.getPath())
+
+
+    def runHPVMTests(self):
+
+        for benchmark in self.benchmarks:
+            test_success = benchmark.runHPVM()
+
+            if not test_success:
+                self.failed_hpvm_tests.append(benchmark.getPath())
+            else:
+                self.passed_hpvm_tests.append(benchmark.getPath())
+
+
+    def printKerasSummary(self):
+
+        failed_test_count = len(self.failed_tests)
+        passed_test_count = len(self.passed_tests)
+        
+        print (" Tests Passed  = " + str(passed_test_count) + " / " + str(len(self.benchmarks)))
+        print ("******* Passed Tests ** \n")
+        for passed_test in self.passed_tests:
+            print ("Passed: " + passed_test)
+
+        print (" Tests Failed  = " + str(failed_test_count) + " / " + str(len(self.benchmarks)))
+        print ("****** Failed Tests *** \n")
+        for failed_test in self.failed_tests:
+            print ("Failed: " + failed_test)
+            
+
+    def printHPVMSummary(self):
+
+        failed_test_count = len(self.failed_hpvm_tests)
+        passed_test_count = len(self.passed_hpvm_tests)
+        
+        print (" Tests Passed  = " + str(passed_test_count) + " / " + str(len(self.benchmarks)))
+        print ("******* Passed Tests ** \n")
+        for passed_test in self.passed_hpvm_tests:
+            print ("Passed: " + passed_test)
+
+        print (" Tests Failed  = " + str(failed_test_count) + " / " + str(len(self.benchmarks)))
+        print ("****** Failed Tests *** \n")
+        for failed_test in self.failed_hpvm_tests:
+            print ("Failed: " + failed_test)
+            
+
+        
+            
+if __name__ == "__main__":
+
+    if len(sys.argv) < 2:
+        print ("Usage: python3 test_dnnbenchmarks.py ${work_dir}")
+
+    work_dir = sys.argv[1]
+    if not os.path.exists(work_dir):
+        os.mkdir(work_dir)
+    os.chdir(work_dir)
+    
+    testMgr = BenchmarkTests()
+    AlexNet = Benchmark("../alexnet.py", 79.28)
+    AlexNet_ImageNet = Benchmark("../alexnet_imagenet.py", 56.30)
+    AlexNet2 = Benchmark("../alexnet2.py", 84.98)
+    LeNet = Benchmark("../lenet.py", 98.70)
+    MobileNet = Benchmark("../mobilenet_cifar10.py", 84.42)
+    ResNet18 = Benchmark("../resnet18_cifar10.py", 89.56)
+    ResNet50 = Benchmark("../resnet50_imagenet.py", 75.10)
+    VGG16_cifar10 = Benchmark("../vgg16_cifar10.py", 89.96)
+    VGG16_cifar100 = Benchmark("../vgg16_cifar100.py", 66.50)
+    VGG16_ImageNet = Benchmark("../vgg16_imagenet.py", 69.46)
+
+    testMgr.addBenchmark(AlexNet)
+    #testMgr.addBenchmark(AlexNet_ImageNet)
+    testMgr.addBenchmark(AlexNet2)
+    testMgr.addBenchmark(LeNet)
+    testMgr.addBenchmark(MobileNet)
+    testMgr.addBenchmark(ResNet18)
+    #testMgr.addBenchmark(ResNet50)
+    testMgr.addBenchmark(VGG16_cifar10)
+    testMgr.addBenchmark(VGG16_cifar100)
+    #testMgr.addBenchmark(VGG16_ImageNet)
+
+    #testMgr.runKerasTests()
+    #testMgr.printKerasSummary()
+    
+    testMgr.runHPVMTests()
+    testMgr.printHPVMSummary()
+
+    
diff --git a/hpvm/test/dnn_benchmarks/keras/vgg16_cifar10.py b/hpvm/test/dnn_benchmarks/keras/vgg16_cifar10.py
index 9a5071ee94a54e4832eade954f779d64ebd3416e..03563cba8a79a58ef4cc9e04160f2ae6aaca2974 100644
--- a/hpvm/test/dnn_benchmarks/keras/vgg16_cifar10.py
+++ b/hpvm/test/dnn_benchmarks/keras/vgg16_cifar10.py
@@ -184,14 +184,14 @@ if __name__ == '__main__':
 
     ### Parameters specific to each benchmark
     reload_dir = MODEL_PARAMS_DIR + '/vgg16_cifar10/'
-    keras_model_file = MODEL_PARAMS_DIR + '/vgg16_cifar10/weights.h5'
-    data_dir = '' 
-    src_dir = 'data/vgg16_cifar10_src/'
+    keras_model_file = MODEL_PARAMS_DIR + '/keras/vgg16_cifar10.h5'
+    data_dir = 'data/vgg16_cifar10/' 
+    src_dir = 'src/vgg16_cifar10_src/'
     num_classes = 10
     batch_size = 500
 
     model = VGG16_CIFAR10('VGG16_CIFAR10', reload_dir, keras_model_file, data_dir, src_dir, num_classes, batch_size)
     
-    model.run(sys.argv)
+    model.exportToHPVM(sys.argv)
 
     
diff --git a/hpvm/test/dnn_benchmarks/keras/vgg16_cifar100.py b/hpvm/test/dnn_benchmarks/keras/vgg16_cifar100.py
index 0fd51ebe03c56ecd622cfab970c51f3096a7d2f4..716b441706f26008aae9e30c96cad23fcd0b398e 100644
--- a/hpvm/test/dnn_benchmarks/keras/vgg16_cifar100.py
+++ b/hpvm/test/dnn_benchmarks/keras/vgg16_cifar100.py
@@ -199,13 +199,13 @@ if __name__ == '__main__':
 
     ### Parameters specific to each benchmark
     reload_dir = MODEL_PARAMS_DIR + '/vgg16_cifar100/'
-    keras_model_file = MODEL_PARAMS_DIR + '/vgg16_cifar100/weights.h5'
-    data_dir = '' 
-    src_dir = 'data/vgg16_cifar100_src/'
+    keras_model_file = MODEL_PARAMS_DIR + '/keras/vgg16_cifar100.h5'
+    data_dir = 'data/vgg16_cifar100/' 
+    src_dir = 'src/vgg16_cifar100_src/'
     num_classes = 100
     batch_size = 100
 
     model = VGG16_CIFAR100('VGG16_CIFAR100', reload_dir, keras_model_file, data_dir, src_dir, num_classes, batch_size)
     
-    model.run(sys.argv)
+    model.exportToHPVM(sys.argv)
     
diff --git a/hpvm/test/dnn_benchmarks/keras/vgg16_imagenet.py b/hpvm/test/dnn_benchmarks/keras/vgg16_imagenet.py
index 6b9458b5378c421f5ef8f8811e4721056fd19643..bcba54717484b53687497dc471a1c257793bdf47 100644
--- a/hpvm/test/dnn_benchmarks/keras/vgg16_imagenet.py
+++ b/hpvm/test/dnn_benchmarks/keras/vgg16_imagenet.py
@@ -126,15 +126,15 @@ if __name__ == '__main__':
 
     ### Parameters specific to each benchmark
     reload_dir = MODEL_PARAMS_DIR + '/vgg16_imagenet/'
-    keras_model_file = MODEL_PARAMS_DIR + '/vgg16_imagenet/weights.h5'
-    data_dir = '' 
-    src_dir = 'data/vgg16_imagenet_src/'
+    keras_model_file = MODEL_PARAMS_DIR + '/keras/vgg16_imagenet.h5'
+    data_dir = 'data/vgg16_imagenet/' 
+    src_dir = 'src/vgg16_imagenet_src/'
     num_classes = 1000
     batch_size = 25
 
     alexnet = VGG16('VGG16_imagenet', reload_dir, keras_model_file, data_dir, src_dir, num_classes, batch_size)
     
-    alexnet.run(sys.argv)
+    alexnet.exportToHPVM(sys.argv)