diff --git a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/alexnet2_promise.cc b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/alexnet2_promise.cc
index 66e824f6d098434e140d764edda7cdacd11e110f..241eb4cea8795af05983eb7e7ea7e645b42b9edb 100644
--- a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/alexnet2_promise.cc
+++ b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/alexnet2_promise.cc
@@ -16,6 +16,9 @@ int to_skip = 5;
 
 int main(int argc, char* argv[]){ 
 
+  int test_input_size = 2000; 
+  int batch_size = 1000;
+  int offset = 5000;
 
   if (argc > 1){
     total_runs = atoi(argv[1]);
@@ -29,6 +32,14 @@ int main(int argc, char* argv[]){
     to_skip = atoi(argv[3]);   
   }
 
+  if(argc > 4){
+    test_input_size = atoi(argv[4]);   
+  }
+
+  if(argc > 5){
+    offset = atoi(argv[5]);   
+  }
+
   
   llvm_hpvm_initTensorRt(0); 
 
@@ -41,9 +52,6 @@ int main(int argc, char* argv[]){
 
     startMemTracking(); 
 
-    int test_input_size = 2000; 
-    int batch_size = 1000;
-    int offset = 5000;
     
     int batch_count = test_input_size / batch_size; 
     float final_accuracy = 0.0; 
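
The three values hoisted to the top of main() in each benchmark feed the batching arithmetic inside the run loop: batch_count comes from a truncating integer division, and each batch reads a half-open index window shifted by offset. Below is a minimal standalone sketch of that arithmetic, using the same defaults as the patch and assuming the i-based start/end indexing that the lenet loop later in this diff makes explicit.

#include <cstdio>
#include <cstdlib>

int main(int argc, char* argv[]) {
  // Defaults mirror the patched benchmarks; argv[4] and argv[5] override them.
  int test_input_size = (argc > 4) ? atoi(argv[4]) : 2000;
  int offset          = (argc > 5) ? atoi(argv[5]) : 5000;
  int batch_size      = 1000;

  // Truncating division: only full batches are executed.
  int batch_count = test_input_size / batch_size;

  for (int i = 0; i < batch_count; i++) {
    int start = i * batch_size + offset;      // first image index in this batch
    int end = (i + 1) * batch_size + offset;  // one past the last image index
    printf("batch %d reads images [%d, %d)\n", i, start, end);
  }
  return 0;
}
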
diff --git a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/alexnet_promise.cc b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/alexnet_promise.cc
index 6b951cffcaf142bd917abc7f7c04a2c691c472d7..3777d11718adf573f17348ceb0262641293fd2a5 100644
--- a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/alexnet_promise.cc
+++ b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/alexnet_promise.cc
@@ -17,6 +17,9 @@ int to_skip = 5;
 
 int main(int argc, char* argv[]){ 
 
+  int test_input_size = 2000; 
+  int batch_size = 1000;
+  int offset = 5000;
 
   if (argc > 1){
     total_runs = atoi(argv[1]);
@@ -29,23 +32,29 @@ int main(int argc, char* argv[]){
   if(argc > 3){
     to_skip = atoi(argv[3]);   
   }
+  
+  if(argc > 4){
+    test_input_size = atoi(argv[4]);   
+  }
+
+  if(argc > 5){
+    offset = atoi(argv[5]);   
+  }
 
   
   llvm_hpvm_initTensorRt(0); 
 
+
   
   int missed = 0; 
   for (int i = 0 ; i < total_runs; i++){ 
-
+    
     if (missed >= to_skip){
      break;           
     }
 
     startMemTracking(); 
 
-    int test_input_size = 2000; 
-    int batch_size = 1000;
-    int offset = 5000;
     int batch_count = test_input_size / batch_size; 
     float final_accuracy = 0.0; 
 
diff --git a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/lenet_promise.cc b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/lenet_promise.cc
index cbc5fbee8a3bd79c5d3cc6812c5bdc351913b3dd..2247ccba9fcf08d2f24368907c328f4b77b173b0 100644
--- a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/lenet_promise.cc
+++ b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/lenet_promise.cc
@@ -15,17 +15,16 @@ int total_runs = 1;
 float bench_acc = 0;
 int to_skip = 5;
 
+int test_input_size = 2000; 
+int batch_size = 2000;
+int offset = 5000;
+
 
 /* NOTE: Reference Architecture to use for profiling */
 void testLenetTanh(){
 
-
   printf("********* Lenet-5 Architecture ********** \n");
-
   
-  int batch_size = 2000;
-  int offset = 5000;
- 
   std::string dir_prefix = std::string("../model_params/lenet_params/");   
   std::string input_path =  dir_prefix + std::string("input.bin"); 
   std::string labels_path =  dir_prefix + std::string("labels.bin"); 
@@ -39,8 +38,8 @@ void testLenetTanh(){
       break;           
     }   
 
-    int start = i * batch_size + offset; 
-    int end = (i + 1) * batch_size + offset; 
+    int start = offset; 
+    int end = batch_size + offset; 
 
     // Loading Input Batch
     void* input = readInputBatch(input_path.c_str(),0,start,end,1,28,28); 
@@ -112,8 +111,18 @@ int main(int argc, char* argv[]){
   if(argc > 3){
     to_skip = atoi(argv[3]);   
   }
+    
+  if(argc > 4){
+    test_input_size = atoi(argv[4]);
+    batch_size = atoi(argv[4]);
+  }
+
+  if(argc > 5){
+    offset = atoi(argv[5]);   
+  }
 
 
+  
   llvm_hpvm_initTensorRt(0);
 
   testLenetTanh();
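
For lenet, argv[4] sets batch_size together with test_input_size, and the loop body now reads one fixed window instead of advancing by i each iteration. A small sketch of the resulting window, with the patch's defaults:

#include <cstdio>

int main() {
  int batch_size = 2000;  // overridden by argv[4] in the patched benchmark
  int offset = 5000;      // overridden by argv[5]

  // The window no longer depends on the loop index i: every run of
  // testLenetTanh() reads the same batch_size images starting at offset.
  int start = offset;
  int end = batch_size + offset;
  printf("reading images [%d, %d)\n", start, end);  // [5000, 7000) by default
  return 0;
}
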
diff --git a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/mobilenet_promise.cc b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/mobilenet_promise.cc
index 052809f29b9d89534005e56125e66c5e4a0bd1cf..45abde0c285c858904dafc54104aec797ca0abf7 100644
--- a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/mobilenet_promise.cc
+++ b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/mobilenet_promise.cc
@@ -17,6 +17,9 @@ int to_skip = 5;
 
 int main(int argc, char* argv[]){ 
 
+  int test_input_size = 2000; 
+  int batch_size = 1000;
+  int offset = 5000;
   
   if (argc > 1){
     total_runs = atoi(argv[1]);
@@ -30,6 +33,14 @@ int main(int argc, char* argv[]){
     to_skip = atoi(argv[3]);   
   }
 
+  if(argc > 4){
+    test_input_size = atoi(argv[4]);   
+  }
+
+  if(argc > 5){
+    offset = atoi(argv[5]);   
+  }
+
     
   llvm_hpvm_initTensorRt(0); 
 
@@ -43,9 +54,6 @@ int main(int argc, char* argv[]){
 
     startMemTracking(); 
 
-    int test_input_size = 2000; 
-    int batch_size = 1000;
-    int offset = 5000;
     
     int batch_count = test_input_size / batch_size; 
     float final_accuracy = 0.0; 
diff --git a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/mobilenet_shallow_promise.cc b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/mobilenet_shallow_promise.cc
index 42d26d34e65939b410143485a61f23e705906bfc..2585d96530a1c089beb3db8c15ade0a99be25718 100644
--- a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/mobilenet_shallow_promise.cc
+++ b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/mobilenet_shallow_promise.cc
@@ -17,6 +17,9 @@ int to_skip = 5;
 
 int main(int argc, char* argv[]){ 
 
+  int test_input_size = 2000; 
+  int batch_size = 1000;
+  int offset = 5000;
 
   if (argc > 1){
     total_runs = atoi(argv[1]);
@@ -30,6 +33,14 @@ int main(int argc, char* argv[]){
     to_skip = atoi(argv[3]);   
   }
 
+  if(argc > 4){
+    test_input_size = atoi(argv[4]);   
+  }
+
+  if(argc > 5){
+    offset = atoi(argv[5]);   
+  }
+
   
   llvm_hpvm_initTensorRt(0); 
 
@@ -41,10 +52,6 @@ int main(int argc, char* argv[]){
     }
 
     startMemTracking(); 
-
-    int test_input_size = 2000; 
-    int batch_size = 1000;
-    int offset = 5000;
     
     int batch_count = test_input_size / batch_size; 
     float final_accuracy = 0.0; 
diff --git a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/resnet18_promise.cc b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/resnet18_promise.cc
index 0e5cdd1d284e6c7621cd3331b924c06969be79db..d2e852664e931957b518902881d813acf6692408 100644
--- a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/resnet18_promise.cc
+++ b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/resnet18_promise.cc
@@ -17,6 +17,10 @@ int to_skip = 5;
 
 int main(int argc, char* argv[]){ 
 
+  int test_input_size = 2000; 
+  int batch_size = 1000;
+  int offset = 5000;
+
   if (argc > 1){
     total_runs = atoi(argv[1]);
   }
@@ -29,6 +33,15 @@ int main(int argc, char* argv[]){
     to_skip = atoi(argv[3]);   
   }
 
+  if(argc > 4){
+    test_input_size = atoi(argv[4]);   
+  }
+
+  if(argc > 5){
+    offset = atoi(argv[5]);   
+  }
+
+  
 
   llvm_hpvm_initTensorRt(0); 
 
@@ -40,10 +53,6 @@ int main(int argc, char* argv[]){
     }
 
     startMemTracking(); 
-
-    int test_input_size = 2000; 
-    int batch_size = 1000;
-    int offset = 5000;
     
     int batch_count = test_input_size / batch_size; 
     float final_accuracy = 0.0; 
diff --git a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/vgg16_cifar100_promise.cc b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/vgg16_cifar100_promise.cc
index 33c68eae84a075f50b2bc8e7484036c54ade5620..0f4c9cd62adee6df3c93de9d99812fad96f4d650 100644
--- a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/vgg16_cifar100_promise.cc
+++ b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/vgg16_cifar100_promise.cc
@@ -18,6 +18,11 @@ int to_skip = 5;
 
 int main(int argc, char* argv[]){ 
 
+  int test_input_size = 2000; 
+  int batch_size = 1000;
+  int offset = 5000;
+
+  
   if (argc > 1){
     total_runs = atoi(argv[1]);
   }
@@ -30,6 +35,14 @@ int main(int argc, char* argv[]){
     to_skip = atoi(argv[3]);   
   }
 
+  if(argc > 4){
+    test_input_size = atoi(argv[4]);   
+  }
+
+  if(argc > 5){
+    offset = atoi(argv[5]);   
+  }
+
 
   llvm_hpvm_initTensorRt(0); 
 
@@ -43,9 +56,6 @@ int main(int argc, char* argv[]){
 
    startMemTracking(); 
 
-   int test_input_size = 2000; 
-   int batch_size = 1000;
-   int offset = 5000;
 
    int batch_count = test_input_size / batch_size; 
    float final_accuracy = 0.0; 
diff --git a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/vgg16_cifar10_promise.cc b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/vgg16_cifar10_promise.cc
index ff767235e9d44139f97ad885aa89eef1c385ad33..a9363acd7614c83f8def2e4df55e23c6f767733e 100644
--- a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/vgg16_cifar10_promise.cc
+++ b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/vgg16_cifar10_promise.cc
@@ -16,7 +16,12 @@ int to_skip = 5;
 
 
 int main(int argc, char* argv[]){ 
-   
+
+ int test_input_size = 2000; 
+ int batch_size = 1000;
+ int offset = 5000;
+
+ 
  if (argc > 1){
    total_runs = atoi(argv[1]);
  }
@@ -29,6 +34,15 @@ int main(int argc, char* argv[]){
    to_skip = atoi(argv[3]);   
  }
 
+ if(argc > 4){
+   test_input_size = atoi(argv[4]);   
+ }
+
+ if(argc > 5){
+   offset = atoi(argv[5]);   
+ }
+
+
  llvm_hpvm_initTensorRt(0); 
 
  int missed = 0; 
@@ -40,9 +54,6 @@ int main(int argc, char* argv[]){
    
    startMemTracking(); 
 
-   int test_input_size = 2000; 
-   int batch_size = 1000;
-   int offset = 5000;
    
    int batch_count = test_input_size / batch_size; 
    float final_accuracy = 0.0;
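
Two caveats of the new positional arguments are worth noting: atoi() silently returns 0 for non-numeric input, and the batch_count division truncates, so a test_input_size smaller than batch_size runs zero batches. The guard below is a hypothetical sketch, not part of this patch; parseArgOr and its messages are invented names for illustration only.

#include <cstdio>
#include <cstdlib>

// Hypothetical helper (not in the patch): read positional argument `index`
// with a fallback, rejecting values atoi() could not parse as a non-negative integer.
static int parseArgOr(int argc, char* argv[], int index, int fallback) {
  if (argc <= index) return fallback;
  int value = atoi(argv[index]);
  if (value < 0 || (value == 0 && argv[index][0] != '0')) {
    fprintf(stderr, "argument %d ('%s') is not a non-negative integer\n", index, argv[index]);
    exit(1);
  }
  return value;
}

int main(int argc, char* argv[]) {
  int test_input_size = parseArgOr(argc, argv, 4, 2000);
  int offset = parseArgOr(argc, argv, 5, 5000);
  int batch_size = 1000;

  if (test_input_size / batch_size == 0)
    fprintf(stderr, "warning: test_input_size (%d) < batch_size (%d), no batch will run\n",
            test_input_size, batch_size);

  printf("test_input_size=%d offset=%d\n", test_input_size, offset);
  return 0;
}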