diff --git a/llvm/test/VISC/DNN_Benchmarks/common/include/tensorTypes.h b/llvm/test/VISC/DNN_Benchmarks/common/include/tensorTypes.h
index 3479a94abec9d6357edc26e4507ec80f8b060acb..726080efe7e1a06363e7fca191f9708219d5baeb 100644
--- a/llvm/test/VISC/DNN_Benchmarks/common/include/tensorTypes.h
+++ b/llvm/test/VISC/DNN_Benchmarks/common/include/tensorTypes.h
@@ -3,7 +3,7 @@
 #define TYPES_HEADER
 
 
-struct Dimension_t{
+/*struct Dimension_t{
   int num_dims;
   size_t* dim_sizes;
 };
@@ -34,5 +34,6 @@ enum Tensor_format_t{
   nhwc 
 };
 
+*/
 
 #endif
diff --git a/llvm/test/VISC/DNN_Benchmarks/common/include/tensorUtils.h b/llvm/test/VISC/DNN_Benchmarks/common/include/tensorUtils.h
index 6dfca7fc75d6a94a9fdab15dba73111557e808fa..d2f37a35ed9d1e9b3d25fb7776faf84fc08385ba 100644
--- a/llvm/test/VISC/DNN_Benchmarks/common/include/tensorUtils.h
+++ b/llvm/test/VISC/DNN_Benchmarks/common/include/tensorUtils.h
@@ -5,8 +5,15 @@
 
 
 #include <sstream>
+#include <vector>
+#include <algorithm>
 #include <tensor_runtime.h>
 #include <tensor.h>
+#include <cmath>
+
+
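+// Accuracy of each run; filled by dumpFinalAccuracy() and written out by dumpExecutionAccuracies()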
+std::vector<float> run_accuracies;
+
 
 void printTensorInfo(void* tensor_ptr){
 
@@ -17,9 +24,9 @@ void printTensorInfo(void* tensor_ptr){
   }
 
   printf("tensor dims = %d \n", tensor->dims.num_dims);
-  printf("dim1_size = %zu \n", tensor->dims.dim_sizes[0]);
-  printf("dim2_size = %zu \n", tensor->dims.dim_sizes[1]);
-  printf("num_elems = %zu \n", tensor->num_elems);
+  printf("dim1_size = %lu \n", tensor->dims.dim_sizes[0]);
+  printf("dim2_size = %lu \n", tensor->dims.dim_sizes[1]);
+  printf("num_elems = %lu \n", tensor->num_elems);
 }
 
 
@@ -27,7 +34,6 @@ void printTensorInfo(void* tensor_ptr){
 void dumpWeightsToFile(char* file_name, void* weights_ptr){
 
   struct Tensor* weights = (Tensor*) weights_ptr;
-
   // Move data back to host
   hpvm_request_tensor(weights, 0);
   
@@ -37,9 +43,9 @@ void dumpWeightsToFile(char* file_name, void* weights_ptr){
     abort();
   }
 
-  printf("size_in_bytes = %zu \n", weights->size_in_bytes);
+  //printf("size_in_bytes = %lu \n", weights->size_in_bytes);
   size_t bytes_written = fwrite(weights->host_data, 1, weights->size_in_bytes, fp);
-  printf("bytes_written = %zu\n", bytes_written);
+  //printf("bytes_written = %lu \n", bytes_written);
   fclose(fp);
 }
 
@@ -82,6 +88,22 @@ void fillWithOnesAndTwos(void* tensor_ptr){
 }
 
 
+void fillTensorWithVal(void* tensor_ptr, float target_value){
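+  // Sets every element of a float tensor to target_value (on the host copy)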
+
+  struct Tensor* tensor = (struct Tensor*) tensor_ptr;
+    
+  hpvm_request_tensor(tensor, 0);
+  
+  // initialization is specific to the floating point type
+  if(tensor->data_type == CUDNN_DATA_FLOAT){
+    float* data_arr = (float*) tensor->host_data;
+    for(unsigned int i = 0; i < tensor->num_elems; i++){
+      data_arr[i] = target_value;    
+    }
+  }
+}
+
+
 void fillTensorWithNegOnes(void* tensor_ptr){
 
   struct Tensor* tensor = (struct Tensor*) tensor_ptr;
@@ -133,9 +155,9 @@ void printTensorDims(void* tensor_ptr){
 
   struct Tensor* tensor = (struct Tensor*) tensor_ptr;
 
-  printf("Num_elems = %zu \n", tensor->num_elems);
+  printf("Num_elems = %lu \n", tensor->num_elems);
   for (int i = 0; i < tensor->dims.num_dims; i++){
-    printf("dim[%d] = %zu \n", i, tensor->dims.dim_sizes[i]);
+    printf("dim[%d] = %lu \n", i, tensor->dims.dim_sizes[i]);
   }
 }
 
@@ -197,33 +219,74 @@ void* readInputTensor(const char* file_name, int data_type, int dim1_size, int d
  
   fseek(file, file_header_size, SEEK_CUR); // Skipping the file header
   size_t bytes_read = fread(file_data, 1, sizeof(uint8_t) * num_elems, file);
+
+  fclose(file);
   
   for (size_t i = 0; i < num_elems; ++i){
     tensor_data[i] = (float) file_data[i] / 255.0f;
   }
 
-  printf("tensor_data[%d] = %f \n", 10, tensor_data[10]);
+  //printf("tensor_data[%d] = %f \n", 10, tensor_data[10]);
 
   // NOTE: Using NCHW format
   struct Tensor* input = (struct Tensor*) create4DTensor(data_type, nchw, dim1_size, dim2_size,
 					dim3_size, dim4_size);
   
   initTensorData(input, tensor_data, size_in_bytes);
-  //compareValues(input, tensor_data, num_elems);
+  //  compareValues(input, tensor_data, num_elems);
   
   return input;  
 }
 
 
+//*** FIXIT: Move this to CPU-only
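+// Reads raw float32 values from a binary file into a newly created NCHW tensor (host-side)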
+struct Tensor* readTrainedWeightsCPU(const char* file_name, int data_type,
+				     int dim1_size, int dim2_size,
+				     int dim3_size, int dim4_size){
+
+  // FIXIT: Don't assume floating point types
+  int type_size = 4; // NOTE: Assuming floating point tensors
+  long int num_elems = (long int) dim1_size * dim2_size * dim3_size * dim4_size;
+  long int size_in_bytes = (long int) type_size * dim1_size * dim2_size * dim3_size * dim4_size;
+  float* tensor_data = (float*) malloc(sizeof(float) * num_elems);
+  int file_header_size = 0;
+  
+  FILE* file = fopen(file_name, "rb");
+  if(file == NULL){
+    printf("Data file %s is not found. Aborting... \n", file_name);
+    abort();
+  }
+    
+  fseek(file, file_header_size, SEEK_CUR); // Skipping the file header
+  size_t bytes_read = fread(tensor_data, 1, size_in_bytes, file);
+
+  printf("size in bytes = %lu, bytes read = %lu \n", size_in_bytes, bytes_read);
+
+  fclose(file);
+  
+  
+  struct Tensor* weights = (struct Tensor*) create4DTensor(data_type, nchw, dim1_size, dim2_size,
+					                   dim3_size, dim4_size);
+  
+  initTensorData(weights, tensor_data, size_in_bytes);
+  //compareValues(weights, tensor_data, num_elems);
+  free(tensor_data);
 
-struct Tensor* readTrainedWeights(const char* file_name, int data_type, int dim1_size, int dim2_size,
-				  int dim3_size, int dim4_size){
+  return weights;
+}
+
+
+struct Tensor* readTrainedWeights(const char* file_name, int data_type,
+				  long int dim1_size, long int dim2_size,
+				  long int dim3_size, long int dim4_size){
 
   // FIXIT: Don't assume floating point types
   int type_size = 4; // NOTE: Assuming floating point tensors
-  int num_elems = dim1_size * dim2_size * dim3_size * dim4_size;
-  int size_in_bytes = type_size * dim1_size * dim2_size * dim3_size * dim4_size;
+  long int num_elems = dim1_size * dim2_size * dim3_size * dim4_size;
+  long int size_in_bytes = type_size * dim1_size * dim2_size * dim3_size * dim4_size;
   float* tensor_data = (float*) malloc(sizeof(float) * num_elems);
+  printf("size_in_bytes  = %lu \n", size_in_bytes);
+  
   int file_header_size = 0;
   
   FILE* file = fopen(file_name, "rb");
@@ -235,21 +298,63 @@ struct Tensor* readTrainedWeights(const char* file_name, int data_type, int dim1
   fseek(file, file_header_size, SEEK_CUR); // Skipping the file header
   size_t bytes_read = fread(tensor_data, 1, size_in_bytes, file);
 
-  //printf("tensor_data[%d] = %f \n", num_elems-1, tensor_data[num_elems-1]);
+  // printf("size in bytes = %lu, bytes read = %lu \n", size_in_bytes, bytes_read);
+
+  fclose(file);
+  
   
   struct Tensor* weights = (struct Tensor*) create4DTensor(data_type, nchw, dim1_size, dim2_size,
-					  dim3_size, dim4_size);
+					                   dim3_size, dim4_size);
   
   initTensorData(weights, tensor_data, size_in_bytes);
-  compareValues(weights, tensor_data, num_elems);
+  //compareValues(weights, tensor_data, num_elems);
+  free(tensor_data);
 
   return weights;
 }
 
 
+
+
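+// Reads images [start, end) from a raw float32 NCHW file, seeking past the first 'start' images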
+struct Tensor* readInputBatch(const char* file_name, int data_type,
+			      int start, int end,
+			      int dim2_size, int dim3_size, int dim4_size){
+
+  int dim1_size = end - start;
+  // FIXIT: Don't assume floating point types
+  int type_size = 4; // NOTE: Assuming floating point tensors
+  long int num_elems = (long int) dim1_size * dim2_size * dim3_size * dim4_size;
+  long int size_in_bytes = (long int) type_size * dim1_size * dim2_size * dim3_size * dim4_size;
+  float* tensor_data = (float*) malloc(sizeof(float) * num_elems);
+  long int file_header_size = (long int) type_size * start * dim2_size * dim3_size * dim4_size;
+  
+  FILE* file = fopen(file_name, "rb");
+  if(file == NULL){
+    printf("Data file %s is not found. Aborting... \n", file_name);
+    abort();
+  }
+    
+  fseek(file, file_header_size, SEEK_SET); // Skipping the file header
+  size_t bytes_read = fread(tensor_data, 1, size_in_bytes, file);
+
+  // printf("size in bytes = %lu, bytes read = %lu \n", size_in_bytes, bytes_read);
+
+  fclose(file);
+  
+  
+  struct Tensor* weights = (struct Tensor*) create4DTensor(data_type, nchw, dim1_size, dim2_size,
+					                   dim3_size, dim4_size);
+  
+  initTensorData(weights, tensor_data, size_in_bytes);
+  free(tensor_data);
+
+  return weights;
+}
+
+
+
 uint8_t* readLabels(const char* labels_file, int num_labels){
 
-  //int file_header_size = 8;
   uint8_t* labels = (uint8_t*) malloc(sizeof(uint8_t) * num_labels);
   FILE* file = fopen(labels_file, "rb");
   if(file == NULL){
@@ -257,15 +362,41 @@ uint8_t* readLabels(const char* labels_file, int num_labels){
     abort();
   }
 
-  //fseek(file, file_header_size, SEEK_CUR); // Skipping the file header
   size_t bytes_read = fread(labels, 1, sizeof(uint8_t) * num_labels, file);
-  printf("*Label bytes_read = %zu \n", bytes_read);
+
+  fclose(file);
+  
+  // printf("--labels bytes_read = %lu \n", bytes_read);
   return labels;
 }
 
 
+uint8_t* readLabelsBatch(const char* labels_file, int start, int end){
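+  // Reads labels in [start, end) from a raw uint8 labels file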
+
+  int num_labels = end - start;
+  int file_header_size = sizeof(uint8_t) * start;
+  
+  uint8_t* labels = (uint8_t*) malloc(sizeof(uint8_t) * num_labels);
+  FILE* file = fopen(labels_file, "rb");
+  if(file == NULL){
+    printf("Data file %s is not found. Aborting...\n", labels_file);
+    abort();
+  }
+  
+  fseek(file, file_header_size, SEEK_SET); // Skipping the file header
+    
+  size_t bytes_read = fread(labels, 1, sizeof(uint8_t) * num_labels, file);
 
-void computeAccuracy(char* labels_file, int num_labels, void* result_ptr){
+
+  fclose(file);
+  
+  // printf("--labels bytes_read = %lu \n", bytes_read);
+  return labels;
+}
+
+
+
+void computeAccuracy(const char* labels_file, int num_labels, void* result_ptr){
 
   struct Tensor* result = (struct Tensor*) result_ptr;
   
@@ -306,30 +437,102 @@ void computeAccuracy(char* labels_file, int num_labels, void* result_ptr){
 
 
 
-void computeAccuracy2(uint8_t* labels, int num_labels, void* result_ptr){
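+// Returns top-1 accuracy (%) over num_labels samples and writes it to the 'final_accuracy' file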
+float computeAccuracy2(uint8_t* labels, int num_labels, void* result_ptr, unsigned num_classes = 10){
 
+  unsigned num_zeros = 0;
+  
   struct Tensor* result = (struct Tensor*) result_ptr;
   
-  //uint8_t* labels = readLabels(labels_file, num_labels);
   size_t batch_dim = result->dims.dim_sizes[0];
   size_t channels = result->dims.dim_sizes[1];
   float* data = (float*) result->host_data;
   int num_errors = 0;
+
+  printf("batch_dim = %lu, channels = %lu \n", batch_dim, channels);
+  
+  for(int i = 0; i < num_labels; i++){
   
-  for(int i = 0; i < batch_dim; i++){
     int chosen = 0;
-    for (int id = 1; id < 10; ++id){
+    for (int id = 1; id < num_classes; ++id){
       if (data[i * channels + chosen] < data[i * channels + id]) chosen = id;
     }
     
-    //printf("chosen = %d, label = %d \n", chosen, labels[i]);
+    if(labels[i] == 0)
+      num_zeros++;
+      
     if(chosen != labels[i])
       num_errors++;
+
+    //printf("chosen = %d, label = %d \n", chosen, labels[i]);
   }
 
   float accuracy = ((batch_dim - num_errors) * 1.0 / batch_dim * 1.0) * 100.0;
   printf("****** Accuracy = %f \n\n", accuracy);
+  //printf("****** Zero class labels %d \n", num_zeros);
+
+  FILE* fp = fopen("final_accuracy", "w+");
+  if(fp != NULL){
+
+    std::ostringstream ss;
+    ss << std::fixed << accuracy;
+    std::string print_str = ss.str();
+  
+    fwrite(print_str.c_str(), 1, print_str.length(), fp);
+    fclose(fp);
+  }
+
+  return accuracy;    
+}
+
 
+struct ClassProb{
+  float prob;
+  int index;
+};
+
+
+bool descendFloatComp(ClassProb obj1, ClassProb obj2){
+  return obj1.prob > obj2.prob;
+}
+
+
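+// Returns top-5 accuracy (%): a sample counts as correct if its label is among the 5 highest-scoring classes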
+float computeTop5Accuracy(uint8_t* labels, int num_labels, void* result_ptr, unsigned num_classes = 10){
+  
+  struct Tensor* result = (struct Tensor*) result_ptr;
+  
+  size_t batch_dim = result->dims.dim_sizes[0];
+  size_t channels = result->dims.dim_sizes[1];
+  float* data = (float*) result->host_data;
+  int num_errors = 0;
+
+  printf("batch_dim = %lu, channels = %lu \n", batch_dim, channels);
+  
+  for(int i = 0; i < num_labels; i++){
+
+    std::vector<ClassProb> elem_probs;
+    for (int id = 0; id < num_classes; ++id){
+      ClassProb cProb;
+      cProb.prob = data[i * channels + id];
+      cProb.index = id;
+      elem_probs.push_back(cProb);   
+    }
+
+    std::sort(elem_probs.begin(), elem_probs.end(), descendFloatComp);
+    // Check if any of top-5 predictions matches
+    bool matched = false;
+    for(int j = 0; j < 5; j++){
+      ClassProb cProb = elem_probs[j];
+      if(cProb.index == labels[i])
+        matched = true;
+    }
+
+    if(!matched)
+      num_errors +=1; 
+  }
+
+  float accuracy = ((batch_dim - num_errors) * 1.0 / batch_dim * 1.0) * 100.0;
+  printf("****** Accuracy = %f \n\n", accuracy);
 
   FILE* fp = fopen("final_accuracy", "w+");
   if(fp != NULL){
@@ -339,10 +542,194 @@ void computeAccuracy2(uint8_t* labels, int num_labels, void* result_ptr){
     std::string print_str = ss.str();
   
     fwrite(print_str.c_str(), 1, print_str.length(), fp);
-    fclose(fp);
+    fclose(fp);
+  }
+
+  return accuracy;    
+}
+
+
+
+
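+// Writes the accuracy to the 'final_accuracy' file and records it in run_accuracies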
+void dumpFinalAccuracy(float accuracy){
+
+  printf("\n\n **** Final Accuracy = %f \n", accuracy);
+  
+  FILE* fp = fopen("final_accuracy", "w+");
+  if(fp != NULL){
+    std::ostringstream ss;
+    ss << std::fixed << accuracy;
+    std::string print_str = ss.str();
+  
+    fwrite(print_str.c_str(), 1, print_str.length(), fp);
+    fclose(fp);
+  }
+
+  run_accuracies.push_back(accuracy);
+}
+
+
+
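+// Writes the average PSNR to the 'avg_psnr' file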
+void dumpAvgPSNR(float avg_psnr){
+
+  FILE* fp = fopen("avg_psnr", "w+");
+  if(fp != NULL){
+    std::ostringstream ss;
+    ss << std::fixed << avg_psnr;
+    std::string print_str = ss.str(); 
+    fwrite(print_str.c_str(), 1, print_str.length(), fp);
+    fclose(fp);
+  }
+
+}
+
+
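+// Writes the PSNR standard deviation to 'psnr_std.txt'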
+void dumpPSNRStd(float psnr_std){
+
+  FILE* fp = fopen("psnr_std.txt", "w+");
+  if(fp != NULL){
+    std::ostringstream ss;
+    ss << std::fixed << psnr_std;
+    std::string print_str = ss.str(); 
+    fwrite(print_str.c_str(), 1, print_str.length(), fp);
+    fclose(fp);
+  }
+
+}
+
+
+
+
+
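+// Writes one accuracy per line (from run_accuracies) to 'run_accuracies.txt'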
+void dumpExecutionAccuracies(){
+
+  FILE* fp = fopen("run_accuracies.txt", "w+");
+  if(fp != NULL){  
+    for (int i = 0; i < run_accuracies.size(); i++){
+      float accuracy = run_accuracies[i];
+      std::ostringstream ss;
+      ss << std::fixed << accuracy;
+      std::string print_str = ss.str();
+      fwrite(print_str.c_str(), 1, print_str.length(), fp);
+      fwrite("\n", 1, 1, fp);
+    }
+
+    fclose(fp);
+  }
+
+}
+
+
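+// Reads a single float PSNR threshold from the given text file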
+float readPSNRFromFile(const char* file_name){
+
+  float psnr;
+  FILE* pFile = fopen(file_name, "r");
+  if(pFile == NULL){
+    printf("ERROR: psnr.txt not found! \n");
+    abort();
   }
   
+  fscanf(pFile, "%f", &psnr);
+  fclose(pFile);
+
+  printf("**** PSNR read = %f \n\n", psnr);
+  return psnr; 
 }
 
 
+float computePSNRViolation(void* gold_ptr, void* approx_ptr, float PSNR_threshold){
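+  // Computes per-image PSNR of approx vs gold output; returns the percentage of images whose PSNR
+  // falls below the threshold read from psnr.txt, and dumps avg PSNR, PSNR std, and success rate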
+
+  
+  PSNR_threshold = readPSNRFromFile("psnr.txt");
+  std::vector<float> psnr_list;
+  
+  struct Tensor* gold_tensor = (struct Tensor*) gold_ptr;
+  struct Tensor* approx_tensor = (struct Tensor*) approx_ptr;
+
+  size_t* dim_sizes = gold_tensor->dims.dim_sizes;
+  size_t batch_dim = dim_sizes[0];
+  size_t image_size = dim_sizes[1] * dim_sizes[2] * dim_sizes[3];
+  
+  printf("batch_dim = %lu, image_size = %lu \n", batch_dim, image_size);
+	 
+  float* gold_data = (float*) gold_tensor->host_data;
+  float* approx_data = (float*) approx_tensor->host_data;
+
+  FILE* fp = fopen("img_psnr.txt", "w+");
+
+  float sum_psnr = 0.0;
+  int num_errors = 0;  
+  for(size_t i = 0; i < batch_dim; i++){
+    float mse_sum = 0.0;
+    float max_val = -999999;     
+    size_t offset = i * image_size;
+    
+    for(size_t j = 0; j < image_size; j++){
+      float diff = gold_data[offset + j] - approx_data[offset + j];
+      float diff_square = diff * diff;
+      mse_sum += diff_square;
+
+      if(max_val < gold_data[offset + j]){
+	max_val = gold_data[offset + j];
+      }   
+    }
+
+    mse_sum = mse_sum / image_size;
+    float psnr = 20 * log10(255 / sqrt(mse_sum));
+
+    sum_psnr += psnr;
+    if (psnr < PSNR_threshold)
+      num_errors += 1;    
+
+    printf("PSNR value = %f \n", psnr);
+    psnr_list.push_back(psnr);
+
+    std::ostringstream ss;
+    ss << std::fixed << psnr;
+    std::string print_str = ss.str();
+    fwrite(print_str.c_str(), 1, print_str.length(), fp);
+    fwrite("\n", 1, 1, fp);
+  }
+
+  float violation_rate = (num_errors * 1.0) / batch_dim * 100.0;
+  printf("*** violation_rate= %f \n\n", violation_rate);
+
+  float avg_psnr = sum_psnr / batch_dim;
+  printf("*** avg_psnr =  %f \n\n", avg_psnr);
+  dumpAvgPSNR(avg_psnr);
+ 
+  float success_rate = 100.0 - violation_rate;
+  dumpFinalAccuracy(success_rate);
+
+  fclose(fp);
+
+
+  float var = 0.0;
+  for(size_t i = 0; i < batch_dim; i++){
+    var = var + (psnr_list[i] - avg_psnr) * (psnr_list[i] - avg_psnr); 
+  }
+
+  var /= batch_dim;
+  float std = sqrt(var);
+
+  dumpPSNRStd(std);
+  
+  return violation_rate;  
+}
+
+
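+// Writes the raw bytes of the output tensor's host data to file_name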
+void dumpOutput(void* output_ptr, const char* file_name){
+
+  struct Tensor* out_tensor = (struct Tensor*) output_ptr;  
+  size_t size_in_bytes = out_tensor->size_in_bytes;
+  printf ("** Output size = %lu \n", size_in_bytes);
+  
+  float* host_data = (float*) out_tensor->host_data; 
+  FILE* fd = fopen(file_name, "w+");
+  if(fd == NULL){
+    printf("File %s could not be created. Aborting... \n", file_name);
+    abort();
+  }
+
+  fwrite(host_data, 1, size_in_bytes, fd);
+  fclose(fd);
+}
+
+
+
 #endif