diff --git a/llvm/projects/hpvm-tensor-rt/dnn_sources/include/utils.h b/llvm/projects/hpvm-tensor-rt/dnn_sources/include/utils.h
index c086602506e8ed521c7deb99f7121f9d8159190e..8976f7dc8fa5df24536b051a5d436da6555161cf 100644
--- a/llvm/projects/hpvm-tensor-rt/dnn_sources/include/utils.h
+++ b/llvm/projects/hpvm-tensor-rt/dnn_sources/include/utils.h
@@ -407,7 +407,24 @@ uint8_t* readLabels(const char* labels_file, int num_labels){
 
   fclose(file);
   
-  // printf("--labels bytes_read = %lu \n", bytes_read);
+  return labels;
+}
+
+
+
+uint32_t* readLabels2(const char* labels_file, int num_labels){
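+  // Reads num_labels uint32_t labels from a raw binary file (32-bit counterpart of readLabels)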
+
+  uint32_t* labels = (uint32_t*) malloc(sizeof(uint32_t) * num_labels);
+  FILE* file = fopen(labels_file, "rb");
+  if(file == NULL){
+    printf("Data file %s is not found. Aborting...\n", labels_file);
+    abort();
+  }
+
+  size_t bytes_read = fread(labels, 1, sizeof(uint32_t) * num_labels, file);
+  // Guard against short reads that would leave labels partially uninitialized
+  if(bytes_read != sizeof(uint32_t) * num_labels){
+    printf("WARNING: labels bytes_read = %lu, expected %lu \n",
+           bytes_read, sizeof(uint32_t) * num_labels);
+  }
+
+  fclose(file);
+  
   return labels;
 }
 
@@ -436,6 +453,29 @@ uint8_t* readLabelsBatch(const char* labels_file, int start, int end){
 }
 
 
+uint32_t* readLabelsBatch2(const char* labels_file, int start, int end){
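+  // Returns the uint32_t labels in [start, end) from a raw binary labels file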
+
+  int num_labels = end - start;
+  int file_header_size = sizeof(uint32_t) * start;
+  
+  uint32_t* labels = (uint32_t*) malloc(sizeof(uint32_t) * num_labels);
+  FILE* file = fopen(labels_file, "rb");
+  if(file == NULL){
+    printf("Data file %s is not found. Aborting...\n", labels_file);
+    abort();
+  }
+  
+  fseek(file, file_header_size, SEEK_SET); // Seek to the first label of this batch
+    
+  size_t bytes_read = fread(labels, 1, sizeof(uint32_t) * num_labels, file);
+  // Guard against short reads that would leave labels partially uninitialized
+  if(bytes_read != sizeof(uint32_t) * num_labels){
+    printf("WARNING: labels bytes_read = %lu, expected %lu \n",
+           bytes_read, sizeof(uint32_t) * num_labels);
+  }
+
+  fclose(file);
+  
+  return labels;
+}
+
+
 
 void computeAccuracy(const char* labels_file, int num_labels, void* result_ptr){
 
@@ -478,7 +518,8 @@ void computeAccuracy(const char* labels_file, int num_labels, void* result_ptr){
 
 
 
-float computeAccuracy2(uint8_t* labels, int num_labels, void* result_ptr, unsigned num_classes = 10){
+float computeAccuracy2(uint8_t* labels, int num_labels,
+		       void* result_ptr, unsigned num_classes = 10){
 
   unsigned num_zeros = 0;
   
@@ -494,7 +535,8 @@ float computeAccuracy2(uint8_t* labels, int num_labels, void* result_ptr, unsign
   for(int i = 0; i < num_labels; i++){
   
     int chosen = 0;
-    for (int id = 1; id < num_classes; ++id){
+    // Iterate over the tensor's actual channel count rather than num_classes
+    for (int id = 1; id < channels; ++id){
       if (data[i * channels + chosen] < data[i * channels + id]) chosen = id;
     }
     
@@ -538,7 +580,8 @@ bool descendFloatComp(ClassProb obj1, ClassProb obj2){
 }
 
 
-float computeTop5Accuracy(uint8_t* labels, int num_labels, void* result_ptr, unsigned num_classes = 10){
+float computeTop5Accuracy(uint8_t* labels, int num_labels,
+			  void* result_ptr, unsigned num_classes = 10){
   
   struct Tensor* result = (struct Tensor*) result_ptr;
   
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/hpvm-rt-controller.h b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/hpvm-rt-controller.h
index 7558a131a700e73af4624c12a4a6b6ab99a5f8aa..7b3ba18cd85c3b3e56bab6ecd45b1c165febb028 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/hpvm-rt-controller.h
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/hpvm-rt-controller.h
@@ -49,6 +49,7 @@ class RuntimeController {
 
   void init(const char *Cstr, const char *Qstr) {
     readQuantizationFile(Qstr);
+    printf("Going to read Conf file \n");
     readConfigurationFile(Cstr);
   }
 
@@ -322,7 +323,12 @@ void llvm_hpvm_clearRuntimeController() {
 }
 
 
+void llvm_hpvm_invokeRtControl(void* result, uint8_t* labels){
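+  // Placeholder: per-batch runtime control hook; body intentionally left empty for now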
 
+}
+
+void llvm_hpvm_invokeRtControl2(void* result, uint32_t* labels){
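+  // Placeholder: uint32_t-label variant of the runtime control hook; also empty for now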
 
+}
 
 #endif
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/rt-controller-api.h b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/rt-controller-api.h
index 17441fc756be513915b4125ded958493d2c8bf46..903c9674bdcd679d2252f4ce6469f4979cf7dab8 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/rt-controller-api.h
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/rt-controller-api.h
@@ -5,6 +5,8 @@ extern "C"{
   
   void llvm_hpvm_initializeRuntimeController(const char *, const char *);
   void llvm_hpvm_clearRuntimeController();
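+  // Runtime control hooks, invoked once per inference batch (uint8_t / uint32_t label variants)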
+  void llvm_hpvm_invokeRtControl(void* result, uint8_t* labels);
+  void llvm_hpvm_invokeRtControl2(void* result, uint32_t* labels);
 }
 
 
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/alexnet/src/alexnet_loop.cpp b/llvm/test/VISC/DNN_Benchmarks/benchmarks/alexnet/src/alexnet_loop.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7b0fb69dd853dbe678c1dee535d0ba0fdbdebcaa
--- /dev/null
+++ b/llvm/test/VISC/DNN_Benchmarks/benchmarks/alexnet/src/alexnet_loop.cpp
@@ -0,0 +1,475 @@
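+// AlexNet/CIFAR-10 benchmark built as an HPVM (VISC) dataflow graph: five conv
+// blocks plus one dense layer, run over the 10000-image test set in batches of 500.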
+
+#include <stdio.h> 
+#include <stdlib.h> 
+#include <unistd.h> 
+#include <fcntl.h> 
+#include <sys/stat.h> 
+#include <cstring> 
+#include <visc.h> 
+#include <tensorTypes.h> 
+#include <tensorUtils.h> 
+
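+// Each var_i_node wraps a single tensor op targeted at the cuDNN backend (per the
+// CUDNN_TARGET hint) and returns the result tensor paired with a dummy size of 0.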
+void var_0_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_convolution(t1, t2, 5, 5, 1, 1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_1_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_2_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_tanh(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_3_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_pool_max(t1, 2, 2, 0, 0, 2, 2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_4_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_convolution(t1, t2, 2, 2, 1, 1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_5_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_6_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_tanh(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_7_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_pool_max(t1, 2, 2, 0, 0, 2, 2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_8_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_convolution(t1, t2, 1, 1, 1, 1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_9_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_10_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_tanh(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_11_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_convolution(t1, t2, 1, 1, 1, 1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_12_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_13_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_tanh(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_14_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_convolution(t1, t2, 1, 1, 1, 1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_15_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_16_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_tanh(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_17_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_pool_max(t1, 2, 2, 0, 0, 2, 2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_18_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_mul(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_19_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_20_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_softmax(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
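+// root instantiates the 21 nodes and wires them into one graph: __visc__edge forwards
+// each node's (tensor, size) outputs to its successor, and __visc__bindIn binds root's
+// argument slots (pointer/size pairs) to the weight and bias inputs of the matching node.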
+void root(void* input, size_t input_bytes, 
+	  void* conv2d_1_w, size_t conv2d_1_w_bytes, 
+	  void* conv2d_1_b, size_t conv2d_1_b_bytes, 
+	  void* conv2d_2_w, size_t conv2d_2_w_bytes, 
+	  void* conv2d_2_b, size_t conv2d_2_b_bytes, 
+	  void* conv2d_3_w, size_t conv2d_3_w_bytes, 
+	  void* conv2d_3_b, size_t conv2d_3_b_bytes, 
+	  void* conv2d_4_w, size_t conv2d_4_w_bytes, 
+	  void* conv2d_4_b, size_t conv2d_4_b_bytes, 
+	  void* conv2d_5_w, size_t conv2d_5_w_bytes, 
+	  void* conv2d_5_b, size_t conv2d_5_b_bytes, 
+	  void* dense_1_w, size_t dense_1_w_bytes, 
+	  void* dense_1_b, size_t dense_1_b_bytes){ 
+
+
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(13, input, conv2d_1_w, conv2d_1_b, conv2d_2_w, conv2d_2_b, conv2d_3_w, conv2d_3_b, conv2d_4_w, conv2d_4_b, conv2d_5_w, conv2d_5_b, dense_1_w, dense_1_b, 0); 
+
+
+  void* var_0 = __visc__createNodeND(0, var_0_node); 
+
+  __visc__bindIn(var_0, 0, 0, 0); 
+  __visc__bindIn(var_0, 1, 1, 0); 
+  __visc__bindIn(var_0, 2, 2, 0); 
+  __visc__bindIn(var_0, 3, 3, 0); 
+
+  void* var_1 = __visc__createNodeND(0, var_1_node); 
+
+  __visc__edge(var_0, var_1, 1, 0, 0, 0); 
+  __visc__edge(var_0, var_1, 1, 1, 1, 0); 
+  __visc__bindIn(var_1, 4, 2, 0); 
+  __visc__bindIn(var_1, 5, 3, 0); 
+
+  void* var_2 = __visc__createNodeND(0, var_2_node); 
+
+  __visc__edge(var_1, var_2, 1, 0, 0, 0); 
+  __visc__edge(var_1, var_2, 1, 1, 1, 0); 
+
+  void* var_3 = __visc__createNodeND(0, var_3_node); 
+
+  __visc__edge(var_2, var_3, 1, 0, 0, 0); 
+  __visc__edge(var_2, var_3, 1, 1, 1, 0); 
+
+  void* var_4 = __visc__createNodeND(0, var_4_node); 
+
+  __visc__edge(var_3, var_4, 1, 0, 0, 0); 
+  __visc__edge(var_3, var_4, 1, 1, 1, 0); 
+  __visc__bindIn(var_4, 6, 2, 0); 
+  __visc__bindIn(var_4, 7, 3, 0); 
+
+  void* var_5 = __visc__createNodeND(0, var_5_node); 
+
+  __visc__edge(var_4, var_5, 1, 0, 0, 0); 
+  __visc__edge(var_4, var_5, 1, 1, 1, 0); 
+  __visc__bindIn(var_5, 8, 2, 0); 
+  __visc__bindIn(var_5, 9, 3, 0); 
+
+  void* var_6 = __visc__createNodeND(0, var_6_node); 
+
+  __visc__edge(var_5, var_6, 1, 0, 0, 0); 
+  __visc__edge(var_5, var_6, 1, 1, 1, 0); 
+
+  void* var_7 = __visc__createNodeND(0, var_7_node); 
+
+  __visc__edge(var_6, var_7, 1, 0, 0, 0); 
+  __visc__edge(var_6, var_7, 1, 1, 1, 0); 
+
+  void* var_8 = __visc__createNodeND(0, var_8_node); 
+
+  __visc__edge(var_7, var_8, 1, 0, 0, 0); 
+  __visc__edge(var_7, var_8, 1, 1, 1, 0); 
+  __visc__bindIn(var_8, 10, 2, 0); 
+  __visc__bindIn(var_8, 11, 3, 0); 
+
+  void* var_9 = __visc__createNodeND(0, var_9_node); 
+
+  __visc__edge(var_8, var_9, 1, 0, 0, 0); 
+  __visc__edge(var_8, var_9, 1, 1, 1, 0); 
+  __visc__bindIn(var_9, 12, 2, 0); 
+  __visc__bindIn(var_9, 13, 3, 0); 
+
+  void* var_10 = __visc__createNodeND(0, var_10_node); 
+
+  __visc__edge(var_9, var_10, 1, 0, 0, 0); 
+  __visc__edge(var_9, var_10, 1, 1, 1, 0); 
+
+  void* var_11 = __visc__createNodeND(0, var_11_node); 
+
+  __visc__edge(var_10, var_11, 1, 0, 0, 0); 
+  __visc__edge(var_10, var_11, 1, 1, 1, 0); 
+  __visc__bindIn(var_11, 14, 2, 0); 
+  __visc__bindIn(var_11, 15, 3, 0); 
+
+  void* var_12 = __visc__createNodeND(0, var_12_node); 
+
+  __visc__edge(var_11, var_12, 1, 0, 0, 0); 
+  __visc__edge(var_11, var_12, 1, 1, 1, 0); 
+  __visc__bindIn(var_12, 16, 2, 0); 
+  __visc__bindIn(var_12, 17, 3, 0); 
+
+  void* var_13 = __visc__createNodeND(0, var_13_node); 
+
+  __visc__edge(var_12, var_13, 1, 0, 0, 0); 
+  __visc__edge(var_12, var_13, 1, 1, 1, 0); 
+
+  void* var_14 = __visc__createNodeND(0, var_14_node); 
+
+  __visc__edge(var_13, var_14, 1, 0, 0, 0); 
+  __visc__edge(var_13, var_14, 1, 1, 1, 0); 
+  __visc__bindIn(var_14, 18, 2, 0); 
+  __visc__bindIn(var_14, 19, 3, 0); 
+
+  void* var_15 = __visc__createNodeND(0, var_15_node); 
+
+  __visc__edge(var_14, var_15, 1, 0, 0, 0); 
+  __visc__edge(var_14, var_15, 1, 1, 1, 0); 
+  __visc__bindIn(var_15, 20, 2, 0); 
+  __visc__bindIn(var_15, 21, 3, 0); 
+
+  void* var_16 = __visc__createNodeND(0, var_16_node); 
+
+  __visc__edge(var_15, var_16, 1, 0, 0, 0); 
+  __visc__edge(var_15, var_16, 1, 1, 1, 0); 
+
+  void* var_17 = __visc__createNodeND(0, var_17_node); 
+
+  __visc__edge(var_16, var_17, 1, 0, 0, 0); 
+  __visc__edge(var_16, var_17, 1, 1, 1, 0); 
+
+  void* var_18 = __visc__createNodeND(0, var_18_node); 
+
+  __visc__edge(var_17, var_18, 1, 0, 0, 0); 
+  __visc__edge(var_17, var_18, 1, 1, 1, 0); 
+  __visc__bindIn(var_18, 22, 2, 0); 
+  __visc__bindIn(var_18, 23, 3, 0); 
+
+  void* var_19 = __visc__createNodeND(0, var_19_node); 
+
+  __visc__edge(var_18, var_19, 1, 0, 0, 0); 
+  __visc__edge(var_18, var_19, 1, 1, 1, 0); 
+  __visc__bindIn(var_19, 24, 2, 0); 
+  __visc__bindIn(var_19, 25, 3, 0); 
+
+  void* var_20 = __visc__createNodeND(0, var_20_node); 
+
+  __visc__edge(var_19, var_20, 1, 0, 0, 0); 
+  __visc__edge(var_19, var_20, 1, 1, 1, 0); 
+
+  __visc__bindOut(var_20, 0, 0, 0); 
+  __visc__bindOut(var_20, 1, 1, 0); 
+
+}
+
+struct ret_t {
+  void* tensor; 
+  size_t bytes; 
+}; 
+
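+// Packed argument record for __visc__launch: one (pointer, bytes) pair per root
+// parameter, with space for the returned tensor at the end.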
+typedef struct __attribute__((__packed__)) {
+  void* input; 
+  size_t input_bytes; 
+  void* conv2d_1_w; 
+  size_t conv2d_1_w_bytes; 
+  void* conv2d_1_b; 
+  size_t conv2d_1_b_bytes; 
+  void* conv2d_2_w; 
+  size_t conv2d_2_w_bytes; 
+  void* conv2d_2_b; 
+  size_t conv2d_2_b_bytes; 
+  void* conv2d_3_w; 
+  size_t conv2d_3_w_bytes; 
+  void* conv2d_3_b; 
+  size_t conv2d_3_b_bytes; 
+  void* conv2d_4_w; 
+  size_t conv2d_4_w_bytes; 
+  void* conv2d_4_b; 
+  size_t conv2d_4_b_bytes; 
+  void* conv2d_5_w; 
+  size_t conv2d_5_w_bytes; 
+  void* conv2d_5_b; 
+  size_t conv2d_5_b_bytes; 
+  void* dense_1_w; 
+  size_t dense_1_w_bytes; 
+  void* dense_1_b; 
+  size_t dense_1_b_bytes; 
+
+  struct ret_t r; 
+}
+RootIn;
+
+int main(){ 
+
+  std::string dir_prefix = std::string("../../../../../../projects/hpvm-tensor-rt/model_params/alexnet_cifar10_test/");
+
+
+  std::string labels_path =  dir_prefix + std::string("labels.bin"); 
+  std::string conv2d_1_w_path =  dir_prefix + std::string("conv2d_1_w.bin"); 
+  void* conv2d_1_w =  readTrainedWeights(conv2d_1_w_path.c_str(), 0,64,3,11,11); 
+  std::string conv2d_1_b_path =  dir_prefix + std::string("conv2d_1_b.bin"); 
+  void* conv2d_1_b =  readTrainedWeights(conv2d_1_b_path.c_str(), 0,1,64,1,1); 
+  std::string conv2d_2_w_path =  dir_prefix + std::string("conv2d_2_w.bin"); 
+  void* conv2d_2_w =  readTrainedWeights(conv2d_2_w_path.c_str(), 0,192,64,5,5); 
+  std::string conv2d_2_b_path =  dir_prefix + std::string("conv2d_2_b.bin"); 
+  void* conv2d_2_b =  readTrainedWeights(conv2d_2_b_path.c_str(), 0,1,192,1,1); 
+  std::string conv2d_3_w_path =  dir_prefix + std::string("conv2d_3_w.bin"); 
+  void* conv2d_3_w =  readTrainedWeights(conv2d_3_w_path.c_str(), 0,384,192,3,3); 
+  std::string conv2d_3_b_path =  dir_prefix + std::string("conv2d_3_b.bin"); 
+  void* conv2d_3_b =  readTrainedWeights(conv2d_3_b_path.c_str(), 0,1,384,1,1); 
+  std::string conv2d_4_w_path =  dir_prefix + std::string("conv2d_4_w.bin"); 
+  void* conv2d_4_w =  readTrainedWeights(conv2d_4_w_path.c_str(), 0,256,384,3,3); 
+  std::string conv2d_4_b_path =  dir_prefix + std::string("conv2d_4_b.bin"); 
+  void* conv2d_4_b =  readTrainedWeights(conv2d_4_b_path.c_str(), 0,1,256,1,1); 
+  std::string conv2d_5_w_path =  dir_prefix + std::string("conv2d_5_w.bin"); 
+  void* conv2d_5_w =  readTrainedWeights(conv2d_5_w_path.c_str(), 0,256,256,3,3); 
+  std::string conv2d_5_b_path =  dir_prefix + std::string("conv2d_5_b.bin"); 
+  void* conv2d_5_b =  readTrainedWeights(conv2d_5_b_path.c_str(), 0,1,256,1,1); 
+  std::string dense_1_w_path =  dir_prefix + std::string("dense_1_w.bin"); 
+  void* dense_1_w =  readTrainedWeights(dense_1_w_path.c_str(), 0,1,1,4096,10); 
+  std::string dense_1_b_path =  dir_prefix + std::string("dense_1_b.bin"); 
+  void* dense_1_b =  readTrainedWeights(dense_1_b_path.c_str(), 0,1,10,1,1); 
+
+
+
+  __visc__init(); 
+  RootIn* args = static_cast<RootIn*>(malloc(sizeof(RootIn))); 
+
+  args->conv2d_1_w = conv2d_1_w; 
+  args->conv2d_1_w_bytes = 0; 
+  args->conv2d_1_b = conv2d_1_b; 
+  args->conv2d_1_b_bytes = 0; 
+  args->conv2d_2_w = conv2d_2_w; 
+  args->conv2d_2_w_bytes = 0; 
+  args->conv2d_2_b = conv2d_2_b; 
+  args->conv2d_2_b_bytes = 0; 
+  args->conv2d_3_w = conv2d_3_w; 
+  args->conv2d_3_w_bytes = 0; 
+  args->conv2d_3_b = conv2d_3_b; 
+  args->conv2d_3_b_bytes = 0; 
+  args->conv2d_4_w = conv2d_4_w; 
+  args->conv2d_4_w_bytes = 0; 
+  args->conv2d_4_b = conv2d_4_b; 
+  args->conv2d_4_b_bytes = 0; 
+  args->conv2d_5_w = conv2d_5_w; 
+  args->conv2d_5_w_bytes = 0; 
+  args->conv2d_5_b = conv2d_5_b; 
+  args->conv2d_5_b_bytes = 0; 
+  args->dense_1_w = dense_1_w; 
+  args->dense_1_w_bytes = 0; 
+  args->dense_1_b = dense_1_b; 
+  args->dense_1_b_bytes = 0; 
+
+  int batch_size = 500;
+  int test_input_size = 10000;  
+  int batch_count = test_input_size / batch_size;
+
+  std::string input_path =  dir_prefix + std::string("input.bin"); 
+  void* input = create4DTensor(0,nchw,batch_size,3,32,32);
+
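+  // Run inference batch-by-batch, reusing the same input tensor; freeBatchMemory()
+  // releases the tensors allocated since startMemTracking() in each iteration.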
+  startMemTracking();
+  for (int i = 0; i < batch_count; i++){
+
+    int start = i * batch_size; 
+    int end = (i + 1) * batch_size; 
+
+    copyInputBatch(input_path.c_str(),start,end,3,32,32, input);
+
+    args->input = input; 
+    args->input_bytes = 0; 
+
+    void* dfg = __visc__launch(0, root, (void*) args); 
+
+    __visc__wait(dfg); 
+
+    void *result = static_cast<RootIn*>(args)->input; 
+    hpvm_request_tensor(result, 0); 
+
+
+    uint8_t* labels = readLabelsBatch(labels_path.c_str(),start,end); 
+
+    computeAccuracy2(labels, batch_size, result);
+
+    llvm_hpvm_invokeRtControl(result, labels);
+      
+    freeBatchMemory();
+  }
+
+
+  __visc__cleanup();
+
+
+  return 0; 
+
+} 
diff --git a/llvm/test/VISC/DNN_Benchmarks/common/include/tensorUtils.h b/llvm/test/VISC/DNN_Benchmarks/common/include/tensorUtils.h
index c086602506e8ed521c7deb99f7121f9d8159190e..8976f7dc8fa5df24536b051a5d436da6555161cf 100644
--- a/llvm/test/VISC/DNN_Benchmarks/common/include/tensorUtils.h
+++ b/llvm/test/VISC/DNN_Benchmarks/common/include/tensorUtils.h
@@ -407,7 +407,24 @@ uint8_t* readLabels(const char* labels_file, int num_labels){
 
   fclose(file);
   
-  // printf("--labels bytes_read = %lu \n", bytes_read);
+  return labels;
+}
+
+
+
+uint32_t* readLabels2(const char* labels_file, int num_labels){
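+  // Reads num_labels uint32_t labels from a raw binary file (32-bit counterpart of readLabels)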
+
+  uint32_t* labels = (uint32_t*) malloc(sizeof(uint32_t) * num_labels);
+  FILE* file = fopen(labels_file, "rb");
+  if(file == NULL){
+    printf("Data file %s is not found. Aborting...\n", labels_file);
+    abort();
+  }
+
+  size_t bytes_read = fread(labels, 1, sizeof(uint32_t) * num_labels, file);
+  // Guard against short reads that would leave labels partially uninitialized
+  if(bytes_read != sizeof(uint32_t) * num_labels){
+    printf("WARNING: labels bytes_read = %lu, expected %lu \n",
+           bytes_read, sizeof(uint32_t) * num_labels);
+  }
+
+  fclose(file);
+  
   return labels;
 }
 
@@ -436,6 +453,29 @@ uint8_t* readLabelsBatch(const char* labels_file, int start, int end){
 }
 
 
+uint32_t* readLabelsBatch2(const char* labels_file, int start, int end){
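+  // Returns the uint32_t labels in [start, end) from a raw binary labels file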
+
+  int num_labels = end - start;
+  int file_header_size = sizeof(uint32_t) * start;
+  
+  uint32_t* labels = (uint32_t*) malloc(sizeof(uint32_t) * num_labels);
+  FILE* file = fopen(labels_file, "rb");
+  if(file == NULL){
+    printf("Data file %s is not found. Aborting...\n", labels_file);
+    abort();
+  }
+  
+  fseek(file, file_header_size, SEEK_SET); // Seek to the first label of this batch
+    
+  size_t bytes_read = fread(labels, 1, sizeof(uint32_t) * num_labels, file);
+  // Guard against short reads that would leave labels partially uninitialized
+  if(bytes_read != sizeof(uint32_t) * num_labels){
+    printf("WARNING: labels bytes_read = %lu, expected %lu \n",
+           bytes_read, sizeof(uint32_t) * num_labels);
+  }
+
+  fclose(file);
+  
+  return labels;
+}
+
+
 
 void computeAccuracy(const char* labels_file, int num_labels, void* result_ptr){
 
@@ -478,7 +518,8 @@ void computeAccuracy(const char* labels_file, int num_labels, void* result_ptr){
 
 
 
-float computeAccuracy2(uint8_t* labels, int num_labels, void* result_ptr, unsigned num_classes = 10){
+float computeAccuracy2(uint8_t* labels, int num_labels,
+		       void* result_ptr, unsigned num_classes = 10){
 
   unsigned num_zeros = 0;
   
@@ -494,7 +535,8 @@ float computeAccuracy2(uint8_t* labels, int num_labels, void* result_ptr, unsign
   for(int i = 0; i < num_labels; i++){
   
     int chosen = 0;
-    for (int id = 1; id < num_classes; ++id){
+    // Iterate over the tensor's actual channel count rather than num_classes
+    for (int id = 1; id < channels; ++id){
       if (data[i * channels + chosen] < data[i * channels + id]) chosen = id;
     }
     
@@ -538,7 +580,8 @@ bool descendFloatComp(ClassProb obj1, ClassProb obj2){
 }
 
 
-float computeTop5Accuracy(uint8_t* labels, int num_labels, void* result_ptr, unsigned num_classes = 10){
+float computeTop5Accuracy(uint8_t* labels, int num_labels,
+			  void* result_ptr, unsigned num_classes = 10){
   
   struct Tensor* result = (struct Tensor*) result_ptr;