diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/Makefile b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..d1d284a26f2fb089d4c46aef213dc03d471b898e
--- /dev/null
+++ b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/Makefile
@@ -0,0 +1,79 @@
+DNN_BENCHMARK_ROOT = $(LLVM_SRC_ROOT)/test/VISC/DNN_Benchmarks
+# NOTE: CHANGE to your BUILD DIRECTORY
+HPVM_BUILD_DIR = $(LLVM_SRC_ROOT)/../build_dsoc/
+
+CC = $(HPVM_BUILD_DIR)/bin/clang++
+OPT = $(HPVM_BUILD_DIR)/bin/opt
+LLVM_DIS = $(HPVM_BUILD_DIR)/bin/llvm-dis
+LLVM_LINK = $(HPVM_BUILD_DIR)/bin/llvm-link
+LLVM_INCLUDE_DIR = $(LLVM_SRC_ROOT)/include
+
+SRC_DIR = src
+BUILD_DIR = build
+APP = mini_era_cv
+
+define \n
+
+
+endef
+
+COMMON_INCLUDE_DIR = $(DNN_BENCHMARK_ROOT)/common/include
+DNN_INCLUDE_DIR = $(LLVM_SRC_ROOT)/projects/hpvm-tensor-rt/dnn_sources/include
+TENSOR_RT_INCLUDE_DIR = $(LLVM_SRC_ROOT)/projects/hpvm-tensor-rt/tensor_runtime/include
+TENSOR_RT_SRC_DIR = $(LLVM_SRC_ROOT)/projects/hpvm-tensor-rt/tensor_runtime/src
+
+CC_FLAGS = -I $(LLVM_INCLUDE_DIR)  -I $(DNN_INCLUDE_DIR) -I $(COMMON_INCLUDE_DIR)  -I $(TENSOR_RT_INCLUDE_DIR) -I $(CUDA_INCLUDE_PATH)  -fno-exceptions -ffast-math  -std=c++11   -O3
+LINKER_FLAGS = -lpthread -lOpenCL
+
+HPVM_LIB_DIR = $(HPVM_BUILD_DIR)/lib
+
+
+OPTFLAGS1 = -load  $(HPVM_LIB_DIR)/LLVMBuildDFG.so -load $(HPVM_LIB_DIR)/LLVMInPlaceDFGAnalysis.so  -load  $(HPVM_LIB_DIR)/ReplaceIntrinsics.so  -load  $(HPVM_LIB_DIR)/DFG2LLVM_X86_dsoc.so  -load $(HPVM_LIB_DIR)/ExtractHPVMLeafNodes.so  -load  $(HPVM_LIB_DIR)/LLVMClearDFG.so  -inplace  -replace-intrinsics  -dfg2llvm-x86-dsoc -hpvm-extract-leaf-gen -clearDFG
+
+OPTFLAGS2 = -load  $(HPVM_LIB_DIR)/InlineTensorCalls.so  -inline-tensor-calls
+
+TARGET = $(BUILD_DIR)/$(APP).final.bc
+
+SOURCES = $(SRC_DIR)/$(APP).cpp
+VISC_RT_PATH = $(LLVM_SRC_ROOT)/projects/visc-cpu-rt/visc-rt.ll
+
+
+.PRECIOUS: $(BUILD_DIR)/$(APP).ll $(BUILD_DIR)/$(APP).visc.ll
+default: $(BUILD_DIR) $(TARGET)
+
+
+$(BUILD_DIR)/%.ll: $(SRC_DIR)/%.cpp  
+	$(CC) $(CC_FLAGS) -emit-llvm -S -o $@ $<
+
+#-visc-timers-gen
+$(BUILD_DIR)/%.visc.ll: $(BUILD_DIR)/%.ll
+	$(OPT) -load LLVMGenVISC.so -genvisc -globaldce  $< -S -o $@
+
+
+expanded_modules:= $(wildcard *_module.ll)
+
+$(BUILD_DIR)/%.opt.bc: $(BUILD_DIR)/%.visc.ll
+	$(OPT) $(OPTFLAGS1) $<  -o $@
+
+
+$(BUILD_DIR)/%.linked.bc: $(BUILD_DIR)/%.opt.bc
+	$(CC) -emit-llvm  -c  $(TENSOR_RT_SRC_DIR)/tensor_cpu_runtime.cc  -o  $(BUILD_DIR)/tensor_cpu_runtime.bc
+	$(OPT) -always-inline $(BUILD_DIR)/tensor_cpu_runtime.bc  -o  $(BUILD_DIR)/tensor_cpu_runtime.bc
+	$(LLVM_LINK)   $<   $(shell find ./build -name "*module.ll")   $(BUILD_DIR)/tensor_cpu_runtime.bc $(VISC_RT_PATH)  -o  $@   
+
+
+$(BUILD_DIR)/%.final.bc: $(BUILD_DIR)/%.linked.bc
+	$(OPT) $(OPTFLAGS2)  $<  -o  $@ 
+	$(CC) $@ -o $(BUILD_DIR)/$(APP)_final  $(LINKER_FLAGS)
+	$(foreach module, $(expanded_modules), $(LLVM_LINK) $(module) $(BUILD_DIR)/tensor_cpu_runtime.bc -o $(BUILD_DIR)/$(module)_linked ${\n} $(OPT) $(OPTFLAGS2) $(BUILD_DIR)/$(module)_linked -o  $(BUILD_DIR)/$(module)_inline  ${\n} )
+
+
+
+$(BUILD_DIR):
+	mkdir -p $@
+
+clean:
+	rm -rf $(BUILD_DIR)
+
+
+
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/approxhpvm_src.cc b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/approxhpvm_src.cc
new file mode 100644
index 0000000000000000000000000000000000000000..d8985397705cf19d5533a7e4a376a71a9f130fb0
--- /dev/null
+++ b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/approxhpvm_src.cc
@@ -0,0 +1,430 @@
+
+#include <stdio.h> 
+#include <stdlib.h> 
+#include <unistd.h> 
+#include <fcntl.h> 
+#include <sys/stat.h> 
+#include <cstring> 
+#include <visc.h> 
+#include <tensorTypes.h> 
+#include <tensorUtils.h> 
+
+void var_0_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_convolution(t1, t2, 0, 0, 1, 1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_1_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_2_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_relu(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_3_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_convolution(t1, t2, 0, 0, 1, 1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_4_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_5_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_relu(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_6_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_pool_max(t1, 2, 2, 0, 0, 2, 2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_7_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_convolution(t1, t2, 0, 0, 1, 1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_8_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_9_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_relu(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_10_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_convolution(t1, t2, 0, 0, 1, 1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_11_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_12_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_relu(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_13_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_pool_max(t1, 2, 2, 0, 0, 2, 2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_14_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_mul(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_15_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_16_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_relu(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_17_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_mul(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_18_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void var_19_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CUDNN_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_softmax(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+void root(void* input, size_t input_bytes, 
+	  void* conv2d_1_w, size_t conv2d_1_w_bytes, 
+	  void* conv2d_1_b, size_t conv2d_1_b_bytes, 
+	  void* conv2d_2_w, size_t conv2d_2_w_bytes, 
+	  void* conv2d_2_b, size_t conv2d_2_b_bytes, 
+	  void* conv2d_3_w, size_t conv2d_3_w_bytes, 
+	  void* conv2d_3_b, size_t conv2d_3_b_bytes, 
+	  void* conv2d_4_w, size_t conv2d_4_w_bytes, 
+	  void* conv2d_4_b, size_t conv2d_4_b_bytes, 
+	  void* dense_1_w, size_t dense_1_w_bytes, 
+	  void* dense_1_b, size_t dense_1_b_bytes, 
+	  void* dense_2_w, size_t dense_2_w_bytes, 
+	  void* dense_2_b, size_t dense_2_b_bytes){ 
+
+
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(13, input, conv2d_1_w, conv2d_1_b, conv2d_2_w, conv2d_2_b, conv2d_3_w, conv2d_3_b, conv2d_4_w, conv2d_4_b, dense_1_w, dense_1_b, dense_2_w, dense_2_b, 0); 
+
+
+  void* var_0 = __visc__createNodeND(0, var_0_node); 
+
+  __visc__bindIn(var_0, 0, 0, 0); 
+  __visc__bindIn(var_0, 1, 1, 0); 
+  __visc__bindIn(var_0, 2, 2, 0); 
+  __visc__bindIn(var_0, 3, 3, 0); 
+
+  void* var_1 = __visc__createNodeND(0, var_1_node); 
+
+  __visc__edge(var_0, var_1, 1, 0, 0, 0); 
+  __visc__edge(var_0, var_1, 1, 1, 1, 0); 
+  __visc__bindIn(var_1, 4, 2, 0); 
+  __visc__bindIn(var_1, 5, 3, 0); 
+
+  void* var_2 = __visc__createNodeND(0, var_2_node); 
+
+  __visc__edge(var_1, var_2, 1, 0, 0, 0); 
+  __visc__edge(var_1, var_2, 1, 1, 1, 0); 
+
+  void* var_3 = __visc__createNodeND(0, var_3_node); 
+
+  __visc__edge(var_2, var_3, 1, 0, 0, 0); 
+  __visc__edge(var_2, var_3, 1, 1, 1, 0); 
+  __visc__bindIn(var_3, 6, 2, 0); 
+  __visc__bindIn(var_3, 7, 3, 0); 
+
+  void* var_4 = __visc__createNodeND(0, var_4_node); 
+
+  __visc__edge(var_3, var_4, 1, 0, 0, 0); 
+  __visc__edge(var_3, var_4, 1, 1, 1, 0); 
+  __visc__bindIn(var_4, 8, 2, 0); 
+  __visc__bindIn(var_4, 9, 3, 0); 
+
+  void* var_5 = __visc__createNodeND(0, var_5_node); 
+
+  __visc__edge(var_4, var_5, 1, 0, 0, 0); 
+  __visc__edge(var_4, var_5, 1, 1, 1, 0); 
+
+  void* var_6 = __visc__createNodeND(0, var_6_node); 
+
+  __visc__edge(var_5, var_6, 1, 0, 0, 0); 
+  __visc__edge(var_5, var_6, 1, 1, 1, 0); 
+
+  void* var_7 = __visc__createNodeND(0, var_7_node); 
+
+  __visc__edge(var_6, var_7, 1, 0, 0, 0); 
+  __visc__edge(var_6, var_7, 1, 1, 1, 0); 
+  __visc__bindIn(var_7, 10, 2, 0); 
+  __visc__bindIn(var_7, 11, 3, 0); 
+
+  void* var_8 = __visc__createNodeND(0, var_8_node); 
+
+  __visc__edge(var_7, var_8, 1, 0, 0, 0); 
+  __visc__edge(var_7, var_8, 1, 1, 1, 0); 
+  __visc__bindIn(var_8, 12, 2, 0); 
+  __visc__bindIn(var_8, 13, 3, 0); 
+
+  void* var_9 = __visc__createNodeND(0, var_9_node); 
+
+  __visc__edge(var_8, var_9, 1, 0, 0, 0); 
+  __visc__edge(var_8, var_9, 1, 1, 1, 0); 
+
+  void* var_10 = __visc__createNodeND(0, var_10_node); 
+
+  __visc__edge(var_9, var_10, 1, 0, 0, 0); 
+  __visc__edge(var_9, var_10, 1, 1, 1, 0); 
+  __visc__bindIn(var_10, 14, 2, 0); 
+  __visc__bindIn(var_10, 15, 3, 0); 
+
+  void* var_11 = __visc__createNodeND(0, var_11_node); 
+
+  __visc__edge(var_10, var_11, 1, 0, 0, 0); 
+  __visc__edge(var_10, var_11, 1, 1, 1, 0); 
+  __visc__bindIn(var_11, 16, 2, 0); 
+  __visc__bindIn(var_11, 17, 3, 0); 
+
+  void* var_12 = __visc__createNodeND(0, var_12_node); 
+
+  __visc__edge(var_11, var_12, 1, 0, 0, 0); 
+  __visc__edge(var_11, var_12, 1, 1, 1, 0); 
+
+  void* var_13 = __visc__createNodeND(0, var_13_node); 
+
+  __visc__edge(var_12, var_13, 1, 0, 0, 0); 
+  __visc__edge(var_12, var_13, 1, 1, 1, 0); 
+
+  void* var_14 = __visc__createNodeND(0, var_14_node); 
+
+  __visc__edge(var_13, var_14, 1, 0, 0, 0); 
+  __visc__edge(var_13, var_14, 1, 1, 1, 0); 
+  __visc__bindIn(var_14, 18, 2, 0); 
+  __visc__bindIn(var_14, 19, 3, 0); 
+
+  void* var_15 = __visc__createNodeND(0, var_15_node); 
+
+  __visc__edge(var_14, var_15, 1, 0, 0, 0); 
+  __visc__edge(var_14, var_15, 1, 1, 1, 0); 
+  __visc__bindIn(var_15, 20, 2, 0); 
+  __visc__bindIn(var_15, 21, 3, 0); 
+
+  void* var_16 = __visc__createNodeND(0, var_16_node); 
+
+  __visc__edge(var_15, var_16, 1, 0, 0, 0); 
+  __visc__edge(var_15, var_16, 1, 1, 1, 0); 
+
+  void* var_17 = __visc__createNodeND(0, var_17_node); 
+
+  __visc__edge(var_16, var_17, 1, 0, 0, 0); 
+  __visc__edge(var_16, var_17, 1, 1, 1, 0); 
+  __visc__bindIn(var_17, 22, 2, 0); 
+  __visc__bindIn(var_17, 23, 3, 0); 
+
+  void* var_18 = __visc__createNodeND(0, var_18_node); 
+
+  __visc__edge(var_17, var_18, 1, 0, 0, 0); 
+  __visc__edge(var_17, var_18, 1, 1, 1, 0); 
+  __visc__bindIn(var_18, 24, 2, 0); 
+  __visc__bindIn(var_18, 25, 3, 0); 
+
+  void* var_19 = __visc__createNodeND(0, var_19_node); 
+
+  __visc__edge(var_18, var_19, 1, 0, 0, 0); 
+  __visc__edge(var_18, var_19, 1, 1, 1, 0); 
+
+  __visc__bindOut(var_19, 0, 0, 0); 
+  __visc__bindOut(var_19, 1, 1, 0); 
+
+}
+
+struct ret_t {
+  void* tensor; 
+  size_t bytes; 
+}; 
+
+typedef struct __attribute__((__packed__)) {
+  void* input; 
+  size_t input_bytes; 
+  void* conv2d_1_w; 
+  size_t conv2d_1_w_bytes; 
+  void* conv2d_1_b; 
+  size_t conv2d_1_b_bytes; 
+  void* conv2d_2_w; 
+  size_t conv2d_2_w_bytes; 
+  void* conv2d_2_b; 
+  size_t conv2d_2_b_bytes; 
+  void* conv2d_3_w; 
+  size_t conv2d_3_w_bytes; 
+  void* conv2d_3_b; 
+  size_t conv2d_3_b_bytes; 
+  void* conv2d_4_w; 
+  size_t conv2d_4_w_bytes; 
+  void* conv2d_4_b; 
+  size_t conv2d_4_b_bytes; 
+  void* dense_1_w; 
+  size_t dense_1_w_bytes; 
+  void* dense_1_b; 
+  size_t dense_1_b_bytes; 
+  void* dense_2_w; 
+  size_t dense_2_w_bytes; 
+  void* dense_2_b; 
+  size_t dense_2_b_bytes; 
+
+  struct ret_t r; 
+}
+RootIn;
+
+int main(){ 
+
+std::string dir_prefix = std::string("hpvm_mio_4/"); 
+std::string input_path =  dir_prefix + std::string("input.bin"); 
+std::string labels_path =  dir_prefix + std::string("labels.bin"); 
+std::string conv2d_1_w_path =  dir_prefix + std::string("conv2d_1_w.bin"); 
+void* conv2d_1_w =  readTrainedWeights(conv2d_1_w_path.c_str(), 0,32,3,3,3); 
+std::string conv2d_1_b_path =  dir_prefix + std::string("conv2d_1_b.bin"); 
+void* conv2d_1_b =  readTrainedWeights(conv2d_1_b_path.c_str(), 0,1,32,1,1); 
+std::string conv2d_2_w_path =  dir_prefix + std::string("conv2d_2_w.bin"); 
+void* conv2d_2_w =  readTrainedWeights(conv2d_2_w_path.c_str(), 0,32,32,3,3); 
+std::string conv2d_2_b_path =  dir_prefix + std::string("conv2d_2_b.bin"); 
+void* conv2d_2_b =  readTrainedWeights(conv2d_2_b_path.c_str(), 0,1,32,1,1); 
+std::string conv2d_3_w_path =  dir_prefix + std::string("conv2d_3_w.bin"); 
+void* conv2d_3_w =  readTrainedWeights(conv2d_3_w_path.c_str(), 0,64,32,3,3); 
+std::string conv2d_3_b_path =  dir_prefix + std::string("conv2d_3_b.bin"); 
+void* conv2d_3_b =  readTrainedWeights(conv2d_3_b_path.c_str(), 0,1,64,1,1); 
+std::string conv2d_4_w_path =  dir_prefix + std::string("conv2d_4_w.bin"); 
+void* conv2d_4_w =  readTrainedWeights(conv2d_4_w_path.c_str(), 0,64,64,3,3); 
+std::string conv2d_4_b_path =  dir_prefix + std::string("conv2d_4_b.bin"); 
+void* conv2d_4_b =  readTrainedWeights(conv2d_4_b_path.c_str(), 0,1,64,1,1); 
+std::string dense_1_w_path =  dir_prefix + std::string("dense_1_w.bin"); 
+void* dense_1_w =  readTrainedWeights(dense_1_w_path.c_str(), 0,1,1,1600,256); 
+std::string dense_1_b_path =  dir_prefix + std::string("dense_1_b.bin"); 
+void* dense_1_b =  readTrainedWeights(dense_1_b_path.c_str(), 0,1,256,1,1); 
+std::string dense_2_w_path =  dir_prefix + std::string("dense_2_w.bin"); 
+void* dense_2_w =  readTrainedWeights(dense_2_w_path.c_str(), 0,1,1,256,5); 
+std::string dense_2_b_path =  dir_prefix + std::string("dense_2_b.bin"); 
+void* dense_2_b =  readTrainedWeights(dense_2_b_path.c_str(), 0,1,5,1,1); 
+void* input = readTrainedWeights(input_path.c_str(), 0,5000,3,32,32); 
+uint32_t* labels = readLabels2(labels_path.c_str(),5000); 
+
+__visc__init(); 
+RootIn* args = static_cast<RootIn*>(malloc(sizeof(RootIn))); 
+
+args->input = input; 
+args->input_bytes = 0; 
+args->conv2d_1_w = conv2d_1_w; 
+args->conv2d_1_w_bytes = 0; 
+args->conv2d_1_b = conv2d_1_b; 
+args->conv2d_1_b_bytes = 0; 
+args->conv2d_2_w = conv2d_2_w; 
+args->conv2d_2_w_bytes = 0; 
+args->conv2d_2_b = conv2d_2_b; 
+args->conv2d_2_b_bytes = 0; 
+args->conv2d_3_w = conv2d_3_w; 
+args->conv2d_3_w_bytes = 0; 
+args->conv2d_3_b = conv2d_3_b; 
+args->conv2d_3_b_bytes = 0; 
+args->conv2d_4_w = conv2d_4_w; 
+args->conv2d_4_w_bytes = 0; 
+args->conv2d_4_b = conv2d_4_b; 
+args->conv2d_4_b_bytes = 0; 
+args->dense_1_w = dense_1_w; 
+args->dense_1_w_bytes = 0; 
+args->dense_1_b = dense_1_b; 
+args->dense_1_b_bytes = 0; 
+args->dense_2_w = dense_2_w; 
+args->dense_2_w_bytes = 0; 
+args->dense_2_b = dense_2_b; 
+args->dense_2_b_bytes = 0; 
+
+void* dfg = __visc__launch(0, root, (void*) args); 
+
+__visc__wait(dfg); 
+
+void *result = static_cast<RootIn*>(args)->input; 
+hpvm_request_tensor(result, 0); 
+
+__visc__cleanup(); 
+ computeAccuracy3(labels, result); 
+return 0; 
+
+} 
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_1_b.bin b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_1_b.bin
new file mode 100644
index 0000000000000000000000000000000000000000..39c3fbac7f94a6824736f8b21f184b71b3d45a7b
--- /dev/null
+++ b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_1_b.bin
@@ -0,0 +1,2 @@
+αÝ>aŸ¾Ì?N?„œQ¿JÙ%½t‰¼ªl©=™&¼½œ ¿¿^L8?د>r¾:õö¾
νóù¼š¶?B Y?–;Uì>ç—=€ëh?rXö½ï
+‹=&ç½Ýˆ™½C#S>¥”½7ü¹>vÉ…>ɇ¿!?
\ No newline at end of file
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_1_w.bin b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_1_w.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d01508286ed5fddf05790e261efa168847699efd
Binary files /dev/null and b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_1_w.bin differ
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_2_b.bin b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_2_b.bin
new file mode 100644
index 0000000000000000000000000000000000000000..39489675632774a46e0ea704d7d13807b2e4feb5
--- /dev/null
+++ b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_2_b.bin
@@ -0,0 +1,2 @@
+„§Ÿ½æ.î=•·?¾Š¥¿vƒ¿ºS¿Ó놻Þ >
+Qżøæ¼å—8¿ÂVä».I>Æp𼄃;dd=h䈾Ðé¾N½.¾ÓñÍ=/Ú¾ŒÖl¾×;¾ð4¾6ƒ>cTʾR¶	¼ê¿¾ô2Í=c_¨¾­¾ÚZ¾
\ No newline at end of file
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_2_w.bin b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_2_w.bin
new file mode 100644
index 0000000000000000000000000000000000000000..381b72379b85614a79910c9560c6115310da538a
Binary files /dev/null and b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_2_w.bin differ
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_3_b.bin b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_3_b.bin
new file mode 100644
index 0000000000000000000000000000000000000000..43fe41a6edcae03a1a531123940e528a71807300
--- /dev/null
+++ b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_3_b.bin
@@ -0,0 +1 @@
+˜Ò½Ê+¥½ò?Š$ù¾méÊ>(>¼½hŠ >qÂB½y²*½‚ì>IÒ¸>»Kˆ?@ ¨¼t\¢?æH¾
•=ùÔý>…œ;½_å—>Ÿfœ=;┿®Œû>›jÞ¾DâÓ»×Á‰¾šU>·`†? Éc>ÈŽ?Õª?Ÿ·/<#&?—?ôðš¾Dy<Hbf¾lò:?ÑwS¾M
)>}¾«t ¾Ÿt'?ö¥ú¿´$¾<¥!æ½ œ?Ë¡½c’‹<Za¾>E5“>0Jê>p9J¾†žÁ>ÞWð>nèr>e–'?¹+R¿Gž>kæò;·bÆ¿²àR>˜{ÿ>ãš½
\ No newline at end of file
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_3_w.bin b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_3_w.bin
new file mode 100644
index 0000000000000000000000000000000000000000..a82a9e397918217bf37873c59dd92bee713fa9df
Binary files /dev/null and b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_3_w.bin differ
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_4_b.bin b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_4_b.bin
new file mode 100644
index 0000000000000000000000000000000000000000..bd29ee60df1f0f0b3a199f1f65adb810a2649a3c
Binary files /dev/null and b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_4_b.bin differ
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_4_w.bin b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_4_w.bin
new file mode 100644
index 0000000000000000000000000000000000000000..7f6211e76617ee18dac06c9b5449c18183a149de
Binary files /dev/null and b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/conv2d_4_w.bin differ
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/dense_1_b.bin b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/dense_1_b.bin
new file mode 100644
index 0000000000000000000000000000000000000000..fae8736fa3da691229c66e73962cb4f0131c2961
Binary files /dev/null and b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/dense_1_b.bin differ
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/dense_1_w.bin b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/dense_1_w.bin
new file mode 100644
index 0000000000000000000000000000000000000000..034b84905ae34893f5b57be77033fd91b388a80b
Binary files /dev/null and b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/dense_1_w.bin differ
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/dense_2_b.bin b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/dense_2_b.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2bb9d3e89f7d35a4e66063ec93baab23ef90b86e
--- /dev/null
+++ b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/dense_2_b.bin
@@ -0,0 +1 @@
+k"õ;(¿¾òë>™¿JÄ@?
\ No newline at end of file
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/dense_2_w.bin b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/dense_2_w.bin
new file mode 100644
index 0000000000000000000000000000000000000000..f3bce42e01c37928d0cd54493835bcda70529bf2
Binary files /dev/null and b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/dense_2_w.bin differ
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/input.bin b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/input.bin
new file mode 100644
index 0000000000000000000000000000000000000000..0abae55bf84ff5dc8e2d1074c97853331fc5d879
Binary files /dev/null and b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/input.bin differ
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/labels.bin b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/labels.bin
new file mode 100644
index 0000000000000000000000000000000000000000..effaef8583b30228039ff7f61d9c6be51c020b49
Binary files /dev/null and b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/labels.bin differ
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/layer_composition.txt b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/layer_composition.txt
new file mode 100644
index 0000000000000000000000000000000000000000..54ef6c9f01517d20355681b1d19c8b865daf514c
--- /dev/null
+++ b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/layer_composition.txt
@@ -0,0 +1,6 @@
+conv  add  activation  
+conv  add  activation  pool  
+conv  add  activation  
+conv  add  activation  pool  
+dense  add  activation  
+dense  add  
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/layers.txt b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/layers.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c0aecb467775babfd4b5c2873abf287905ee11f8
--- /dev/null
+++ b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/layers.txt
@@ -0,0 +1,6 @@
+Conv1,5000,3,32,32,32,3,3,3
+Conv2,5000,32,30,30,32,32,3,3
+Conv3,5000,32,14,14,64,32,3,3
+Conv4,5000,64,12,12,64,64,3,3
+FC1,5000,1600,1600,256
+FC2,5000,256,256,5
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/promise_src.cc b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/promise_src.cc
new file mode 100644
index 0000000000000000000000000000000000000000..fd96ab0878269718c58c52115a22b79e2f62ec99
--- /dev/null
+++ b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/promise_src.cc
@@ -0,0 +1,93 @@
+
+#include <stdio.h> 
+#include <stdlib.h> 
+#include <unistd.h> 
+#include <fcntl.h> 
+#include <sys/types.h> 
+#include <sys/stat.h> 
+#include <string.h> 
+#include "../../../tensor_runtime/include/tensor_runtime.h" 
+#include "../../include/utils.h" 
+
+int main(){ 
+
+llvm_hpvm_initTensorRt(0); 
+
+int total_runs = 100; 
+for (int i = 0 ; i < total_runs; i++){ 
+
+
+startMemTracking(); 
+
+int test_input_size = 5000; 
+int batch_size = 5000; 
+int batch_count = test_input_size / batch_size; 
+float final_accuracy = 0.0; 
+
+for(int i = 0; i < batch_count; i++){ 
+
+
+
+std::string dir_prefix = std::string("hpvm_mio_4/"); 
+std::string input_path =  dir_prefix + std::string("input.bin"); 
+std::string labels_path =  dir_prefix + std::string("labels.bin"); 
+std::string conv2d_1_w_path =  dir_prefix + std::string("conv2d_1_w.bin"); 
+void* conv2d_1_w =  readTrainedWeights(conv2d_1_w_path.c_str(), 0,32,3,3,3); 
+std::string conv2d_1_b_path =  dir_prefix + std::string("conv2d_1_b.bin"); 
+void* conv2d_1_b =  readTrainedWeights(conv2d_1_b_path.c_str(), 0,1,32,1,1); 
+std::string conv2d_2_w_path =  dir_prefix + std::string("conv2d_2_w.bin"); 
+void* conv2d_2_w =  readTrainedWeights(conv2d_2_w_path.c_str(), 0,32,32,3,3); 
+std::string conv2d_2_b_path =  dir_prefix + std::string("conv2d_2_b.bin"); 
+void* conv2d_2_b =  readTrainedWeights(conv2d_2_b_path.c_str(), 0,1,32,1,1); 
+std::string conv2d_3_w_path =  dir_prefix + std::string("conv2d_3_w.bin"); 
+void* conv2d_3_w =  readTrainedWeights(conv2d_3_w_path.c_str(), 0,64,32,3,3); 
+std::string conv2d_3_b_path =  dir_prefix + std::string("conv2d_3_b.bin"); 
+void* conv2d_3_b =  readTrainedWeights(conv2d_3_b_path.c_str(), 0,1,64,1,1); 
+std::string conv2d_4_w_path =  dir_prefix + std::string("conv2d_4_w.bin"); 
+void* conv2d_4_w =  readTrainedWeights(conv2d_4_w_path.c_str(), 0,64,64,3,3); 
+std::string conv2d_4_b_path =  dir_prefix + std::string("conv2d_4_b.bin"); 
+void* conv2d_4_b =  readTrainedWeights(conv2d_4_b_path.c_str(), 0,1,64,1,1); 
+std::string dense_1_w_path =  dir_prefix + std::string("dense_1_w.bin"); 
+void* dense_1_w =  readTrainedWeights(dense_1_w_path.c_str(), 0,1,1,1600,256); 
+std::string dense_1_b_path =  dir_prefix + std::string("dense_1_b.bin"); 
+void* dense_1_b =  readTrainedWeights(dense_1_b_path.c_str(), 0,1,256,1,1); 
+std::string dense_2_w_path =  dir_prefix + std::string("dense_2_w.bin"); 
+void* dense_2_w =  readTrainedWeights(dense_2_w_path.c_str(), 0,1,1,256,5); 
+std::string dense_2_b_path =  dir_prefix + std::string("dense_2_b.bin"); 
+void* dense_2_b =  readTrainedWeights(dense_2_b_path.c_str(), 0,1,5,1,1); 
+
+
+int start = i * batch_size; 
+int end = (i + 1) * batch_size; 
+
+void* input = readInputBatch(input_path.c_str(),0,start,end,3,32,32); 
+
+void* var_0 = ConvLayer_PROMISE(input, -2.682209e-07, 1.0000002, conv2d_1_w, -1.9097954802513122, 1.849404644250894, conv2d_1_b, -1.4970889, 0.90984344, 0, 0, 1, 1, -1, 0, 1, 0.0, 1.9360680677890976, 9); 
+void* var_1 = ConvLayer_PROMISE(var_0, 0.0, 1.9360680677890976, conv2d_2_w, -0.6551046761870384, 0.5357062590122245, conv2d_2_b, -1.2897198, 0.25627556, 0, 0, 1, 1, 0, 2, 1, 0.0, 3.61756298995042, 9); 
+void* var_2 = ConvLayer_PROMISE(var_1, 0.0, 3.61756298995042, conv2d_3_w, -0.479531730890274, 0.38338643845919407, conv2d_3_b, -1.9581897, 1.2684464, 0, 0, 1, 1, -1, 0, 1, 0.0, 4.717274737834942, 9); 
+void* var_3 = ConvLayer_PROMISE(var_2, 0.0, 4.717274737834942, conv2d_4_w, -0.37545250764489174, 0.3687883540093907, conv2d_4_b, -0.5458527, 0.6755934, 0, 0, 1, 1, 0, 2, 1, 0.0, 6.558154335499082, 9); 
+void* var_4 = FCLayer_PROMISE(var_3, 0.0, 6.558154335499082, dense_1_w, -0.19869577795267107, 0.2030584679543994, dense_1_b, -0.1697124, 0.22991186, 1, 0.0, 8.8694415378571, 9); 
+void* var_5 = FCLayer_PROMISE(var_4, 0.0, 8.8694415378571, dense_2_w, -0.38784850630164147, 0.387768742352725, dense_2_b, -0.65646386, 0.75299513, -1, -23.875294536590577, 35.08045856094383, 9); 
+void* var_6 = tensorSoftmax(var_5); 
+
+uint32_t* labels = readLabelsBatch3(labels_path.c_str(),start,end); 
+
+float accuracy = computeAccuracy3(labels, var_6); 
+final_accuracy += accuracy; 
+freeBatchMemory(); 
+ 
+}
+
+final_accuracy = final_accuracy / batch_count; 
+dumpFinalAccuracy(final_accuracy); 
+
+
+}
+
+dumpExecutionAccuracies(); 
+
+llvm_hpvm_cleanupTensorRt(); 
+
+return 0; 
+
+}
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/src.cc b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/src.cc
new file mode 100644
index 0000000000000000000000000000000000000000..70a9a40c7878b4aeb4894acb186524870664fe09
--- /dev/null
+++ b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/data/weights/src.cc
@@ -0,0 +1,98 @@
+
+#include <stdio.h> 
+#include <stdlib.h> 
+#include <unistd.h> 
+#include <fcntl.h> 
+#include <sys/types.h> 
+#include <sys/stat.h> 
+#include <string.h> 
+#include "../../tensor_runtime/include/tensor_runtime.h" 
+#include "../include/utils.h" 
+
+// Reference (non-HPVM) driver for the mini-era CV CNN: loads the trained
+// weights, runs the 4-conv / 2-dense / softmax network with direct
+// tensor-runtime calls, and reports top-1 accuracy over the test set.
+int main(){ 
+
+  // Initialise the tensor runtime on device 0.
+  llvm_hpvm_initTensorRt(0); 
+
+
+  // Weight/input binaries are read relative to the working directory.
+  // Dimensions below are NCHW: (N, C, H, W); biases are (1, C, 1, 1);
+  // dense weights are (1, 1, in, out).
+  std::string dir_prefix = std::string("hpvm_mio_4/"); 
+  std::string input_path =  dir_prefix + std::string("input.bin"); 
+  std::string labels_path =  dir_prefix + std::string("labels.bin"); 
+  std::string conv2d_1_w_path =  dir_prefix + std::string("conv2d_1_w.bin"); 
+  void* conv2d_1_w =  readTrainedWeights(conv2d_1_w_path.c_str(), 0,32,3,3,3); 
+  std::string conv2d_1_b_path =  dir_prefix + std::string("conv2d_1_b.bin"); 
+  void* conv2d_1_b =  readTrainedWeights(conv2d_1_b_path.c_str(), 0,1,32,1,1); 
+  std::string conv2d_2_w_path =  dir_prefix + std::string("conv2d_2_w.bin"); 
+  void* conv2d_2_w =  readTrainedWeights(conv2d_2_w_path.c_str(), 0,32,32,3,3); 
+  std::string conv2d_2_b_path =  dir_prefix + std::string("conv2d_2_b.bin"); 
+  void* conv2d_2_b =  readTrainedWeights(conv2d_2_b_path.c_str(), 0,1,32,1,1); 
+  std::string conv2d_3_w_path =  dir_prefix + std::string("conv2d_3_w.bin"); 
+  void* conv2d_3_w =  readTrainedWeights(conv2d_3_w_path.c_str(), 0,64,32,3,3); 
+  std::string conv2d_3_b_path =  dir_prefix + std::string("conv2d_3_b.bin"); 
+  void* conv2d_3_b =  readTrainedWeights(conv2d_3_b_path.c_str(), 0,1,64,1,1); 
+  std::string conv2d_4_w_path =  dir_prefix + std::string("conv2d_4_w.bin"); 
+  void* conv2d_4_w =  readTrainedWeights(conv2d_4_w_path.c_str(), 0,64,64,3,3); 
+  std::string conv2d_4_b_path =  dir_prefix + std::string("conv2d_4_b.bin"); 
+  void* conv2d_4_b =  readTrainedWeights(conv2d_4_b_path.c_str(), 0,1,64,1,1); 
+  std::string dense_1_w_path =  dir_prefix + std::string("dense_1_w.bin"); 
+  void* dense_1_w =  readTrainedWeights(dense_1_w_path.c_str(), 0,1,1,1600,256); 
+  std::string dense_1_b_path =  dir_prefix + std::string("dense_1_b.bin"); 
+  void* dense_1_b =  readTrainedWeights(dense_1_b_path.c_str(), 0,1,256,1,1); 
+  std::string dense_2_w_path =  dir_prefix + std::string("dense_2_w.bin"); 
+  void* dense_2_w =  readTrainedWeights(dense_2_w_path.c_str(), 0,1,1,256,5); 
+  std::string dense_2_b_path =  dir_prefix + std::string("dense_2_b.bin"); 
+  void* dense_2_b =  readTrainedWeights(dense_2_b_path.c_str(), 0,1,5,1,1); 
+
+
+
+  // Track per-batch tensor allocations so freeBatchMemory() can reclaim them.
+  startMemTracking(); 
+
+  // batch_size == test_input_size, so batch_count is 1 and the loop runs once.
+  // NOTE(review): assumes input.bin holds at least 5000 images — confirm.
+  int test_input_size = 5000; 
+  int batch_size = 5000; 
+  int batch_count = test_input_size / batch_size; 
+  float final_accuracy = 0.0; 
+
+  for(int i = 0; i < batch_count; i++){ 
+
+    int start = i * batch_size; 
+    int end = (i + 1) * batch_size; 
+
+    // Load images [start, end) as 3x32x32 tensors.
+    void* input = readInputBatch(input_path.c_str(),0,start,end,3,32,32); 
+
+    // conv1 (3->32) + bias + ReLU
+    void* var_0 = tensorConvolution(input, conv2d_1_w, 0, 0, 1, 1, 1, 1); 
+    void* var_1 = tensorAdd(var_0, conv2d_1_b); 
+    void* var_2 = tensorRelu(var_1); 
+    // conv2 (32->32) + bias + ReLU, then 2x2 max pool (stride 2)
+    void* var_3 = tensorConvolution(var_2, conv2d_2_w, 0, 0, 1, 1, 1, 1); 
+    void* var_4 = tensorAdd(var_3, conv2d_2_b); 
+    void* var_5 = tensorRelu(var_4); 
+    void* var_6 = tensorPooling(var_5,0,2,2,0,0,2,2); 
+    // conv3 (32->64) + bias + ReLU
+    void* var_8 = tensorConvolution(var_6, conv2d_3_w, 0, 0, 1, 1, 1, 1); 
+    void* var_9 = tensorAdd(var_8, conv2d_3_b); 
+    void* var_10 = tensorRelu(var_9); 
+    // conv4 (64->64) + bias + ReLU, then 2x2 max pool (stride 2)
+    void* var_11 = tensorConvolution(var_10, conv2d_4_w, 0, 0, 1, 1, 1, 1); 
+    void* var_12 = tensorAdd(var_11, conv2d_4_b); 
+    void* var_13 = tensorRelu(var_12); 
+    void* var_14 = tensorPooling(var_13,0,2,2,0,0,2,2); 
+    // dense1 (1600->256) + bias + ReLU
+    void* var_17 = tensorGemmGPU(var_14, dense_1_w); 
+    void* var_18 = tensorAdd(var_17, dense_1_b); 
+    void* var_19 = tensorRelu(var_18); 
+    // dense2 (256->5) + bias, then softmax over the 5 classes
+    void* var_21 = tensorGemmGPU(var_19, dense_2_w); 
+    void* var_22 = tensorAdd(var_21, dense_2_b); 
+    void* var_23 = tensorSoftmax(var_22); 
+
+    uint32_t* labels = readLabelsBatch3(labels_path.c_str(),start,end); 
+
+    float accuracy = computeAccuracy3(labels, var_23); 
+    final_accuracy += accuracy; 
+    // Release all tensors allocated since startMemTracking()/last batch.
+    freeBatchMemory(); 
+ 
+  }
+
+  final_accuracy = final_accuracy / batch_count; 
+  dumpFinalAccuracy(final_accuracy); 
+
+
+  llvm_hpvm_cleanupTensorRt(); 
+
+  return 0; 
+
+}
diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/src/mini_era_cv.cpp b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/src/mini_era_cv.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9d7f2f76019ba5367d30ee2b00f51110d2ed04ca
--- /dev/null
+++ b/llvm/test/VISC/DNN_Benchmarks/benchmarks/mini_era_cv/src/mini_era_cv.cpp
@@ -0,0 +1,430 @@
+
+#include <visc.h> 
+#include <utils_cpu.h> 
+#include <stdio.h> 
+#include <stdlib.h> 
+#include <unistd.h> 
+#include <fcntl.h> 
+#include <sys/stat.h> 
+#include <cstring> 
+
+
+// HPVM leaf node: 2-D convolution of t1 with filter t2 (pad 0,0; stride 1,1).
+// Returns {result tensor, dummy byte count 0}.
+void var_0_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_convolution(t1, t2, 0, 0, 1, 1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: elementwise add (conv1 bias). Returns {tensor, 0}.
+void var_1_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: ReLU activation. Returns {tensor, 0}.
+void var_2_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_relu(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: 2-D convolution (pad 0,0; stride 1,1) — conv2 in root().
+void var_3_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_convolution(t1, t2, 0, 0, 1, 1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: elementwise add (conv2 bias). Returns {tensor, 0}.
+void var_4_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: ReLU activation. Returns {tensor, 0}.
+void var_5_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_relu(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: 2x2 max pool, stride 2, no padding. Returns {tensor, 0}.
+void var_6_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_pool_max(t1, 2, 2, 0, 0, 2, 2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: 2-D convolution (pad 0,0; stride 1,1) — conv3 in root().
+void var_7_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_convolution(t1, t2, 0, 0, 1, 1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: elementwise add (conv3 bias). Returns {tensor, 0}.
+void var_8_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: ReLU activation. Returns {tensor, 0}.
+void var_9_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_relu(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: 2-D convolution (pad 0,0; stride 1,1) — conv4 in root().
+void var_10_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_convolution(t1, t2, 0, 0, 1, 1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: elementwise add (conv4 bias). Returns {tensor, 0}.
+void var_11_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: ReLU activation. Returns {tensor, 0}.
+void var_12_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_relu(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: 2x2 max pool, stride 2, no padding. Returns {tensor, 0}.
+void var_13_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_pool_max(t1, 2, 2, 0, 0, 2, 2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: matrix multiply (fully-connected dense_1 weights).
+void var_14_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_mul(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: elementwise add (dense_1 bias). Returns {tensor, 0}.
+void var_15_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: ReLU activation. Returns {tensor, 0}.
+void var_16_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_relu(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: matrix multiply (fully-connected dense_2 weights).
+void var_17_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_mul(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: elementwise add (dense_2 bias). Returns {tensor, 0}.
+void var_18_node(void* t1, size_t bytes_t1, void* t2, size_t bytes_t2) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(2, t1, t2, 0); 
+
+  void *r = __visc__tensor_add(t1, t2); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// HPVM leaf node: softmax over the class logits (final graph output).
+void var_19_node(void* t1, size_t bytes_t1) { 
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(1, t1, 0); 
+
+  void* r = __visc__tensor_softmax(t1); 
+  __visc__return(2, r, (size_t) 0); 
+}
+
+// Root node: builds the static HPVM dataflow graph for the network —
+// one leaf node per tensor op (conv/add/relu/pool/mul/softmax), wired
+// in a straight pipeline var_0 -> var_1 -> ... -> var_19.
+// NOTE(review): in __visc__bindIn(node, rootArgIdx, nodeInIdx, 0) the
+// root argument index counts both the pointer slot and its bytes slot
+// (so conv2d_1_w is index 2/3, conv2d_1_b 4/5, ...); in
+// __visc__edge(src, dst, 1, srcPort, dstPort, 0) each node forwards
+// both of its outputs (tensor, size) to the consumer — confirm the
+// exact intrinsic signatures against the HPVM spec.
+void root(void* input, size_t input_bytes, 
+	  void* conv2d_1_w, size_t conv2d_1_w_bytes, 
+	  void* conv2d_1_b, size_t conv2d_1_b_bytes, 
+	  void* conv2d_2_w, size_t conv2d_2_w_bytes, 
+	  void* conv2d_2_b, size_t conv2d_2_b_bytes, 
+	  void* conv2d_3_w, size_t conv2d_3_w_bytes, 
+	  void* conv2d_3_b, size_t conv2d_3_b_bytes, 
+	  void* conv2d_4_w, size_t conv2d_4_w_bytes, 
+	  void* conv2d_4_b, size_t conv2d_4_b_bytes, 
+	  void* dense_1_w, size_t dense_1_w_bytes, 
+	  void* dense_1_b, size_t dense_1_b_bytes, 
+	  void* dense_2_w, size_t dense_2_w_bytes, 
+	  void* dense_2_b, size_t dense_2_b_bytes){ 
+
+
+  __visc__hint(visc::CPU_TARGET); 
+  __visc__attributes(13, input, conv2d_1_w, conv2d_1_b, conv2d_2_w, conv2d_2_b, conv2d_3_w, conv2d_3_b, conv2d_4_w, conv2d_4_b, dense_1_w, dense_1_b, dense_2_w, dense_2_b, 0); 
+
+
+  // conv1: takes the graph input and conv2d_1_w from the root args.
+  void* var_0 = __visc__createNodeND(0, var_0_node); 
+
+  __visc__bindIn(var_0, 0, 0, 0); 
+  __visc__bindIn(var_0, 1, 1, 0); 
+  __visc__bindIn(var_0, 2, 2, 0); 
+  __visc__bindIn(var_0, 3, 3, 0); 
+
+  void* var_1 = __visc__createNodeND(0, var_1_node); 
+
+  __visc__edge(var_0, var_1, 1, 0, 0, 0); 
+  __visc__edge(var_0, var_1, 1, 1, 1, 0); 
+  __visc__bindIn(var_1, 4, 2, 0); 
+  __visc__bindIn(var_1, 5, 3, 0); 
+
+  void* var_2 = __visc__createNodeND(0, var_2_node); 
+
+  __visc__edge(var_1, var_2, 1, 0, 0, 0); 
+  __visc__edge(var_1, var_2, 1, 1, 1, 0); 
+
+  void* var_3 = __visc__createNodeND(0, var_3_node); 
+
+  __visc__edge(var_2, var_3, 1, 0, 0, 0); 
+  __visc__edge(var_2, var_3, 1, 1, 1, 0); 
+  __visc__bindIn(var_3, 6, 2, 0); 
+  __visc__bindIn(var_3, 7, 3, 0); 
+
+  void* var_4 = __visc__createNodeND(0, var_4_node); 
+
+  __visc__edge(var_3, var_4, 1, 0, 0, 0); 
+  __visc__edge(var_3, var_4, 1, 1, 1, 0); 
+  __visc__bindIn(var_4, 8, 2, 0); 
+  __visc__bindIn(var_4, 9, 3, 0); 
+
+  void* var_5 = __visc__createNodeND(0, var_5_node); 
+
+  __visc__edge(var_4, var_5, 1, 0, 0, 0); 
+  __visc__edge(var_4, var_5, 1, 1, 1, 0); 
+
+  // pool1 after the first conv pair.
+  void* var_6 = __visc__createNodeND(0, var_6_node); 
+
+  __visc__edge(var_5, var_6, 1, 0, 0, 0); 
+  __visc__edge(var_5, var_6, 1, 1, 1, 0); 
+
+  void* var_7 = __visc__createNodeND(0, var_7_node); 
+
+  __visc__edge(var_6, var_7, 1, 0, 0, 0); 
+  __visc__edge(var_6, var_7, 1, 1, 1, 0); 
+  __visc__bindIn(var_7, 10, 2, 0); 
+  __visc__bindIn(var_7, 11, 3, 0); 
+
+  void* var_8 = __visc__createNodeND(0, var_8_node); 
+
+  __visc__edge(var_7, var_8, 1, 0, 0, 0); 
+  __visc__edge(var_7, var_8, 1, 1, 1, 0); 
+  __visc__bindIn(var_8, 12, 2, 0); 
+  __visc__bindIn(var_8, 13, 3, 0); 
+
+  void* var_9 = __visc__createNodeND(0, var_9_node); 
+
+  __visc__edge(var_8, var_9, 1, 0, 0, 0); 
+  __visc__edge(var_8, var_9, 1, 1, 1, 0); 
+
+  void* var_10 = __visc__createNodeND(0, var_10_node); 
+
+  __visc__edge(var_9, var_10, 1, 0, 0, 0); 
+  __visc__edge(var_9, var_10, 1, 1, 1, 0); 
+  __visc__bindIn(var_10, 14, 2, 0); 
+  __visc__bindIn(var_10, 15, 3, 0); 
+
+  void* var_11 = __visc__createNodeND(0, var_11_node); 
+
+  __visc__edge(var_10, var_11, 1, 0, 0, 0); 
+  __visc__edge(var_10, var_11, 1, 1, 1, 0); 
+  __visc__bindIn(var_11, 16, 2, 0); 
+  __visc__bindIn(var_11, 17, 3, 0); 
+
+  void* var_12 = __visc__createNodeND(0, var_12_node); 
+
+  __visc__edge(var_11, var_12, 1, 0, 0, 0); 
+  __visc__edge(var_11, var_12, 1, 1, 1, 0); 
+
+  // pool2 after the second conv pair.
+  void* var_13 = __visc__createNodeND(0, var_13_node); 
+
+  __visc__edge(var_12, var_13, 1, 0, 0, 0); 
+  __visc__edge(var_12, var_13, 1, 1, 1, 0); 
+
+  // dense_1 (mul + add + relu), fed by the flattened pool2 output.
+  void* var_14 = __visc__createNodeND(0, var_14_node); 
+
+  __visc__edge(var_13, var_14, 1, 0, 0, 0); 
+  __visc__edge(var_13, var_14, 1, 1, 1, 0); 
+  __visc__bindIn(var_14, 18, 2, 0); 
+  __visc__bindIn(var_14, 19, 3, 0); 
+
+  void* var_15 = __visc__createNodeND(0, var_15_node); 
+
+  __visc__edge(var_14, var_15, 1, 0, 0, 0); 
+  __visc__edge(var_14, var_15, 1, 1, 1, 0); 
+  __visc__bindIn(var_15, 20, 2, 0); 
+  __visc__bindIn(var_15, 21, 3, 0); 
+
+  void* var_16 = __visc__createNodeND(0, var_16_node); 
+
+  __visc__edge(var_15, var_16, 1, 0, 0, 0); 
+  __visc__edge(var_15, var_16, 1, 1, 1, 0); 
+
+  // dense_2 (mul + add) producing the class logits.
+  void* var_17 = __visc__createNodeND(0, var_17_node); 
+
+  __visc__edge(var_16, var_17, 1, 0, 0, 0); 
+  __visc__edge(var_16, var_17, 1, 1, 1, 0); 
+  __visc__bindIn(var_17, 22, 2, 0); 
+  __visc__bindIn(var_17, 23, 3, 0); 
+
+  void* var_18 = __visc__createNodeND(0, var_18_node); 
+
+  __visc__edge(var_17, var_18, 1, 0, 0, 0); 
+  __visc__edge(var_17, var_18, 1, 1, 1, 0); 
+  __visc__bindIn(var_18, 24, 2, 0); 
+  __visc__bindIn(var_18, 25, 3, 0); 
+
+  // softmax node; its two outputs become the graph's outputs.
+  void* var_19 = __visc__createNodeND(0, var_19_node); 
+
+  __visc__edge(var_18, var_19, 1, 0, 0, 0); 
+  __visc__edge(var_18, var_19, 1, 1, 1, 0); 
+
+  __visc__bindOut(var_19, 0, 0, 0); 
+  __visc__bindOut(var_19, 1, 1, 0); 
+
+}
+
+// Return pair produced by each leaf node: a tensor pointer plus its
+// byte count (the nodes here always report 0 bytes).
+struct ret_t {
+  void* tensor; 
+  size_t bytes; 
+}; 
+
+// Argument block passed to __visc__launch for root(). Must be packed and
+// field order must match root()'s parameter order exactly — the bindIn
+// indices in root() (0,1,2,...) address these slots positionally.
+typedef struct __attribute__((__packed__)) {
+  void* input; 
+  size_t input_bytes; 
+  void* conv2d_1_w; 
+  size_t conv2d_1_w_bytes; 
+  void* conv2d_1_b; 
+  size_t conv2d_1_b_bytes; 
+  void* conv2d_2_w; 
+  size_t conv2d_2_w_bytes; 
+  void* conv2d_2_b; 
+  size_t conv2d_2_b_bytes; 
+  void* conv2d_3_w; 
+  size_t conv2d_3_w_bytes; 
+  void* conv2d_3_b; 
+  size_t conv2d_3_b_bytes; 
+  void* conv2d_4_w; 
+  size_t conv2d_4_w_bytes; 
+  void* conv2d_4_b; 
+  size_t conv2d_4_b_bytes; 
+  void* dense_1_w; 
+  size_t dense_1_w_bytes; 
+  void* dense_1_b; 
+  size_t dense_1_b_bytes; 
+  void* dense_2_w; 
+  size_t dense_2_w_bytes; 
+  void* dense_2_b; 
+  size_t dense_2_b_bytes; 
+
+  // Output slot written back by the launch/runtime.
+  struct ret_t r; 
+}
+RootIn;
+
+// HPVM driver: loads weights and 500 test images, packs them into a
+// RootIn block, launches the root() dataflow graph, waits for it, and
+// checks accuracy of the softmax output against the labels.
+int main(){ 
+
+  // Weights live next to the benchmark under data/weights/.
+  std::string dir_prefix = std::string("../data/weights/"); 
+  std::string input_path =  dir_prefix + std::string("input.bin"); 
+  std::string labels_path =  dir_prefix + std::string("labels.bin"); 
+  std::string conv2d_1_w_path =  dir_prefix + std::string("conv2d_1_w.bin"); 
+  void* conv2d_1_w =  readTrainedWeights(conv2d_1_w_path.c_str(), 0,32,3,3,3); 
+  std::string conv2d_1_b_path =  dir_prefix + std::string("conv2d_1_b.bin"); 
+  void* conv2d_1_b =  readTrainedWeights(conv2d_1_b_path.c_str(), 0,1,32,1,1); 
+  std::string conv2d_2_w_path =  dir_prefix + std::string("conv2d_2_w.bin"); 
+  void* conv2d_2_w =  readTrainedWeights(conv2d_2_w_path.c_str(), 0,32,32,3,3); 
+  std::string conv2d_2_b_path =  dir_prefix + std::string("conv2d_2_b.bin"); 
+  void* conv2d_2_b =  readTrainedWeights(conv2d_2_b_path.c_str(), 0,1,32,1,1); 
+  std::string conv2d_3_w_path =  dir_prefix + std::string("conv2d_3_w.bin"); 
+  void* conv2d_3_w =  readTrainedWeights(conv2d_3_w_path.c_str(), 0,64,32,3,3); 
+  std::string conv2d_3_b_path =  dir_prefix + std::string("conv2d_3_b.bin"); 
+  void* conv2d_3_b =  readTrainedWeights(conv2d_3_b_path.c_str(), 0,1,64,1,1); 
+  std::string conv2d_4_w_path =  dir_prefix + std::string("conv2d_4_w.bin"); 
+  void* conv2d_4_w =  readTrainedWeights(conv2d_4_w_path.c_str(), 0,64,64,3,3); 
+  std::string conv2d_4_b_path =  dir_prefix + std::string("conv2d_4_b.bin"); 
+  void* conv2d_4_b =  readTrainedWeights(conv2d_4_b_path.c_str(), 0,1,64,1,1); 
+  std::string dense_1_w_path =  dir_prefix + std::string("dense_1_w.bin"); 
+  void* dense_1_w =  readTrainedWeights(dense_1_w_path.c_str(), 0,1,1,1600,256); 
+  std::string dense_1_b_path =  dir_prefix + std::string("dense_1_b.bin"); 
+  void* dense_1_b =  readTrainedWeights(dense_1_b_path.c_str(), 0,1,256,1,1); 
+  std::string dense_2_w_path =  dir_prefix + std::string("dense_2_w.bin"); 
+  void* dense_2_w =  readTrainedWeights(dense_2_w_path.c_str(), 0,1,1,256,5); 
+  std::string dense_2_b_path =  dir_prefix + std::string("dense_2_b.bin"); 
+  void* dense_2_b =  readTrainedWeights(dense_2_b_path.c_str(), 0,1,5,1,1); 
+  // 500 test images, NCHW 3x32x32, with their uint32 labels.
+  void* input = readTrainedWeights(input_path.c_str(), 0,500,3,32,32); 
+  uint32_t* labels = readLabels3(labels_path.c_str(),500); 
+
+  __visc__init(); 
+  // NOTE(review): args and labels are never freed — acceptable for a
+  // one-shot test driver, but a leak if this main is ever looped.
+  RootIn* args = static_cast<RootIn*>(malloc(sizeof(RootIn))); 
+
+  // Slot order must mirror root()'s parameter list (see RootIn).
+  args->input = input; 
+  args->input_bytes = 0; 
+  args->conv2d_1_w = conv2d_1_w; 
+  args->conv2d_1_w_bytes = 0; 
+  args->conv2d_1_b = conv2d_1_b; 
+  args->conv2d_1_b_bytes = 0; 
+  args->conv2d_2_w = conv2d_2_w; 
+  args->conv2d_2_w_bytes = 0; 
+  args->conv2d_2_b = conv2d_2_b; 
+  args->conv2d_2_b_bytes = 0; 
+  args->conv2d_3_w = conv2d_3_w; 
+  args->conv2d_3_w_bytes = 0; 
+  args->conv2d_3_b = conv2d_3_b; 
+  args->conv2d_3_b_bytes = 0; 
+  args->conv2d_4_w = conv2d_4_w; 
+  args->conv2d_4_w_bytes = 0; 
+  args->conv2d_4_b = conv2d_4_b; 
+  args->conv2d_4_b_bytes = 0; 
+  args->dense_1_w = dense_1_w; 
+  args->dense_1_w_bytes = 0; 
+  args->dense_1_b = dense_1_b; 
+  args->dense_1_b_bytes = 0; 
+  args->dense_2_w = dense_2_w; 
+  args->dense_2_w_bytes = 0; 
+  args->dense_2_b = dense_2_b; 
+  args->dense_2_b_bytes = 0; 
+
+  // Launch the non-streaming (0) dataflow graph and block until done.
+  void* dfg = __visc__launch(0, root, (void*) args); 
+
+  __visc__wait(dfg); 
+
+  // NOTE(review): the graph result is read back from args->input —
+  // presumably the HPVM launch writes the bound output into the args
+  // block and this field aliases it; confirm against the generator.
+  void *result = static_cast<RootIn*>(args)->input; 
+  hpvm_request_tensor(result, 0); 
+
+  __visc__cleanup(); 
+  computeAccuracy3(labels, result); 
+  return 0; 
+
+}