From 9ffc997cfdaa12097d835f5671654cd5738cecde Mon Sep 17 00:00:00 2001
From: Akash Kothari <akashk4@tyler.cs.illinois.edu>
Date: Tue, 1 Dec 2020 16:05:25 -0600
Subject: [PATCH] Fix bugs for ResNets

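Alongside the runtime fixes, this patch checks in per-network autotuning
result files under PPoPP_results/soc_sim_results/ (dev_time and emp_time)
and snapshots older CUDA sources under tensor_runtime/src/legacy/.

Each <network>.txt result file starts with a baseline run time, followed by
configuration blocks delimited by '+++++' and '-----'. A block's header line is

    confN <ratio1> <ratio2> <accuracy> <accuracy-loss>

where conf1 is always the fp32 baseline (both ratios 1, loss 0.0); the two
ratios are presumably speedup and energy gain over conf1, while accuracy and
accuracy-loss follow directly from the data. Each subsequent line gives one
layer's placement and approximation knobs, e.g.
'2 gpu conv perf_fp16 154 add fp16 1 pool_max fp16 1'.

For reference, a minimal Python sketch that parses this layout; the field
names beyond accuracy/accuracy-loss are assumptions, not an official schema:

    # Hypothetical reader for the result files added in this patch.
    from dataclasses import dataclass, field
    from typing import List

    @dataclass
    class Config:
        name: str
        speedup: float        # assumed: predicted speedup over conf1
        energy: float         # assumed: predicted energy gain over conf1
        accuracy: float       # final accuracy (%)
        accuracy_loss: float  # accuracy drop vs. conf1
        layers: List[str] = field(default_factory=list)

    def parse_config_file(path):
        """Parse one <network>.txt: baseline time, then '+++++'/'-----' blocks."""
        with open(path) as f:
            lines = [ln.strip() for ln in f if ln.strip()]
        baseline_time = float(lines[0])  # e.g. 1114.3009809999999
        configs, cur = [], None
        for ln in lines[1:]:
            if ln == "+++++":            # opens a new configuration block
                cur = None
            elif ln == "-----":          # closes the current block
                if cur is not None:
                    configs.append(cur)
                cur = None
            elif cur is None:            # first line in a block: the header
                name, sp, en, acc, loss = ln.split()
                cur = Config(name, float(sp), float(en), float(acc), float(loss))
            else:                        # per-layer knob line
                cur.layers.append(ln)
        return baseline_time, configs

For example, parse_config_file('alexnet2_cifar10.txt') should return the
baseline time and 38 Config records for the file added below.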
---
 .../alexnet2_cifar10/alexnet2_cifar10.txt     |  419 +++
 .../alexnet_cifar10/alexnet_cifar10.txt       |  511 +++
 .../alexnet_imagenet/alexnet_imagenet.txt     |  229 ++
 .../dev_time/lenet_keras/lenet_keras.txt      |  409 +++
 .../mobilenet_cifar10/mobilenet_cifar10.txt   | 3220 +++++++++++++++++
 .../resnet18_cifar10/resnet18_cifar10.txt     | 2296 ++++++++++++
 .../resnet50_imagenet/resnet50_imagenet.txt   | 1057 ++++++
 .../dev_time/vgg16_cifar10/vgg16_cifar10.txt  |  913 +++++
 .../vgg16_cifar100/vgg16_cifar100.txt         |  970 +++++
 .../vgg16_imagenet/vgg16_imagenet.txt         |  561 +++
 .../alexnet2_cifar10/alexnet2_cifar10.txt     |   23 +
 .../alexnet_cifar10/alexnet_cifar10.txt       |  421 +++
 .../alexnet_imagenet/alexnet_imagenet.txt     |  289 ++
 .../emp_time/lenet_keras/lenet_keras.txt      |  409 +++
 .../mobilenet_cifar10/mobilenet_cifar10.txt   |  871 +++++
 .../resnet18_cifar10/resnet18_cifar10.txt     |   91 +
 .../resnet50_imagenet/resnet50_imagenet.txt   | 1233 +++++++
 .../emp_time/vgg16_cifar10/vgg16_cifar10.txt  |   58 +
 .../vgg16_cifar100/vgg16_cifar100.txt         |   77 +
 .../vgg16_imagenet/vgg16_imagenet.txt         |   41 +
 .../hpvm-tensor-rt/lib/tensor_runtime.ll      |   10 +-
 .../tensor_runtime/include/configuration.h    |    5 +-
 .../tensor_runtime/include/global_data.h      |    2 +
 .../tensor_runtime/include/tensor_runtime.h   |   21 +
 .../include/tensor_signatures.cc              |    6 +-
 .../tensor_runtime/src/global_data.cc         |    3 +
 .../tensor_runtime/src/hpvm-rt-controller.cpp |   31 +-
 .../src/legacy/approx_techniques_back.cu      |  862 +++++
 .../src/legacy/tensor_runtime.cu              | 2121 +++++++++++
 .../src/legacy/wrapper_runtime_back.cu        |  123 +
 .../tensor_runtime/src/wrapper_runtime.cu     |   45 +-
 31 files changed, 17307 insertions(+), 20 deletions(-)
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/alexnet2_cifar10/alexnet2_cifar10.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/alexnet_cifar10/alexnet_cifar10.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/alexnet_imagenet/alexnet_imagenet.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/lenet_keras/lenet_keras.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/mobilenet_cifar10/mobilenet_cifar10.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/resnet18_cifar10/resnet18_cifar10.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/resnet50_imagenet/resnet50_imagenet.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/vgg16_cifar10/vgg16_cifar10.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/vgg16_cifar100/vgg16_cifar100.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/vgg16_imagenet/vgg16_imagenet.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/alexnet2_cifar10/alexnet2_cifar10.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/alexnet_cifar10/alexnet_cifar10.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/alexnet_imagenet/alexnet_imagenet.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/lenet_keras/lenet_keras.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/mobilenet_cifar10/mobilenet_cifar10.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/resnet18_cifar10/resnet18_cifar10.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/resnet50_imagenet/resnet50_imagenet.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/vgg16_cifar10/vgg16_cifar10.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/vgg16_cifar100/vgg16_cifar100.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/vgg16_imagenet/vgg16_imagenet.txt
 create mode 100644 llvm/projects/hpvm-tensor-rt/tensor_runtime/src/legacy/approx_techniques_back.cu
 create mode 100644 llvm/projects/hpvm-tensor-rt/tensor_runtime/src/legacy/tensor_runtime.cu
 create mode 100644 llvm/projects/hpvm-tensor-rt/tensor_runtime/src/legacy/wrapper_runtime_back.cu

diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/alexnet2_cifar10/alexnet2_cifar10.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/alexnet2_cifar10/alexnet2_cifar10.txt
new file mode 100644
index 0000000000..6ec4a06d3d
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/alexnet2_cifar10/alexnet2_cifar10.txt
@@ -0,0 +1,419 @@
+1114.3009809999999
++++++
+conf1 1 1 84.98 0.0
+1 gpu conv fp32 11 add fp32 1 tanh fp32 1
+2 gpu conv fp32 11 add fp32 1 tanh fp32 1 pool_max fp32 1
+3 gpu conv fp32 11 add fp32 1 tanh fp32 1
+4 gpu conv fp32 11 add fp32 1 tanh fp32 1 pool_max fp32 1
+5 gpu conv fp32 11 add fp32 1 tanh fp32 1
+6 gpu conv fp32 11 add fp32 1 tanh fp32 1 pool_max fp32 1
+7 gpu mul fp32 11 add fp32 1
+8 gpu softmax fp32 1
+-----
++++++
+conf2 2.4248748377353113 2.0815908534183163 84.5 0.480000000000004
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 157 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf3 2.4055188425519614 2.0586265720811823 84.48 0.5
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 269 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 157 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf4 2.4156140842962985 2.0617867479342706 84.28 0.7000000000000028
+1 gpu conv perf_fp16 157 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 163 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf5 2.396416918342732 2.0506214971794585 84.02 0.960000000000008
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 151 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 157 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf6 2.463002582910052 2.1171077568609458 83.84 1.1400000000000006
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 167 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf7 2.360283215266004 2.0255245321874304 83.78 1.2000000000000028
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 157 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf8 2.4140791541736157 2.0671513522247653 83.74000000000001 1.2399999999999949
+1 gpu conv fp16 11 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 160 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf9 2.457753689612079 2.1086250651240137 83.7 1.2800000000000011
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 163 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf10 2.459170454055443 2.1111925341396343 83.7 1.2800000000000011
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 164 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf11 2.4135986141645764 2.060453960420927 83.62 1.3599999999999994
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 157 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf12 2.4631278039012106 2.1092094797926637 83.58 1.4000000000000057
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 157 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf13 2.535761391794481 2.16998336112692 83.58 1.4000000000000057
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 157 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf14 2.289006193945062 1.961240158652051 83.54 1.4399999999999977
+1 gpu conv perf_fp16 167 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 155 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 157 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf15 2.4257674844112573 2.0808440756495563 83.5 1.480000000000004
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 161 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf16 2.458122368488622 2.109531159729078 83.48 1.5
+1 gpu conv fp16 11 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 162 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf17 2.281072202152105 1.9539314420536427 83.46000000000001 1.519999999999996
+1 gpu conv fp16 11 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 160 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 160 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf18 2.4572171342078444 2.1088933553775697 83.46000000000001 1.519999999999996
+1 gpu conv fp16 11 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 163 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf19 2.3017607719030058 1.9782265708150768 83.42 1.5600000000000023
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 162 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 162 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf20 2.379206814483014 2.047909200292713 83.39999999999999 1.5800000000000125
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 151 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf21 2.4636282705302537 2.1162281156388527 83.39999999999999 1.5800000000000125
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 160 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf22 2.461590101374146 2.1108493881199184 83.22 1.7600000000000051
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 161 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf23 2.537054645442804 2.167568834938183 83.22 1.7600000000000051
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 157 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf24 2.4631604723407885 2.1099694757102845 83.17999999999999 1.8000000000000114
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 157 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf25 2.4636282705302537 2.1162281156388527 83.14 1.8400000000000034
+1 gpu conv fp16 11 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 160 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf26 2.462588899729088 2.109477918791931 83.14 1.8400000000000034
+1 gpu conv fp16 11 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 157 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf27 2.4638085754689025 2.1071960926343603 83.1 1.8800000000000097
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 157 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf28 2.4640079766123635 2.110326453157297 83.08 1.9000000000000057
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf29 2.459337622764853 2.107249218450713 83.06 1.9200000000000017
+1 gpu conv fp16 11 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 162 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf30 2.538176340059405 2.173287257415721 83.02000000000001 1.9599999999999937
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 164 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf31 2.3905426931959846 2.044333576277581 83.02000000000001 1.9599999999999937
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 160 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 157 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf32 2.459337622764853 2.107249218450713 83.0 1.980000000000004
+1 gpu conv fp16 11 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 162 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf33 2.458968579288317 2.1063450826631396 82.89999999999999 2.0800000000000125
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 163 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf34 2.2912974651603877 1.9670210508860688 82.8 2.180000000000007
+1 gpu conv perf_fp16 168 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 155 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 160 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf35 2.4648489763056327 2.113931670664391 82.66 2.3200000000000074
+1 gpu conv fp16 11 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 160 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf36 2.4599076869402854 2.1077397371200193 82.6 2.3800000000000097
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 162 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf37 2.4636282705302537 2.1162281156388527 82.54 2.4399999999999977
+1 gpu conv fp16 11 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 160 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
++++++
+conf38 2.591814267389778 2.222680944458784 82.26 2.719999999999999
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv perf_fp16 154 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv perf_fp16 157 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/alexnet_cifar10/alexnet_cifar10.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/alexnet_cifar10/alexnet_cifar10.txt
new file mode 100644
index 0000000000..a9ccba6eb6
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/alexnet_cifar10/alexnet_cifar10.txt
@@ -0,0 +1,511 @@
+2592.187221
++++++
+conf1 1 1 79.28 0.0
+1 gpu conv fp32 11 add fp32 1 tanh fp32 1 pool_max fp32 1
+2 gpu conv fp32 11 add fp32 1 tanh fp32 1 pool_max fp32 1
+3 gpu conv fp32 11 add fp32 1 tanh fp32 1
+4 gpu conv fp32 11 add fp32 1 tanh fp32 1
+5 gpu conv fp32 11 add fp32 1 tanh fp32 1 pool_max fp32 1
+6 gpu mul fp32 11 add fp32 1
+7 gpu softmax fp32 1
+-----
++++++
+conf2 1.7593976485873195 1.6193399031642917 79.23 0.04999999999999716
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf3 2.092625440752526 1.9139078015388271 78.96 0.3200000000000074
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf4 1.8870195448805414 1.7296919053025768 78.8 0.480000000000004
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf5 2.1184804041774554 1.9598989563949536 78.75999999999999 0.5200000000000102
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf6 2.1184804041774554 1.9598989563949536 78.75999999999999 0.5200000000000102
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf7 2.0933825381386364 1.9150743378318535 78.64 0.6400000000000006
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf8 2.081712090729918 1.9102226906341664 78.5 0.7800000000000011
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf9 2.081712090729918 1.9102226906341664 78.5 0.7800000000000011
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf10 2.2662606588487595 2.066560750795139 78.48 0.7999999999999972
+1 gpu conv samp_fp16 264 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf11 2.121684761285686 1.966318179285323 78.48 0.7999999999999972
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf12 2.3417491169395532 2.1355030360671465 78.38000000000001 0.8999999999999915
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf13 2.2247938983110425 2.060416584958474 78.38000000000001 0.8999999999999915
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf14 2.2247938983110425 2.060416584958474 78.38000000000001 0.8999999999999915
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf15 2.2247938983110425 2.060416584958474 78.38000000000001 0.8999999999999915
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf16 2.2627828537139263 2.065683616898884 78.32000000000001 0.9599999999999937
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf17 2.2627828537139263 2.065683616898884 78.32000000000001 0.9599999999999937
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf18 2.2627828537139263 2.065683616898884 78.32000000000001 0.9599999999999937
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf19 2.146571989407323 1.95711703610764 78.18 1.0999999999999943
+1 gpu conv samp_fp16 264 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf20 2.303316973793268 2.1036463961913276 78.10000000000001 1.1799999999999926
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf21 2.436875653706139 2.2434837737118056 78.08 1.2000000000000028
+1 gpu conv samp_fp16 264 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf22 2.436875653706139 2.2434837737118056 78.08 1.2000000000000028
+1 gpu conv samp_fp16 264 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf23 2.436875653706139 2.2434837737118056 78.08 1.2000000000000028
+1 gpu conv samp_fp16 264 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf24 2.1106508925330925 1.9419233584234938 78.06 1.2199999999999989
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf25 2.3203534290038634 2.116965679235447 78.06 1.2199999999999989
+1 gpu conv samp_fp16 264 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf26 2.3527290658539215 2.145832257234814 78.03999999999999 1.240000000000009
+1 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf27 2.3527290658539215 2.145832257234814 78.03999999999999 1.240000000000009
+1 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf28 2.432854949808342 2.2424500615508003 78.0 1.2800000000000011
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf29 2.432854949808342 2.2424500615508003 78.0 1.2800000000000011
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf30 2.432854949808342 2.2424500615508003 78.0 1.2800000000000011
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf31 2.3137982135449207 2.1281257317083417 77.84 1.4399999999999977
+1 gpu conv samp_fp16 264 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 265 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf32 2.1198074418988333 1.9522214255218437 77.82 1.460000000000008
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf33 2.246924974355375 2.065289762405701 77.8 1.480000000000004
+1 gpu conv samp_fp16 264 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 269 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf34 2.263614734554485 2.090777846534249 77.74 1.5400000000000063
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf35 2.263614734554485 2.090777846534249 77.74 1.5400000000000063
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf36 2.263614734554485 2.090777846534249 77.74 1.5400000000000063
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf37 2.5289288699015304 2.334007588396142 77.72 1.5600000000000023
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf38 2.5289288699015304 2.334007588396142 77.72 1.5600000000000023
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf39 2.3117594882585775 2.1152397180868943 77.56 1.7199999999999989
+1 gpu conv samp_fp16 264 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf40 2.452732477854469 2.264573687601476 77.56 1.7199999999999989
+1 gpu conv perf_fp16 167 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf41 2.452732477854469 2.264573687601476 77.56 1.7199999999999989
+1 gpu conv perf_fp16 167 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf42 2.382518688546389 2.178614303992064 77.5 1.7800000000000011
+1 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf43 2.382518688546389 2.178614303992064 77.5 1.7800000000000011
+1 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf44 2.3900667100485924 2.188128526401265 77.48 1.7999999999999972
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf45 2.3900667100485924 2.188128526401265 77.48 1.7999999999999972
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf46 2.3900667100485924 2.188128526401265 77.48 1.7999999999999972
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf47 2.4835281673276515 2.279527076032239 77.3 1.980000000000004
+1 gpu conv samp_fp16 264 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf48 2.4835281673276515 2.279527076032239 77.3 1.980000000000004
+1 gpu conv samp_fp16 264 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf49 2.1553694968551302 1.9959124044028933 77.18 2.0999999999999943
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 265 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf50 2.5877520959724816 2.3763616521050364 77.03999999999999 2.240000000000009
+1 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf51 2.5877520959724816 2.3763616521050364 77.03999999999999 2.240000000000009
+1 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/alexnet_imagenet/alexnet_imagenet.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/alexnet_imagenet/alexnet_imagenet.txt
new file mode 100644
index 0000000000..b0e42a5aaa
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/alexnet_imagenet/alexnet_imagenet.txt
@@ -0,0 +1,229 @@
+2739.950736
++++++
+conf1 1 1 56.3 0.0
+1 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+2 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+3 gpu conv fp32 11 add fp32 1 relu fp32 1
+4 gpu conv fp32 11 add fp32 1 relu fp32 1
+5 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+6 gpu mul fp32 11 add fp32 1 relu fp32 1
+7 gpu mul fp32 11 add fp32 1 relu fp32 1
+8 gpu mul fp32 11 add fp32 1
+9 gpu softmax fp32 1
+-----
++++++
+conf2 1.802133644103582 1.8186433204507424 55.76 0.5399999999999991
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf3 1.7574572103878898 1.7673706184460103 55.58 0.7199999999999989
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1
+5 gpu conv samp_fp16 268 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf4 2.0227701930718065 2.043112495268932 55.42 0.8799999999999955
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 166 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf5 1.9872634777043927 2.002789650227035 55.120000000000005 1.1799999999999926
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 164 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf6 1.8204253918445088 1.843736069756362 54.84 1.4599999999999937
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 164 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 154 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf7 1.9308336510645352 1.934889049414224 54.74 1.5599999999999952
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 168 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf8 2.0146435217865446 2.0367475358800102 54.58 1.7199999999999989
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 162 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf9 2.0101709494490696 2.0329911158023064 54.400000000000006 1.8999999999999915
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 164 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf10 2.0052132441967916 2.0284931705407003 54.300000000000004 1.999999999999993
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 168 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf11 2.010827434817262 2.036001862538864 54.2 2.0999999999999943
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 164 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 154 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf12 2.019868378233057 2.0433540129730265 54.17999999999999 2.1200000000000045
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 162 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf13 1.9923471030291253 2.009177323959059 54.120000000000005 2.1799999999999926
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 164 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf14 1.9923471030291253 2.009177323959059 54.120000000000005 2.1799999999999926
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 164 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf15 2.028037341700216 2.049760395549724 54.0 2.299999999999997
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 166 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf16 1.9910730364852436 2.006510848093771 53.54 2.759999999999998
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 164 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 164 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf17 2.1567475543719614 2.159142310265706 53.300000000000004 2.999999999999993
+1 gpu conv perf_fp16 164 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 166 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf18 2.1567475543719614 2.159142310265706 53.300000000000004 2.999999999999993
+1 gpu conv perf_fp16 164 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 166 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf19 2.0232690820426464 2.0527698121318476 53.300000000000004 2.999999999999993
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 168 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 11 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/lenet_keras/lenet_keras.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/lenet_keras/lenet_keras.txt
new file mode 100644
index 0000000000..b4e51dff42
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/lenet_keras/lenet_keras.txt
@@ -0,0 +1,409 @@
+282.5141369999999
++++++
+conf1 1 1 98.7 0.0
+1 gpu conv fp32 11 add fp32 1 pool_max fp32 1 tanh fp32 1
+2 gpu conv fp32 11 add fp32 1 pool_max fp32 1 tanh fp32 1
+3 gpu mul fp32 11 add fp32 1 tanh fp32 1
+4 gpu mul fp32 11 add fp32 1 tanh fp32 1
+5 gpu softmax fp32 1
+-----
++++++
+conf2 1.828613181003043 2.071721708828981 98.65 0.04999999999999716
+1 gpu conv perf_fp16 156 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf3 1.8936889628815377 2.139779619692146 98.65 0.04999999999999716
+1 gpu conv perf_fp16 152 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf4 1.8936889628815377 2.139779619692146 98.65 0.04999999999999716
+1 gpu conv perf_fp16 152 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf5 1.8936889628815377 2.139779619692146 98.65 0.04999999999999716
+1 gpu conv perf_fp16 152 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf6 1.8247639611533713 2.0227145446958756 98.64 0.060000000000002274
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf7 1.8247639611533713 2.0227145446958756 98.64 0.060000000000002274
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf8 1.8406161850501603 2.037849502542524 98.64 0.060000000000002274
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf9 1.8406161850501603 2.037849502542524 98.64 0.060000000000002274
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf10 1.8406161850501603 2.037849502542524 98.64 0.060000000000002274
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf11 1.8663357888260776 2.115790921611576 98.64 0.060000000000002274
+1 gpu conv perf_fp16 155 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf12 1.8663357888260776 2.115790921611576 98.64 0.060000000000002274
+1 gpu conv perf_fp16 155 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf13 1.8663357888260776 2.115790921611576 98.64 0.060000000000002274
+1 gpu conv perf_fp16 155 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf14 1.8645645142051612 2.1037012333044935 98.61999999999999 0.0800000000000125
+1 gpu conv perf_fp16 167 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf15 1.8645645142051612 2.1037012333044935 98.61999999999999 0.0800000000000125
+1 gpu conv perf_fp16 167 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf16 1.8645645142051612 2.1037012333044935 98.61999999999999 0.0800000000000125
+1 gpu conv perf_fp16 167 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf17 2.2168527051833635 2.453341076720038 98.61999999999999 0.0800000000000125
+1 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf18 2.2168527051833635 2.453341076720038 98.61999999999999 0.0800000000000125
+1 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf19 2.2168527051833635 2.453341076720038 98.61999999999999 0.0800000000000125
+1 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf20 1.8406161850501603 2.037849502542524 98.6 0.10000000000000853
+1 gpu conv fp16 12 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf21 1.8406161850501603 2.037849502542524 98.6 0.10000000000000853
+1 gpu conv fp16 12 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf22 1.8406161850501603 2.037849502542524 98.6 0.10000000000000853
+1 gpu conv fp16 12 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf23 1.8406161850501603 2.037849502542524 98.6 0.10000000000000853
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf24 1.8406161850501603 2.037849502542524 98.6 0.10000000000000853
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf25 1.8406161850501603 2.037849502542524 98.6 0.10000000000000853
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf26 2.200653361151419 2.425091789360736 98.6 0.10000000000000853
+1 gpu conv samp_fp16 266 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf27 2.200653361151419 2.425091789360736 98.6 0.10000000000000853
+1 gpu conv samp_fp16 266 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf28 1.8406161850501603 2.037849502542524 98.58 0.12000000000000455
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf29 1.8406161850501603 2.037849502542524 98.58 0.12000000000000455
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf30 1.8406161850501603 2.037849502542524 98.58 0.12000000000000455
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf31 1.8445326456180258 2.087601822059355 98.58 0.12000000000000455
+1 gpu conv perf_fp16 156 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf32 1.8445326456180258 2.087601822059355 98.58 0.12000000000000455
+1 gpu conv perf_fp16 156 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf33 1.8445326456180258 2.087601822059355 98.58 0.12000000000000455
+1 gpu conv perf_fp16 156 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf34 1.8916677984300285 2.155437579874673 98.58 0.12000000000000455
+1 gpu conv perf_fp16 158 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf35 1.8916677984300285 2.155437579874673 98.58 0.12000000000000455
+1 gpu conv perf_fp16 158 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf36 1.8916677984300285 2.155437579874673 98.58 0.12000000000000455
+1 gpu conv perf_fp16 158 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf37 1.8649226857257986 2.1076025277601325 98.56 0.14000000000000057
+1 gpu conv perf_fp16 168 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf38 1.8649226857257986 2.1076025277601325 98.56 0.14000000000000057
+1 gpu conv perf_fp16 168 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf39 1.8649226857257986 2.1076025277601325 98.56 0.14000000000000057
+1 gpu conv perf_fp16 168 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf40 1.8463058650555446 2.067271423078985 98.56 0.14000000000000057
+1 gpu conv perf_fp16 157 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf41 1.8463058650555446 2.067271423078985 98.56 0.14000000000000057
+1 gpu conv perf_fp16 157 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf42 1.8463058650555446 2.067271423078985 98.56 0.14000000000000057
+1 gpu conv perf_fp16 157 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf43 1.9234076467497994 2.1864740913112275 98.56 0.14000000000000057
+1 gpu conv perf_fp16 153 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf44 1.9234076467497994 2.1864740913112275 98.56 0.14000000000000057
+1 gpu conv perf_fp16 153 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf45 1.9234076467497994 2.1864740913112275 98.56 0.14000000000000057
+1 gpu conv perf_fp16 153 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf46 1.8698191484268973 2.13979218727595 98.54 0.1599999999999966
+1 gpu conv perf_fp16 159 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf47 1.8698191484268973 2.13979218727595 98.54 0.1599999999999966
+1 gpu conv perf_fp16 159 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf48 1.8575043605938137 2.092057786757256 98.52 0.18000000000000682
+1 gpu conv perf_fp16 165 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf49 1.8575043605938137 2.092057786757256 98.52 0.18000000000000682
+1 gpu conv perf_fp16 165 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf50 1.8575043605938137 2.092057786757256 98.52 0.18000000000000682
+1 gpu conv perf_fp16 165 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf51 1.8534621507951072 2.1231113105788597 98.44000000000001 0.2599999999999909
+1 gpu conv perf_fp16 159 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
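Given such a file, choosing a configuration reduces to a scan over the parsed blocks. The sketch below (reusing the illustrative parse_conf_file above) picks the fastest configuration within an accuracy-loss budget; it is a simplified stand-in for the runtime controller's selection policy, not a transcription of it.

def best_conf_within_budget(path, max_loss):
    """Fastest configuration whose accuracy loss stays within max_loss (%)."""
    _, configs = parse_conf_file(path)
    ok = [c for c in configs if c.accuracy_loss <= max_loss]
    return max(ok, key=lambda c: c.speedup) if ok else None

# A 0.15% budget on the lenet_keras.txt results above admits conf1-conf45
# and picks conf17 (~2.22x predicted speedup at 0.08% loss).
best = best_conf_within_budget("lenet_keras.txt", 0.15)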
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/mobilenet_cifar10/mobilenet_cifar10.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/mobilenet_cifar10/mobilenet_cifar10.txt
new file mode 100644
index 0000000000..b4d8bd893c
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/mobilenet_cifar10/mobilenet_cifar10.txt
@@ -0,0 +1,3220 @@
+4077.307063200001
++++++
+conf1 1 1 84.42 0.0
+1 gpu conv fp32 11
+2 gpu batchnorm fp32 11
+3 gpu relu fp32 11
+4 gpu group_conv fp32 11
+5 gpu batchnorm fp32 11
+6 gpu relu fp32 11
+7 gpu conv fp32 11
+8 gpu batchnorm fp32 11
+9 gpu relu fp32 11
+10 gpu group_conv fp32 11
+11 gpu batchnorm fp32 11
+12 gpu relu fp32 11
+13 gpu conv fp32 11
+14 gpu batchnorm fp32 11
+15 gpu relu fp32 11
+16 gpu group_conv fp32 11
+17 gpu batchnorm fp32 11
+18 gpu relu fp32 11
+19 gpu conv fp32 11
+20 gpu batchnorm fp32 11
+21 gpu relu fp32 11
+22 gpu group_conv fp32 11
+23 gpu batchnorm fp32 11
+24 gpu relu fp32 11
+25 gpu conv fp32 11
+26 gpu batchnorm fp32 11
+27 gpu relu fp32 11
+28 gpu group_conv fp32 11
+29 gpu batchnorm fp32 11
+30 gpu relu fp32 11
+31 gpu conv fp32 11
+32 gpu batchnorm fp32 11
+33 gpu relu fp32 11
+34 gpu group_conv fp32 11
+35 gpu batchnorm fp32 11
+36 gpu relu fp32 11
+37 gpu conv fp32 11
+38 gpu batchnorm fp32 11
+39 gpu relu fp32 11
+40 gpu group_conv fp32 11
+41 gpu batchnorm fp32 11
+42 gpu relu fp32 11
+43 gpu conv fp32 11
+44 gpu batchnorm fp32 11
+45 gpu relu fp32 11
+46 gpu group_conv fp32 11
+47 gpu batchnorm fp32 11
+48 gpu relu fp32 11
+49 gpu conv fp32 11
+50 gpu batchnorm fp32 11
+51 gpu relu fp32 11
+52 gpu group_conv fp32 11
+53 gpu batchnorm fp32 11
+54 gpu relu fp32 11
+55 gpu conv fp32 11
+56 gpu batchnorm fp32 11
+57 gpu relu fp32 11
+58 gpu group_conv fp32 11
+59 gpu batchnorm fp32 11
+60 gpu relu fp32 11
+61 gpu conv fp32 11
+62 gpu batchnorm fp32 11
+63 gpu relu fp32 11
+64 gpu group_conv fp32 11
+65 gpu batchnorm fp32 11
+66 gpu relu fp32 11
+67 gpu conv fp32 11
+68 gpu batchnorm fp32 11
+69 gpu relu fp32 11
+70 gpu group_conv fp32 11
+71 gpu batchnorm fp32 11
+72 gpu relu fp32 11
+73 gpu conv fp32 11
+74 gpu batchnorm fp32 11
+75 gpu relu fp32 11
+76 gpu group_conv fp32 11
+77 gpu batchnorm fp32 11
+78 gpu relu fp32 11
+79 gpu conv fp32 11
+80 gpu batchnorm fp32 11
+81 gpu relu fp32 11
+82 gpu pool_mean fp32 11
+83 gpu mul fp32 11 add fp32 1
+84 gpu softmax fp32 1
+-----
++++++
+conf2 1.4930855091460031 1.447990050940341 83.72 0.7000000000000028
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv fp16 12
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf3 1.493397883226807 1.449591062426989 83.72 0.7000000000000028
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 163
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf4 1.4934429016801338 1.4500582352111675 83.72 0.7000000000000028
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 168
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf5 1.4938214813031556 1.450038222978811 83.72 0.7000000000000028
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 157
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf6 1.4933879828131855 1.449975636202813 83.72 0.7000000000000028
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 160
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf7 1.492663093331302 1.4487067754520524 83.7 0.7199999999999989
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 167
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf8 1.495724395088184 1.4507925552157772 83.56 0.8599999999999994
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 162
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf9 1.496506307637598 1.4521705950285135 83.36 1.0600000000000023
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 162
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf10 1.496532672928805 1.4521696542076958 83.36 1.0600000000000023
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 156
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf11 1.4988418058849937 1.4555327556053628 83.28 1.1400000000000006
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 168
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 164
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 158
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf12 1.4994289979945077 1.4562439330251535 83.28 1.1400000000000006
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 168
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 152
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 153
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf13 1.4952028793065038 1.450369851058777 83.14 1.2800000000000011
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 162
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 151
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 155
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 156
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 152
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 152
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf14 1.4933978285280285 1.448265686258097 83.12 1.2999999999999972
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv fp16 12
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 158
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf15 1.491958833559989 1.4459262032919467 83.08 1.3400000000000034
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 157
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 155
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv fp16 12
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 152
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 152
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf16 1.4937317297990984 1.4498121856525021 83.02000000000001 1.3999999999999915
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 156
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 158
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf17 1.4963413808686974 1.4522391736954623 82.86 1.5600000000000023
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 165
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf18 1.4942172827099065 1.4504631324933321 82.86 1.5600000000000023
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 157
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 158
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf19 1.4963964073376739 1.4525461321361477 82.86 1.5600000000000023
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 158
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf20 1.4932583049858652 1.4472547227714012 82.84 1.5799999999999983
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv samp_fp16 266
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf21 1.4964326545281064 1.4526263046333605 82.82000000000001 1.5999999999999943
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 152
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 158
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf22 1.4966042483929347 1.4527859961226985 82.82000000000001 1.5999999999999943
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 152
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 153
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf23 1.4966008974318024 1.4527415844509437 82.78 1.6400000000000006
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 155
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 158
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf24 1.4932738366973777 1.448820445466833 82.64 1.7800000000000011
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 164
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 151
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 155
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 156
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 157
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 152
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf25 1.4940402684133964 1.447332235394843 82.48 1.9399999999999977
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv samp_fp16 261
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf26 1.4981764588414919 1.4530714150549078 82.39999999999999 2.0200000000000102
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 152
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 151
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 161
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 156
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 152
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 152
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf27 1.5004334658773033 1.4549115105608688 82.3 2.1200000000000045
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 152
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 151
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 155
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 156
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 152
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 156
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf28 1.5006808163336343 1.4553824345285296 82.3 2.1200000000000045
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 152
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 151
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 155
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 156
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 152
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 152
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf29 1.4999870719460484 1.4571625511374704 82.28 2.1400000000000006
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 168
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 152
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 165
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf30 1.500042366879961 1.4574715946270216 82.28 2.1400000000000006
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 168
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 152
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 158
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf31 1.500214789632402 1.4576323532660131 82.28 2.1400000000000006
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 168
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 163
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 164
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 152
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 153
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf32 1.4927009086066445 1.4484049211953174 82.26 2.1599999999999966
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 164
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 151
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 161
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 156
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 152
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 152
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf33 1.5003438014588875 1.4538240352408085 82.22 2.200000000000003
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 152
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 151
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 155
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv fp16 12
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 152
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 152
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf34 1.5041587978616728 1.4610492456195174 82.02000000000001 2.3999999999999915
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 168
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 152
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 161
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 155
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 152
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 152
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 158
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf35 1.5000040131742656 1.4555601139156464 81.88 2.5400000000000063
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 152
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 151
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 155
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 152
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 167
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf36 1.4950571524902583 1.451478376045808 81.84 2.5799999999999983
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 164
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv perf_fp16 161
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 161
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 155
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 155
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 152
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 153
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf37 1.4975271575548847 1.4532126224638244 81.44 2.980000000000004
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 164
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 11
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 155
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 155
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 152
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 153
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/resnet18_cifar10/resnet18_cifar10.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/resnet18_cifar10/resnet18_cifar10.txt
new file mode 100644
index 0000000000..654cffbf63
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/resnet18_cifar10/resnet18_cifar10.txt
@@ -0,0 +1,2296 @@
+2484.981244
++++++
+conf1 1 1 89.56 0.0
+1 gpu conv fp32 11 add fp32 1 relu fp32 1
+2 gpu conv fp32 11 add fp32 1 relu fp32 1
+3 gpu conv fp32 11 add fp32 1
+4 gpu add fp32 11
+5 gpu relu fp32 11
+6 gpu conv fp32 11 add fp32 1 relu fp32 1
+7 gpu conv fp32 11 add fp32 1
+8 gpu add fp32 11
+9 gpu relu fp32 11
+10 gpu conv fp32 11 add fp32 1 relu fp32 1
+11 gpu conv fp32 11 add fp32 1
+12 gpu add fp32 11
+13 gpu relu fp32 11
+14 gpu conv fp32 11 add fp32 1 relu fp32 1
+15 gpu conv fp32 11 add fp32 1
+16 gpu conv fp32 11 add fp32 1
+17 gpu add fp32 11
+18 gpu relu fp32 11
+19 gpu conv fp32 11 add fp32 1 relu fp32 1
+20 gpu conv fp32 11 add fp32 1
+21 gpu add fp32 11
+22 gpu relu fp32 11
+23 gpu conv fp32 11 add fp32 1 relu fp32 1
+24 gpu conv fp32 11 add fp32 1
+25 gpu add fp32 11
+26 gpu relu fp32 11
+27 gpu conv fp32 11 add fp32 1 relu fp32 1
+28 gpu conv fp32 11 add fp32 1
+29 gpu conv fp32 11 add fp32 1
+30 gpu add fp32 11
+31 gpu relu fp32 11
+32 gpu conv fp32 11 add fp32 1 relu fp32 1
+33 gpu conv fp32 11 add fp32 1
+34 gpu add fp32 11
+35 gpu relu fp32 11
+36 gpu conv fp32 11 add fp32 1 relu fp32 1
+37 gpu conv fp32 11 add fp32 1
+38 gpu add fp32 11
+39 gpu relu fp32 11
+40 gpu pool_mean fp32 11
+41 gpu mul fp32 11 add fp32 1
+42 gpu softmax fp32 1
+-----
++++++
+conf2 1.767527790869615 1.7962938589450996 88.96 0.6000000000000085
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 162 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 167 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 167 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+20 gpu conv perf_fp16 155 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv fp16 12 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 160 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 155 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf3 1.7676486174436143 1.7967155014984917 88.78 0.7800000000000011
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 162 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 167 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 167 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv perf_fp16 160 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+20 gpu conv perf_fp16 155 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv fp16 12 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 160 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 155 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf4 1.7674352647250422 1.792910560846682 88.7 0.8599999999999994
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 162 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 167 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 167 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 168 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv fp16 12 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 160 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 155 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf5 1.8655703338511067 1.8930089896922888 88.53999999999999 1.0200000000000102
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 167 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 158 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv perf_fp16 159 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 165 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 157 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf6 1.9070428103729684 1.9172857853336078 88.38000000000001 1.1799999999999926
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 157 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 152 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv samp_fp16 266 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+20 gpu conv perf_fp16 152 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv samp_fp16 261 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 155 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf7 1.769778590701739 1.7956222622694236 88.24 1.3200000000000074
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv fp16 12 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv samp_fp16 268 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf8 1.841404652091802 1.8677947628418006 88.24 1.3200000000000074
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 168 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 162 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf9 1.8679349428783487 1.8995927920729931 88.22 1.3400000000000034
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 159 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 160 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 168 add fp16 1 relu fp16 1
+20 gpu conv perf_fp16 161 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf10 1.876937310100899 1.9041581451399825 88.1 1.460000000000008
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 158 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf11 1.842140004857965 1.8673692956620238 88.06 1.5
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 166 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 167 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf12 1.9070567138857761 1.9165525910492667 88.02 1.5400000000000063
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 157 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 152 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv samp_fp16 266 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 261 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 152 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 155 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf13 1.9185835698271805 1.9328202469403 87.98 1.5799999999999983
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 157 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 152 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv samp_fp16 266 add fp16 1
+16 gpu conv perf_fp16 160 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+20 gpu conv perf_fp16 152 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 152 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 155 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf14 1.781744853993609 1.8082995958456516 87.92 1.6400000000000006
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 166 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 168 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv perf_fp16 159 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 165 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv samp_fp16 265 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv samp_fp16 268 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf15 1.9185835698271805 1.9328202469403 87.92 1.6400000000000006
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 157 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 152 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv samp_fp16 266 add fp16 1
+16 gpu conv perf_fp16 160 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+20 gpu conv perf_fp16 152 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 152 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 12 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 155 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf16 1.875261840315855 1.8986912653657988 87.88 1.6800000000000068
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 159 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 156 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 12 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf17 1.9013559086026153 1.9230901214481015 87.86 1.7000000000000028
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf18 1.9185835698271805 1.9328202469403 87.83999999999999 1.720000000000013
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 157 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 152 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv samp_fp16 266 add fp16 1
+16 gpu conv perf_fp16 160 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+20 gpu conv perf_fp16 152 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 152 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 155 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf19 1.8770503055325798 1.9007923328014182 87.82 1.740000000000009
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 162 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 158 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 151 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf20 1.8774136276932418 1.90365663123621 87.82 1.740000000000009
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 158 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf21 1.943143041264842 1.9591958561422729 87.82 1.740000000000009
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf22 1.870789918969847 1.8863625217899933 87.8 1.7600000000000051
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 264 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf23 1.7445941809066292 1.7754934270309912 87.78 1.7800000000000011
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 162 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 167 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 167 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv perf_fp16 160 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+20 gpu conv perf_fp16 155 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv fp16 12 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 160 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv perf_fp16 166 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 155 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf24 1.9065930313550916 1.928938946228637 87.78 1.7800000000000011
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 167 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf25 1.9021824494907031 1.9237134505552098 87.78 1.7800000000000011
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 154 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf26 1.9017271009017505 1.9211078231701697 87.78 1.7800000000000011
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 162 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf27 1.8187224917656395 1.820406007609536 87.76 1.7999999999999972
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv samp_fp16 264 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf28 1.9070855899343322 1.9285210655709735 87.76 1.7999999999999972
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv samp_fp16 268 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf29 1.9013559086026153 1.9230901214481015 87.74 1.8200000000000074
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf30 1.8772990284718367 1.9022146647342513 87.72 1.8400000000000034
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 162 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 158 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf31 1.9013559086026153 1.9230901214481015 87.68 1.8799999999999955
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf32 1.9020502478364545 1.923319572598976 87.66000000000001 1.8999999999999915
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf33 1.7516394053514481 1.7809034526471939 87.62 1.9399999999999977
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 157 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 162 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 167 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 167 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv perf_fp16 160 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+20 gpu conv perf_fp16 155 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv fp16 12 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 160 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv perf_fp16 166 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 155 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf34 1.7814953252955337 1.8122658147993431 87.62 1.9399999999999977
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 162 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 167 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 167 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv perf_fp16 160 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+20 gpu conv perf_fp16 155 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv fp16 12 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 160 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv perf_fp16 166 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 155 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf35 1.887538247557846 1.9103369445911678 87.62 1.9399999999999977
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 158 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 159 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf36 1.9107566783735581 1.9273803227885578 87.6 1.960000000000008
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 157 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf37 1.9013559086026153 1.9230901214481015 87.58 1.980000000000004
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 12 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf38 1.8984089819969947 1.9195632881772446 87.58 1.980000000000004
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf39 1.9020502478364545 1.923319572598976 87.52 2.0400000000000063
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf40 1.9020502478364545 1.923319572598976 87.52 2.0400000000000063
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf41 1.9013559086026153 1.9230901214481015 87.5 2.0600000000000023
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf42 1.9013559086026153 1.9230901214481015 87.46000000000001 2.0999999999999943
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 11 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf43 1.9196179152539186 1.9443459719929068 87.44 2.1200000000000045
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 153 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf44 1.9020502478364545 1.923319572598976 87.4 2.1599999999999966
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf45 1.9152817031040366 1.9357432559063958 87.4 2.1599999999999966
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf46 1.915754791147898 1.9373322475753219 87.4 2.1599999999999966
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf47 1.9130551004051772 1.9409232417921056 87.38 2.180000000000007
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv perf_fp16 153 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf48 1.9421147660673033 1.9584555432766413 87.38 2.180000000000007
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf49 1.9052849920081363 1.9300100333661123 87.32 2.240000000000009
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 153 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf50 1.9154322863033566 1.934908329027621 87.3 2.260000000000005
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv perf_fp16 151 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
++++++
+conf51 1.9079703554020564 1.9287218218306195 86.96000000000001 2.5999999999999943
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv perf_fp16 153 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 161 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv perf_fp16 154 add fp16 1 relu fp16 1
+11 gpu conv perf_fp16 151 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 11 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+20 gpu conv samp_fp16 262 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv perf_fp16 158 add fp16 1 relu fp16 1
+24 gpu conv perf_fp16 153 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv samp_fp16 261 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv perf_fp16 152 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
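
Note on the generated files above (editorial aid, not part of the patch data): the layout, inferred from the data itself, is — first line of each file: baseline fp32 runtime; each candidate configuration is bracketed by "+++++" and "-----"; its header line reads "<name> <time-improvement> <energy-improvement> <accuracy> <accuracy-loss>"; every following line assigns one tensor op an approximation knob (fp32/fp16, perf_fp16 <knob-id>, samp_fp16 <knob-id>, ...). A minimal standalone parser sketch under that assumption follows; the file name, struct fields, and column meanings are inferred, not confirmed by this patch.

    // parse_configs.cpp — illustrative only; build: g++ -std=c++17 parse_configs.cpp -o parse_configs
    #include <fstream>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    struct Config {
      std::string name;
      double speedup = 0, energy = 0, accuracy = 0, accuracyLoss = 0;
      std::vector<std::string> layers;   // raw per-op knob lines, kept verbatim
    };

    int main(int argc, char **argv) {
      if (argc < 2) { std::cerr << "usage: parse_configs <file>\n"; return 1; }
      std::ifstream in(argv[1]);
      std::string line;
      std::getline(in, line);                     // line 1: baseline runtime
      double baseline = std::stod(line);
      std::vector<Config> configs;
      bool inBlock = false, haveHeader = false;
      Config cur;
      while (std::getline(in, line)) {
        if (line == "+++++") {                    // open a config block
          inBlock = true; haveHeader = false; cur = Config{};
        } else if (line == "-----") {             // close the block
          if (inBlock) configs.push_back(cur);
          inBlock = false;
        } else if (inBlock && !haveHeader && !line.empty()) {
          // header: <name> <time-improvement> <energy-improvement> <acc> <acc-loss>
          std::istringstream hs(line);
          hs >> cur.name >> cur.speedup >> cur.energy
             >> cur.accuracy >> cur.accuracyLoss;
          haveHeader = true;
        } else if (inBlock && !line.empty()) {
          cur.layers.push_back(line);             // one tensor op + its knob
        }
      }
      std::cout << "baseline " << baseline << ", "
                << configs.size() << " configs\n";
      for (const auto &c : configs)
        std::cout << c.name << ": speedup " << c.speedup
                  << ", acc loss " << c.accuracyLoss << "\n";
      return 0;
    }

Run against one of the applied files (e.g. resnet18_cifar10.txt) it prints the baseline time and one line per configuration, which is enough to sort candidates by speedup or accuracy loss when inspecting these results by hand.
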
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/resnet50_imagenet/resnet50_imagenet.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/resnet50_imagenet/resnet50_imagenet.txt
new file mode 100644
index 0000000000..094eed413b
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/resnet50_imagenet/resnet50_imagenet.txt
@@ -0,0 +1,1057 @@
+7161.053769000008
++++++
+conf1 1 1 75.7 0.0
+1 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+2 gpu batchnorm fp32 11
+3 gpu conv fp32 11 add fp32 1
+4 gpu batchnorm fp32 11
+5 gpu relu fp32 11
+6 gpu conv fp32 11 add fp32 1
+7 gpu batchnorm fp32 11
+8 gpu relu fp32 11
+9 gpu conv fp32 11 add fp32 1
+10 gpu batchnorm fp32 11
+11 gpu conv fp32 11 add fp32 1
+12 gpu batchnorm fp32 11
+13 gpu add fp32 11
+14 gpu relu fp32 11
+15 gpu conv fp32 11 add fp32 1
+16 gpu batchnorm fp32 11
+17 gpu relu fp32 11
+18 gpu conv fp32 11 add fp32 1
+19 gpu batchnorm fp32 11
+20 gpu relu fp32 11
+21 gpu conv fp32 11 add fp32 1
+22 gpu batchnorm fp32 11
+23 gpu add fp32 11
+24 gpu relu fp32 11
+25 gpu conv fp32 11 add fp32 1
+26 gpu batchnorm fp32 11
+27 gpu relu fp32 11
+28 gpu conv fp32 11 add fp32 1
+29 gpu batchnorm fp32 11
+30 gpu relu fp32 11
+31 gpu conv fp32 11 add fp32 1
+32 gpu batchnorm fp32 11
+33 gpu add fp32 11
+34 gpu relu fp32 11
+35 gpu conv fp32 11 add fp32 1
+36 gpu batchnorm fp32 11
+37 gpu relu fp32 11
+38 gpu conv fp32 11 add fp32 1
+39 gpu batchnorm fp32 11
+40 gpu relu fp32 11
+41 gpu conv fp32 11 add fp32 1
+42 gpu batchnorm fp32 11
+43 gpu conv fp32 11 add fp32 1
+44 gpu batchnorm fp32 11
+45 gpu add fp32 11
+46 gpu relu fp32 11
+47 gpu conv fp32 11 add fp32 1
+48 gpu batchnorm fp32 11
+49 gpu relu fp32 11
+50 gpu conv fp32 11 add fp32 1
+51 gpu batchnorm fp32 11
+52 gpu relu fp32 11
+53 gpu conv fp32 11 add fp32 1
+54 gpu batchnorm fp32 11
+55 gpu add fp32 11
+56 gpu relu fp32 11
+57 gpu conv fp32 11 add fp32 1
+58 gpu batchnorm fp32 11
+59 gpu relu fp32 11
+60 gpu conv fp32 11 add fp32 1
+61 gpu batchnorm fp32 11
+62 gpu relu fp32 11
+63 gpu conv fp32 11 add fp32 1
+64 gpu batchnorm fp32 11
+65 gpu add fp32 11
+66 gpu relu fp32 11
+67 gpu conv fp32 11 add fp32 1
+68 gpu batchnorm fp32 11
+69 gpu relu fp32 11
+70 gpu conv fp32 11 add fp32 1
+71 gpu batchnorm fp32 11
+72 gpu relu fp32 11
+73 gpu conv fp32 11 add fp32 1
+74 gpu batchnorm fp32 11
+75 gpu add fp32 11
+76 gpu relu fp32 11
+77 gpu conv fp32 11 add fp32 1
+78 gpu batchnorm fp32 11
+79 gpu relu fp32 11
+80 gpu conv fp32 11 add fp32 1
+81 gpu batchnorm fp32 11
+82 gpu relu fp32 11
+83 gpu conv fp32 11 add fp32 1
+84 gpu batchnorm fp32 11
+85 gpu conv fp32 11 add fp32 1
+86 gpu batchnorm fp32 11
+87 gpu add fp32 11
+88 gpu relu fp32 11
+89 gpu conv fp32 11 add fp32 1
+90 gpu batchnorm fp32 11
+91 gpu relu fp32 11
+92 gpu conv fp32 11 add fp32 1
+93 gpu batchnorm fp32 11
+94 gpu relu fp32 11
+95 gpu conv fp32 11 add fp32 1
+96 gpu batchnorm fp32 11
+97 gpu add fp32 11
+98 gpu relu fp32 11
+99 gpu conv fp32 11 add fp32 1
+100 gpu batchnorm fp32 11
+101 gpu relu fp32 11
+102 gpu conv fp32 11 add fp32 1
+103 gpu batchnorm fp32 11
+104 gpu relu fp32 11
+105 gpu conv fp32 11 add fp32 1
+106 gpu batchnorm fp32 11
+107 gpu add fp32 11
+108 gpu relu fp32 11
+109 gpu conv fp32 11 add fp32 1
+110 gpu batchnorm fp32 11
+111 gpu relu fp32 11
+112 gpu conv fp32 11 add fp32 1
+113 gpu batchnorm fp32 11
+114 gpu relu fp32 11
+115 gpu conv fp32 11 add fp32 1
+116 gpu batchnorm fp32 11
+117 gpu add fp32 11
+118 gpu relu fp32 11
+119 gpu conv fp32 11 add fp32 1
+120 gpu batchnorm fp32 11
+121 gpu relu fp32 11
+122 gpu conv fp32 11 add fp32 1
+123 gpu batchnorm fp32 11
+124 gpu relu fp32 11
+125 gpu conv fp32 11 add fp32 1
+126 gpu batchnorm fp32 11
+127 gpu add fp32 11
+128 gpu relu fp32 11
+129 gpu conv fp32 11 add fp32 1
+130 gpu batchnorm fp32 11
+131 gpu relu fp32 11
+132 gpu conv fp32 11 add fp32 1
+133 gpu batchnorm fp32 11
+134 gpu relu fp32 11
+135 gpu conv fp32 11 add fp32 1
+136 gpu batchnorm fp32 11
+137 gpu add fp32 11
+138 gpu relu fp32 11
+139 gpu conv fp32 11 add fp32 1
+140 gpu batchnorm fp32 11
+141 gpu relu fp32 11
+142 gpu conv fp32 11 add fp32 1
+143 gpu batchnorm fp32 11
+144 gpu relu fp32 11
+145 gpu conv fp32 11 add fp32 1
+146 gpu batchnorm fp32 11
+147 gpu conv fp32 11 add fp32 1
+148 gpu batchnorm fp32 11
+149 gpu add fp32 11
+150 gpu relu fp32 11
+151 gpu conv fp32 11 add fp32 1
+152 gpu batchnorm fp32 11
+153 gpu relu fp32 11
+154 gpu conv fp32 11 add fp32 1
+155 gpu batchnorm fp32 11
+156 gpu relu fp32 11
+157 gpu conv fp32 11 add fp32 1
+158 gpu batchnorm fp32 11
+159 gpu add fp32 11
+160 gpu relu fp32 11
+161 gpu conv fp32 11 add fp32 1
+162 gpu batchnorm fp32 11
+163 gpu relu fp32 11
+164 gpu conv fp32 11 add fp32 1
+165 gpu batchnorm fp32 11
+166 gpu relu fp32 11
+167 gpu conv fp32 11 add fp32 1
+168 gpu batchnorm fp32 11
+169 gpu add fp32 11
+170 gpu relu fp32 11
+171 gpu pool_max fp32 11
+172 gpu mul fp32 11 add fp32 1
+173 gpu softmax fp32 1
+-----
++++++
+conf2 1.8254789092281507 1.4527803526239977 75.7 0.0
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu batchnorm fp16 12
+3 gpu conv fp16 12 add fp16 1
+4 gpu batchnorm fp16 12
+5 gpu relu fp16 12
+6 gpu conv fp16 12 add fp16 1
+7 gpu batchnorm fp16 12
+8 gpu relu fp16 12
+9 gpu conv fp16 12 add fp16 1
+10 gpu batchnorm fp16 12
+11 gpu conv fp16 12 add fp16 1
+12 gpu batchnorm fp16 12
+13 gpu add fp16 12
+14 gpu relu fp16 12
+15 gpu conv fp16 12 add fp16 1
+16 gpu batchnorm fp16 12
+17 gpu relu fp16 12
+18 gpu conv fp16 12 add fp16 1
+19 gpu batchnorm fp16 12
+20 gpu relu fp16 12
+21 gpu conv fp16 12 add fp16 1
+22 gpu batchnorm fp16 12
+23 gpu add fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12 add fp16 1
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu conv fp16 12 add fp16 1
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12 add fp16 1
+32 gpu batchnorm fp16 12
+33 gpu add fp16 12
+34 gpu relu fp16 12
+35 gpu conv fp16 12 add fp16 1
+36 gpu batchnorm fp16 12
+37 gpu relu fp16 12
+38 gpu conv fp16 12 add fp16 1
+39 gpu batchnorm fp16 12
+40 gpu relu fp16 12
+41 gpu conv fp16 12 add fp16 1
+42 gpu batchnorm fp16 12
+43 gpu conv fp16 12 add fp16 1
+44 gpu batchnorm fp16 12
+45 gpu add fp16 12
+46 gpu relu fp16 12
+47 gpu conv fp16 12 add fp16 1
+48 gpu batchnorm fp16 12
+49 gpu relu fp16 12
+50 gpu conv fp16 12 add fp16 1
+51 gpu batchnorm fp16 12
+52 gpu relu fp16 12
+53 gpu conv fp16 12 add fp16 1
+54 gpu batchnorm fp16 12
+55 gpu add fp16 12
+56 gpu relu fp16 12
+57 gpu conv fp16 12 add fp16 1
+58 gpu batchnorm fp16 12
+59 gpu relu fp16 12
+60 gpu conv fp16 12 add fp16 1
+61 gpu batchnorm fp16 12
+62 gpu relu fp16 12
+63 gpu conv fp16 12 add fp16 1
+64 gpu batchnorm fp16 12
+65 gpu add fp16 12
+66 gpu relu fp16 12
+67 gpu conv fp16 12 add fp16 1
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu conv fp16 12 add fp16 1
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv fp16 12 add fp16 1
+74 gpu batchnorm fp16 12
+75 gpu add fp16 12
+76 gpu relu fp16 12
+77 gpu conv fp16 12 add fp16 1
+78 gpu batchnorm fp16 12
+79 gpu relu fp16 12
+80 gpu conv fp16 12 add fp16 1
+81 gpu batchnorm fp16 12
+82 gpu relu fp16 12
+83 gpu conv fp16 12 add fp16 1
+84 gpu batchnorm fp16 12
+85 gpu conv fp16 12 add fp16 1
+86 gpu batchnorm fp16 12
+87 gpu add fp16 12
+88 gpu relu fp16 12
+89 gpu conv fp16 12 add fp16 1
+90 gpu batchnorm fp16 12
+91 gpu relu fp16 12
+92 gpu conv fp16 12 add fp16 1
+93 gpu batchnorm fp16 12
+94 gpu relu fp16 12
+95 gpu conv fp16 12 add fp16 1
+96 gpu batchnorm fp16 12
+97 gpu add fp16 12
+98 gpu relu fp16 12
+99 gpu conv fp16 12 add fp16 1
+100 gpu batchnorm fp16 12
+101 gpu relu fp16 12
+102 gpu conv fp16 12 add fp16 1
+103 gpu batchnorm fp16 12
+104 gpu relu fp16 12
+105 gpu conv fp16 12 add fp16 1
+106 gpu batchnorm fp16 12
+107 gpu add fp16 12
+108 gpu relu fp16 12
+109 gpu conv fp16 12 add fp16 1
+110 gpu batchnorm fp16 12
+111 gpu relu fp16 12
+112 gpu conv fp16 12 add fp16 1
+113 gpu batchnorm fp16 12
+114 gpu relu fp16 12
+115 gpu conv fp16 12 add fp16 1
+116 gpu batchnorm fp16 12
+117 gpu add fp16 12
+118 gpu relu fp16 12
+119 gpu conv fp16 12 add fp16 1
+120 gpu batchnorm fp16 12
+121 gpu relu fp16 12
+122 gpu conv fp16 12 add fp16 1
+123 gpu batchnorm fp16 12
+124 gpu relu fp16 12
+125 gpu conv fp16 12 add fp16 1
+126 gpu batchnorm fp16 12
+127 gpu add fp16 12
+128 gpu relu fp16 12
+129 gpu conv fp16 12 add fp16 1
+130 gpu batchnorm fp16 12
+131 gpu relu fp16 12
+132 gpu conv fp16 12 add fp16 1
+133 gpu batchnorm fp16 12
+134 gpu relu fp16 12
+135 gpu conv fp16 12 add fp16 1
+136 gpu batchnorm fp16 12
+137 gpu add fp16 12
+138 gpu relu fp16 12
+139 gpu conv fp16 12 add fp16 1
+140 gpu batchnorm fp16 12
+141 gpu relu fp16 12
+142 gpu conv fp16 12 add fp16 1
+143 gpu batchnorm fp16 12
+144 gpu relu fp16 12
+145 gpu conv fp16 12 add fp16 1
+146 gpu batchnorm fp16 12
+147 gpu conv fp16 12 add fp16 1
+148 gpu batchnorm fp16 12
+149 gpu add fp16 12
+150 gpu relu fp16 12
+151 gpu conv fp16 12 add fp16 1
+152 gpu batchnorm fp16 12
+153 gpu relu fp16 12
+154 gpu conv fp16 12 add fp16 1
+155 gpu batchnorm fp16 12
+156 gpu relu fp16 12
+157 gpu conv fp16 12 add fp16 1
+158 gpu batchnorm fp16 12
+159 gpu add fp16 12
+160 gpu relu fp16 12
+161 gpu conv fp16 12 add fp16 1
+162 gpu batchnorm fp16 12
+163 gpu relu fp16 12
+164 gpu conv fp16 12 add fp16 1
+165 gpu batchnorm fp16 12
+166 gpu relu fp16 12
+167 gpu conv fp16 12 add fp16 1
+168 gpu batchnorm fp16 12
+169 gpu add fp16 12
+170 gpu relu fp16 12
+171 gpu pool_max fp16 12
+172 gpu mul fp16 12 add fp16 1
+173 gpu softmax fp32 1
+-----
++++++
+conf3 1.8254789092281507 1.4527803526239977 75.7 0.0
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu batchnorm fp16 12
+3 gpu conv fp16 12 add fp16 1
+4 gpu batchnorm fp16 12
+5 gpu relu fp16 12
+6 gpu conv fp16 12 add fp16 1
+7 gpu batchnorm fp16 12
+8 gpu relu fp16 12
+9 gpu conv fp16 12 add fp16 1
+10 gpu batchnorm fp16 12
+11 gpu conv fp16 12 add fp16 1
+12 gpu batchnorm fp16 12
+13 gpu add fp16 12
+14 gpu relu fp16 12
+15 gpu conv fp16 12 add fp16 1
+16 gpu batchnorm fp16 12
+17 gpu relu fp16 12
+18 gpu conv fp16 12 add fp16 1
+19 gpu batchnorm fp16 12
+20 gpu relu fp16 12
+21 gpu conv fp16 12 add fp16 1
+22 gpu batchnorm fp16 12
+23 gpu add fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12 add fp16 1
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu conv fp16 12 add fp16 1
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12 add fp16 1
+32 gpu batchnorm fp16 12
+33 gpu add fp16 12
+34 gpu relu fp16 12
+35 gpu conv fp16 12 add fp16 1
+36 gpu batchnorm fp16 12
+37 gpu relu fp16 12
+38 gpu conv fp16 12 add fp16 1
+39 gpu batchnorm fp16 12
+40 gpu relu fp16 12
+41 gpu conv fp16 12 add fp16 1
+42 gpu batchnorm fp16 12
+43 gpu conv fp16 12 add fp16 1
+44 gpu batchnorm fp16 12
+45 gpu add fp16 12
+46 gpu relu fp16 12
+47 gpu conv fp16 12 add fp16 1
+48 gpu batchnorm fp16 12
+49 gpu relu fp16 12
+50 gpu conv fp16 12 add fp16 1
+51 gpu batchnorm fp16 12
+52 gpu relu fp16 12
+53 gpu conv fp16 12 add fp16 1
+54 gpu batchnorm fp16 12
+55 gpu add fp16 12
+56 gpu relu fp16 12
+57 gpu conv fp16 12 add fp16 1
+58 gpu batchnorm fp16 12
+59 gpu relu fp16 12
+60 gpu conv fp16 12 add fp16 1
+61 gpu batchnorm fp16 12
+62 gpu relu fp16 12
+63 gpu conv fp16 12 add fp16 1
+64 gpu batchnorm fp16 12
+65 gpu add fp16 12
+66 gpu relu fp16 12
+67 gpu conv fp16 12 add fp16 1
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu conv fp16 12 add fp16 1
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv fp16 12 add fp16 1
+74 gpu batchnorm fp16 12
+75 gpu add fp16 12
+76 gpu relu fp16 12
+77 gpu conv fp16 12 add fp16 1
+78 gpu batchnorm fp16 12
+79 gpu relu fp16 12
+80 gpu conv fp16 12 add fp16 1
+81 gpu batchnorm fp16 12
+82 gpu relu fp16 12
+83 gpu conv fp16 12 add fp16 1
+84 gpu batchnorm fp16 12
+85 gpu conv fp16 12 add fp16 1
+86 gpu batchnorm fp16 12
+87 gpu add fp16 12
+88 gpu relu fp16 12
+89 gpu conv fp16 12 add fp16 1
+90 gpu batchnorm fp16 12
+91 gpu relu fp16 12
+92 gpu conv fp16 12 add fp16 1
+93 gpu batchnorm fp16 12
+94 gpu relu fp16 12
+95 gpu conv fp16 12 add fp16 1
+96 gpu batchnorm fp16 12
+97 gpu add fp16 12
+98 gpu relu fp16 12
+99 gpu conv fp16 12 add fp16 1
+100 gpu batchnorm fp16 12
+101 gpu relu fp16 12
+102 gpu conv fp16 12 add fp16 1
+103 gpu batchnorm fp16 12
+104 gpu relu fp16 12
+105 gpu conv fp16 12 add fp16 1
+106 gpu batchnorm fp16 12
+107 gpu add fp16 12
+108 gpu relu fp16 12
+109 gpu conv fp16 12 add fp16 1
+110 gpu batchnorm fp16 12
+111 gpu relu fp16 12
+112 gpu conv fp16 12 add fp16 1
+113 gpu batchnorm fp16 12
+114 gpu relu fp16 12
+115 gpu conv fp16 12 add fp16 1
+116 gpu batchnorm fp16 12
+117 gpu add fp16 12
+118 gpu relu fp16 12
+119 gpu conv fp16 12 add fp16 1
+120 gpu batchnorm fp16 12
+121 gpu relu fp16 12
+122 gpu conv fp16 12 add fp16 1
+123 gpu batchnorm fp16 12
+124 gpu relu fp16 12
+125 gpu conv fp16 12 add fp16 1
+126 gpu batchnorm fp16 12
+127 gpu add fp16 12
+128 gpu relu fp16 12
+129 gpu conv fp16 12 add fp16 1
+130 gpu batchnorm fp16 12
+131 gpu relu fp16 12
+132 gpu conv fp16 12 add fp16 1
+133 gpu batchnorm fp16 12
+134 gpu relu fp16 12
+135 gpu conv fp16 12 add fp16 1
+136 gpu batchnorm fp16 12
+137 gpu add fp16 12
+138 gpu relu fp16 12
+139 gpu conv fp16 12 add fp16 1
+140 gpu batchnorm fp16 12
+141 gpu relu fp16 12
+142 gpu conv fp16 12 add fp16 1
+143 gpu batchnorm fp16 12
+144 gpu relu fp16 12
+145 gpu conv fp16 12 add fp16 1
+146 gpu batchnorm fp16 12
+147 gpu conv fp16 12 add fp16 1
+148 gpu batchnorm fp16 12
+149 gpu add fp16 12
+150 gpu relu fp16 12
+151 gpu conv fp16 12 add fp16 1
+152 gpu batchnorm fp16 12
+153 gpu relu fp16 12
+154 gpu conv fp16 12 add fp16 1
+155 gpu batchnorm fp16 12
+156 gpu relu fp16 12
+157 gpu conv fp16 12 add fp16 1
+158 gpu batchnorm fp16 12
+159 gpu add fp16 12
+160 gpu relu fp16 12
+161 gpu conv fp16 12 add fp16 1
+162 gpu batchnorm fp16 12
+163 gpu relu fp16 12
+164 gpu conv fp16 12 add fp16 1
+165 gpu batchnorm fp16 12
+166 gpu relu fp16 12
+167 gpu conv fp16 12 add fp16 1
+168 gpu batchnorm fp16 12
+169 gpu add fp16 12
+170 gpu relu fp16 12
+171 gpu pool_max fp16 12
+172 gpu mul fp16 12 add fp16 1
+173 gpu softmax fp32 1
+-----
++++++
+conf4 1.8254789092281507 1.4527803526239977 75.7 0.0
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu batchnorm fp16 12
+3 gpu conv fp16 12 add fp16 1
+4 gpu batchnorm fp16 12
+5 gpu relu fp16 12
+6 gpu conv fp16 12 add fp16 1
+7 gpu batchnorm fp16 12
+8 gpu relu fp16 12
+9 gpu conv fp16 12 add fp16 1
+10 gpu batchnorm fp16 12
+11 gpu conv fp16 12 add fp16 1
+12 gpu batchnorm fp16 12
+13 gpu add fp16 12
+14 gpu relu fp16 12
+15 gpu conv fp16 12 add fp16 1
+16 gpu batchnorm fp16 12
+17 gpu relu fp16 12
+18 gpu conv fp16 12 add fp16 1
+19 gpu batchnorm fp16 12
+20 gpu relu fp16 12
+21 gpu conv fp16 12 add fp16 1
+22 gpu batchnorm fp16 12
+23 gpu add fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12 add fp16 1
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu conv fp16 12 add fp16 1
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12 add fp16 1
+32 gpu batchnorm fp16 12
+33 gpu add fp16 12
+34 gpu relu fp16 12
+35 gpu conv fp16 12 add fp16 1
+36 gpu batchnorm fp16 12
+37 gpu relu fp16 12
+38 gpu conv fp16 12 add fp16 1
+39 gpu batchnorm fp16 12
+40 gpu relu fp16 12
+41 gpu conv fp16 12 add fp16 1
+42 gpu batchnorm fp16 12
+43 gpu conv fp16 12 add fp16 1
+44 gpu batchnorm fp16 12
+45 gpu add fp16 12
+46 gpu relu fp16 12
+47 gpu conv fp16 12 add fp16 1
+48 gpu batchnorm fp16 12
+49 gpu relu fp16 12
+50 gpu conv fp16 12 add fp16 1
+51 gpu batchnorm fp16 12
+52 gpu relu fp16 12
+53 gpu conv fp16 12 add fp16 1
+54 gpu batchnorm fp16 12
+55 gpu add fp16 12
+56 gpu relu fp16 12
+57 gpu conv fp16 12 add fp16 1
+58 gpu batchnorm fp16 12
+59 gpu relu fp16 12
+60 gpu conv fp16 12 add fp16 1
+61 gpu batchnorm fp16 12
+62 gpu relu fp16 12
+63 gpu conv fp16 12 add fp16 1
+64 gpu batchnorm fp16 12
+65 gpu add fp16 12
+66 gpu relu fp16 12
+67 gpu conv fp16 12 add fp16 1
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu conv fp16 12 add fp16 1
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv fp16 12 add fp16 1
+74 gpu batchnorm fp16 12
+75 gpu add fp16 12
+76 gpu relu fp16 12
+77 gpu conv fp16 12 add fp16 1
+78 gpu batchnorm fp16 12
+79 gpu relu fp16 12
+80 gpu conv fp16 12 add fp16 1
+81 gpu batchnorm fp16 12
+82 gpu relu fp16 12
+83 gpu conv fp16 12 add fp16 1
+84 gpu batchnorm fp16 12
+85 gpu conv fp16 12 add fp16 1
+86 gpu batchnorm fp16 12
+87 gpu add fp16 12
+88 gpu relu fp16 12
+89 gpu conv fp16 12 add fp16 1
+90 gpu batchnorm fp16 12
+91 gpu relu fp16 12
+92 gpu conv fp16 12 add fp16 1
+93 gpu batchnorm fp16 12
+94 gpu relu fp16 12
+95 gpu conv fp16 12 add fp16 1
+96 gpu batchnorm fp16 12
+97 gpu add fp16 12
+98 gpu relu fp16 12
+99 gpu conv fp16 12 add fp16 1
+100 gpu batchnorm fp16 12
+101 gpu relu fp16 12
+102 gpu conv fp16 12 add fp16 1
+103 gpu batchnorm fp16 12
+104 gpu relu fp16 12
+105 gpu conv fp16 12 add fp16 1
+106 gpu batchnorm fp16 12
+107 gpu add fp16 12
+108 gpu relu fp16 12
+109 gpu conv fp16 12 add fp16 1
+110 gpu batchnorm fp16 12
+111 gpu relu fp16 12
+112 gpu conv fp16 12 add fp16 1
+113 gpu batchnorm fp16 12
+114 gpu relu fp16 12
+115 gpu conv fp16 12 add fp16 1
+116 gpu batchnorm fp16 12
+117 gpu add fp16 12
+118 gpu relu fp16 12
+119 gpu conv fp16 12 add fp16 1
+120 gpu batchnorm fp16 12
+121 gpu relu fp16 12
+122 gpu conv fp16 12 add fp16 1
+123 gpu batchnorm fp16 12
+124 gpu relu fp16 12
+125 gpu conv fp16 12 add fp16 1
+126 gpu batchnorm fp16 12
+127 gpu add fp16 12
+128 gpu relu fp16 12
+129 gpu conv fp16 12 add fp16 1
+130 gpu batchnorm fp16 12
+131 gpu relu fp16 12
+132 gpu conv fp16 12 add fp16 1
+133 gpu batchnorm fp16 12
+134 gpu relu fp16 12
+135 gpu conv fp16 12 add fp16 1
+136 gpu batchnorm fp16 12
+137 gpu add fp16 12
+138 gpu relu fp16 12
+139 gpu conv fp16 12 add fp16 1
+140 gpu batchnorm fp16 12
+141 gpu relu fp16 12
+142 gpu conv fp16 12 add fp16 1
+143 gpu batchnorm fp16 12
+144 gpu relu fp16 12
+145 gpu conv fp16 12 add fp16 1
+146 gpu batchnorm fp16 12
+147 gpu conv fp16 12 add fp16 1
+148 gpu batchnorm fp16 12
+149 gpu add fp16 12
+150 gpu relu fp16 12
+151 gpu conv fp16 12 add fp16 1
+152 gpu batchnorm fp16 12
+153 gpu relu fp16 12
+154 gpu conv fp16 12 add fp16 1
+155 gpu batchnorm fp16 12
+156 gpu relu fp16 12
+157 gpu conv fp16 12 add fp16 1
+158 gpu batchnorm fp16 12
+159 gpu add fp16 12
+160 gpu relu fp16 12
+161 gpu conv fp16 12 add fp16 1
+162 gpu batchnorm fp16 12
+163 gpu relu fp16 12
+164 gpu conv fp16 12 add fp16 1
+165 gpu batchnorm fp16 12
+166 gpu relu fp16 12
+167 gpu conv fp16 12 add fp16 1
+168 gpu batchnorm fp16 12
+169 gpu add fp16 12
+170 gpu relu fp16 12
+171 gpu pool_max fp16 12
+172 gpu mul fp16 12 add fp16 1
+173 gpu softmax fp32 1
+-----
++++++
+conf5 1.8323072136026506 1.457112696128105 74.76 0.9399999999999977
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu batchnorm fp16 12
+3 gpu conv fp16 12 add fp16 1
+4 gpu batchnorm fp16 12
+5 gpu relu fp16 12
+6 gpu conv fp16 12 add fp16 1
+7 gpu batchnorm fp16 12
+8 gpu relu fp16 12
+9 gpu conv fp16 12 add fp16 1
+10 gpu batchnorm fp16 12
+11 gpu conv fp16 12 add fp16 1
+12 gpu batchnorm fp16 12
+13 gpu add fp16 12
+14 gpu relu fp16 12
+15 gpu conv fp16 12 add fp16 1
+16 gpu batchnorm fp16 12
+17 gpu relu fp16 12
+18 gpu conv fp16 12 add fp16 1
+19 gpu batchnorm fp16 12
+20 gpu relu fp16 12
+21 gpu conv fp16 12 add fp16 1
+22 gpu batchnorm fp16 12
+23 gpu add fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12 add fp16 1
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu conv fp16 12 add fp16 1
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12 add fp16 1
+32 gpu batchnorm fp16 12
+33 gpu add fp16 12
+34 gpu relu fp16 12
+35 gpu conv fp16 12 add fp16 1
+36 gpu batchnorm fp16 12
+37 gpu relu fp16 12
+38 gpu conv fp16 12 add fp16 1
+39 gpu batchnorm fp16 12
+40 gpu relu fp16 12
+41 gpu conv fp16 12 add fp16 1
+42 gpu batchnorm fp16 12
+43 gpu conv fp16 12 add fp16 1
+44 gpu batchnorm fp16 12
+45 gpu add fp16 12
+46 gpu relu fp16 12
+47 gpu conv fp16 12 add fp16 1
+48 gpu batchnorm fp16 12
+49 gpu relu fp16 12
+50 gpu conv fp16 12 add fp16 1
+51 gpu batchnorm fp16 12
+52 gpu relu fp16 12
+53 gpu conv fp16 12 add fp16 1
+54 gpu batchnorm fp16 12
+55 gpu add fp16 12
+56 gpu relu fp16 12
+57 gpu conv fp16 12 add fp16 1
+58 gpu batchnorm fp16 12
+59 gpu relu fp16 12
+60 gpu conv fp16 12 add fp16 1
+61 gpu batchnorm fp16 12
+62 gpu relu fp16 12
+63 gpu conv fp16 12 add fp16 1
+64 gpu batchnorm fp16 12
+65 gpu add fp16 12
+66 gpu relu fp16 12
+67 gpu conv fp16 12 add fp16 1
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu conv fp16 12 add fp16 1
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv fp16 12 add fp16 1
+74 gpu batchnorm fp16 12
+75 gpu add fp16 12
+76 gpu relu fp16 12
+77 gpu conv fp16 12 add fp16 1
+78 gpu batchnorm fp16 12
+79 gpu relu fp16 12
+80 gpu conv fp16 12 add fp16 1
+81 gpu batchnorm fp16 12
+82 gpu relu fp16 12
+83 gpu conv fp16 12 add fp16 1
+84 gpu batchnorm fp16 12
+85 gpu conv fp16 12 add fp16 1
+86 gpu batchnorm fp16 12
+87 gpu add fp16 12
+88 gpu relu fp16 12
+89 gpu conv fp16 12 add fp16 1
+90 gpu batchnorm fp16 12
+91 gpu relu fp16 12
+92 gpu conv fp16 12 add fp16 1
+93 gpu batchnorm fp16 12
+94 gpu relu fp16 12
+95 gpu conv fp16 12 add fp16 1
+96 gpu batchnorm fp16 12
+97 gpu add fp16 12
+98 gpu relu fp16 12
+99 gpu conv perf_fp16 157 add fp16 1
+100 gpu batchnorm fp16 12
+101 gpu relu fp16 12
+102 gpu conv fp16 12 add fp16 1
+103 gpu batchnorm fp16 12
+104 gpu relu fp16 12
+105 gpu conv fp16 12 add fp16 1
+106 gpu batchnorm fp16 12
+107 gpu add fp16 12
+108 gpu relu fp16 12
+109 gpu conv fp16 12 add fp16 1
+110 gpu batchnorm fp16 12
+111 gpu relu fp16 12
+112 gpu conv fp16 12 add fp16 1
+113 gpu batchnorm fp16 12
+114 gpu relu fp16 12
+115 gpu conv fp16 12 add fp16 1
+116 gpu batchnorm fp16 12
+117 gpu add fp16 12
+118 gpu relu fp16 12
+119 gpu conv fp16 12 add fp16 1
+120 gpu batchnorm fp16 12
+121 gpu relu fp16 12
+122 gpu conv fp16 12 add fp16 1
+123 gpu batchnorm fp16 12
+124 gpu relu fp16 12
+125 gpu conv fp16 12 add fp16 1
+126 gpu batchnorm fp16 12
+127 gpu add fp16 12
+128 gpu relu fp16 12
+129 gpu conv fp16 12 add fp16 1
+130 gpu batchnorm fp16 12
+131 gpu relu fp16 12
+132 gpu conv fp16 12 add fp16 1
+133 gpu batchnorm fp16 12
+134 gpu relu fp16 12
+135 gpu conv fp16 12 add fp16 1
+136 gpu batchnorm fp16 12
+137 gpu add fp16 12
+138 gpu relu fp16 12
+139 gpu conv fp16 12 add fp16 1
+140 gpu batchnorm fp16 12
+141 gpu relu fp16 12
+142 gpu conv fp16 12 add fp16 1
+143 gpu batchnorm fp16 12
+144 gpu relu fp16 12
+145 gpu conv fp16 12 add fp16 1
+146 gpu batchnorm fp16 12
+147 gpu conv fp16 12 add fp16 1
+148 gpu batchnorm fp16 12
+149 gpu add fp16 12
+150 gpu relu fp16 12
+151 gpu conv fp16 12 add fp16 1
+152 gpu batchnorm fp16 12
+153 gpu relu fp16 12
+154 gpu conv fp16 12 add fp16 1
+155 gpu batchnorm fp16 12
+156 gpu relu fp16 12
+157 gpu conv fp16 12 add fp16 1
+158 gpu batchnorm fp16 12
+159 gpu add fp16 12
+160 gpu relu fp16 12
+161 gpu conv fp16 12 add fp16 1
+162 gpu batchnorm fp16 12
+163 gpu relu fp16 12
+164 gpu conv perf_fp16 152 add fp16 1
+165 gpu batchnorm fp16 12
+166 gpu relu fp16 12
+167 gpu conv fp16 12 add fp16 1
+168 gpu batchnorm fp16 12
+169 gpu add fp16 12
+170 gpu relu fp16 12
+171 gpu pool_max fp16 12
+172 gpu mul fp16 12 add fp16 1
+173 gpu softmax fp32 1
+-----
++++++
+conf6 1.8333922701839533 1.4589203187717397 74.53999999999999 1.1600000000000108
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu batchnorm fp16 12
+3 gpu conv fp16 12 add fp16 1
+4 gpu batchnorm fp16 12
+5 gpu relu fp16 12
+6 gpu conv fp16 12 add fp16 1
+7 gpu batchnorm fp16 12
+8 gpu relu fp16 12
+9 gpu conv fp16 12 add fp16 1
+10 gpu batchnorm fp16 12
+11 gpu conv fp16 12 add fp16 1
+12 gpu batchnorm fp16 12
+13 gpu add fp16 12
+14 gpu relu fp16 12
+15 gpu conv fp16 12 add fp16 1
+16 gpu batchnorm fp16 12
+17 gpu relu fp16 12
+18 gpu conv fp16 12 add fp16 1
+19 gpu batchnorm fp16 12
+20 gpu relu fp16 12
+21 gpu conv fp16 12 add fp16 1
+22 gpu batchnorm fp16 12
+23 gpu add fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12 add fp16 1
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu conv fp16 12 add fp16 1
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12 add fp16 1
+32 gpu batchnorm fp16 12
+33 gpu add fp16 12
+34 gpu relu fp16 12
+35 gpu conv fp16 12 add fp16 1
+36 gpu batchnorm fp16 12
+37 gpu relu fp16 12
+38 gpu conv fp16 12 add fp16 1
+39 gpu batchnorm fp16 12
+40 gpu relu fp16 12
+41 gpu conv fp16 12 add fp16 1
+42 gpu batchnorm fp16 12
+43 gpu conv fp16 12 add fp16 1
+44 gpu batchnorm fp16 12
+45 gpu add fp16 12
+46 gpu relu fp16 12
+47 gpu conv fp16 12 add fp16 1
+48 gpu batchnorm fp16 12
+49 gpu relu fp16 12
+50 gpu conv fp16 12 add fp16 1
+51 gpu batchnorm fp16 12
+52 gpu relu fp16 12
+53 gpu conv fp16 12 add fp16 1
+54 gpu batchnorm fp16 12
+55 gpu add fp16 12
+56 gpu relu fp16 12
+57 gpu conv fp16 12 add fp16 1
+58 gpu batchnorm fp16 12
+59 gpu relu fp16 12
+60 gpu conv fp16 12 add fp16 1
+61 gpu batchnorm fp16 12
+62 gpu relu fp16 12
+63 gpu conv fp16 12 add fp16 1
+64 gpu batchnorm fp16 12
+65 gpu add fp16 12
+66 gpu relu fp16 12
+67 gpu conv fp16 12 add fp16 1
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu conv fp16 12 add fp16 1
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv fp16 12 add fp16 1
+74 gpu batchnorm fp16 12
+75 gpu add fp16 12
+76 gpu relu fp16 12
+77 gpu conv fp16 12 add fp16 1
+78 gpu batchnorm fp16 12
+79 gpu relu fp16 12
+80 gpu conv fp16 12 add fp16 1
+81 gpu batchnorm fp16 12
+82 gpu relu fp16 12
+83 gpu conv fp16 12 add fp16 1
+84 gpu batchnorm fp16 12
+85 gpu conv fp16 12 add fp16 1
+86 gpu batchnorm fp16 12
+87 gpu add fp16 12
+88 gpu relu fp16 12
+89 gpu conv fp16 12 add fp16 1
+90 gpu batchnorm fp16 12
+91 gpu relu fp16 12
+92 gpu conv fp16 12 add fp16 1
+93 gpu batchnorm fp16 12
+94 gpu relu fp16 12
+95 gpu conv fp16 12 add fp16 1
+96 gpu batchnorm fp16 12
+97 gpu add fp16 12
+98 gpu relu fp16 12
+99 gpu conv perf_fp16 157 add fp16 1
+100 gpu batchnorm fp16 12
+101 gpu relu fp16 12
+102 gpu conv samp_fp16 267 add fp16 1
+103 gpu batchnorm fp16 12
+104 gpu relu fp16 12
+105 gpu conv fp16 12 add fp16 1
+106 gpu batchnorm fp16 12
+107 gpu add fp16 12
+108 gpu relu fp16 12
+109 gpu conv fp16 12 add fp16 1
+110 gpu batchnorm fp16 12
+111 gpu relu fp16 12
+112 gpu conv fp16 12 add fp16 1
+113 gpu batchnorm fp16 12
+114 gpu relu fp16 12
+115 gpu conv fp16 12 add fp16 1
+116 gpu batchnorm fp16 12
+117 gpu add fp16 12
+118 gpu relu fp16 12
+119 gpu conv fp16 12 add fp16 1
+120 gpu batchnorm fp16 12
+121 gpu relu fp16 12
+122 gpu conv fp16 12 add fp16 1
+123 gpu batchnorm fp16 12
+124 gpu relu fp16 12
+125 gpu conv fp16 12 add fp16 1
+126 gpu batchnorm fp16 12
+127 gpu add fp16 12
+128 gpu relu fp16 12
+129 gpu conv fp16 12 add fp16 1
+130 gpu batchnorm fp16 12
+131 gpu relu fp16 12
+132 gpu conv fp16 12 add fp16 1
+133 gpu batchnorm fp16 12
+134 gpu relu fp16 12
+135 gpu conv fp16 12 add fp16 1
+136 gpu batchnorm fp16 12
+137 gpu add fp16 12
+138 gpu relu fp16 12
+139 gpu conv fp16 12 add fp16 1
+140 gpu batchnorm fp16 12
+141 gpu relu fp16 12
+142 gpu conv fp16 12 add fp16 1
+143 gpu batchnorm fp16 12
+144 gpu relu fp16 12
+145 gpu conv fp16 12 add fp16 1
+146 gpu batchnorm fp16 12
+147 gpu conv fp16 12 add fp16 1
+148 gpu batchnorm fp16 12
+149 gpu add fp16 12
+150 gpu relu fp16 12
+151 gpu conv fp16 12 add fp16 1
+152 gpu batchnorm fp16 12
+153 gpu relu fp16 12
+154 gpu conv fp16 12 add fp16 1
+155 gpu batchnorm fp16 12
+156 gpu relu fp16 12
+157 gpu conv fp16 12 add fp16 1
+158 gpu batchnorm fp16 12
+159 gpu add fp16 12
+160 gpu relu fp16 12
+161 gpu conv fp16 12 add fp16 1
+162 gpu batchnorm fp16 12
+163 gpu relu fp16 12
+164 gpu conv perf_fp16 152 add fp16 1
+165 gpu batchnorm fp16 12
+166 gpu relu fp16 12
+167 gpu conv fp16 12 add fp16 1
+168 gpu batchnorm fp16 12
+169 gpu add fp16 12
+170 gpu relu fp16 12
+171 gpu pool_max fp16 12
+172 gpu mul fp16 12 add fp16 1
+173 gpu softmax fp32 1
+-----
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/vgg16_cifar10/vgg16_cifar10.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/vgg16_cifar10/vgg16_cifar10.txt
new file mode 100644
index 0000000000..2b325a9fe2
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/vgg16_cifar10/vgg16_cifar10.txt
@@ -0,0 +1,913 @@
+3776.508929999999
++++++
+conf1 1 1 89.96 0.0
+1 gpu conv fp32 11 add fp32 1 relu fp32 1
+2 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+3 gpu conv fp32 11 add fp32 1 relu fp32 1
+4 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+5 gpu conv fp32 11 add fp32 1 relu fp32 1
+6 gpu conv fp32 11 add fp32 1 relu fp32 1
+7 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+8 gpu conv fp32 11 add fp32 1 relu fp32 1
+9 gpu conv fp32 11 add fp32 1 relu fp32 1
+10 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+11 gpu conv fp32 11 add fp32 1 relu fp32 1
+12 gpu conv fp32 11 add fp32 1 relu fp32 1
+13 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+14 gpu mul fp32 11 add fp32 1 relu fp32 1
+15 gpu mul fp32 11 add fp32 1
+16 gpu softmax fp32 1
+-----
++++++
+conf2 2.1225958306417145 1.9771056444390926 89.91 0.04999999999999716
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 167 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 155 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 164 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 267 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 161 add fp16 1 relu fp16 1
+12 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf3 2.090180991844805 1.9532689756636086 89.82 0.14000000000000057
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 167 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 269 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 155 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 164 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 161 add fp16 1 relu fp16 1
+12 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf4 2.169931036393396 2.0048851858669283 89.53999999999999 0.4200000000000017
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 155 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv perf_fp16 162 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 264 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 269 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf5 2.1012179398201756 1.9325098819632314 89.42 0.539999999999992
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 155 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 264 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv fp16 11 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 269 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf6 2.2313002482945326 2.069581185407626 89.38000000000001 0.5799999999999841
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 158 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 269 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv fp16 12 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 269 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 264 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf7 2.143061101834193 1.9675759235961738 89.3 0.6599999999999966
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 155 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv fp16 12 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 265 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 264 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 269 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf8 2.199379444387758 2.0314348091429677 89.2 0.7599999999999909
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 155 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 264 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf9 2.3236298452294624 2.156907976575644 89.03999999999999 0.9200000000000017
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 159 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv fp16 11 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf10 2.3224369486241603 2.1560351277882046 89.03999999999999 0.9200000000000017
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 159 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv fp16 11 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf11 2.358467412507993 2.1904290636262784 89.02 0.9399999999999977
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 159 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 266 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 264 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf12 2.3633503986583126 2.1980949050120437 88.88000000000001 1.079999999999984
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 159 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 264 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf13 2.4903388172036043 2.3063593441573564 88.82 1.1400000000000006
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf14 2.508156996742662 2.3204109539869595 88.78 1.1799999999999926
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 156 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf15 2.4818531813049622 2.2910866330696744 88.75999999999999 1.2000000000000028
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 263 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf16 2.4591564896606 2.272664410995804 88.74 1.2199999999999989
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 263 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf17 2.5370582721089496 2.3464665753522405 88.72 1.2399999999999949
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf18 2.438100014978735 2.257620696759345 88.7 1.259999999999991
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 263 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf19 2.4776935382337006 2.2949598026093168 88.7 1.259999999999991
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf20 2.4380041604279596 2.254330054479329 88.68 1.279999999999987
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 157 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf21 2.4745444350223327 2.2883888475386525 88.64 1.3199999999999932
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf22 2.4136652022060625 2.2360545757445407 88.52 1.4399999999999977
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf23 2.510093966915115 2.316437144001897 88.52 1.4399999999999977
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 266 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf24 2.475990790728594 2.28127562431577 88.5 1.4599999999999937
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv fp16 11 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf25 2.4761929121466926 2.290365501363375 88.5 1.4599999999999937
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf26 2.4763575559033875 2.291312348847263 88.5 1.4599999999999937
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf27 2.600249602991055 2.4123747341424644 88.06 1.8999999999999915
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 165 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf28 2.596077615026303 2.4115375655840245 88.02 1.9399999999999977
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 166 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf29 2.580888020555937 2.3840829703999833 87.88 2.0799999999999983
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv samp_fp16 269 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf30 2.556352783745439 2.3641413704751537 87.8 2.1599999999999966
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv samp_fp16 269 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf31 2.5559756082494527 2.3677471703724575 87.78 2.1799999999999926
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 11 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf32 2.597413373332546 2.4091972878097585 87.76 2.1999999999999886
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 164 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf33 2.4797467027434656 2.2874608793842612 87.74 2.219999999999999
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf34 2.593675604602072 2.400513932866452 87.7 2.259999999999991
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 156 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 264 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf35 2.6300759173431336 2.432687374579977 87.62 2.339999999999989
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 266 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf36 2.5907083037103864 2.4042762580264356 87.6 2.3599999999999994
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 156 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf37 2.6143261650366187 2.423427684623993 87.6 2.3599999999999994
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 156 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf38 2.6144436259117203 2.4231961521843344 87.6 2.3599999999999994
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 156 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf39 2.662088796913144 2.4660859696742032 87.6 2.3599999999999994
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf40 2.6210428708834517 2.423389791646294 87.58 2.3799999999999955
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 156 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 265 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf41 2.6399924349243533 2.4443864221157914 87.58 2.3799999999999955
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 156 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf42 2.616443708384916 2.4217582570150697 87.58 2.3799999999999955
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf43 2.6883473596205225 2.5036952786284137 87.5 2.4599999999999937
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv perf_fp16 166 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 156 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf44 2.6117356623585875 2.420771216556161 87.48 2.4799999999999898
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 156 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf45 2.6359174040106708 2.444231592562593 87.48 2.4799999999999898
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf46 2.56504192294198 2.371871906722655 87.44 2.519999999999996
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv fp16 11 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf47 2.5652588453899727 2.3816996471861174 87.44 2.519999999999996
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf48 2.68806951500876 2.5007647690311425 87.14 2.819999999999993
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv perf_fp16 166 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv perf_fp16 156 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
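The result files added in this patch share one plain-text layout, readable off the data itself: a baseline run time on the first line, then configuration blocks delimited by +++++ and -----, each opened by a header line (configuration name followed by four metrics, which from the data appear to be speedup, energy reduction, accuracy, and accuracy loss) and followed by one line per tensor operation (layer index, target device, then op/precision-or-knob groups such as "conv perf_fp16 153"). A minimal parser sketch under those assumptions; the field names below are illustrative and not part of the runtime:

    # Sketch of a reader for these tuner output files. Header-field meanings
    # (speedup, energy reduction, accuracy, accuracy loss) are inferred from
    # the data above; treat them as assumptions.
    def parse_configs(path):
        configs = []
        with open(path) as f:
            baseline_time = float(f.readline())  # first line: baseline run time
            current = None
            for raw in f:
                line = raw.strip()
                if line == "+++++":        # opens a configuration block
                    current = None
                elif line == "-----":      # closes the current block
                    if current is not None:
                        configs.append(current)
                    current = None
                elif current is None:
                    # header: conf name + four metrics
                    name, speedup, energy, acc, acc_loss = line.split()
                    current = {
                        "name": name,
                        "speedup": float(speedup),
                        "energy_reduction": float(energy),
                        "accuracy": float(acc),
                        "accuracy_loss": float(acc_loss),
                        "layers": [],
                    }
                else:
                    # per-layer line: index, device, op/precision/knob tokens
                    current["layers"].append(line.split())
        return baseline_time, configs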
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/vgg16_cifar100/vgg16_cifar100.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/vgg16_cifar100/vgg16_cifar100.txt
new file mode 100644
index 0000000000..2c29bedd09
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/vgg16_cifar100/vgg16_cifar100.txt
@@ -0,0 +1,970 @@
+3768.819777999999
++++++
+conf1 1 1 66.5 0.0
+1 gpu conv fp32 11 add fp32 1 relu fp32 1
+2 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+3 gpu conv fp32 11 add fp32 1 relu fp32 1
+4 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+5 gpu conv fp32 11 add fp32 1 relu fp32 1
+6 gpu conv fp32 11 add fp32 1 relu fp32 1
+7 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+8 gpu conv fp32 11 add fp32 1 relu fp32 1
+9 gpu conv fp32 11 add fp32 1 relu fp32 1
+10 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+11 gpu conv fp32 11 add fp32 1 relu fp32 1
+12 gpu conv fp32 11 add fp32 1 relu fp32 1
+13 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+14 gpu mul fp32 11 add fp32 1 relu fp32 1
+15 gpu mul fp32 11 add fp32 1
+16 gpu softmax fp32 1
+-----
++++++
+conf2 2.2877724452131787 2.08025704453875 66.45 0.04999999999999716
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 269 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 268 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 162 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 266 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf3 2.5314658805383816 2.30737681453141 66.45 0.04999999999999716
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf4 2.044123178914057 1.8616966918258782 66.32000000000001 0.1799999999999926
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 168 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 268 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 11 add fp16 1 relu fp16 1
+13 gpu conv fp16 11 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf5 2.231179358259141 2.0317825813373864 66.18 0.3199999999999932
+1 gpu conv fp16 11 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 161 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv fp16 12 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 269 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 265 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf6 2.2474834421641057 2.0338639876373272 65.88000000000001 0.6199999999999903
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 268 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 267 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 265 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 267 add fp16 1 relu fp16 1
+13 gpu conv fp16 11 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf7 2.22281439516094 2.0205460706906377 65.88000000000001 0.6199999999999903
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 155 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 268 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 161 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 167 add fp16 1 relu fp16 1
+12 gpu conv perf_fp16 161 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf8 2.1625085012968484 1.94560449637282 65.88000000000001 0.6199999999999903
+1 gpu conv fp16 11 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 266 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv fp16 11 add fp16 1 relu fp16 1
+10 gpu conv fp16 11 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 264 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 263 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf9 2.639337323402163 2.3960416499256825 65.8 0.7000000000000028
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 269 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf10 2.672718090670276 2.4276905528801507 65.68 0.8199999999999932
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf11 2.699089631751789 2.446114054498494 65.68 0.8199999999999932
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf12 2.6003752638648767 2.3553067802112344 65.64 0.8599999999999994
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv fp16 11 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf13 2.638763904718665 2.395072565223988 65.64 0.8599999999999994
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 268 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf14 2.6003752638648767 2.3553067802112344 65.64 0.8599999999999994
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv fp16 11 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf15 2.6003752638648767 2.3553067802112344 65.64 0.8599999999999994
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv fp16 11 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf16 2.6732183804279006 2.4287517162140326 65.62 0.8799999999999955
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf17 2.6728394017929027 2.428768169588016 65.60000000000001 0.8999999999999915
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf18 2.4549989178389238 2.2406620346549433 65.56 0.9399999999999977
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 156 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf19 2.673556689244081 2.429092581627209 65.52 0.980000000000004
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf20 2.6525635304451756 2.406830663552284 65.5 1.0
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 263 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf21 2.6692288605087553 2.423462800937785 65.5 1.0
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf22 2.583650505571873 2.3471533059252194 65.48 1.019999999999996
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 263 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf23 2.6474572655420125 2.400471260394867 65.48 1.019999999999996
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 265 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf24 2.4710116424304736 2.2555966923178996 65.46 1.0400000000000063
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 161 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 266 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf25 2.557911102074785 2.3292661683311526 65.46 1.0400000000000063
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 156 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf26 2.6032957018479532 2.367574146141511 65.44 1.0600000000000023
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf27 2.6029968728098916 2.3672068592437223 65.44 1.0600000000000023
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 164 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf28 2.602540311129756 2.3691028781436954 65.44 1.0600000000000023
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 167 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf29 2.602756708588441 2.3708111025211718 65.44 1.0600000000000023
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 168 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf30 2.603240857443844 2.3662875785790183 65.44 1.0600000000000023
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 157 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf31 2.602882717372841 2.368011704225619 65.44 1.0600000000000023
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf32 2.67999343314603 2.4305182001043826 65.4 1.0999999999999943
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf33 2.670314990364046 2.4275308713267485 65.38000000000001 1.1199999999999903
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf34 2.650982630033638 2.405821467700663 65.36 1.1400000000000006
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 263 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf35 2.6507266317871756 2.405938171802741 65.36 1.1400000000000006
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 265 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf36 2.6523068534836174 2.406695716686769 65.34 1.1599999999999966
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 264 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf37 2.6533198495191073 2.4077689394073865 65.34 1.1599999999999966
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 264 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf38 2.64630900155657 2.4073892305914986 65.32 1.1800000000000068
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf39 2.6725522534379413 2.42903505877629 65.32 1.1800000000000068
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf40 2.6435249267602225 2.403536258709464 65.3 1.2000000000000028
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 161 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf41 2.6442059720503557 2.4037376163252024 65.3 1.2000000000000028
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf42 2.6536933126724027 2.4077527693156053 65.3 1.2000000000000028
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 264 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf43 2.6442798101298948 2.4056031584129225 65.3 1.2000000000000028
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf44 2.603921271336049 2.3665955131107683 65.28 1.2199999999999989
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 157 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf45 2.4967248028856828 2.2748997625822716 65.25999999999999 1.240000000000009
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 157 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf46 2.4963953691980665 2.2764932409573166 65.25999999999999 1.240000000000009
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf47 2.678944927989822 2.4251978482969956 65.24 1.2600000000000051
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 264 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf48 2.6727135417173904 2.428897140422096 65.22 1.2800000000000011
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf49 2.600256135586627 2.355428067042657 65.16 1.3400000000000034
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+12 gpu conv fp16 11 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf50 2.264460006128871 2.058037581586567 64.9 1.5999999999999943
+1 gpu conv fp16 11 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 155 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv perf_fp16 165 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv fp16 12 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 269 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 164 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 263 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 265 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf51 2.2817447204106736 2.0758846029697513 64.84 1.6599999999999966
+1 gpu conv fp16 11 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 155 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv perf_fp16 165 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv fp16 12 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 267 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 160 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 265 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
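Given such a parse, the natural use of these tables is picking the fastest configuration that stays within an accuracy-loss budget. A hedged sketch building on parse_configs above (best_config is a hypothetical helper for illustration, not part of the tuner):

    # Pick the highest-speedup configuration whose accuracy loss stays within
    # a user-supplied budget; returns None if no configuration qualifies.
    def best_config(configs, max_loss=1.0):
        eligible = [c for c in configs if c["accuracy_loss"] <= max_loss]
        return max(eligible, key=lambda c: c["speedup"]) if eligible else None

On the vgg16_cifar100 table above, best_config(configs, 1.0) would select conf11 (speedup ~2.699, loss 0.82), the fastest of the listed entries within a 1.0-point budget.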
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/vgg16_imagenet/vgg16_imagenet.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/vgg16_imagenet/vgg16_imagenet.txt
new file mode 100644
index 0000000000..108a101c81
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/dev_time/vgg16_imagenet/vgg16_imagenet.txt
@@ -0,0 +1,561 @@
+19194.623482
++++++
+conf1 1 1 72.84 0.0
+1 gpu conv fp32 11 add fp32 1 relu fp32 1
+2 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+3 gpu conv fp32 11 add fp32 1 relu fp32 1
+4 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+5 gpu conv fp32 11 add fp32 1 relu fp32 1
+6 gpu conv fp32 11 add fp32 1 relu fp32 1
+7 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+8 gpu conv fp32 11 add fp32 1 relu fp32 1
+9 gpu conv fp32 11 add fp32 1 relu fp32 1
+10 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+11 gpu conv fp32 11 add fp32 1 relu fp32 1
+12 gpu conv fp32 11 add fp32 1 relu fp32 1
+13 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+14 gpu mul fp32 11 add fp32 1 relu fp32 1
+15 gpu mul fp32 11 add fp32 1 relu fp32 1
+16 gpu mul fp32 11 add fp32 1
+17 gpu softmax fp32 1
+-----
++++++
+conf2 2.0787477568568082 1.7725701909562666 72.76 0.0799999999999983
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv fp16 12 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv fp16 12 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf3 2.2877881266029436 1.9268677640464096 72.04 0.7999999999999972
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv fp16 12 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf4 2.493698381711785 2.0336802939709626 72.02 0.8200000000000074
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 162 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv fp16 12 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf5 2.164723960411776 1.8442442134020163 71.94 0.9000000000000057
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf6 2.53794461743687 2.069640641367895 71.67999999999999 1.1600000000000108
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 162 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv fp16 12 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 156 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf7 1.7943268128686711 1.6103705347377417 71.58 1.2600000000000051
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv fp16 12 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv fp16 12 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 266 add fp16 1 relu fp16 1
+10 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv fp16 11 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf8 1.8143284638396158 1.6288620764171362 71.5 1.3400000000000034
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv fp16 12 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv fp16 12 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 266 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 162 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv fp16 11 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf9 2.5462742331906263 2.076061630349781 71.48 1.3599999999999994
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 167 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv fp16 12 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 156 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf10 2.526515422129153 2.063839193109964 71.39999999999999 1.440000000000012
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 162 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv fp16 11 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf11 2.1596661517243856 1.8351710968407349 71.34 1.5
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 268 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 156 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf12 2.3444383477958337 1.981259839350623 71.22 1.6200000000000045
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf13 1.8402020049200172 1.652343405000522 71.2 1.6400000000000006
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv fp16 12 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 266 add fp16 1 relu fp16 1
+10 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+13 gpu conv fp16 11 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf14 2.6420417968257306 2.167425635999969 71.12 1.7199999999999989
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 167 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 155 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf15 2.543198098440602 2.0805826545876145 71.1 1.740000000000009
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 162 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf16 2.6224991911009328 2.1476958232678807 70.89999999999999 1.940000000000012
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 167 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf17 2.5978010917593752 2.131515210392801 70.8 2.0400000000000063
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 162 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf18 2.623210258119482 2.156636511928761 70.76 2.0799999999999983
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv fp16 12 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf19 2.598187894495609 2.1322228990374104 70.76 2.0799999999999983
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 162 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf20 2.640464221374653 2.1682626030871295 70.76 2.0799999999999983
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 167 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf21 2.659563405662692 2.1881035849678936 70.54 2.299999999999997
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf22 2.636584103560761 2.1652496021557557 70.39999999999999 2.440000000000012
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 165 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf23 2.6315080449303547 2.161259580137757 70.38 2.460000000000008
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 162 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf24 2.7367939789033153 2.263326406058847 70.34 2.5
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 160 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf25 2.712182817327382 2.2404693918737233 70.24000000000001 2.5999999999999943
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 168 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf26 2.660510795888948 2.187299344706456 70.22 2.6200000000000045
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 159 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf27 2.457573203839654 2.0936930776435383 70.1 2.740000000000009
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv fp16 12 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
++++++
+conf28 2.7452293174567757 2.2593302388139347 69.92 2.9200000000000017
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 159 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 266 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv fp16 12 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/alexnet2_cifar10/alexnet2_cifar10.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/alexnet2_cifar10/alexnet2_cifar10.txt
new file mode 100644
index 0000000000..208f154e02
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/alexnet2_cifar10/alexnet2_cifar10.txt
@@ -0,0 +1,23 @@
+1114.3009809999999
++++++
+conf1 1 1 84.76 0.0
+1 gpu conv fp32 11 add fp32 1 tanh fp32 1
+2 gpu conv fp32 11 add fp32 1 tanh fp32 1 pool_max fp32 1
+3 gpu conv fp32 11 add fp32 1 tanh fp32 1
+4 gpu conv fp32 11 add fp32 1 tanh fp32 1 pool_max fp32 1
+5 gpu conv fp32 11 add fp32 1 tanh fp32 1
+6 gpu conv fp32 11 add fp32 1 tanh fp32 1 pool_max fp32 1
+7 gpu mul fp32 11 add fp32 1
+8 gpu softmax fp32 1
+-----
++++++
+conf2 1.678391931801309 1.4393008204786808 84.76 0.0
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1
+2 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1
+6 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+7 gpu mul fp16 12 add fp16 1
+8 gpu softmax fp32 1
+-----
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/alexnet_cifar10/alexnet_cifar10.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/alexnet_cifar10/alexnet_cifar10.txt
new file mode 100644
index 0000000000..eba22e3f01
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/alexnet_cifar10/alexnet_cifar10.txt
@@ -0,0 +1,421 @@
+2592.187221
++++++
+conf1 1 1 78.78 0.0
+1 gpu conv fp32 11 add fp32 1 tanh fp32 1 pool_max fp32 1
+2 gpu conv fp32 11 add fp32 1 tanh fp32 1 pool_max fp32 1
+3 gpu conv fp32 11 add fp32 1 tanh fp32 1
+4 gpu conv fp32 11 add fp32 1 tanh fp32 1
+5 gpu conv fp32 11 add fp32 1 tanh fp32 1 pool_max fp32 1
+6 gpu mul fp32 11 add fp32 1
+7 gpu softmax fp32 1
+-----
++++++
+conf2 1.7593976485873195 1.6193399031642917 78.78 0.0
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf3 2.081712090729918 1.9102226906341664 78.53999999999999 0.2400000000000091
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf4 2.081712090729918 1.9102226906341664 78.53999999999999 0.2400000000000091
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf5 2.2627828537139263 2.065683616898884 78.34 0.4399999999999977
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf6 2.3527290658539215 2.145832257234814 78.10000000000001 0.6799999999999926
+1 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf7 2.3527290658539215 2.145832257234814 78.10000000000001 0.6799999999999926
+1 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf8 2.3527290658539215 2.145832257234814 78.10000000000001 0.6799999999999926
+1 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf9 2.2247938983110425 2.060416584958474 77.98 0.7999999999999972
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf10 2.2247938983110425 2.060416584958474 77.98 0.7999999999999972
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf11 2.4370818494175888 2.250857540113024 77.98 0.7999999999999972
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf12 2.432854949808342 2.2424500615508003 77.9 0.8799999999999955
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf13 2.432854949808342 2.2424500615508003 77.9 0.8799999999999955
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf14 2.432854949808342 2.2424500615508003 77.9 0.8799999999999955
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf15 2.228328207535687 2.0675123320068267 77.82 0.960000000000008
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf16 2.228328207535687 2.0675123320068267 77.82 0.960000000000008
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf17 2.3417491169395532 2.1355030360671465 77.78 1.0
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf18 2.3417491169395532 2.1355030360671465 77.78 1.0
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf19 2.3417491169395532 2.1355030360671465 77.78 1.0
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv fp16 12 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf20 2.5243776633638846 2.324968713897418 77.78 1.0
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf21 2.5243776633638846 2.324968713897418 77.78 1.0
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf22 2.5243776633638846 2.324968713897418 77.78 1.0
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf23 2.5371416718362823 2.3372173527293847 77.56 1.2199999999999989
+1 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf24 2.5371416718362823 2.3372173527293847 77.56 1.2199999999999989
+1 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf25 2.472472828611022 2.286262888143739 77.48 1.2999999999999972
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf26 2.574475112841438 2.3637004022727544 77.4 1.3799999999999955
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 267 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf27 2.1200397577541747 1.951741010849448 77.3 1.480000000000004
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf28 2.1200397577541747 1.951741010849448 77.3 1.480000000000004
+1 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf29 2.5289288699015304 2.334007588396142 77.2 1.5799999999999983
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf30 2.5289288699015304 2.334007588396142 77.2 1.5799999999999983
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf31 2.5289288699015304 2.334007588396142 77.2 1.5799999999999983
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf32 2.541739061163583 2.3463519042470864 77.18 1.5999999999999943
+1 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf33 2.541739061163583 2.3463519042470864 77.18 1.5999999999999943
+1 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf34 2.580258965052788 2.3848508703934153 76.96 1.8200000000000074
+1 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf35 2.580258965052788 2.3848508703934153 76.96 1.8200000000000074
+1 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf36 2.4768386387310675 2.295002745725082 76.94 1.8400000000000034
+1 gpu conv samp_fp16 263 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+5 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf37 2.5713008246729716 2.3684101116633007 76.94 1.8400000000000034
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 269 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf38 2.5713008246729716 2.3684101116633007 76.94 1.8400000000000034
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 269 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf39 2.5670585645212847 2.3720992406158463 76.92 1.8599999999999994
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf40 2.5670585645212847 2.3720992406158463 76.92 1.8599999999999994
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1
+5 gpu conv samp_fp16 268 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf41 2.5760229577267673 2.3777906009584133 76.9 1.8799999999999955
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 269 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
++++++
+conf42 2.5760229577267673 2.3777906009584133 76.9 1.8799999999999955
+1 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 tanh fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 269 add fp16 1 tanh fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 tanh fp16 1
+5 gpu conv fp16 12 add fp16 1 tanh fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1
+7 gpu softmax fp32 1
+-----
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/alexnet_imagenet/alexnet_imagenet.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/alexnet_imagenet/alexnet_imagenet.txt
new file mode 100644
index 0000000000..8ae986b90c
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/alexnet_imagenet/alexnet_imagenet.txt
@@ -0,0 +1,289 @@
+2739.950736
++++++
+conf1 1 1 56.3 0.0
+1 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+2 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+3 gpu conv fp32 11 add fp32 1 relu fp32 1
+4 gpu conv fp32 11 add fp32 1 relu fp32 1
+5 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+6 gpu mul fp32 11 add fp32 1 relu fp32 1
+7 gpu mul fp32 11 add fp32 1 relu fp32 1
+8 gpu mul fp32 11 add fp32 1
+9 gpu softmax fp32 1
+-----
++++++
+conf2 1.802133644103582 1.8186433204507424 55.76 0.5399999999999991
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf3 2.0227701930718065 2.043112495268932 55.42 0.8799999999999955
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 166 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf4 1.8063132288735129 1.8239088223620996 54.96 1.3399999999999963
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf5 1.8063132288735129 1.8239088223620996 54.96 1.3399999999999963
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf6 1.8063132288735129 1.8239088223620996 54.96 1.3399999999999963
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf7 2.085011755614172 2.122606306624671 54.92 1.3799999999999955
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 159 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf8 2.085011755614172 2.122606306624671 54.92 1.3799999999999955
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 159 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf9 1.8052659214923805 1.8217111622759978 54.82 1.4799999999999969
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 164 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf10 2.0146435217865446 2.0367475358800102 54.58 1.7199999999999989
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 162 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf11 1.9101312060368951 1.9552389688678584 54.24 2.059999999999995
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 157 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf12 1.9101312060368951 1.9552389688678584 54.24 2.059999999999995
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 157 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf13 1.9101312060368951 1.9552389688678584 54.24 2.059999999999995
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 157 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf14 2.019868378233057 2.0433540129730265 54.17999999999999 2.1200000000000045
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 162 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf15 2.019868378233057 2.0433540129730265 54.17999999999999 2.1200000000000045
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 162 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf16 2.028037341700216 2.049760395549724 53.98 2.3200000000000003
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 166 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf17 2.028037341700216 2.049760395549724 53.98 2.3200000000000003
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 166 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf18 2.028037341700216 2.049760395549724 53.98 2.3200000000000003
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 166 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 163 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf19 1.8052659214923805 1.8217111622759978 53.879999999999995 2.4200000000000017
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 11 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 164 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf20 1.8052659214923805 1.8217111622759978 53.879999999999995 2.4200000000000017
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 11 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 164 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf21 2.0267172350289036 2.046985186681549 53.86 2.4399999999999977
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 166 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 164 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf22 2.0267172350289036 2.046985186681549 53.86 2.4399999999999977
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 166 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 164 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf23 2.0267172350289036 2.046985186681549 53.86 2.4399999999999977
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 166 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 164 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
++++++
+conf24 2.0185588815268836 2.0405961127674277 53.559999999999995 2.740000000000002
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu conv perf_fp16 162 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv perf_fp16 164 add fp16 1 relu fp16 1
+5 gpu conv perf_fp16 157 add fp16 1 relu fp16 1 pool_max fp16 1
+6 gpu mul fp16 12 add fp16 1 relu fp16 1
+7 gpu mul fp16 12 add fp16 1 relu fp16 1
+8 gpu mul fp16 12 add fp16 1
+9 gpu softmax fp32 1
+-----
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/lenet_keras/lenet_keras.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/lenet_keras/lenet_keras.txt
new file mode 100644
index 0000000000..da88f7cd26
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/lenet_keras/lenet_keras.txt
@@ -0,0 +1,409 @@
+282.5141369999999
++++++
+conf1 1 1 98.7 0.0
+1 gpu conv fp32 11 add fp32 1 pool_max fp32 1 tanh fp32 1
+2 gpu conv fp32 11 add fp32 1 pool_max fp32 1 tanh fp32 1
+3 gpu mul fp32 11 add fp32 1 tanh fp32 1
+4 gpu mul fp32 11 add fp32 1 tanh fp32 1
+5 gpu softmax fp32 1
+-----
++++++
+conf2 1.9343699741206566 2.1183040240042 98.68 0.01999999999999602
+1 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 265 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf3 1.9343699741206566 2.1183040240042 98.68 0.01999999999999602
+1 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 265 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf4 1.8936889628815377 2.139779619692146 98.68 0.01999999999999602
+1 gpu conv perf_fp16 152 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf5 1.8936889628815377 2.139779619692146 98.68 0.01999999999999602
+1 gpu conv perf_fp16 152 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf6 1.6415764141643088 1.8012120076077847 98.66 0.04000000000000625
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 265 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf7 1.9358279784215788 2.1233340385374495 98.66 0.04000000000000625
+1 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf8 1.9358279784215788 2.1233340385374495 98.66 0.04000000000000625
+1 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf9 1.6319327047042609 1.8046853367113418 98.64 0.060000000000002274
+1 gpu conv fp16 12 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 263 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf10 1.6319327047042609 1.8046853367113418 98.64 0.060000000000002274
+1 gpu conv fp16 12 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 263 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf11 1.6319327047042609 1.8046853367113418 98.64 0.060000000000002274
+1 gpu conv fp16 12 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 263 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf12 1.6319327047042609 1.8046853367113418 98.64 0.060000000000002274
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 263 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf13 1.6319327047042609 1.8046853367113418 98.64 0.060000000000002274
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 263 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf14 1.5602284338468988 1.7102497386784767 98.61999999999999 0.0800000000000125
+1 gpu conv fp16 12 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf15 1.5602284338468988 1.7102497386784767 98.61999999999999 0.0800000000000125
+1 gpu conv fp16 12 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf16 1.5602284338468988 1.7102497386784767 98.61999999999999 0.0800000000000125
+1 gpu conv fp16 12 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf17 1.8224050632690918 1.9936046569348063 98.61999999999999 0.0800000000000125
+1 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf18 1.8224050632690918 1.9936046569348063 98.61999999999999 0.0800000000000125
+1 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf19 1.8224050632690918 1.9936046569348063 98.61999999999999 0.0800000000000125
+1 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf20 2.2168527051833635 2.453341076720038 98.61999999999999 0.0800000000000125
+1 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf21 2.2168527051833635 2.453341076720038 98.61999999999999 0.0800000000000125
+1 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf22 1.9040998718547615 2.1501783570812565 98.61999999999999 0.0800000000000125
+1 gpu conv perf_fp16 151 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf23 1.9040998718547615 2.1501783570812565 98.61999999999999 0.0800000000000125
+1 gpu conv perf_fp16 151 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf24 1.5630416487818 1.7451546885860074 98.6 0.10000000000000853
+1 gpu conv perf_fp16 156 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf25 1.5630416487818 1.7451546885860074 98.6 0.10000000000000853
+1 gpu conv perf_fp16 156 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf26 1.8406161850501603 2.037849502542524 98.6 0.10000000000000853
+1 gpu conv fp16 12 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf27 1.8406161850501603 2.037849502542524 98.6 0.10000000000000853
+1 gpu conv fp16 12 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf28 1.8406161850501603 2.037849502542524 98.6 0.10000000000000853
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf29 1.8406161850501603 2.037849502542524 98.6 0.10000000000000853
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf30 2.1941568976363475 2.4445764373737644 98.6 0.10000000000000853
+1 gpu conv samp_fp16 269 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf31 2.1941568976363475 2.4445764373737644 98.6 0.10000000000000853
+1 gpu conv samp_fp16 269 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf32 1.5602284338468988 1.7102497386784767 98.58 0.12000000000000455
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf33 1.5602284338468988 1.7102497386784767 98.58 0.12000000000000455
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf34 1.5602284338468988 1.7102497386784767 98.58 0.12000000000000455
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 267 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf35 1.9209933607603906 2.123109543083542 98.58 0.12000000000000455
+1 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 263 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf36 1.9209933607603906 2.123109543083542 98.58 0.12000000000000455
+1 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 263 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf37 1.9209933607603906 2.123109543083542 98.58 0.12000000000000455
+1 gpu conv samp_fp16 264 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 263 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf38 1.8406161850501603 2.037849502542524 98.58 0.12000000000000455
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf39 1.8406161850501603 2.037849502542524 98.58 0.12000000000000455
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf40 1.8445326456180258 2.087601822059355 98.58 0.12000000000000455
+1 gpu conv perf_fp16 156 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf41 1.8445326456180258 2.087601822059355 98.58 0.12000000000000455
+1 gpu conv perf_fp16 156 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf42 1.8649226857257986 2.1076025277601325 98.56 0.14000000000000057
+1 gpu conv perf_fp16 168 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf43 1.8649226857257986 2.1076025277601325 98.56 0.14000000000000057
+1 gpu conv perf_fp16 168 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf44 1.8463058650555446 2.067271423078985 98.56 0.14000000000000057
+1 gpu conv perf_fp16 157 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf45 1.8463058650555446 2.067271423078985 98.56 0.14000000000000057
+1 gpu conv perf_fp16 157 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf46 1.9234076467497994 2.1864740913112275 98.56 0.14000000000000057
+1 gpu conv perf_fp16 153 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf47 1.9234076467497994 2.1864740913112275 98.56 0.14000000000000057
+1 gpu conv perf_fp16 153 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 262 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf48 1.6319327047042609 1.8046853367113418 98.54 0.1599999999999966
+1 gpu conv fp16 11 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 263 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf49 1.6350106933897723 1.8435952834193967 98.52 0.18000000000000682
+1 gpu conv perf_fp16 156 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 263 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf50 1.6350106933897723 1.8435952834193967 98.52 0.18000000000000682
+1 gpu conv perf_fp16 156 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 263 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
++++++
+conf51 1.6510114896409525 1.8591762752048948 98.48 0.21999999999999886
+1 gpu conv perf_fp16 168 add fp16 1 pool_max fp16 1 tanh fp16 1
+2 gpu conv samp_fp16 263 add fp16 1 pool_max fp16 1 tanh fp16 1
+3 gpu mul fp16 12 add fp16 1 tanh fp16 1
+4 gpu mul fp16 12 add fp16 1 tanh fp16 1
+5 gpu softmax fp32 1
+-----
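The per-network result files added above and below all share one plain-text layout: the first line records the fp32 baseline run time (units not stated in the files), and each candidate configuration is a block that opens with `+++++`, closes with `-----`, and is headed by a line of the form `conf<N> <speedup> <energy-improvement> <accuracy> <accuracy-loss>`. The baseline entry is always `conf1 1 1 <baseline accuracy> 0.0`, and the loss column is the drop from that baseline (e.g., in the mobilenet table below, 84.42 - 81.86 = 2.56 for conf2). Each remaining line maps one layer to a device and, per fused operation, a precision or approximation knob (`fp32`, `fp16`, `perf_fp16`, `samp_fp16`, ...) followed by its knob id. A minimal stand-alone parser sketch for this layout follows; the struct and field names are assumptions for illustration, not identifiers from the tensor runtime:

```cpp
// Illustrative parser for the result-file layout described above; names are
// hypothetical, not taken from the hpvm-tensor-rt sources.
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct ConfigBlock {
  std::string name;                 // e.g. "conf3"
  double speedup, energy;           // improvement ratios vs. the fp32 baseline
  double accuracy, loss;            // measured accuracy and drop from baseline
  std::vector<std::string> layers;  // raw per-layer knob lines
};

int main(int argc, char **argv) {
  if (argc < 2) return 1;
  std::ifstream in(argv[1]);
  std::string line;
  std::getline(in, line);                 // first line: fp32 baseline run time
  const double baseline = std::stod(line);

  std::vector<ConfigBlock> configs;
  while (std::getline(in, line)) {
    if (line != "+++++") continue;        // each block opens with +++++
    ConfigBlock cb;
    std::getline(in, line);               // header: name plus four numbers
    std::istringstream hdr(line);
    hdr >> cb.name >> cb.speedup >> cb.energy >> cb.accuracy >> cb.loss;
    while (std::getline(in, line) && line != "-----")
      cb.layers.push_back(line);          // per-layer knob lines until -----
    configs.push_back(cb);
  }
  std::cout << configs.size() << " configs; baseline " << baseline << "\n";
  return 0;
}
```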
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/mobilenet_cifar10/mobilenet_cifar10.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/mobilenet_cifar10/mobilenet_cifar10.txt
new file mode 100644
index 0000000000..93ca37c00a
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/mobilenet_cifar10/mobilenet_cifar10.txt
@@ -0,0 +1,871 @@
+4077.307063200001
++++++
+conf1 1 1 84.42 0.0
+1 gpu conv fp32 11
+2 gpu batchnorm fp32 11
+3 gpu relu fp32 11
+4 gpu group_conv fp32 11
+5 gpu batchnorm fp32 11
+6 gpu relu fp32 11
+7 gpu conv fp32 11
+8 gpu batchnorm fp32 11
+9 gpu relu fp32 11
+10 gpu group_conv fp32 11
+11 gpu batchnorm fp32 11
+12 gpu relu fp32 11
+13 gpu conv fp32 11
+14 gpu batchnorm fp32 11
+15 gpu relu fp32 11
+16 gpu group_conv fp32 11
+17 gpu batchnorm fp32 11
+18 gpu relu fp32 11
+19 gpu conv fp32 11
+20 gpu batchnorm fp32 11
+21 gpu relu fp32 11
+22 gpu group_conv fp32 11
+23 gpu batchnorm fp32 11
+24 gpu relu fp32 11
+25 gpu conv fp32 11
+26 gpu batchnorm fp32 11
+27 gpu relu fp32 11
+28 gpu group_conv fp32 11
+29 gpu batchnorm fp32 11
+30 gpu relu fp32 11
+31 gpu conv fp32 11
+32 gpu batchnorm fp32 11
+33 gpu relu fp32 11
+34 gpu group_conv fp32 11
+35 gpu batchnorm fp32 11
+36 gpu relu fp32 11
+37 gpu conv fp32 11
+38 gpu batchnorm fp32 11
+39 gpu relu fp32 11
+40 gpu group_conv fp32 11
+41 gpu batchnorm fp32 11
+42 gpu relu fp32 11
+43 gpu conv fp32 11
+44 gpu batchnorm fp32 11
+45 gpu relu fp32 11
+46 gpu group_conv fp32 11
+47 gpu batchnorm fp32 11
+48 gpu relu fp32 11
+49 gpu conv fp32 11
+50 gpu batchnorm fp32 11
+51 gpu relu fp32 11
+52 gpu group_conv fp32 11
+53 gpu batchnorm fp32 11
+54 gpu relu fp32 11
+55 gpu conv fp32 11
+56 gpu batchnorm fp32 11
+57 gpu relu fp32 11
+58 gpu group_conv fp32 11
+59 gpu batchnorm fp32 11
+60 gpu relu fp32 11
+61 gpu conv fp32 11
+62 gpu batchnorm fp32 11
+63 gpu relu fp32 11
+64 gpu group_conv fp32 11
+65 gpu batchnorm fp32 11
+66 gpu relu fp32 11
+67 gpu conv fp32 11
+68 gpu batchnorm fp32 11
+69 gpu relu fp32 11
+70 gpu group_conv fp32 11
+71 gpu batchnorm fp32 11
+72 gpu relu fp32 11
+73 gpu conv fp32 11
+74 gpu batchnorm fp32 11
+75 gpu relu fp32 11
+76 gpu group_conv fp32 11
+77 gpu batchnorm fp32 11
+78 gpu relu fp32 11
+79 gpu conv fp32 11
+80 gpu batchnorm fp32 11
+81 gpu relu fp32 11
+82 gpu pool_mean fp32 11
+83 gpu mul fp32 11 add fp32 1
+84 gpu softmax fp32 1
+-----
++++++
+conf2 1.504059255565631 1.4598468219902432 81.86 2.5600000000000023
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 161
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 152
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 152
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 152
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 152
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf3 1.5040783418076804 1.459845395800413 81.86 2.5600000000000023
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 161
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 152
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 152
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 152
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 152
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf4 1.5042737817275433 1.4598464522370567 81.74 2.680000000000007
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 161
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 152
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 155
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 152
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 152
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 152
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf5 1.5042737817275433 1.4598464522370567 81.74 2.680000000000007
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 161
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 152
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 155
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 152
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 152
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 152
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf6 1.5070383438802568 1.463241585164149 81.69999999999999 2.720000000000013
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 168
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 152
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 152
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 151
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 152
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 152
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf7 1.5070575058058588 1.463240152333617 81.58 2.8400000000000034
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 168
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 152
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 152
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 152
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 152
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf8 1.5039678813445672 1.4598454486222088 81.56 2.8599999999999994
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 161
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 152
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 152
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 152
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 152
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 153
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf9 1.5038655354281372 1.4599130636549171 81.46 2.960000000000008
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv perf_fp16 161
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv perf_fp16 152
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv perf_fp16 161
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv perf_fp16 155
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv perf_fp16 152
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv perf_fp16 152
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv perf_fp16 151
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv perf_fp16 151
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv perf_fp16 153
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
++++++
+conf10 1.4785375660713596 1.4280520288797043 84.42 0.0
+1 gpu conv fp16 12
+2 gpu batchnorm fp16 12
+3 gpu relu fp16 12
+4 gpu group_conv fp16 12
+5 gpu batchnorm fp16 12
+6 gpu relu fp16 12
+7 gpu conv fp16 12
+8 gpu batchnorm fp16 12
+9 gpu relu fp16 12
+10 gpu group_conv fp16 12
+11 gpu batchnorm fp16 12
+12 gpu relu fp16 12
+13 gpu conv fp16 12
+14 gpu batchnorm fp16 12
+15 gpu relu fp16 12
+16 gpu group_conv fp16 12
+17 gpu batchnorm fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12
+20 gpu batchnorm fp16 12
+21 gpu relu fp16 12
+22 gpu group_conv fp16 12
+23 gpu batchnorm fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu group_conv fp16 12
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12
+32 gpu batchnorm fp16 12
+33 gpu relu fp16 12
+34 gpu group_conv fp16 12
+35 gpu batchnorm fp16 12
+36 gpu relu fp16 12
+37 gpu conv fp16 12
+38 gpu batchnorm fp16 12
+39 gpu relu fp16 12
+40 gpu group_conv fp16 12
+41 gpu batchnorm fp16 12
+42 gpu relu fp16 12
+43 gpu conv fp16 12
+44 gpu batchnorm fp16 12
+45 gpu relu fp16 12
+46 gpu group_conv fp16 12
+47 gpu batchnorm fp16 12
+48 gpu relu fp16 12
+49 gpu conv fp16 12
+50 gpu batchnorm fp16 12
+51 gpu relu fp16 12
+52 gpu group_conv fp16 12
+53 gpu batchnorm fp16 12
+54 gpu relu fp16 12
+55 gpu conv fp16 12
+56 gpu batchnorm fp16 12
+57 gpu relu fp16 12
+58 gpu group_conv fp16 12
+59 gpu batchnorm fp16 12
+60 gpu relu fp16 12
+61 gpu conv fp16 12
+62 gpu batchnorm fp16 12
+63 gpu relu fp16 12
+64 gpu group_conv fp16 12
+65 gpu batchnorm fp16 12
+66 gpu relu fp16 12
+67 gpu conv fp16 12
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu group_conv fp16 12
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv fp16 12
+74 gpu batchnorm fp16 12
+75 gpu relu fp16 12
+76 gpu group_conv fp16 12
+77 gpu batchnorm fp16 12
+78 gpu relu fp16 12
+79 gpu conv fp16 12
+80 gpu batchnorm fp16 12
+81 gpu relu fp16 12
+82 gpu pool_mean fp16 12
+83 gpu mul fp16 12 add fp16 1
+84 gpu softmax fp32 1
+-----
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/resnet18_cifar10/resnet18_cifar10.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/resnet18_cifar10/resnet18_cifar10.txt
new file mode 100644
index 0000000000..d1d75a011e
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/resnet18_cifar10/resnet18_cifar10.txt
@@ -0,0 +1,91 @@
+2484.981244
++++++
+conf1 1 1 89.42 0.0
+1 gpu conv fp32 11 add fp32 1 relu fp32 1
+2 gpu conv fp32 11 add fp32 1 relu fp32 1
+3 gpu conv fp32 11 add fp32 1
+4 gpu add fp32 11
+5 gpu relu fp32 11
+6 gpu conv fp32 11 add fp32 1 relu fp32 1
+7 gpu conv fp32 11 add fp32 1
+8 gpu add fp32 11
+9 gpu relu fp32 11
+10 gpu conv fp32 11 add fp32 1 relu fp32 1
+11 gpu conv fp32 11 add fp32 1
+12 gpu add fp32 11
+13 gpu relu fp32 11
+14 gpu conv fp32 11 add fp32 1 relu fp32 1
+15 gpu conv fp32 11 add fp32 1
+16 gpu conv fp32 11 add fp32 1
+17 gpu add fp32 11
+18 gpu relu fp32 11
+19 gpu conv fp32 11 add fp32 1 relu fp32 1
+20 gpu conv fp32 11 add fp32 1
+21 gpu add fp32 11
+22 gpu relu fp32 11
+23 gpu conv fp32 11 add fp32 1 relu fp32 1
+24 gpu conv fp32 11 add fp32 1
+25 gpu add fp32 11
+26 gpu relu fp32 11
+27 gpu conv fp32 11 add fp32 1 relu fp32 1
+28 gpu conv fp32 11 add fp32 1
+29 gpu conv fp32 11 add fp32 1
+30 gpu add fp32 11
+31 gpu relu fp32 11
+32 gpu conv fp32 11 add fp32 1 relu fp32 1
+33 gpu conv fp32 11 add fp32 1
+34 gpu add fp32 11
+35 gpu relu fp32 11
+36 gpu conv fp32 11 add fp32 1 relu fp32 1
+37 gpu conv fp32 11 add fp32 1
+38 gpu add fp32 11
+39 gpu relu fp32 11
+40 gpu pool_mean fp32 11
+41 gpu mul fp32 11 add fp32 1
+42 gpu softmax fp32 1
+-----
++++++
+conf2 1.3617910209460897 1.3866827244386561 89.42 0.0
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1
+3 gpu conv fp16 12 add fp16 1
+4 gpu add fp16 12
+5 gpu relu fp16 12
+6 gpu conv fp16 12 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1
+8 gpu add fp16 12
+9 gpu relu fp16 12
+10 gpu conv fp16 12 add fp16 1 relu fp16 1
+11 gpu conv fp16 12 add fp16 1
+12 gpu add fp16 12
+13 gpu relu fp16 12
+14 gpu conv fp16 12 add fp16 1 relu fp16 1
+15 gpu conv fp16 12 add fp16 1
+16 gpu conv fp16 12 add fp16 1
+17 gpu add fp16 12
+18 gpu relu fp16 12
+19 gpu conv fp16 12 add fp16 1 relu fp16 1
+20 gpu conv fp16 12 add fp16 1
+21 gpu add fp16 12
+22 gpu relu fp16 12
+23 gpu conv fp16 12 add fp16 1 relu fp16 1
+24 gpu conv fp16 12 add fp16 1
+25 gpu add fp16 12
+26 gpu relu fp16 12
+27 gpu conv fp16 12 add fp16 1 relu fp16 1
+28 gpu conv fp16 12 add fp16 1
+29 gpu conv fp16 12 add fp16 1
+30 gpu add fp16 12
+31 gpu relu fp16 12
+32 gpu conv fp16 12 add fp16 1 relu fp16 1
+33 gpu conv fp16 12 add fp16 1
+34 gpu add fp16 12
+35 gpu relu fp16 12
+36 gpu conv fp16 12 add fp16 1 relu fp16 1
+37 gpu conv fp16 12 add fp16 1
+38 gpu add fp16 12
+39 gpu relu fp16 12
+40 gpu pool_mean fp16 12
+41 gpu mul fp16 12 add fp16 1
+42 gpu softmax fp32 1
+-----
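Tables like the one above enumerate points on a speedup / accuracy-loss frontier, and a consumer would pick one point given an accuracy budget. A hedged sketch of that selection step, reusing the block layout assumed in the parser sketch earlier (this illustrates how the tables could be used, not the actual hpvm-rt-controller policy):

```cpp
// Illustrative selection over parsed configuration blocks: take the fastest
// configuration whose measured accuracy loss fits a budget. Hypothetical
// helper, not the runtime controller's policy.
#include <vector>

struct ConfigBlock { double speedup, energy, accuracy, loss; };

const ConfigBlock *pickConfig(const std::vector<ConfigBlock> &configs,
                              double lossBudget) {
  const ConfigBlock *best = nullptr;
  for (const auto &cb : configs)
    if (cb.loss <= lossBudget && (!best || cb.speedup > best->speedup))
      best = &cb;
  return best;  // nullptr when no configuration fits the budget
}
```

Applied to the resnet18 table above with a 1% budget, this would return the uniform-fp16 conf2, which reports no accuracy loss at a ~1.36x speedup.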
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/resnet50_imagenet/resnet50_imagenet.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/resnet50_imagenet/resnet50_imagenet.txt
new file mode 100644
index 0000000000..a045011580
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/resnet50_imagenet/resnet50_imagenet.txt
@@ -0,0 +1,1233 @@
+7161.053769000008
++++++
+conf1 1 1 75.7 0.0
+1 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+2 gpu batchnorm fp32 11
+3 gpu conv fp32 11 add fp32 1
+4 gpu batchnorm fp32 11
+5 gpu relu fp32 11
+6 gpu conv fp32 11 add fp32 1
+7 gpu batchnorm fp32 11
+8 gpu relu fp32 11
+9 gpu conv fp32 11 add fp32 1
+10 gpu batchnorm fp32 11
+11 gpu conv fp32 11 add fp32 1
+12 gpu batchnorm fp32 11
+13 gpu add fp32 11
+14 gpu relu fp32 11
+15 gpu conv fp32 11 add fp32 1
+16 gpu batchnorm fp32 11
+17 gpu relu fp32 11
+18 gpu conv fp32 11 add fp32 1
+19 gpu batchnorm fp32 11
+20 gpu relu fp32 11
+21 gpu conv fp32 11 add fp32 1
+22 gpu batchnorm fp32 11
+23 gpu add fp32 11
+24 gpu relu fp32 11
+25 gpu conv fp32 11 add fp32 1
+26 gpu batchnorm fp32 11
+27 gpu relu fp32 11
+28 gpu conv fp32 11 add fp32 1
+29 gpu batchnorm fp32 11
+30 gpu relu fp32 11
+31 gpu conv fp32 11 add fp32 1
+32 gpu batchnorm fp32 11
+33 gpu add fp32 11
+34 gpu relu fp32 11
+35 gpu conv fp32 11 add fp32 1
+36 gpu batchnorm fp32 11
+37 gpu relu fp32 11
+38 gpu conv fp32 11 add fp32 1
+39 gpu batchnorm fp32 11
+40 gpu relu fp32 11
+41 gpu conv fp32 11 add fp32 1
+42 gpu batchnorm fp32 11
+43 gpu conv fp32 11 add fp32 1
+44 gpu batchnorm fp32 11
+45 gpu add fp32 11
+46 gpu relu fp32 11
+47 gpu conv fp32 11 add fp32 1
+48 gpu batchnorm fp32 11
+49 gpu relu fp32 11
+50 gpu conv fp32 11 add fp32 1
+51 gpu batchnorm fp32 11
+52 gpu relu fp32 11
+53 gpu conv fp32 11 add fp32 1
+54 gpu batchnorm fp32 11
+55 gpu add fp32 11
+56 gpu relu fp32 11
+57 gpu conv fp32 11 add fp32 1
+58 gpu batchnorm fp32 11
+59 gpu relu fp32 11
+60 gpu conv fp32 11 add fp32 1
+61 gpu batchnorm fp32 11
+62 gpu relu fp32 11
+63 gpu conv fp32 11 add fp32 1
+64 gpu batchnorm fp32 11
+65 gpu add fp32 11
+66 gpu relu fp32 11
+67 gpu conv fp32 11 add fp32 1
+68 gpu batchnorm fp32 11
+69 gpu relu fp32 11
+70 gpu conv fp32 11 add fp32 1
+71 gpu batchnorm fp32 11
+72 gpu relu fp32 11
+73 gpu conv fp32 11 add fp32 1
+74 gpu batchnorm fp32 11
+75 gpu add fp32 11
+76 gpu relu fp32 11
+77 gpu conv fp32 11 add fp32 1
+78 gpu batchnorm fp32 11
+79 gpu relu fp32 11
+80 gpu conv fp32 11 add fp32 1
+81 gpu batchnorm fp32 11
+82 gpu relu fp32 11
+83 gpu conv fp32 11 add fp32 1
+84 gpu batchnorm fp32 11
+85 gpu conv fp32 11 add fp32 1
+86 gpu batchnorm fp32 11
+87 gpu add fp32 11
+88 gpu relu fp32 11
+89 gpu conv fp32 11 add fp32 1
+90 gpu batchnorm fp32 11
+91 gpu relu fp32 11
+92 gpu conv fp32 11 add fp32 1
+93 gpu batchnorm fp32 11
+94 gpu relu fp32 11
+95 gpu conv fp32 11 add fp32 1
+96 gpu batchnorm fp32 11
+97 gpu add fp32 11
+98 gpu relu fp32 11
+99 gpu conv fp32 11 add fp32 1
+100 gpu batchnorm fp32 11
+101 gpu relu fp32 11
+102 gpu conv fp32 11 add fp32 1
+103 gpu batchnorm fp32 11
+104 gpu relu fp32 11
+105 gpu conv fp32 11 add fp32 1
+106 gpu batchnorm fp32 11
+107 gpu add fp32 11
+108 gpu relu fp32 11
+109 gpu conv fp32 11 add fp32 1
+110 gpu batchnorm fp32 11
+111 gpu relu fp32 11
+112 gpu conv fp32 11 add fp32 1
+113 gpu batchnorm fp32 11
+114 gpu relu fp32 11
+115 gpu conv fp32 11 add fp32 1
+116 gpu batchnorm fp32 11
+117 gpu add fp32 11
+118 gpu relu fp32 11
+119 gpu conv fp32 11 add fp32 1
+120 gpu batchnorm fp32 11
+121 gpu relu fp32 11
+122 gpu conv fp32 11 add fp32 1
+123 gpu batchnorm fp32 11
+124 gpu relu fp32 11
+125 gpu conv fp32 11 add fp32 1
+126 gpu batchnorm fp32 11
+127 gpu add fp32 11
+128 gpu relu fp32 11
+129 gpu conv fp32 11 add fp32 1
+130 gpu batchnorm fp32 11
+131 gpu relu fp32 11
+132 gpu conv fp32 11 add fp32 1
+133 gpu batchnorm fp32 11
+134 gpu relu fp32 11
+135 gpu conv fp32 11 add fp32 1
+136 gpu batchnorm fp32 11
+137 gpu add fp32 11
+138 gpu relu fp32 11
+139 gpu conv fp32 11 add fp32 1
+140 gpu batchnorm fp32 11
+141 gpu relu fp32 11
+142 gpu conv fp32 11 add fp32 1
+143 gpu batchnorm fp32 11
+144 gpu relu fp32 11
+145 gpu conv fp32 11 add fp32 1
+146 gpu batchnorm fp32 11
+147 gpu conv fp32 11 add fp32 1
+148 gpu batchnorm fp32 11
+149 gpu add fp32 11
+150 gpu relu fp32 11
+151 gpu conv fp32 11 add fp32 1
+152 gpu batchnorm fp32 11
+153 gpu relu fp32 11
+154 gpu conv fp32 11 add fp32 1
+155 gpu batchnorm fp32 11
+156 gpu relu fp32 11
+157 gpu conv fp32 11 add fp32 1
+158 gpu batchnorm fp32 11
+159 gpu add fp32 11
+160 gpu relu fp32 11
+161 gpu conv fp32 11 add fp32 1
+162 gpu batchnorm fp32 11
+163 gpu relu fp32 11
+164 gpu conv fp32 11 add fp32 1
+165 gpu batchnorm fp32 11
+166 gpu relu fp32 11
+167 gpu conv fp32 11 add fp32 1
+168 gpu batchnorm fp32 11
+169 gpu add fp32 11
+170 gpu relu fp32 11
+171 gpu pool_max fp32 11
+172 gpu mul fp32 11 add fp32 1
+173 gpu softmax fp32 1
+-----
++++++
+conf2 1.8254789092281507 1.4527803526239977 75.7 0.0
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu batchnorm fp16 12
+3 gpu conv fp16 12 add fp16 1
+4 gpu batchnorm fp16 12
+5 gpu relu fp16 12
+6 gpu conv fp16 12 add fp16 1
+7 gpu batchnorm fp16 12
+8 gpu relu fp16 12
+9 gpu conv fp16 12 add fp16 1
+10 gpu batchnorm fp16 12
+11 gpu conv fp16 12 add fp16 1
+12 gpu batchnorm fp16 12
+13 gpu add fp16 12
+14 gpu relu fp16 12
+15 gpu conv fp16 12 add fp16 1
+16 gpu batchnorm fp16 12
+17 gpu relu fp16 12
+18 gpu conv fp16 12 add fp16 1
+19 gpu batchnorm fp16 12
+20 gpu relu fp16 12
+21 gpu conv fp16 12 add fp16 1
+22 gpu batchnorm fp16 12
+23 gpu add fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12 add fp16 1
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu conv fp16 12 add fp16 1
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12 add fp16 1
+32 gpu batchnorm fp16 12
+33 gpu add fp16 12
+34 gpu relu fp16 12
+35 gpu conv fp16 12 add fp16 1
+36 gpu batchnorm fp16 12
+37 gpu relu fp16 12
+38 gpu conv fp16 12 add fp16 1
+39 gpu batchnorm fp16 12
+40 gpu relu fp16 12
+41 gpu conv fp16 12 add fp16 1
+42 gpu batchnorm fp16 12
+43 gpu conv fp16 12 add fp16 1
+44 gpu batchnorm fp16 12
+45 gpu add fp16 12
+46 gpu relu fp16 12
+47 gpu conv fp16 12 add fp16 1
+48 gpu batchnorm fp16 12
+49 gpu relu fp16 12
+50 gpu conv fp16 12 add fp16 1
+51 gpu batchnorm fp16 12
+52 gpu relu fp16 12
+53 gpu conv fp16 12 add fp16 1
+54 gpu batchnorm fp16 12
+55 gpu add fp16 12
+56 gpu relu fp16 12
+57 gpu conv fp16 12 add fp16 1
+58 gpu batchnorm fp16 12
+59 gpu relu fp16 12
+60 gpu conv fp16 12 add fp16 1
+61 gpu batchnorm fp16 12
+62 gpu relu fp16 12
+63 gpu conv fp16 12 add fp16 1
+64 gpu batchnorm fp16 12
+65 gpu add fp16 12
+66 gpu relu fp16 12
+67 gpu conv fp16 12 add fp16 1
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu conv fp16 12 add fp16 1
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv fp16 12 add fp16 1
+74 gpu batchnorm fp16 12
+75 gpu add fp16 12
+76 gpu relu fp16 12
+77 gpu conv fp16 12 add fp16 1
+78 gpu batchnorm fp16 12
+79 gpu relu fp16 12
+80 gpu conv fp16 12 add fp16 1
+81 gpu batchnorm fp16 12
+82 gpu relu fp16 12
+83 gpu conv fp16 12 add fp16 1
+84 gpu batchnorm fp16 12
+85 gpu conv fp16 12 add fp16 1
+86 gpu batchnorm fp16 12
+87 gpu add fp16 12
+88 gpu relu fp16 12
+89 gpu conv fp16 12 add fp16 1
+90 gpu batchnorm fp16 12
+91 gpu relu fp16 12
+92 gpu conv fp16 12 add fp16 1
+93 gpu batchnorm fp16 12
+94 gpu relu fp16 12
+95 gpu conv fp16 12 add fp16 1
+96 gpu batchnorm fp16 12
+97 gpu add fp16 12
+98 gpu relu fp16 12
+99 gpu conv fp16 12 add fp16 1
+100 gpu batchnorm fp16 12
+101 gpu relu fp16 12
+102 gpu conv fp16 12 add fp16 1
+103 gpu batchnorm fp16 12
+104 gpu relu fp16 12
+105 gpu conv fp16 12 add fp16 1
+106 gpu batchnorm fp16 12
+107 gpu add fp16 12
+108 gpu relu fp16 12
+109 gpu conv fp16 12 add fp16 1
+110 gpu batchnorm fp16 12
+111 gpu relu fp16 12
+112 gpu conv fp16 12 add fp16 1
+113 gpu batchnorm fp16 12
+114 gpu relu fp16 12
+115 gpu conv fp16 12 add fp16 1
+116 gpu batchnorm fp16 12
+117 gpu add fp16 12
+118 gpu relu fp16 12
+119 gpu conv fp16 12 add fp16 1
+120 gpu batchnorm fp16 12
+121 gpu relu fp16 12
+122 gpu conv fp16 12 add fp16 1
+123 gpu batchnorm fp16 12
+124 gpu relu fp16 12
+125 gpu conv fp16 12 add fp16 1
+126 gpu batchnorm fp16 12
+127 gpu add fp16 12
+128 gpu relu fp16 12
+129 gpu conv fp16 12 add fp16 1
+130 gpu batchnorm fp16 12
+131 gpu relu fp16 12
+132 gpu conv fp16 12 add fp16 1
+133 gpu batchnorm fp16 12
+134 gpu relu fp16 12
+135 gpu conv fp16 12 add fp16 1
+136 gpu batchnorm fp16 12
+137 gpu add fp16 12
+138 gpu relu fp16 12
+139 gpu conv fp16 12 add fp16 1
+140 gpu batchnorm fp16 12
+141 gpu relu fp16 12
+142 gpu conv fp16 12 add fp16 1
+143 gpu batchnorm fp16 12
+144 gpu relu fp16 12
+145 gpu conv fp16 12 add fp16 1
+146 gpu batchnorm fp16 12
+147 gpu conv fp16 12 add fp16 1
+148 gpu batchnorm fp16 12
+149 gpu add fp16 12
+150 gpu relu fp16 12
+151 gpu conv fp16 12 add fp16 1
+152 gpu batchnorm fp16 12
+153 gpu relu fp16 12
+154 gpu conv fp16 12 add fp16 1
+155 gpu batchnorm fp16 12
+156 gpu relu fp16 12
+157 gpu conv fp16 12 add fp16 1
+158 gpu batchnorm fp16 12
+159 gpu add fp16 12
+160 gpu relu fp16 12
+161 gpu conv fp16 12 add fp16 1
+162 gpu batchnorm fp16 12
+163 gpu relu fp16 12
+164 gpu conv fp16 12 add fp16 1
+165 gpu batchnorm fp16 12
+166 gpu relu fp16 12
+167 gpu conv fp16 12 add fp16 1
+168 gpu batchnorm fp16 12
+169 gpu add fp16 12
+170 gpu relu fp16 12
+171 gpu pool_max fp16 12
+172 gpu mul fp16 12 add fp16 1
+173 gpu softmax fp32 1
+-----
++++++
+conf3 1.8521749055745271 1.472492365706726 75.02 0.6800000000000068
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu batchnorm fp16 12
+3 gpu conv fp16 12 add fp16 1
+4 gpu batchnorm fp16 12
+5 gpu relu fp16 12
+6 gpu conv fp16 12 add fp16 1
+7 gpu batchnorm fp16 12
+8 gpu relu fp16 12
+9 gpu conv fp16 12 add fp16 1
+10 gpu batchnorm fp16 12
+11 gpu conv fp16 12 add fp16 1
+12 gpu batchnorm fp16 12
+13 gpu add fp16 12
+14 gpu relu fp16 12
+15 gpu conv fp16 12 add fp16 1
+16 gpu batchnorm fp16 12
+17 gpu relu fp16 12
+18 gpu conv fp16 12 add fp16 1
+19 gpu batchnorm fp16 12
+20 gpu relu fp16 12
+21 gpu conv fp16 12 add fp16 1
+22 gpu batchnorm fp16 12
+23 gpu add fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12 add fp16 1
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu conv perf_fp16 160 add fp16 1
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 11 add fp16 1
+32 gpu batchnorm fp16 12
+33 gpu add fp16 12
+34 gpu relu fp16 12
+35 gpu conv fp16 12 add fp16 1
+36 gpu batchnorm fp16 12
+37 gpu relu fp16 12
+38 gpu conv fp16 12 add fp16 1
+39 gpu batchnorm fp16 12
+40 gpu relu fp16 12
+41 gpu conv fp16 12 add fp16 1
+42 gpu batchnorm fp16 12
+43 gpu conv fp16 12 add fp16 1
+44 gpu batchnorm fp16 12
+45 gpu add fp16 12
+46 gpu relu fp16 12
+47 gpu conv fp16 12 add fp16 1
+48 gpu batchnorm fp16 12
+49 gpu relu fp16 12
+50 gpu conv fp16 12 add fp16 1
+51 gpu batchnorm fp16 12
+52 gpu relu fp16 12
+53 gpu conv fp16 12 add fp16 1
+54 gpu batchnorm fp16 12
+55 gpu add fp16 12
+56 gpu relu fp16 12
+57 gpu conv fp16 12 add fp16 1
+58 gpu batchnorm fp16 12
+59 gpu relu fp16 12
+60 gpu conv fp16 12 add fp16 1
+61 gpu batchnorm fp16 12
+62 gpu relu fp16 12
+63 gpu conv fp16 12 add fp16 1
+64 gpu batchnorm fp16 12
+65 gpu add fp16 12
+66 gpu relu fp16 12
+67 gpu conv fp16 12 add fp16 1
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu conv fp16 12 add fp16 1
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv fp16 12 add fp16 1
+74 gpu batchnorm fp16 12
+75 gpu add fp16 12
+76 gpu relu fp16 12
+77 gpu conv fp16 12 add fp16 1
+78 gpu batchnorm fp16 12
+79 gpu relu fp16 12
+80 gpu conv fp16 12 add fp16 1
+81 gpu batchnorm fp16 12
+82 gpu relu fp16 12
+83 gpu conv fp16 12 add fp16 1
+84 gpu batchnorm fp16 12
+85 gpu conv fp16 12 add fp16 1
+86 gpu batchnorm fp16 12
+87 gpu add fp16 12
+88 gpu relu fp16 12
+89 gpu conv fp16 12 add fp16 1
+90 gpu batchnorm fp16 12
+91 gpu relu fp16 12
+92 gpu conv fp16 12 add fp16 1
+93 gpu batchnorm fp16 12
+94 gpu relu fp16 12
+95 gpu conv fp16 11 add fp16 1
+96 gpu batchnorm fp16 12
+97 gpu add fp16 12
+98 gpu relu fp16 12
+99 gpu conv fp16 12 add fp16 1
+100 gpu batchnorm fp16 12
+101 gpu relu fp16 12
+102 gpu conv perf_fp16 164 add fp16 1
+103 gpu batchnorm fp16 12
+104 gpu relu fp16 12
+105 gpu conv fp16 12 add fp16 1
+106 gpu batchnorm fp16 12
+107 gpu add fp16 12
+108 gpu relu fp16 12
+109 gpu conv fp16 12 add fp16 1
+110 gpu batchnorm fp16 12
+111 gpu relu fp16 12
+112 gpu conv fp16 12 add fp16 1
+113 gpu batchnorm fp16 12
+114 gpu relu fp16 12
+115 gpu conv fp16 12 add fp16 1
+116 gpu batchnorm fp16 12
+117 gpu add fp16 12
+118 gpu relu fp16 12
+119 gpu conv fp16 12 add fp16 1
+120 gpu batchnorm fp16 12
+121 gpu relu fp16 12
+122 gpu conv fp16 12 add fp16 1
+123 gpu batchnorm fp16 12
+124 gpu relu fp16 12
+125 gpu conv fp16 12 add fp16 1
+126 gpu batchnorm fp16 12
+127 gpu add fp16 12
+128 gpu relu fp16 12
+129 gpu conv fp16 12 add fp16 1
+130 gpu batchnorm fp16 12
+131 gpu relu fp16 12
+132 gpu conv fp16 12 add fp16 1
+133 gpu batchnorm fp16 12
+134 gpu relu fp16 12
+135 gpu conv fp16 12 add fp16 1
+136 gpu batchnorm fp16 12
+137 gpu add fp16 12
+138 gpu relu fp16 12
+139 gpu conv fp16 12 add fp16 1
+140 gpu batchnorm fp16 12
+141 gpu relu fp16 12
+142 gpu conv fp16 12 add fp16 1
+143 gpu batchnorm fp16 12
+144 gpu relu fp16 12
+145 gpu conv fp16 12 add fp16 1
+146 gpu batchnorm fp16 12
+147 gpu conv fp16 12 add fp16 1
+148 gpu batchnorm fp16 12
+149 gpu add fp16 12
+150 gpu relu fp16 12
+151 gpu conv fp16 12 add fp16 1
+152 gpu batchnorm fp16 12
+153 gpu relu fp16 12
+154 gpu conv fp16 12 add fp16 1
+155 gpu batchnorm fp16 12
+156 gpu relu fp16 12
+157 gpu conv fp16 12 add fp16 1
+158 gpu batchnorm fp16 12
+159 gpu add fp16 12
+160 gpu relu fp16 12
+161 gpu conv fp16 12 add fp16 1
+162 gpu batchnorm fp16 12
+163 gpu relu fp16 12
+164 gpu conv fp16 12 add fp16 1
+165 gpu batchnorm fp16 12
+166 gpu relu fp16 12
+167 gpu conv fp16 12 add fp16 1
+168 gpu batchnorm fp16 12
+169 gpu add fp16 12
+170 gpu relu fp16 12
+171 gpu pool_max fp16 12
+172 gpu mul fp16 12 add fp16 1
+173 gpu softmax fp32 1
+-----
++++++
+conf4 1.8509087142956673 1.4713858340895483 74.68 1.019999999999996
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu batchnorm fp16 12
+3 gpu conv fp16 12 add fp16 1
+4 gpu batchnorm fp16 12
+5 gpu relu fp16 12
+6 gpu conv fp16 12 add fp16 1
+7 gpu batchnorm fp16 12
+8 gpu relu fp16 12
+9 gpu conv fp16 12 add fp16 1
+10 gpu batchnorm fp16 12
+11 gpu conv fp16 12 add fp16 1
+12 gpu batchnorm fp16 12
+13 gpu add fp16 12
+14 gpu relu fp16 12
+15 gpu conv fp16 12 add fp16 1
+16 gpu batchnorm fp16 12
+17 gpu relu fp16 12
+18 gpu conv fp16 12 add fp16 1
+19 gpu batchnorm fp16 12
+20 gpu relu fp16 12
+21 gpu conv fp16 12 add fp16 1
+22 gpu batchnorm fp16 12
+23 gpu add fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12 add fp16 1
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu conv perf_fp16 160 add fp16 1
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 12 add fp16 1
+32 gpu batchnorm fp16 12
+33 gpu add fp16 12
+34 gpu relu fp16 12
+35 gpu conv fp16 12 add fp16 1
+36 gpu batchnorm fp16 12
+37 gpu relu fp16 12
+38 gpu conv fp16 12 add fp16 1
+39 gpu batchnorm fp16 12
+40 gpu relu fp16 12
+41 gpu conv fp16 12 add fp16 1
+42 gpu batchnorm fp16 12
+43 gpu conv fp16 12 add fp16 1
+44 gpu batchnorm fp16 12
+45 gpu add fp16 12
+46 gpu relu fp16 12
+47 gpu conv fp16 12 add fp16 1
+48 gpu batchnorm fp16 12
+49 gpu relu fp16 12
+50 gpu conv fp16 12 add fp16 1
+51 gpu batchnorm fp16 12
+52 gpu relu fp16 12
+53 gpu conv fp16 12 add fp16 1
+54 gpu batchnorm fp16 12
+55 gpu add fp16 12
+56 gpu relu fp16 12
+57 gpu conv fp16 12 add fp16 1
+58 gpu batchnorm fp16 12
+59 gpu relu fp16 12
+60 gpu conv fp16 12 add fp16 1
+61 gpu batchnorm fp16 12
+62 gpu relu fp16 12
+63 gpu conv fp16 12 add fp16 1
+64 gpu batchnorm fp16 12
+65 gpu add fp16 12
+66 gpu relu fp16 12
+67 gpu conv fp16 12 add fp16 1
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu conv fp16 12 add fp16 1
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv fp16 12 add fp16 1
+74 gpu batchnorm fp16 12
+75 gpu add fp16 12
+76 gpu relu fp16 12
+77 gpu conv fp16 12 add fp16 1
+78 gpu batchnorm fp16 12
+79 gpu relu fp16 12
+80 gpu conv fp16 12 add fp16 1
+81 gpu batchnorm fp16 12
+82 gpu relu fp16 12
+83 gpu conv fp16 12 add fp16 1
+84 gpu batchnorm fp16 12
+85 gpu conv fp16 12 add fp16 1
+86 gpu batchnorm fp16 12
+87 gpu add fp16 12
+88 gpu relu fp16 12
+89 gpu conv fp16 12 add fp16 1
+90 gpu batchnorm fp16 12
+91 gpu relu fp16 12
+92 gpu conv fp16 12 add fp16 1
+93 gpu batchnorm fp16 12
+94 gpu relu fp16 12
+95 gpu conv fp16 12 add fp16 1
+96 gpu batchnorm fp16 12
+97 gpu add fp16 12
+98 gpu relu fp16 12
+99 gpu conv fp16 12 add fp16 1
+100 gpu batchnorm fp16 12
+101 gpu relu fp16 12
+102 gpu conv fp16 12 add fp16 1
+103 gpu batchnorm fp16 12
+104 gpu relu fp16 12
+105 gpu conv fp16 12 add fp16 1
+106 gpu batchnorm fp16 12
+107 gpu add fp16 12
+108 gpu relu fp16 12
+109 gpu conv fp16 12 add fp16 1
+110 gpu batchnorm fp16 12
+111 gpu relu fp16 12
+112 gpu conv fp16 12 add fp16 1
+113 gpu batchnorm fp16 12
+114 gpu relu fp16 12
+115 gpu conv fp16 12 add fp16 1
+116 gpu batchnorm fp16 12
+117 gpu add fp16 12
+118 gpu relu fp16 12
+119 gpu conv fp16 12 add fp16 1
+120 gpu batchnorm fp16 12
+121 gpu relu fp16 12
+122 gpu conv fp16 12 add fp16 1
+123 gpu batchnorm fp16 12
+124 gpu relu fp16 12
+125 gpu conv fp16 12 add fp16 1
+126 gpu batchnorm fp16 12
+127 gpu add fp16 12
+128 gpu relu fp16 12
+129 gpu conv fp16 12 add fp16 1
+130 gpu batchnorm fp16 12
+131 gpu relu fp16 12
+132 gpu conv fp16 12 add fp16 1
+133 gpu batchnorm fp16 12
+134 gpu relu fp16 12
+135 gpu conv fp16 12 add fp16 1
+136 gpu batchnorm fp16 12
+137 gpu add fp16 12
+138 gpu relu fp16 12
+139 gpu conv fp16 12 add fp16 1
+140 gpu batchnorm fp16 12
+141 gpu relu fp16 12
+142 gpu conv fp16 12 add fp16 1
+143 gpu batchnorm fp16 12
+144 gpu relu fp16 12
+145 gpu conv fp16 12 add fp16 1
+146 gpu batchnorm fp16 12
+147 gpu conv fp16 12 add fp16 1
+148 gpu batchnorm fp16 12
+149 gpu add fp16 12
+150 gpu relu fp16 12
+151 gpu conv fp16 12 add fp16 1
+152 gpu batchnorm fp16 12
+153 gpu relu fp16 12
+154 gpu conv fp16 12 add fp16 1
+155 gpu batchnorm fp16 12
+156 gpu relu fp16 12
+157 gpu conv fp16 12 add fp16 1
+158 gpu batchnorm fp16 12
+159 gpu add fp16 12
+160 gpu relu fp16 12
+161 gpu conv fp16 12 add fp16 1
+162 gpu batchnorm fp16 12
+163 gpu relu fp16 12
+164 gpu conv fp16 12 add fp16 1
+165 gpu batchnorm fp16 12
+166 gpu relu fp16 12
+167 gpu conv fp16 12 add fp16 1
+168 gpu batchnorm fp16 12
+169 gpu add fp16 12
+170 gpu relu fp16 12
+171 gpu pool_max fp16 12
+172 gpu mul fp16 12 add fp16 1
+173 gpu softmax fp32 1
+-----
++++++
+conf5 1.8538077719438253 1.4749308494814874 73.82 1.8800000000000097
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu batchnorm fp16 12
+3 gpu conv fp16 12 add fp16 1
+4 gpu batchnorm fp16 12
+5 gpu relu fp16 12
+6 gpu conv fp16 12 add fp16 1
+7 gpu batchnorm fp16 12
+8 gpu relu fp16 12
+9 gpu conv fp16 12 add fp16 1
+10 gpu batchnorm fp16 12
+11 gpu conv fp16 12 add fp16 1
+12 gpu batchnorm fp16 12
+13 gpu add fp16 12
+14 gpu relu fp16 12
+15 gpu conv fp16 12 add fp16 1
+16 gpu batchnorm fp16 12
+17 gpu relu fp16 12
+18 gpu conv fp16 12 add fp16 1
+19 gpu batchnorm fp16 12
+20 gpu relu fp16 12
+21 gpu conv fp16 12 add fp16 1
+22 gpu batchnorm fp16 12
+23 gpu add fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12 add fp16 1
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu conv perf_fp16 160 add fp16 1
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 11 add fp16 1
+32 gpu batchnorm fp16 12
+33 gpu add fp16 12
+34 gpu relu fp16 12
+35 gpu conv fp16 12 add fp16 1
+36 gpu batchnorm fp16 12
+37 gpu relu fp16 12
+38 gpu conv fp16 12 add fp16 1
+39 gpu batchnorm fp16 12
+40 gpu relu fp16 12
+41 gpu conv fp16 12 add fp16 1
+42 gpu batchnorm fp16 12
+43 gpu conv fp16 12 add fp16 1
+44 gpu batchnorm fp16 12
+45 gpu add fp16 12
+46 gpu relu fp16 12
+47 gpu conv fp16 12 add fp16 1
+48 gpu batchnorm fp16 12
+49 gpu relu fp16 12
+50 gpu conv fp16 12 add fp16 1
+51 gpu batchnorm fp16 12
+52 gpu relu fp16 12
+53 gpu conv perf_fp16 153 add fp16 1
+54 gpu batchnorm fp16 12
+55 gpu add fp16 12
+56 gpu relu fp16 12
+57 gpu conv fp16 12 add fp16 1
+58 gpu batchnorm fp16 12
+59 gpu relu fp16 12
+60 gpu conv fp16 12 add fp16 1
+61 gpu batchnorm fp16 12
+62 gpu relu fp16 12
+63 gpu conv fp16 12 add fp16 1
+64 gpu batchnorm fp16 12
+65 gpu add fp16 12
+66 gpu relu fp16 12
+67 gpu conv fp16 12 add fp16 1
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu conv fp16 12 add fp16 1
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv fp16 12 add fp16 1
+74 gpu batchnorm fp16 12
+75 gpu add fp16 12
+76 gpu relu fp16 12
+77 gpu conv fp16 12 add fp16 1
+78 gpu batchnorm fp16 12
+79 gpu relu fp16 12
+80 gpu conv fp16 12 add fp16 1
+81 gpu batchnorm fp16 12
+82 gpu relu fp16 12
+83 gpu conv fp16 12 add fp16 1
+84 gpu batchnorm fp16 12
+85 gpu conv fp16 12 add fp16 1
+86 gpu batchnorm fp16 12
+87 gpu add fp16 12
+88 gpu relu fp16 12
+89 gpu conv fp16 12 add fp16 1
+90 gpu batchnorm fp16 12
+91 gpu relu fp16 12
+92 gpu conv fp16 12 add fp16 1
+93 gpu batchnorm fp16 12
+94 gpu relu fp16 12
+95 gpu conv fp16 11 add fp16 1
+96 gpu batchnorm fp16 12
+97 gpu add fp16 12
+98 gpu relu fp16 12
+99 gpu conv fp16 12 add fp16 1
+100 gpu batchnorm fp16 12
+101 gpu relu fp16 12
+102 gpu conv perf_fp16 164 add fp16 1
+103 gpu batchnorm fp16 12
+104 gpu relu fp16 12
+105 gpu conv fp16 12 add fp16 1
+106 gpu batchnorm fp16 12
+107 gpu add fp16 12
+108 gpu relu fp16 12
+109 gpu conv fp16 12 add fp16 1
+110 gpu batchnorm fp16 12
+111 gpu relu fp16 12
+112 gpu conv fp16 12 add fp16 1
+113 gpu batchnorm fp16 12
+114 gpu relu fp16 12
+115 gpu conv fp16 12 add fp16 1
+116 gpu batchnorm fp16 12
+117 gpu add fp16 12
+118 gpu relu fp16 12
+119 gpu conv samp_fp16 268 add fp16 1
+120 gpu batchnorm fp16 12
+121 gpu relu fp16 12
+122 gpu conv fp16 12 add fp16 1
+123 gpu batchnorm fp16 12
+124 gpu relu fp16 12
+125 gpu conv fp16 12 add fp16 1
+126 gpu batchnorm fp16 12
+127 gpu add fp16 12
+128 gpu relu fp16 12
+129 gpu conv fp16 12 add fp16 1
+130 gpu batchnorm fp16 12
+131 gpu relu fp16 12
+132 gpu conv fp16 12 add fp16 1
+133 gpu batchnorm fp16 12
+134 gpu relu fp16 12
+135 gpu conv fp16 11 add fp16 1
+136 gpu batchnorm fp16 12
+137 gpu add fp16 12
+138 gpu relu fp16 12
+139 gpu conv fp16 12 add fp16 1
+140 gpu batchnorm fp16 12
+141 gpu relu fp16 12
+142 gpu conv fp16 12 add fp16 1
+143 gpu batchnorm fp16 12
+144 gpu relu fp16 12
+145 gpu conv fp16 12 add fp16 1
+146 gpu batchnorm fp16 12
+147 gpu conv fp16 12 add fp16 1
+148 gpu batchnorm fp16 12
+149 gpu add fp16 12
+150 gpu relu fp16 12
+151 gpu conv fp16 12 add fp16 1
+152 gpu batchnorm fp16 12
+153 gpu relu fp16 12
+154 gpu conv fp16 12 add fp16 1
+155 gpu batchnorm fp16 12
+156 gpu relu fp16 12
+157 gpu conv fp16 11 add fp16 1
+158 gpu batchnorm fp16 12
+159 gpu add fp16 12
+160 gpu relu fp16 12
+161 gpu conv fp16 12 add fp16 1
+162 gpu batchnorm fp16 12
+163 gpu relu fp16 12
+164 gpu conv fp16 12 add fp16 1
+165 gpu batchnorm fp16 12
+166 gpu relu fp16 12
+167 gpu conv fp16 12 add fp16 1
+168 gpu batchnorm fp16 12
+169 gpu add fp16 12
+170 gpu relu fp16 12
+171 gpu pool_max fp16 12
+172 gpu mul fp16 12 add fp16 1
+173 gpu softmax fp32 1
+-----
++++++
+conf6 1.8538077719438253 1.4749308494814874 73.7 2.0
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu batchnorm fp16 12
+3 gpu conv fp16 12 add fp16 1
+4 gpu batchnorm fp16 12
+5 gpu relu fp16 12
+6 gpu conv fp16 12 add fp16 1
+7 gpu batchnorm fp16 12
+8 gpu relu fp16 12
+9 gpu conv fp16 12 add fp16 1
+10 gpu batchnorm fp16 12
+11 gpu conv fp16 12 add fp16 1
+12 gpu batchnorm fp16 12
+13 gpu add fp16 12
+14 gpu relu fp16 12
+15 gpu conv fp16 12 add fp16 1
+16 gpu batchnorm fp16 12
+17 gpu relu fp16 12
+18 gpu conv fp16 12 add fp16 1
+19 gpu batchnorm fp16 12
+20 gpu relu fp16 12
+21 gpu conv fp16 12 add fp16 1
+22 gpu batchnorm fp16 12
+23 gpu add fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12 add fp16 1
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu conv perf_fp16 160 add fp16 1
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 11 add fp16 1
+32 gpu batchnorm fp16 12
+33 gpu add fp16 12
+34 gpu relu fp16 12
+35 gpu conv fp16 12 add fp16 1
+36 gpu batchnorm fp16 12
+37 gpu relu fp16 12
+38 gpu conv fp16 12 add fp16 1
+39 gpu batchnorm fp16 12
+40 gpu relu fp16 12
+41 gpu conv fp16 12 add fp16 1
+42 gpu batchnorm fp16 12
+43 gpu conv fp16 12 add fp16 1
+44 gpu batchnorm fp16 12
+45 gpu add fp16 12
+46 gpu relu fp16 12
+47 gpu conv fp16 12 add fp16 1
+48 gpu batchnorm fp16 12
+49 gpu relu fp16 12
+50 gpu conv fp16 12 add fp16 1
+51 gpu batchnorm fp16 12
+52 gpu relu fp16 12
+53 gpu conv perf_fp16 153 add fp16 1
+54 gpu batchnorm fp16 12
+55 gpu add fp16 12
+56 gpu relu fp16 12
+57 gpu conv fp16 12 add fp16 1
+58 gpu batchnorm fp16 12
+59 gpu relu fp16 12
+60 gpu conv fp16 12 add fp16 1
+61 gpu batchnorm fp16 12
+62 gpu relu fp16 12
+63 gpu conv fp16 12 add fp16 1
+64 gpu batchnorm fp16 12
+65 gpu add fp16 12
+66 gpu relu fp16 12
+67 gpu conv fp16 12 add fp16 1
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu conv fp16 12 add fp16 1
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv fp16 12 add fp16 1
+74 gpu batchnorm fp16 12
+75 gpu add fp16 12
+76 gpu relu fp16 12
+77 gpu conv fp16 12 add fp16 1
+78 gpu batchnorm fp16 12
+79 gpu relu fp16 12
+80 gpu conv fp16 12 add fp16 1
+81 gpu batchnorm fp16 12
+82 gpu relu fp16 12
+83 gpu conv fp16 12 add fp16 1
+84 gpu batchnorm fp16 12
+85 gpu conv fp16 12 add fp16 1
+86 gpu batchnorm fp16 12
+87 gpu add fp16 12
+88 gpu relu fp16 12
+89 gpu conv fp16 12 add fp16 1
+90 gpu batchnorm fp16 12
+91 gpu relu fp16 12
+92 gpu conv fp16 12 add fp16 1
+93 gpu batchnorm fp16 12
+94 gpu relu fp16 12
+95 gpu conv fp16 11 add fp16 1
+96 gpu batchnorm fp16 12
+97 gpu add fp16 12
+98 gpu relu fp16 12
+99 gpu conv fp16 12 add fp16 1
+100 gpu batchnorm fp16 12
+101 gpu relu fp16 12
+102 gpu conv perf_fp16 164 add fp16 1
+103 gpu batchnorm fp16 12
+104 gpu relu fp16 12
+105 gpu conv fp16 12 add fp16 1
+106 gpu batchnorm fp16 12
+107 gpu add fp16 12
+108 gpu relu fp16 12
+109 gpu conv fp16 12 add fp16 1
+110 gpu batchnorm fp16 12
+111 gpu relu fp16 12
+112 gpu conv fp16 12 add fp16 1
+113 gpu batchnorm fp16 12
+114 gpu relu fp16 12
+115 gpu conv fp16 12 add fp16 1
+116 gpu batchnorm fp16 12
+117 gpu add fp16 12
+118 gpu relu fp16 12
+119 gpu conv samp_fp16 268 add fp16 1
+120 gpu batchnorm fp16 12
+121 gpu relu fp16 12
+122 gpu conv fp16 12 add fp16 1
+123 gpu batchnorm fp16 12
+124 gpu relu fp16 12
+125 gpu conv fp16 12 add fp16 1
+126 gpu batchnorm fp16 12
+127 gpu add fp16 12
+128 gpu relu fp16 12
+129 gpu conv fp16 12 add fp16 1
+130 gpu batchnorm fp16 12
+131 gpu relu fp16 12
+132 gpu conv fp16 12 add fp16 1
+133 gpu batchnorm fp16 12
+134 gpu relu fp16 12
+135 gpu conv fp16 12 add fp16 1
+136 gpu batchnorm fp16 12
+137 gpu add fp16 12
+138 gpu relu fp16 12
+139 gpu conv fp16 12 add fp16 1
+140 gpu batchnorm fp16 12
+141 gpu relu fp16 12
+142 gpu conv fp16 12 add fp16 1
+143 gpu batchnorm fp16 12
+144 gpu relu fp16 12
+145 gpu conv fp16 12 add fp16 1
+146 gpu batchnorm fp16 12
+147 gpu conv fp16 12 add fp16 1
+148 gpu batchnorm fp16 12
+149 gpu add fp16 12
+150 gpu relu fp16 12
+151 gpu conv fp16 12 add fp16 1
+152 gpu batchnorm fp16 12
+153 gpu relu fp16 12
+154 gpu conv fp16 12 add fp16 1
+155 gpu batchnorm fp16 12
+156 gpu relu fp16 12
+157 gpu conv fp16 12 add fp16 1
+158 gpu batchnorm fp16 12
+159 gpu add fp16 12
+160 gpu relu fp16 12
+161 gpu conv fp16 12 add fp16 1
+162 gpu batchnorm fp16 12
+163 gpu relu fp16 12
+164 gpu conv fp16 12 add fp16 1
+165 gpu batchnorm fp16 12
+166 gpu relu fp16 12
+167 gpu conv fp16 12 add fp16 1
+168 gpu batchnorm fp16 12
+169 gpu add fp16 12
+170 gpu relu fp16 12
+171 gpu pool_max fp16 12
+172 gpu mul fp16 12 add fp16 1
+173 gpu softmax fp32 1
+-----
++++++
+conf7 1.8577902325643394 1.478552049679054 72.82 2.8800000000000097
+1 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+2 gpu batchnorm fp16 12
+3 gpu conv fp16 12 add fp16 1
+4 gpu batchnorm fp16 12
+5 gpu relu fp16 12
+6 gpu conv fp16 12 add fp16 1
+7 gpu batchnorm fp16 12
+8 gpu relu fp16 12
+9 gpu conv fp16 12 add fp16 1
+10 gpu batchnorm fp16 12
+11 gpu conv fp16 12 add fp16 1
+12 gpu batchnorm fp16 12
+13 gpu add fp16 12
+14 gpu relu fp16 12
+15 gpu conv fp16 12 add fp16 1
+16 gpu batchnorm fp16 12
+17 gpu relu fp16 12
+18 gpu conv fp16 12 add fp16 1
+19 gpu batchnorm fp16 12
+20 gpu relu fp16 12
+21 gpu conv fp16 12 add fp16 1
+22 gpu batchnorm fp16 12
+23 gpu add fp16 12
+24 gpu relu fp16 12
+25 gpu conv fp16 12 add fp16 1
+26 gpu batchnorm fp16 12
+27 gpu relu fp16 12
+28 gpu conv perf_fp16 160 add fp16 1
+29 gpu batchnorm fp16 12
+30 gpu relu fp16 12
+31 gpu conv fp16 11 add fp16 1
+32 gpu batchnorm fp16 12
+33 gpu add fp16 12
+34 gpu relu fp16 12
+35 gpu conv fp16 12 add fp16 1
+36 gpu batchnorm fp16 12
+37 gpu relu fp16 12
+38 gpu conv fp16 12 add fp16 1
+39 gpu batchnorm fp16 12
+40 gpu relu fp16 12
+41 gpu conv fp16 12 add fp16 1
+42 gpu batchnorm fp16 12
+43 gpu conv fp16 12 add fp16 1
+44 gpu batchnorm fp16 12
+45 gpu add fp16 12
+46 gpu relu fp16 12
+47 gpu conv fp16 12 add fp16 1
+48 gpu batchnorm fp16 12
+49 gpu relu fp16 12
+50 gpu conv fp16 12 add fp16 1
+51 gpu batchnorm fp16 12
+52 gpu relu fp16 12
+53 gpu conv fp16 11 add fp16 1
+54 gpu batchnorm fp16 12
+55 gpu add fp16 12
+56 gpu relu fp16 12
+57 gpu conv samp_fp16 268 add fp16 1
+58 gpu batchnorm fp16 12
+59 gpu relu fp16 12
+60 gpu conv fp16 12 add fp16 1
+61 gpu batchnorm fp16 12
+62 gpu relu fp16 12
+63 gpu conv fp16 12 add fp16 1
+64 gpu batchnorm fp16 12
+65 gpu add fp16 12
+66 gpu relu fp16 12
+67 gpu conv fp16 12 add fp16 1
+68 gpu batchnorm fp16 12
+69 gpu relu fp16 12
+70 gpu conv fp16 12 add fp16 1
+71 gpu batchnorm fp16 12
+72 gpu relu fp16 12
+73 gpu conv fp16 12 add fp16 1
+74 gpu batchnorm fp16 12
+75 gpu add fp16 12
+76 gpu relu fp16 12
+77 gpu conv fp16 12 add fp16 1
+78 gpu batchnorm fp16 12
+79 gpu relu fp16 12
+80 gpu conv fp16 12 add fp16 1
+81 gpu batchnorm fp16 12
+82 gpu relu fp16 12
+83 gpu conv fp16 12 add fp16 1
+84 gpu batchnorm fp16 12
+85 gpu conv fp16 12 add fp16 1
+86 gpu batchnorm fp16 12
+87 gpu add fp16 12
+88 gpu relu fp16 12
+89 gpu conv fp16 12 add fp16 1
+90 gpu batchnorm fp16 12
+91 gpu relu fp16 12
+92 gpu conv fp16 12 add fp16 1
+93 gpu batchnorm fp16 12
+94 gpu relu fp16 12
+95 gpu conv fp16 11 add fp16 1
+96 gpu batchnorm fp16 12
+97 gpu add fp16 12
+98 gpu relu fp16 12
+99 gpu conv fp16 12 add fp16 1
+100 gpu batchnorm fp16 12
+101 gpu relu fp16 12
+102 gpu conv perf_fp16 164 add fp16 1
+103 gpu batchnorm fp16 12
+104 gpu relu fp16 12
+105 gpu conv fp16 12 add fp16 1
+106 gpu batchnorm fp16 12
+107 gpu add fp16 12
+108 gpu relu fp16 12
+109 gpu conv fp16 12 add fp16 1
+110 gpu batchnorm fp16 12
+111 gpu relu fp16 12
+112 gpu conv fp16 12 add fp16 1
+113 gpu batchnorm fp16 12
+114 gpu relu fp16 12
+115 gpu conv fp16 12 add fp16 1
+116 gpu batchnorm fp16 12
+117 gpu add fp16 12
+118 gpu relu fp16 12
+119 gpu conv samp_fp16 268 add fp16 1
+120 gpu batchnorm fp16 12
+121 gpu relu fp16 12
+122 gpu conv fp16 12 add fp16 1
+123 gpu batchnorm fp16 12
+124 gpu relu fp16 12
+125 gpu conv fp16 12 add fp16 1
+126 gpu batchnorm fp16 12
+127 gpu add fp16 12
+128 gpu relu fp16 12
+129 gpu conv fp16 12 add fp16 1
+130 gpu batchnorm fp16 12
+131 gpu relu fp16 12
+132 gpu conv fp16 12 add fp16 1
+133 gpu batchnorm fp16 12
+134 gpu relu fp16 12
+135 gpu conv perf_fp16 158 add fp16 1
+136 gpu batchnorm fp16 12
+137 gpu add fp16 12
+138 gpu relu fp16 12
+139 gpu conv fp16 12 add fp16 1
+140 gpu batchnorm fp16 12
+141 gpu relu fp16 12
+142 gpu conv fp16 12 add fp16 1
+143 gpu batchnorm fp16 12
+144 gpu relu fp16 12
+145 gpu conv fp16 12 add fp16 1
+146 gpu batchnorm fp16 12
+147 gpu conv fp16 12 add fp16 1
+148 gpu batchnorm fp16 12
+149 gpu add fp16 12
+150 gpu relu fp16 12
+151 gpu conv fp16 12 add fp16 1
+152 gpu batchnorm fp16 12
+153 gpu relu fp16 12
+154 gpu conv fp16 12 add fp16 1
+155 gpu batchnorm fp16 12
+156 gpu relu fp16 12
+157 gpu conv fp16 11 add fp16 1
+158 gpu batchnorm fp16 12
+159 gpu add fp16 12
+160 gpu relu fp16 12
+161 gpu conv fp16 12 add fp16 1
+162 gpu batchnorm fp16 12
+163 gpu relu fp16 12
+164 gpu conv fp16 12 add fp16 1
+165 gpu batchnorm fp16 12
+166 gpu relu fp16 12
+167 gpu conv fp16 12 add fp16 1
+168 gpu batchnorm fp16 12
+169 gpu add fp16 12
+170 gpu relu fp16 12
+171 gpu pool_max fp16 12
+172 gpu mul fp16 12 add fp16 1
+173 gpu softmax fp32 1
+-----
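Note on the results files above: each follows the same plain-text layout. The first line is the baseline runtime; `+++++`/`-----` bracket one configuration; the `confN` header carries five fields (name, speedup, energy ratio, accuracy, accuracy loss vs. baseline, matching the order readConfigurationFile parses them in below); and each numbered line assigns one tensor op a hardware target, its fused operations, and approximation-knob IDs (e.g. `fp16 12`, `perf_fp16 15x`, `samp_fp16 26x`, whose exact meanings are defined by the runtime's perf/samp parameter sets). A minimal sketch of parsing just the header line -- the struct and function names here are hypothetical, not part of the runtime:

    #include <sstream>
    #include <string>

    struct ConfHeader {
      std::string name;  // e.g. "conf7"
      float speedup, energy, accuracy, accuracyLoss;
    };

    // Parses "conf7 1.8577 1.4785 72.82 2.88" into its five fields.
    ConfHeader parseConfHeader(const std::string &line) {
      std::istringstream in(line);
      ConfHeader h;
      in >> h.name >> h.speedup >> h.energy >> h.accuracy >> h.accuracyLoss;
      return h;
    }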
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/vgg16_cifar10/vgg16_cifar10.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/vgg16_cifar10/vgg16_cifar10.txt
new file mode 100644
index 0000000000..f4e185f358
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/vgg16_cifar10/vgg16_cifar10.txt
@@ -0,0 +1,58 @@
+3776.508929999999
++++++
+conf1 1 1 89.96 0.0
+1 gpu conv fp32 11 add fp32 1 relu fp32 1
+2 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+3 gpu conv fp32 11 add fp32 1 relu fp32 1
+4 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+5 gpu conv fp32 11 add fp32 1 relu fp32 1
+6 gpu conv fp32 11 add fp32 1 relu fp32 1
+7 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+8 gpu conv fp32 11 add fp32 1 relu fp32 1
+9 gpu conv fp32 11 add fp32 1 relu fp32 1
+10 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+11 gpu conv fp32 11 add fp32 1 relu fp32 1
+12 gpu conv fp32 11 add fp32 1 relu fp32 1
+13 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+14 gpu mul fp32 11 add fp32 1 relu fp32 1
+15 gpu mul fp32 11 add fp32 1
+16 gpu softmax fp32 1
+-----
++++++
+conf2 2.4192803184847484 2.2393153800931898 89.22 0.7399999999999949
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 266 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 152 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+12 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf3 2.1240075032467187 1.9749367321301132 88.64 1.3199999999999932
+1 gpu conv fp16 11 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 167 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 152 add fp16 1 relu fp16 1
+9 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+10 gpu conv fp16 11 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+12 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 269 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/vgg16_cifar100/vgg16_cifar100.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/vgg16_cifar100/vgg16_cifar100.txt
new file mode 100644
index 0000000000..b55bb668b1
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/vgg16_cifar100/vgg16_cifar100.txt
@@ -0,0 +1,77 @@
+3768.819777999999
++++++
+conf1 1 1 66.5 0.0
+1 gpu conv fp32 11 add fp32 1 relu fp32 1
+2 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+3 gpu conv fp32 11 add fp32 1 relu fp32 1
+4 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+5 gpu conv fp32 11 add fp32 1 relu fp32 1
+6 gpu conv fp32 11 add fp32 1 relu fp32 1
+7 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+8 gpu conv fp32 11 add fp32 1 relu fp32 1
+9 gpu conv fp32 11 add fp32 1 relu fp32 1
+10 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+11 gpu conv fp32 11 add fp32 1 relu fp32 1
+12 gpu conv fp32 11 add fp32 1 relu fp32 1
+13 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+14 gpu mul fp32 11 add fp32 1 relu fp32 1
+15 gpu mul fp32 11 add fp32 1
+16 gpu softmax fp32 1
+-----
++++++
+conf2 2.2793321208062913 2.0502797911533945 66.42 0.0799999999999983
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 269 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 268 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv fp16 12 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 267 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 268 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+12 gpu conv fp16 11 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf3 2.2793321208062913 2.0502797911533945 66.42 0.0799999999999983
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 269 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 268 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv fp16 12 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 267 add fp16 1 relu fp16 1
+10 gpu conv samp_fp16 268 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+12 gpu conv fp16 11 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
++++++
+conf4 2.664296720624579 2.427276363573644 64.7 1.7999999999999972
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv perf_fp16 153 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+4 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+7 gpu conv samp_fp16 261 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv perf_fp16 155 add fp16 1 relu fp16 1
+9 gpu conv samp_fp16 262 add fp16 1 relu fp16 1
+10 gpu conv perf_fp16 151 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv perf_fp16 151 add fp16 1 relu fp16 1
+12 gpu conv samp_fp16 261 add fp16 1 relu fp16 1
+13 gpu conv samp_fp16 262 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1
+16 gpu softmax fp32 1
+-----
diff --git a/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/vgg16_imagenet/vgg16_imagenet.txt b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/vgg16_imagenet/vgg16_imagenet.txt
new file mode 100644
index 0000000000..d0a23ffb10
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/PPoPP_results/soc_sim_results/emp_time/vgg16_imagenet/vgg16_imagenet.txt
@@ -0,0 +1,41 @@
+19194.623482
++++++
+conf1 1 1 72.84 0.0
+1 gpu conv fp32 11 add fp32 1 relu fp32 1
+2 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+3 gpu conv fp32 11 add fp32 1 relu fp32 1
+4 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+5 gpu conv fp32 11 add fp32 1 relu fp32 1
+6 gpu conv fp32 11 add fp32 1 relu fp32 1
+7 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+8 gpu conv fp32 11 add fp32 1 relu fp32 1
+9 gpu conv fp32 11 add fp32 1 relu fp32 1
+10 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+11 gpu conv fp32 11 add fp32 1 relu fp32 1
+12 gpu conv fp32 11 add fp32 1 relu fp32 1
+13 gpu conv fp32 11 add fp32 1 relu fp32 1 pool_max fp32 1
+14 gpu mul fp32 11 add fp32 1 relu fp32 1
+15 gpu mul fp32 11 add fp32 1 relu fp32 1
+16 gpu mul fp32 11 add fp32 1
+17 gpu softmax fp32 1
+-----
++++++
+conf2 1.7719381411481732 1.5850925672384186 72.84 0.0
+1 gpu conv fp16 12 add fp16 1 relu fp16 1
+2 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+3 gpu conv fp16 12 add fp16 1 relu fp16 1
+4 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+5 gpu conv fp16 12 add fp16 1 relu fp16 1
+6 gpu conv fp16 12 add fp16 1 relu fp16 1
+7 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+8 gpu conv fp16 12 add fp16 1 relu fp16 1
+9 gpu conv fp16 12 add fp16 1 relu fp16 1
+10 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+11 gpu conv fp16 12 add fp16 1 relu fp16 1
+12 gpu conv fp16 12 add fp16 1 relu fp16 1
+13 gpu conv fp16 12 add fp16 1 relu fp16 1 pool_max fp16 1
+14 gpu mul fp16 12 add fp16 1 relu fp16 1
+15 gpu mul fp16 12 add fp16 1 relu fp16 1
+16 gpu mul fp16 12 add fp16 1
+17 gpu softmax fp32 1
+-----
diff --git a/llvm/projects/hpvm-tensor-rt/lib/tensor_runtime.ll b/llvm/projects/hpvm-tensor-rt/lib/tensor_runtime.ll
index 0a2273cfdd..42325406bd 100644
--- a/llvm/projects/hpvm-tensor-rt/lib/tensor_runtime.ll
+++ b/llvm/projects/hpvm-tensor-rt/lib/tensor_runtime.ll
@@ -65,8 +65,8 @@ entry:
   %tensorAddErrorPtr = alloca i8*, align 8
   %ConvLayer = alloca i8*, align 8
   %FCLayer = alloca i8*, align 8
+  %ConvLayer_ = alloca i8*, align 8
   %ConvLayer2 = alloca i8*, align 8
-  %ConvLayer_wrapper = alloca i8*, align 8
   %FCLayer2 = alloca i8*, align 8
   %AddWrapper = alloca i8*, align 8
   %ReluWrapper = alloca i8*, align 8
@@ -82,6 +82,7 @@ entry:
   %tensorMap3 = alloca i8*, align 8
   %tensorStencil = alloca i8*, align 8
   %tensorCosineT = alloca i8*, align 8
+  %tensorNodeID = alloca i8*, align 8
   store i8* bitcast (void (i32)* @llvm_hpvm_initTensorRt to i8*), i8** %initRT, align 8
   store i8* bitcast (void ()* @llvm_hpvm_cleanupTensorRt to i8*), i8** %cleanRT, align 8
   store i8* bitcast (void (i32)* @llvm_hpvm_initApproxhpvmRt to i8*), i8** %initApproxRT, align 8
@@ -119,8 +120,8 @@ entry:
   store i8* bitcast (i8* (i8*, i32)* @tensorAddError to i8*), i8** %tensorAddErrorPtr, align 8
   store i8* bitcast (i8* (i8*, float, float, i8*, float, float, i8*, float, float, i32, i32, i32, i32, i32, i32, i32, float, float, i32)* @ConvLayer_PROMISE to i8*), i8** %ConvLayer, align 8
   store i8* bitcast (i8* (i8*, float, float, i8*, float, float, i8*, float, float, i32, float, float, i32)* @FCLayer_PROMISE to i8*), i8** %FCLayer, align 8
-  store i8* bitcast (i8* (i8*, i8*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, float, float)* @wrapper_ConvLayer to i8*), i8** %ConvLayer2, align 8
-  store i8* bitcast (i8* (i8*, i8*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float)* @wrapper_ConvLayer2 to i8*), i8** %ConvLayer_wrapper, align 8
+  store i8* bitcast (i8* (i8*, i8*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, float, float)* @wrapper_ConvLayer to i8*), i8** %ConvLayer_, align 8
+  store i8* bitcast (i8* (i8*, i8*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float)* @wrapper_ConvLayer2 to i8*), i8** %ConvLayer2, align 8
   store i8* bitcast (i8* (i8*, i8*, i8*, i8*, i32, float, float)* @wrapper_FCLayer to i8*), i8** %FCLayer2, align 8
   store i8* bitcast (i8* (i8*, i8*, i8*)* @wrapper_tensorAdd to i8*), i8** %AddWrapper, align 8
   store i8* bitcast (i8* (i8*, i8*)* @wrapper_tensorRelu to i8*), i8** %ReluWrapper, align 8
@@ -136,6 +137,7 @@ entry:
   store i8* bitcast (i8* (i8*, i32, i8*, i8*, i8*)* @wrapper_tensorMap3 to i8*), i8** %tensorMap3, align 8
   store i8* bitcast (i8* (i8*, i8*)* @wrapper_tensorStencil to i8*), i8** %tensorStencil, align 8
   store i8* bitcast (i8* (i8*, i8*)* @wrapper_tensorCosineT to i8*), i8** %tensorCosineT, align 8
+  store i8* bitcast (i8* (i32)* @tensor_set_node_id to i8*), i8** %tensorNodeID, align 8
   ret void
 }
 
@@ -247,6 +249,8 @@ declare i8* @wrapper_tensorStencil(i8*, i8*) #1
 
 declare i8* @wrapper_tensorCosineT(i8*, i8*) #1
 
+declare i8* @tensor_set_node_id(i32) #1
+
 ; Function Attrs: noinline uwtable
 define internal void @_GLOBAL__sub_I_tensor_signatures.cc() #0 section ".text.startup" {
 entry:
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/configuration.h b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/configuration.h
index d27f463e78..ca2bcd9666 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/configuration.h
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/configuration.h
@@ -167,8 +167,7 @@ public:
 // - energy
 // - accuracy (compared to golden output)
 // - accuracy loss (compared to baseline)
-// - a hardware choice and set or operations-approximation choices, described in
-// setup
+// - a hardware choice and a set of operation-approximation choices, described in setup
 struct Configuration {
   std::string name;
   float speedup;
@@ -176,6 +175,8 @@ struct Configuration {
   float accuracy;
   float accuracyLoss;
   std::map<std::string, NodeConfiguration *> setup;
+  // Maps visc.node.id values to the approximation configuration of each HPVM (fused) node
+  std::map<int, NodeConfiguration *> idConfigMap;
 
   Configuration(std::string &n, float f, float e, float a, float al);
 
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/global_data.h b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/global_data.h
index c91c5b9cc3..f859b83e94 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/global_data.h
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/global_data.h
@@ -58,4 +58,6 @@ extern std::string profile_data;
 extern PerfParamSet *perfParamSet;
 extern SampParamSet *sampParamSet;
 
+extern unsigned int currentTensorID;
+
 #endif
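The new `idConfigMap` member in `Configuration` and the `currentTensorID` global above are two halves of one lookup path: when a node carries a `visc.node.id`, the runtime resolves its approximation choices by numeric ID instead of by node name. A minimal lookup sketch under that assumption -- the helper below is illustrative only and assumes configuration.h is included:

    // Hypothetical helper: prefer the ID-based map once an ID has been set.
    NodeConfiguration *resolve(Configuration &conf, const char *name,
                               unsigned int id, bool idIsSet) {
      return idIsSet ? conf.idConfigMap.at(id)
                     : conf.setup.at(std::string(name));
    }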
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_runtime.h b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_runtime.h
index abd89cc1ad..b6d7f862fa 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_runtime.h
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_runtime.h
@@ -168,6 +168,22 @@ void *wrapper_ConvLayer(const char *hpvm_node_id, void *input, void *filter,
                         int activation_id, // Relu, Tanh, ClipRelu
                         float out_min, float out_max);
 
+
+void *wrapper_ConvLayer2(const char *hpvm_node_id, void *input, void *filter,
+                         void *bias, int conv_pad_h, int conv_pad_w,
+                         int conv_stride_h, int conv_stride_w, int pool_id,
+                         int pool_size_v, int pool_size_h,
+                         int pool_pad_v, int pool_pad_h,
+                         int pool_stride_v, int pool_stride_h,
+                         int activation_id, // Relu, Tanh, ClipRelu
+                         // NOTE: out_min, out_max are only relevant for ClippedRelu
+                         float out_min, float out_max);
+
 void *wrapper_FCLayer(const char *hpvm_node_id, void *input, void *weights,
                       void *bias, int activation_id, float out_min,
                       float out_max);
@@ -197,6 +213,11 @@ void *wrapper_tensorPooling(const char *hpvm_node_id, void *input_ptr,
 
 void *wrapper_tensorSoftmax(const char *hpvm_node_id, void *input_ptr);
 
+
+void *tensor_set_node_id(unsigned int node_id);
+
 // Utilities
 // TODO: separate utils in separate header
 void dumpAccuracyNorms();
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_signatures.cc b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_signatures.cc
index 19c385e27a..9c4cf97908 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_signatures.cc
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_signatures.cc
@@ -45,7 +45,9 @@ void dummyFunction() {
   void *ConvLayer = (void *)&ConvLayer_PROMISE;
   void *FCLayer = (void *)&FCLayer_PROMISE;
 
-  void *ConvLayer2 = (void *)&wrapper_ConvLayer;
+  void *ConvLayer_ = (void *)&wrapper_ConvLayer;
+  void *ConvLayer2 = (void *)&wrapper_ConvLayer2;
+
   void *FCLayer2 = (void *)&wrapper_FCLayer;
   void *AddWrapper = (void *)&wrapper_tensorAdd;
   void *ReluWrapper = (void *)&wrapper_tensorRelu;
@@ -62,4 +64,6 @@ void dummyFunction() {
   void *tensorMap3 = (void *)&wrapper_tensorMap3;
   void *tensorStencil = (void *)&wrapper_tensorStencil;
   void *tensorCosineT = (void *)&wrapper_tensorCosineT;
+
+  void *tensorNodeID = (void*) &tensor_set_node_id;
 }
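`dummyFunction` exists only to take the address of every runtime entry point so the linker cannot dead-strip those symbols; the changed lines re-bind `wrapper_ConvLayer` under `ConvLayer_` and register the new `wrapper_ConvLayer2` and `tensor_set_node_id` entry points. The same idiom in isolation, as a sketch (the declaration is assumed to come from tensor_runtime.h):

    extern void *tensor_set_node_id(unsigned int node_id);

    // Taking the function's address from reachable code keeps the symbol alive.
    void keepSymbolsAlive() {
      void *anchor = (void *)&tensor_set_node_id;
      (void)anchor; // silence unused-variable warnings
    }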
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/global_data.cc b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/global_data.cc
index 61e37ed9a3..4902043b7c 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/global_data.cc
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/global_data.cc
@@ -46,3 +46,6 @@ std::string profile_data = "";
 
 PerfParamSet *perfParamSet;
 SampParamSet *sampParamSet;
+
+unsigned int currentTensorID = -1; // wraps to UINT_MAX; sentinel for "no visc.node.id set"
+
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/hpvm-rt-controller.cpp b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/hpvm-rt-controller.cpp
index 2dcbf9dcef..339c0ebd2f 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/hpvm-rt-controller.cpp
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/hpvm-rt-controller.cpp
@@ -416,9 +416,18 @@ std::vector<float> &RuntimeController::getQuantizationRanges(const char *data) {
 }
 
 NodeConfiguration *RuntimeController::getNodeConfiguration(const char *data) {
-  std::string s(data);
-  // All nodes are expected to have a configuration
-  return (*Configurations)[configurationIdx]->setup.at(s);
+
+  // If no visc.node.id was specified for this HPVM node, fall back to name lookup
+  if (currentTensorID == (unsigned int)-1) {
+    std::string s(data);
+    // All nodes are expected to have a configuration
+    return (*Configurations)[configurationIdx]->setup.at(s);
+  } else {
+    DEBUG("-- currentTensorID = %u \n", currentTensorID);
+    return (*Configurations)[configurationIdx]->idConfigMap.at(currentTensorID);
+  }
 
 void RuntimeController::init(const char *Cstr, const char *Qstr) {
@@ -428,6 +437,8 @@ void RuntimeController::init(const char *Cstr, const char *Qstr) {
 
   readQuantizationFile(Qstr);
   readConfigurationFile(Cstr);
+
+  // NOTE: Configurations points at the Pareto-optimal subset; InitialConfigurations holds the full list read from the config file
   Configurations = NULL;
   computeParetoConfigurationPoints();
   //    compute3DParetoConfigurationPoints(); Not using 3D curve
@@ -726,7 +737,9 @@ void RuntimeController::readConfigurationFile(const char *str) {
   catch(...){
     ERROR("Please Add/Fix Baseline Time at Top of Config File.. ");
   }
+
   
+  unsigned int firstTensorID = 1; // visc.node.id of the first tensor op in the node being read
   for (std::string line; std::getline(qin, line);) {
     DEBUG("line: %s\n", line.c_str());
 
@@ -758,6 +771,8 @@ void RuntimeController::readConfigurationFile(const char *str) {
     if (readingFirstLine) {
       // Read first line, to create the new configuration struct
       readingFirstLine = false;
+      firstTensorID = 1; // reset first tensor ID for new config
+      
       InitialConfigurations.push_back(Configuration(
           tokens[0], std::stof(tokens[1]), std::stof(tokens[2]),
           std::stof(tokens[3]), std::stof(tokens[4])));
@@ -800,6 +815,12 @@ void RuntimeController::readConfigurationFile(const char *str) {
       InitialConfigurations.back().setup.insert(
           std::make_pair(tokens[0], NodeConf));
 
+      // Update the map from visc.node.id values to NodeConfigurations
+      // FIXME: Do the same for CPU and PROMISE configs
+      InitialConfigurations.back().idConfigMap.insert(
+          std::make_pair(firstTensorID, NodeConf));
+      DEBUG("*** firstTensorID = %u \n\n", firstTensorID);
+
       unsigned idx = 2;
       while (idx < tokens.size()) {
         if (tokens[idx] == "add") {
@@ -946,6 +967,9 @@ void RuntimeController::readConfigurationFile(const char *str) {
         // TODO: other approximation options handled here
       }
 
+      // Advance firstTensorID past the tensor ops of the current node
+      firstTensorID += NodeConf->getApproxChoices().size();
+
     } else if (tokens[1] == "cpu") {
       DEBUG("Found gpu configuration\n");
 
@@ -1313,6 +1337,7 @@ void RuntimeController::findTargetConfiguration(
   std::vector<struct Configuration *>::iterator low_it;
   switch (sk) {
   case SPEEDUP: {
+    // Point the 'Configurations' class attribute at the speedup Pareto set
     Configurations = &SpeedupConfigurations;
     low_it = std::lower_bound(
         Configurations->begin(), Configurations->end() - 1, goal,
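Putting the controller changes together: the generated host code is expected to call `tensor_set_node_id()` with the ID of a node's first tensor op before invoking that node's wrapper, so that `getNodeConfiguration()` takes the `idConfigMap` path. A sketch of the intended sequence, assuming a controller instance `RC` as used elsewhere in this file:

    tensor_set_node_id(5);  // tensor ops of this fused node start at op #5
    NodeConfiguration *nc =
        RC->getNodeConfiguration("some_node");  // name ignored; idConfigMap.at(5) is used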
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/legacy/approx_techniques_back.cu b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/legacy/approx_techniques_back.cu
new file mode 100644
index 0000000000..25432c4e32
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/legacy/approx_techniques_back.cu
@@ -0,0 +1,862 @@
+
+
+#include "tensor_utils.h"
+#include "fp16_gemm.h"
+#include "debug.h"
+#include "global_data.h"
+#include "profiling.h"
+
+
+extern "C"{
+
+
+
+__global__
+void depthwise_conv(float* const __restrict__ y,
+                    const float* const __restrict__ x,
+                    const float* const __restrict__ w,
+                    const int B, const int M,
+                    const int H, const int W, const int KH,
+                    const int KW, const int H_out, const int W_out,
+                    const int H_pad, const int W_pad,
+                    const int H_stride, const int W_stride, const int start_batch)
+{
+
+#define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
+#define x4d(i3, i2, i1, i0) x[(i3) * (M * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
+
+  const int num = 1;
+
+  const int b = num * blockIdx.x + start_batch;
+  const int m = blockIdx.y; // current filter/channel
+
+  const int tx = threadIdx.x;
+
+  const int start_h = (threadIdx.x / W_out) * H_stride - H_pad;
+  const int start_w = (threadIdx.x % W_out) * W_stride - W_pad;
+
+  float C[num] = { 0 };
+
+  const float* weights = &w[m * KH * KW];
+
+  for (int k = 0; k < KH * KW; k++) {
+    int p = k / KW;
+    int q = k % KW;
+
+#pragma unroll
+    for (int i = 0; i < num; i++) {
+      if (start_h + p > -1 && start_h + p < H &&
+          start_w + q > -1 && start_w + q < W) {
+        C[i] += x4d(b + i, m, start_h + p, start_w + q) * weights[k];
+      }
+    }
+  }
+
+#pragma unroll
+  for (int i = 0; i < num; i++) {
+    if (b + i < B)
+      y4d(b + i, m, 0, tx) = C[i];
+  }
+
+#undef y4d
+#undef x4d
+}
+
+
+__global__
+void depthwise_convNew(float* const __restrict__ y,
+                       const float* const __restrict__ x,
+                       const float* const __restrict__ w,
+                       const int B, const int M,
+                       const int H, const int W, const int KH,
+                       const int KW, const int H_out, const int W_out,
+                       const int H_pad, const int W_pad,
+                       const int H_stride, const int W_stride)
+{
+
+#define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
+#define x4d(i3, i2, i1, i0) x[(i3) * (M * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
+
+  const int num = 12;
+
+  const int b = num * blockIdx.x;
+  const int m = (blockIdx.y * blockDim.x + threadIdx.x) / (H_out * W_out);
+
+  const int tx = (blockIdx.y * blockDim.x + threadIdx.x) % (H_out * W_out);
+
+  const int start_h = (tx / W_out) * H_stride - H_pad;
+  const int start_w = (tx % W_out) * W_stride - W_pad;
+
+  float C[num] = { 0 };
+
+  const float* weights = &w[m * KH * KW];
+
+  for (int k = 0; k < KH * KW; k++) {
+    int p = k / KW;
+    int q = k % KW;
+
+    if (start_h + p > -1 && start_h + p < H &&
+        start_w + q > -1 && start_w + q < W) {
+
+#pragma unroll
+      for (int i = 0; i < num; i++) {
+        if (b + i < B)
+          C[i] += x4d(b + i, m, start_h + p, start_w + q) * weights[k];
+      }
+    }
+  }
+
+#pragma unroll
+  for (int i = 0; i < num; i++) {
+    if (b + i < B)
+      y4d(b + i, m, 0, tx) = C[i];
+  }
+
+#undef y4d
+#undef x4d
+}
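Both float kernels derive the channel as m = (blockIdx.y * blockDim.x + threadIdx.x) / (H_out * W_out), so a plausible host-side launch (an assumption -- the real launch site is elsewhere in the runtime, and the variable names below are taken from the kernel signature) tiles batches along grid.x and output positions along grid.y. Note that depthwise_convNew has no m < M guard, so the y-dimension must not overshoot M * H_out * W_out:

    const int num = 12;      // batch images per x-block
    const int threads = 128; // assumed to divide M * H_out * W_out exactly
    dim3 grid((B + num - 1) / num, (M * H_out * W_out) / threads);
    depthwise_convNew<<<grid, threads>>>(y, x, w, B, M, H, W, KH, KW,
                                         H_out, W_out, H_pad, W_pad,
                                         H_stride, W_stride);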
+
+
+
+
+__global__ void depthwise_convNew8_half(__half* const __restrict__ y,
+					const __half* const __restrict__ x,
+					const __half* const __restrict__ w,
+					const int B, const int M,
+					const int H, const int W, const int KH,
+					const int KW, const int H_out, const int W_out,
+					const int H_pad, const int W_pad,
+					const int H_stride, const int W_stride)
+{
+
+  #define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
+  #define x4d(i3, i2, i1, i0) x[(i3) * (M * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
+
+  const int num = 8;
+
+  const int b = num * blockIdx.x;
+  const int m = (blockIdx.y * blockDim.x  + threadIdx.x)/ (H_out * W_out);
+	
+  if(m < M){
+    const int tx = (blockIdx.y * blockDim.x  + threadIdx.x) % (H_out * W_out);
+
+    const int start_h = (tx / W_out) * H_stride - H_pad;
+    const int start_w = (tx % W_out) * W_stride - W_pad;
+
+    __half c0 = 0;
+    __half c1 = 0;
+    __half c2 = 0;
+    __half c3 = 0;
+    __half c4 = 0;
+    __half c5 = 0;
+    __half c6 = 0;
+    __half c7 = 0;
+	
+    const __half* weights = &w[m * KH * KW];
+
+    for (int k = 0; k < KH * KW; k++) {
+      int p = k / KW;
+      int q = k % KW;
+
+      if (start_h + p > -1 && start_h + p < H &&
+	  start_w + q > -1 && start_w + q < W) {
+
+	c0 = __hfma(x4d(b, m, start_h + p, start_w + q), weights[k], c0);
+	if(b + 1 < B)
+	  c1 = __hfma(x4d(b + 1, m, start_h + p, start_w + q), weights[k], c1);
+	if(b + 2 < B)
+	  c2 = __hfma(x4d(b + 2, m, start_h + p, start_w + q), weights[k], c2);
+	if(b + 3 < B)
+	  c3 = __hfma(x4d(b + 3, m, start_h + p, start_w + q), weights[k], c3);
+	if(b + 4 < B)
+	  c4 = __hfma(x4d(b + 4, m, start_h + p, start_w + q), weights[k], c4);
+	if(b + 5 < B)
+	  c5 = __hfma(x4d(b + 5, m, start_h + p, start_w + q), weights[k], c5);
+	if(b + 6 < B)
+	  c6 = __hfma(x4d(b + 6, m, start_h + p, start_w + q), weights[k], c6);
+	if(b + 7 < B)
+	  c7 = __hfma(x4d(b + 7, m, start_h + p, start_w + q), weights[k], c7);
+    
+
+      }
+    }
+
+    y4d(b, m, 0, tx) = c0;	
+    if(b + 1 < B)
+      y4d(b + 1, m, 0, tx) = c1;
+    if(b + 2 < B)
+      y4d(b + 2, m, 0, tx) = c2;
+    if(b + 3 < B)
+      y4d(b + 3, m, 0, tx) = c3;
+    if(b + 4 < B)
+      y4d(b + 4, m, 0, tx) = c4;
+    if(b + 5 < B)
+      y4d(b + 5, m, 0, tx) = c5;
+    if(b + 6 < B)
+      y4d(b + 6, m, 0, tx) = c6;
+    if(b + 7 < B)
+      y4d(b + 7, m, 0, tx) = c7;
+  }
+	
+  #undef y4d 
+  #undef x4d
+}
+
+__global__ void depthwise_convNew8_half1(__half* const __restrict__ y,
+					const __half* const __restrict__ x,
+					const __half* const __restrict__ w,
+					const int B, const int M,
+					const int H, const int W, const int KH,
+					const int KW, const int H_out, const int W_out,
+					const int H_pad, const int W_pad,
+					const int H_stride, const int W_stride)
+{
+
+  #define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
+  #define x4d(i3, i2, i1, i0) x[(i3) * (M * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
+
+  const int num = 8;
+
+  const int b = num * blockIdx.x;
+  const int m = (blockIdx.y * blockDim.x  + threadIdx.x)/ (H_out * W_out);
+	
+  if(m < M){
+    const int tx = (blockIdx.y * blockDim.x  + threadIdx.x) % (H_out * W_out);
+
+    const int start_h = (tx / W_out) * H_stride - H_pad;
+    const int start_w = (tx % W_out) * W_stride - W_pad;
+
+    __half c0 = 0;
+    __half c1 = 0;
+    __half c2 = 0;
+    __half c3 = 0;
+    __half c4 = 0;
+    __half c5 = 0;
+    __half c6 = 0;
+    __half c7 = 0;
+	
+    const __half* weights = &w[m * KH * KW];
+
+    for (int k = 0; k < KH * KW; k++) {
+      int p = k / KW;
+      int q = k % KW;
+
+      if (start_h + p > -1 && start_h + p < H &&
+	  start_w + q > -1 && start_w + q < W) {
+
+	c0 = __hfma(x4d(b, m, start_h + p, start_w + q), weights[k], c0);
+      }
+    }
+
+    if(b + 1 < B){
+      for (int k = 0; k < KH * KW; k++) {
+	int p = k / KW;
+	int q = k % KW;
+
+	if (start_h + p > -1 && start_h + p < H &&
+	    start_w + q > -1 && start_w + q < W) {
+
+	  c1 = __hfma(x4d(b + 1, m, start_h + p, start_w + q), weights[k], c1);
+	}
+      }
+    }
+
+    if(b + 2 < B){
+      for (int k = 0; k < KH * KW; k++) {
+	int p = k / KW;
+	int q = k % KW;
+
+	if (start_h + p > -1 && start_h + p < H &&
+	    start_w + q > -1 && start_w + q < W) {
+
+	  c2 = __hfma(x4d(b + 2, m, start_h + p, start_w + q), weights[k], c2);
+	}
+      }
+    }
+
+    if(b + 3 < B){
+      for (int k = 0; k < KH * KW; k++) {
+	int p = k / KW;
+	int q = k % KW;
+
+	if (start_h + p > -1 && start_h + p < H &&
+	    start_w + q > -1 && start_w + q < W) {
+
+	  c3 = __hfma(x4d(b + 3, m, start_h + p, start_w + q), weights[k], c3);
+	}
+      }
+    }
+
+    if(b + 4 < B){
+      for (int k = 0; k < KH * KW; k++) {
+	int p = k / KW;
+	int q = k % KW;
+
+	if (start_h + p > -1 && start_h + p < H &&
+	    start_w + q > -1 && start_w + q < W) {
+
+	  c4 = __hfma(x4d(b + 4, m, start_h + p, start_w + q), weights[k], c4);
+	}
+      }
+    }
+
+    if(b + 5 < B){
+      for (int k = 0; k < KH * KW; k++) {
+	int p = k / KW;
+	int q = k % KW;
+
+	if (start_h + p > -1 && start_h + p < H &&
+	    start_w + q > -1 && start_w + q < W) {
+
+	  c5 = __hfma(x4d(b + 5, m, start_h + p, start_w + q), weights[k], c5);
+	}
+      }
+    }
+
+    if(b + 6 < B){
+      for (int k = 0; k < KH * KW; k++) {
+	int p = k / KW;
+	int q = k % KW;
+
+	if (start_h + p > -1 && start_h + p < H &&
+	    start_w + q > -1 && start_w + q < W) {
+
+	  c6 = __hfma(x4d(b + 6, m, start_h + p, start_w + q), weights[k], c6);
+	}
+      }
+    }
+
+    if(b + 7 < B){
+      for (int k = 0; k < KH * KW; k++) {
+	int p = k / KW;
+	int q = k % KW;
+
+	if (start_h + p > -1 && start_h + p < H &&
+	    start_w + q > -1 && start_w + q < W) {
+
+	  c7 = __hfma(x4d(b + 7, m, start_h + p, start_w + q), weights[k], c7);
+	}
+      }
+    }
+
+    
+
+    y4d(b, m, 0, tx) = c0;	
+    if(b + 1 < B)
+      y4d(b + 1, m, 0, tx) = c1;
+    if(b + 2 < B)
+      y4d(b + 2, m, 0, tx) = c2;
+    if(b + 3 < B)
+      y4d(b + 3, m, 0, tx) = c3;
+    if(b + 4 < B)
+      y4d(b + 4, m, 0, tx) = c4;
+    if(b + 5 < B)
+      y4d(b + 5, m, 0, tx) = c5;
+    if(b + 6 < B)
+      y4d(b + 6, m, 0, tx) = c6;
+    if(b + 7 < B)
+      y4d(b + 7, m, 0, tx) = c7;
+  }
+	
+  #undef y4d 
+  #undef x4d
+}
+
+
+
+__global__ void depthwise_convNew12(float* const __restrict__ y,
+				    const float* const __restrict__ x,
+				    const float* const __restrict__ w,
+				    const int B, const int M,
+				    const int H, const int W, const int KH,
+				    const int KW, const int H_out, const int W_out,
+				    const int H_pad, const int W_pad,
+				    const int H_stride, const int W_stride)
+{
+
+  #define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
+  #define x4d(i3, i2, i1, i0) x[(i3) * (M * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
+
+  const int num = 12;
+
+  const int b = num * blockIdx.x;
+  const int m = (blockIdx.y * blockDim.x  + threadIdx.x)/ (H_out * W_out);
+	
+  if(m < M){
+    const int tx = (blockIdx.y * blockDim.x  + threadIdx.x) % (H_out * W_out);
+
+    const int start_h = (tx / W_out) * H_stride - H_pad;
+    const int start_w = (tx % W_out) * W_stride - W_pad;
+
+    float c0 = 0;
+    float c1 = 0;
+    float c2 = 0;
+    float c3 = 0;
+    float c4 = 0;
+    float c5 = 0;
+    float c6 = 0;
+    float c7 = 0;
+    float c8 = 0;
+    float c9 = 0;
+    float c10 = 0;
+    float c11 = 0;
+	
+    const float* weights = &w[m * KH * KW];
+
+    for (int k = 0; k < KH * KW; k++) {
+      int p = k / KW;
+      int q = k % KW;
+
+      if (start_h + p > -1 && start_h + p < H &&
+	  start_w + q > -1 && start_w + q < W) {
+
+	c0 += x4d(b, m, start_h + p, start_w + q) * weights[k];
+	if(b + 1 < B)
+	  c1 += x4d(b + 1, m, start_h + p, start_w + q) * weights[k];
+	if(b + 2 < B)
+	  c2 += x4d(b + 2, m, start_h + p, start_w + q) * weights[k];
+	if(b + 3 < B)
+	  c3 += x4d(b + 3, m, start_h + p, start_w + q) * weights[k];
+	if(b + 4 < B)
+	  c4 += x4d(b + 4, m, start_h + p, start_w + q) * weights[k];
+	if(b + 5 < B)
+	  c5 += x4d(b + 5, m, start_h + p, start_w + q) * weights[k];
+	if(b + 6 < B)
+	  c6 += x4d(b + 6, m, start_h + p, start_w + q) * weights[k];
+	if(b + 7 < B)
+	  c7 += x4d(b + 7, m, start_h + p, start_w + q) * weights[k];
+	if(b + 8 < B)
+	  c8 += x4d(b + 8, m, start_h + p, start_w + q) * weights[k];
+	if(b + 9 < B)
+	  c9 += x4d(b + 9, m, start_h + p, start_w + q) * weights[k];
+	if(b + 10 < B)
+	  c10 += x4d(b + 10, m, start_h + p, start_w + q) * weights[k];
+	if(b + 11 < B)
+	  c11 += x4d(b + 11, m, start_h + p, start_w + q) * weights[k];
+    
+
+      }
+    }
+
+    y4d(b, m, 0, tx) = c0;	
+    if(b + 1 < B)
+      y4d(b + 1, m, 0, tx) = c1;
+    if(b + 2 < B)
+      y4d(b + 2, m, 0, tx) = c2;
+    if(b + 3 < B)
+      y4d(b + 3, m, 0, tx) = c3;
+    if(b + 4 < B)
+      y4d(b + 4, m, 0, tx) = c4;
+    if(b + 5 < B)
+      y4d(b + 5, m, 0, tx) = c5;
+    if(b + 6 < B)
+      y4d(b + 6, m, 0, tx) = c6;
+    if(b + 7 < B)
+      y4d(b + 7, m, 0, tx) = c7;
+    if(b + 8 < B)
+      y4d(b + 8, m, 0, tx) = c8;
+    if(b + 9 < B)
+      y4d(b + 9, m, 0, tx) = c9;
+    if(b + 10 < B)
+      y4d(b + 10, m, 0, tx) = c10;
+    if(b + 11 < B)
+      y4d(b + 11, m, 0, tx) = c11;
+	
+  }
+	
+  #undef y4d 
+  #undef x4d
+}
+
+
+__global__ void depthwise_convNew12_half(__half* const __restrict__ y,
+				    const __half* const __restrict__ x,
+				    const __half* const __restrict__ w,
+				    const int B, const int M,
+				    const int H, const int W, const int KH,
+				    const int KW, const int H_out, const int W_out,
+				    const int H_pad, const int W_pad,
+				    const int H_stride, const int W_stride)
+{
+
+  #define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
+  #define x4d(i3, i2, i1, i0) x[(i3) * (M * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
+
+  const int num = 12;
+
+  const int b = num * blockIdx.x;
+  const int m = (blockIdx.y * blockDim.x  + threadIdx.x)/ (H_out * W_out);
+	
+  if(m < M){
+    const int tx = (blockIdx.y * blockDim.x  + threadIdx.x) % (H_out * W_out);
+
+    const int start_h = (tx / W_out) * H_stride - H_pad;
+    const int start_w = (tx % W_out) * W_stride - W_pad;
+
+    __half c0 = 0;
+    __half c1 = 0;
+    __half c2 = 0;
+    __half c3 = 0;
+    __half c4 = 0;
+    __half c5 = 0;
+    __half c6 = 0;
+    __half c7 = 0;
+    __half c8 = 0;
+    __half c9 = 0;
+    __half c10 = 0;
+    __half c11 = 0;
+	
+    const __half* weights = &w[m * KH * KW];
+
+    for (int k = 0; k < KH * KW; k++) {
+      int p = k / KW;
+      int q = k % KW;
+
+      if (start_h + p > -1 && start_h + p < H &&
+	  start_w + q > -1 && start_w + q < W) {
+
+	c0 = __hfma(x4d(b, m, start_h + p, start_w + q), weights[k], c0);
+	if(b + 1 < B)
+	  c1 = __hfma(x4d(b + 1, m, start_h + p, start_w + q), weights[k], c1);
+	if(b + 2 < B)
+	  c2 = __hfma(x4d(b + 2, m, start_h + p, start_w + q), weights[k], c2);
+	if(b + 3 < B)
+	  c3 = __hfma(x4d(b + 3, m, start_h + p, start_w + q), weights[k], c3);
+	if(b + 4 < B)
+	  c4 = __hfma(x4d(b + 4, m, start_h + p, start_w + q), weights[k], c4);
+	if(b + 5 < B)
+	  c5 = __hfma(x4d(b + 5, m, start_h + p, start_w + q), weights[k], c5);
+	if(b + 6 < B)
+	  c6 = __hfma(x4d(b + 6, m, start_h + p, start_w + q), weights[k], c6);
+	if(b + 7 < B)
+	  c7 = __hfma(x4d(b + 7, m, start_h + p, start_w + q), weights[k], c7);
+	if(b + 8 < B)
+	  c8 = __hfma(x4d(b + 8, m, start_h + p, start_w + q), weights[k], c8);
+	if(b + 9 < B)
+	  c9 = __hfma(x4d(b + 9, m, start_h + p, start_w + q), weights[k], c9);
+	if(b + 10 < B)
+	  c10 = __hfma(x4d(b + 10, m, start_h + p, start_w + q), weights[k], c10);
+	if(b + 11 < B)
+	  c11 = __hfma(x4d(b + 11, m, start_h + p, start_w + q), weights[k], c11);
+    
+
+      }
+    }
+
+    y4d(b, m, 0, tx) = c0;	
+    if(b + 1 < B)
+      y4d(b + 1, m, 0, tx) = c1;
+    if(b + 2 < B)
+      y4d(b + 2, m, 0, tx) = c2;
+    if(b + 3 < B)
+      y4d(b + 3, m, 0, tx) = c3;
+    if(b + 4 < B)
+      y4d(b + 4, m, 0, tx) = c4;
+    if(b + 5 < B)
+      y4d(b + 5, m, 0, tx) = c5;
+    if(b + 6 < B)
+      y4d(b + 6, m, 0, tx) = c6;
+    if(b + 7 < B)
+      y4d(b + 7, m, 0, tx) = c7;
+    if(b + 8 < B)
+      y4d(b + 8, m, 0, tx) = c8;
+    if(b + 9 < B)
+      y4d(b + 9, m, 0, tx) = c9;
+    if(b + 10 < B)
+      y4d(b + 10, m, 0, tx) = c10;
+    if(b + 11 < B)
+      y4d(b + 11, m, 0, tx) = c11;
+	
+  }
+	
+  #undef y4d 
+  #undef x4d
+}
+
+
+
+
+
+__global__ void depthwise_convNew4_half2(__half* const __restrict__ y,
+					const __half* const __restrict__ x,
+					const __half* const __restrict__ w,
+					const int B, const int M,
+					const int H, const int W, const int KH,
+					const int KW, const int H_out, const int W_out,
+					const int H_pad, const int W_pad,
+					const int H_stride, const int W_stride)
+{
+
+  #define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
+  #define x4d(i3, i2, i1, i0) x[(i3) * (M * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
+
+  const int num = 4;
+
+  const int b = num * blockIdx.x;
+  const int m = (blockIdx.y * blockDim.x  + threadIdx.x)/ (H_out * W_out);
+	
+  if(m < M){
+    const int tx = (blockIdx.y * blockDim.x  + threadIdx.x) % (H_out * W_out);
+
+    const int start_h = (tx / W_out) * H_stride - H_pad;
+    const int start_w = (tx % W_out) * W_stride - W_pad;
+
+    __half2 c0 = __half2half2(0);
+    __half2 c1 = __half2half2(0);
+ 
+    const __half* weights = &w[m * KH * KW];
+
+    for (int k = 0; k < KH * KW; k++) {
+      int p = k / KW;
+      int q = k % KW;
+      if (start_h + p > -1 && start_h + p < H &&
+	  start_w + q > -1 && start_w + q < W) {
+
+      
+	__half2 t1;
+	__half2 t2;
+	if(b + 3 < B){
+	    t1 = __halves2half2(x4d(b + 1, m, start_h + p, start_w + q), x4d(b, m, start_h + p, start_w + q));
+	    t2 = __halves2half2(x4d(b + 3, m, start_h + p, start_w + q), x4d(b + 2, m, start_h + p, start_w + q));
+	 }
+	else if(b + 2 < B){
+	  t1 = __halves2half2(x4d(b + 1, m, start_h + p, start_w + q), x4d(b, m, start_h + p, start_w + q));
+	  t2 = __halves2half2(0, x4d(b + 2, m, start_h + p, start_w + q));
+
+	}
+	else if(b + 1 < B){
+	  t1 = __halves2half2(x4d(b + 1, m, start_h + p, start_w + q), x4d(b, m, start_h + p, start_w + q));
+	}
+	else{
+	  t1 = __halves2half2(0, x4d(b, m, start_h + p, start_w + q));
+
+	 }
+
+	
+	c0 = __hfma2(t1, __halves2half2(weights[k], weights[k]), c0);
+	c1 = __hfma2(t2, __halves2half2(weights[k], weights[k]), c1);
+	
+      }
+    }
+
+    y4d(b, m, 0, tx) = __high2half(c0);	
+    if(b + 1 < B)
+      y4d(b + 1, m, 0, tx) = __low2half(c0);
+    if(b + 2 < B)
+      y4d(b + 2, m, 0, tx) = __high2half(c1);
+    if(b + 3 < B)
+      y4d(b + 3, m, 0, tx) = __low2half(c1);
+
+  }
+	
+  #undef y4d 
+  #undef x4d
+}
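The half2 variant above packs two batch elements into each __half2 so that one __hfma2 performs two multiply-accumulates per instruction. The packing order matters: __halves2half2(a, b) places a in the low half and b in the high half, which is why batch b is written back with __high2half and batch b + 1 with __low2half. The core step in isolation (operand names x_b0, x_b1, acc are placeholders):

    __half2 t  = __halves2half2(x_b1, x_b0);  // low = x_b1, high = x_b0
    __half2 wk = __half2half2(weights[k]);    // broadcast the weight to both lanes
    acc = __hfma2(t, wk, acc);                // acc.lo += x_b1*w; acc.hi += x_b0*w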
+
+
+
+
+
+
+// Perforated tensor convolution: 'row' and 'col' control how many rows/columns are skipped
+void* tensorConvPerf(void* input_ptr, void* filter_ptr,
+		     int vertical_pad, int horizontal_pad,
+		     int vertical_stride, int horizontal_stride,
+		     int conv_mode, int conv_groups, int row, int col){
+
+  INFO("*** TensorConvolution \n");
+  profileEvent("tensorConv");
+
+  Tensor* input = (Tensor*) input_ptr;
+  Tensor* filter = (Tensor*) filter_ptr;
+
+  cudnnConvolutionDescriptor_t convDesc;
+  cudnnConvolutionFwdAlgo_t convAlgo;
+  cudnnConvolutionMode_t mode;
+  if(conv_mode == 0)
+    mode = CUDNN_CONVOLUTION;
+  else if(conv_mode == 1)
+    mode = CUDNN_CROSS_CORRELATION;
+
+  // FIXIT: Need to be more aware of the implications of alpha and beta
+  float alpha = 1.0f, beta = 0.0f;
+
+  // TODO: Support other cases;
+  hostToDeviceCopy(input);
+  hostToDeviceCopy(filter);
+
+  INFO("vertical_stride = %lu, horizontal_stride = %lu \n", vertical_stride, horizontal_stride);
+
+  checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc));
+
+  // FIXME: Current hack to preserve backward compatibility
+  if(conv_groups == 0){
+    conv_groups = 1;
+  }
+
+  // NOTE: Adding support for grouped convolution
+  checkCUDNN(cudnnSetConvolutionGroupCount(convDesc, conv_groups));
+
+  int new_v = vertical_stride + row;
+  int new_h = horizontal_stride + col;
+  cudnnDataType_t computeType = CUDNN_DATA_FLOAT;
+  // FIXIT: Should the upscaling values be configurable?
+  // IMP-FIXIT: Either make the mode configurable OR check whether CUDNN_CONVOLUTION mode should be used
+  checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc,
+					     vertical_pad, horizontal_pad, // conv padding
+					     new_v, new_h, // conv strides
+					     1, 1, // upscaling values
+					     mode , // mode is configurable
+					     computeType)); // defines compute precision
+
+  int n, c, h, w; // output dimensions
+  // Find dimension of convolution output
+  checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc,
+						   input->tensor_desc,
+						   filter->filter_desc,
+						   &n, &c, &h, &w));
+
+
+  DEBUG("**Output Tensor Dims, n = %d, c = %d, h = %d, w = %d \n", n, c, h, w);
+
+  Tensor* output;
+  if(input->data_format == CUDNN_TENSOR_NCHW)
+    output = (Tensor*) create4DTensor((cudnnDataType_t) input->data_type,
+				      CUDNN_TENSOR_NCHW, n, c, h, w);
+  else if(input->data_format == CUDNN_TENSOR_NHWC){
+    DEBUG("* NHWC Format \n");
+    output = (Tensor*) create4DTensor((cudnnDataType_t) input->data_type,
+				      CUDNN_TENSOR_NHWC, n, h, w, c);
+  }
+  else
+    ERROR("Unsupported Tensor Type");
+
+  // NOTE: Changing output tensor placement from host to device
+  changeTensorPlacement(output, DEVICE);
+  // NOTE: Necessary to insert the above call for every output tensor
+
+  DEBUG("tensor->data_type = %d, tensor->data_format = %d, N = %d, C = %d, H = %d, W = %d \n",
+	output->data_type, output->data_format, output->dims.dim_sizes[0], output->dims.dim_sizes[1],
+	output->dims.dim_sizes[2], output->dims.dim_sizes[3]);
+
+  if(convDesc == NULL || input->tensor_desc == NULL ||
+     filter->filter_desc == NULL || output->tensor_desc == NULL)
+    ERROR("NULL descriptor! \n");
+
+
+  // Debugging info prints
+  printTensorDescInfo(input);
+  printTensorDescInfo(filter);
+  printTensorDescInfo(output);
+
+  // NOTE-FIXIT: function failing for NHWC formats - perhaps some CUDNN support is lacking
+  checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnnHandle,
+						 input->tensor_desc,
+						 filter->filter_desc,
+						 convDesc,
+						 output->tensor_desc,
+						 CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
+						 //CUDNN_CONVOLUTION_FWD_NO_WORKSPACE,
+						 0,
+						 &convAlgo));
+
+
+  DEBUG("ConvAlgo = %d, FFT = %d, GEMM = %d, WINOGRAD = %d \n", convAlgo,
+	CUDNN_CONVOLUTION_FWD_ALGO_FFT, CUDNN_CONVOLUTION_FWD_ALGO_GEMM,
+	CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD);
+
+
+  // FIXIT: Algo shouldn't be hardcoded
+  convAlgo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
+
+  size_t workspace_size;
+  checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle,
+						     input->tensor_desc,
+						     filter->filter_desc,
+						     convDesc,
+						     output->tensor_desc,
+						     convAlgo,
+						     &workspace_size));
+
+  // Allocating memory for the convolution workspace
+  void* workspace;
+  checkCudaErrors(cudaMalloc(&workspace, workspace_size));
+  DEBUG("workspace size = %d \n", workspace_size);
+
+
+  checkCUDNN(cudnnConvolutionForward(cudnnHandle, &alpha, input->tensor_desc,
+				     input->gpu_data, filter->filter_desc, filter->gpu_data,
+				     convDesc, convAlgo, workspace, workspace_size,
+				     &beta, output->tensor_desc, output->gpu_data));
+
+
+  int old_w = w;
+  int old_h = h;
+  h = (2 * vertical_pad + input->dims.dim_sizes[2] - filter->dims.dim_sizes[2]) / vertical_stride + 1;
+  w = (2 * horizontal_pad + input->dims.dim_sizes[3] - filter->dims.dim_sizes[3]) / horizontal_stride + 1;
+
+  Tensor* new_output;
+  if(input->data_format == CUDNN_TENSOR_NCHW)
+    new_output = (Tensor*) create4DTensor((cudnnDataType_t) float_type, //input->data_type,
+					  CUDNN_TENSOR_NCHW, n, c, h, w);
+  else if(input->data_format == CUDNN_TENSOR_NHWC){
+    DEBUG("* NHWC Format \n");
+    new_output = (Tensor*) create4DTensor((cudnnDataType_t) input->data_type,
+					  CUDNN_TENSOR_NHWC, n, h, w, c);
+  }
+  else
+    ERROR("Unsupported Tensor Type");
+
+
+  int numBlocks = (n * c * h * w  + 127) / 128;
+  if(vertical_stride == 0 && row == 0)
+    return output;
+
+  if(vertical_stride == 1 && row == 1){
+    interpolateRow<<<numBlocks,128>>>(n * c * h * w, old_h, n, c, h, w,
+				      (float *)output->gpu_data, (float *)new_output->gpu_data);
+  }
+  else if(horizontal_stride == 1 && col == 1){
+    interpolateCol<<<numBlocks,128>>>(n * c * h * w, old_w, n, c, h, w,
+				      (float *)output->gpu_data, (float *)new_output->gpu_data);
+  }
+  else if (col > 0){
+    interpolateXCol<<<numBlocks,128>>>(n * c * h * w, old_w, n, c, h, w,
+				       (float *)output->gpu_data, (float *)new_output->gpu_data, col + 1);
+  }
+  else{
+    interpolateXRow<<<numBlocks,128>>>(n * c * h * w, old_h, n, c, h, w,
+				       (float *)output->gpu_data, (float *)new_output->gpu_data, row + 1);
+  }
+
+
+  cudaDeviceSynchronize();
+
+  profileEvent("tensorConv_end", true);
+
+
+  changeTensorPlacement(new_output, DEVICE);
+  return new_output;
+
+}
+
+
+
+
+
+
+
+
+}
+
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/legacy/tensor_runtime.cu b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/legacy/tensor_runtime.cu
new file mode 100644
index 0000000000..5c6f036938
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/legacy/tensor_runtime.cu
@@ -0,0 +1,2121 @@
+/* This file includes the API implementation of the HPVM tensor runtime built on cuBLAS and cuDNN.
+**
+**  Author: Hashim Sharif
+**  Email: hsharif3@illinois.edu
+*/
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <cstdio>
+#include <cstdlib>
+#include <cmath>
+#include <ctime>
+#include <cfloat>
+#include <algorithm>
+#include <chrono>
+#include <iomanip>
+#include <iostream>
+#include <map>
+#include <memory>
+#include <random>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include <cuda_runtime.h>
+#include <device_launch_parameters.h>
+
+#include <cublas_v2.h>
+#include <cudnn.h>
+#include <cublas_api.h>
+#include <cuda_fp16.h>
+#include <driver_types.h>
+
+
+// Tensor runtime header files
+#include "tensor_runtime.h"
+#include "tensor_utils.h"
+#include "debug.h"
+#include "profiling.h"
+#include "fp16_conversion.h"
+#include "global_data.h"
+#include "error.h"
+#include "tensor.h"
+#include "op_overheads.h"
+#include "half_precision_api.h"
+#include "hpvm-rt-controller.h"
+#include "approxhpvm_runtime_utils.h" 
+#include "approx_api.h"
+
+
+// Image tensor runtime implementation
+#include "img_tensor_runtime.cu"
+
+//** Potential Improvements:
+//   1) Add support for datatypes beyond float and half 
+//   2) Support for more CUDNN operations
+
+
+
+void llvm_hpvm_initTensorRt(int gpuid){
+
+  if(!runtime_initialized){
+    
+    printf("INITIALIZING GPU %d \n", gpuid);
+    // NOTE: Setting the target GPU. Can we use multiple GPUs?
+    checkCudaErrors(cudaSetDevice(gpuid));
+    // Initializing cuDNN and cuBlas handles
+    checkCudaErrors(cublasCreate(&cublasHandle));
+    checkCUDNN(cudnnCreate(&cudnnHandle));
+
+
+#ifdef PROMISE_TUNER_ENABLED
+    //    readOpenTunerFlags("opentuner_flags");
+    readOpenTunerFlags("promise_flags");
+#endif
+
+
+#ifdef ERROR_INJECTION_ENABLED
+    readOpenTunerFlags("opentuner_flags");
+#endif
+
+    
+    runtime_initialized = true;
+  }
+  
+}
+
+
+void llvm_hpvm_cleanupTensorRt(){
+  DEBUG("\**** llvm_hpvm_cleanupTensorRt ***\n");
+  dumpAccuracyNorms();
+}
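+
+// Typical lifecycle (sketch - the tensor creation call and shapes are illustrative):
+//   llvm_hpvm_initTensorRt(0);                  // bind to GPU 0, create cuBLAS/cuDNN handles
+//   void* t = create4DTensor(CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1, 3, 32, 32);
+//   void* out = tensorRelu(t);                  // ... run tensor ops ...
+//   llvm_hpvm_cleanupTensorRt();                // dump accuracy norms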
+
+
+void llvm_hpvm_initApproxhpvmRt(int gpuid){
+  llvm_hpvm_initTensorRt(gpuid);
+  approxhpvm_runtime_mode = true;
+}
+
+void llvm_hpvm_cleanupApproxhpvmRt(){
+
+}
+
+
+
+void dumpAccuracyNorms(){
+
+  #ifdef ERROR_INJECTION_ENABLED
+  
+  
+  #endif
+
+  dump_result("accuracy_summary");
+
+}
+
+
+// Returns the number of GPUs active on the platform
+int getGPUCount(){
+  int num_gpus;
+  checkCudaErrors(cudaGetDeviceCount(&num_gpus));
+  return num_gpus;
+}
+
+
+
+void clearTensorMap(){
+
+  tensors_ptr.clear();
+  host_ptr.clear();
+  obj_ptr.clear();
+}
+
+
+void startMemTracking(){
+
+  tensors_ptr.clear();
+  host_ptr.clear();
+  obj_ptr.clear();
+
+  tracked_tensors.clear();
+}
+
+
+void freeOutputTensors(){
+
+  DEBUG("**** Freeing Ouput Tensors *** \n");
+  for(int i = 0; i < tensors_ptr.size(); i++){
+    cudaFree(tensors_ptr[i]);
+    tensors_ptr[i] = NULL;
+  }
+
+  for(int i = 0; i < host_ptr.size(); i++){
+    free(host_ptr[i]);
+    host_ptr[i] = NULL;
+  }
+  
+  for(int i = 0; i < obj_ptr.size(); i++){
+    free(obj_ptr[i]);
+    obj_ptr[i] = NULL;
+  }
+}
+
+
+
+void clearOpCounter(){
+  total_ops = 0;
+  op_counter = 0;
+  op_accuracies.clear();
+}
+
+
+
+void freeBatchMemory(){
+  // Free allocated memory for the current mini-batch
+  freeOutputTensors();
+  // Reinitialize counter for OpenTuner flags for the next mini-batch of execution
+  op_counter = 0;
+  // Clearing profiling data map
+  func_counters.clear();
+}
+
+
+
+
+// FIXIT: Fix any assumptions on the NCHW format
+// TODO: benchmark split performance and check whether its cost is prohibitive
+void** tensorSplit(void* tensor_ptr, int num_splits, int split_dim){
+
+  INFO("*** TensorSplit \n");  
+  profileEvent("tensorSplit");
+
+  Tensor* tensor = (Tensor*) tensor_ptr;
+  
+  deviceToHostCopy(tensor); // Splitting done on the host
+
+  Tensor** splits = (Tensor**) malloc(sizeof(Tensor*) * num_splits);
+  size_t* dim_sizes = (size_t*) malloc(sizeof(size_t) * tensor->dims.num_dims);
+  for(unsigned int i = 0; i < tensor->dims.num_dims; i++){
+    dim_sizes[i] = tensor->dims.dim_sizes[i];
+  }
+
+  
+  dim_sizes[split_dim] = tensor->dims.dim_sizes[split_dim] / num_splits;
+  if(dim_sizes[split_dim] < 1)
+    ERROR("Split Dimension < 1 after splitting");
+
+  size_t copy_size = getTypeSize(tensor->data_type);
+  for(unsigned int i = split_dim; i < tensor->dims.num_dims; i++){
+    copy_size = copy_size * dim_sizes[i];
+  }
+  
+  for(unsigned int i = 0; i < num_splits; i++){
+    // FIXIT: Don't be specific to 4D tensors
+    // NOTE: Using same data format (NHWC/NCHW) for the split tensors
+    INFO("dim_sizes[0] = %d, dim_sizes[1] = %d, dim_sizes[2] = %d, dim_sizes[3] = %d \n",
+	 dim_sizes[0], dim_sizes[1], dim_sizes[2], dim_sizes[3]);
+
+    Tensor* split = (Tensor*) create4DTensor(tensor->data_type, tensor->data_format,
+					  dim_sizes[0], dim_sizes[1], dim_sizes[2], dim_sizes[3]);
+    
+    size_t copy_start = i * copy_size;
+    size_t copy_stride = num_splits * copy_size;
+    INFO("copy_size = %d, copy_start = %d, copy_stride = %d, tensor->size_in_bytes = %d \n",
+	 copy_size, copy_start, copy_stride, tensor->size_in_bytes);
+
+    int index = 0;
+    while(copy_start + copy_size <= tensor->size_in_bytes){
+      memcpy(((char*) split->host_data + (index * copy_size)),
+	     ((char*)tensor->host_data + copy_start),
+	     copy_size);
+      copy_start += copy_stride;
+      index++;
+    }
+   	
+    splits[i] = split;     
+  }
+
+  profileEvent("tensorSplit_end", true);
+
+  return (void**) splits;
+}
+
+
+void* tensorConcat(void** tensors_ptr, int num_splits, int split_dim){
+
+  INFO("*** TensorConcat \n");  
+  profileEvent("tensorConcat");
+
+  Tensor** tensors = (Tensor**) tensors_ptr;
+
+  for(int i = 0; i < num_splits; i++){
+    deviceToHostCopy(tensors[i]); // Concatenation done on the host
+  }
+  
+  // The number of dimensions of the concatenated tensor is the same as each split tensor's
+  size_t* dim_sizes = (size_t*) malloc(sizeof(size_t) * tensors[0]->dims.num_dims);
+  for(unsigned int i = 0; i < tensors[0]->dims.num_dims; i++){
+    dim_sizes[i] = tensors[0]->dims.dim_sizes[i];
+  }
+  
+  size_t copy_size = getTypeSize(tensors[0]->data_type);
+  for(unsigned int i = split_dim; i < tensors[0]->dims.num_dims; i++){
+    copy_size = copy_size * dim_sizes[i];
+  }
+
+  dim_sizes[split_dim] = dim_sizes[split_dim] * num_splits;
+  if(dim_sizes[split_dim] < 1)
+    ERROR("Split Dimension < 1 after concat");
+
+  Tensor* output = (Tensor*) create4DTensor(tensors[0]->data_type, tensors[0]->data_format,
+					 dim_sizes[0], dim_sizes[1], dim_sizes[2], dim_sizes[3]);
+
+  INFO("dim_sizes[0] = %d, dim_sizes[1] = %d, dim_sizes[2] = %d, dim_sizes[3] = %d \n",
+       dim_sizes[0], dim_sizes[1], dim_sizes[2], dim_sizes[3]);
+
+
+  int num_copies = 1;
+  for(unsigned int i = 0; i < split_dim; i++){
+    num_copies = num_copies * dim_sizes[i];
+  }
+  
+  size_t copy_stride = num_splits * copy_size;
+  INFO("copy_size = %d, num_copies = %d, copy_stride = %d, output->size_in_bytes = %d \n",
+       copy_size, num_copies, copy_stride, output->size_in_bytes);
+
+  for(unsigned int i = 0; i < num_copies; i++){
+    // FIXIT: Don't be specific to 4D tensors
+    size_t copy_start = i * copy_stride;
+   
+    for(int j = 0; j < num_splits; j++){
+      struct Tensor* split = tensors[j];
+      memcpy(((char*) output->host_data + copy_start + (j * copy_size)),
+	     ((char*) split->host_data + (i * copy_size)),
+	     copy_size);   
+    }      
+  }
+
+  profileEvent("tensorConcat_end", true);
+
+  return output;
+}
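+
+// Example (sketch - tensor shapes are illustrative): splitting along the channel
+// dimension and re-concatenating recovers the original layout:
+//   void* t = create4DTensor(CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2, 4, 8, 8);
+//   void** parts = tensorSplit(t, /*num_splits=*/2, /*split_dim=*/1); // two 2x2x8x8 tensors
+//   void* merged = tensorConcat(parts, 2, 1);                        // back to 2x4x8x8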
+
+
+
+void* tensorLRN(void* input_ptr, unsigned int LRN_window,
+		double LRN_alpha, double LRN_beta, double LRN_k){
+
+  INFO("*** TensorLRN \n");  
+  profileEvent("tensorLRN");
+
+  Tensor* input = (Tensor*) input_ptr;
+
+  hostToDeviceCopy(input);
+
+  float alpha = 1.0f, beta = 0.0f;
+  cudnnLRNDescriptor_t LRNDesc;
+  checkCUDNN(cudnnCreateLRNDescriptor(&LRNDesc));
+
+  INFO("window = %d, LRN_alpha = %f, LRN_beta = %f, LRN_k = %f \n",
+       LRN_window, LRN_alpha, LRN_beta, LRN_k);
+ 
+  
+  checkCUDNN(cudnnSetLRNDescriptor(LRNDesc, LRN_window, LRN_alpha, LRN_beta, LRN_k));
+
+  size_t* dim_sizes = input->dims.dim_sizes;
+  Tensor* output = (Tensor*) create4DTensor((cudnnDataType_t) float_type, 
+			  CUDNN_TENSOR_NCHW, dim_sizes[0], dim_sizes[1],
+			  dim_sizes[2], dim_sizes[3]);
+  // NOTE: Changing output tensor placement from host to device
+  changeTensorPlacement(output, DEVICE); 
+  // NOTE: Necessary to insert the above call for every output tensor
+
+  printTensorDescInfo(input);
+  printTensorDescInfo(output);
+  
+  checkCUDNN(cudnnLRNCrossChannelForward(cudnnHandle, LRNDesc, CUDNN_LRN_CROSS_CHANNEL_DIM1,
+					 &alpha, input->tensor_desc, input->gpu_data,
+					 &beta, output->tensor_desc, output->gpu_data));
+
+  profileEvent("tensorLRN_end", true);
+    
+  return output;
+}
+
+
+void printTensorDims2(void* tensor_ptr){
+
+  struct Tensor* tensor = (struct Tensor*) tensor_ptr;
+
+  printf("Num_elems = %lu \n", tensor->num_elems);
+  for (int i = 0; i < tensor->dims.num_dims; i++){
+    printf("dim[%d] = %lu \n", i, tensor->dims.dim_sizes[i]);
+  }
+}
+
+
+
+
+// FIXIT: tensorAdd currently only works for 4D tensors
+void* tensorAdd(void* x_ptr, void* bias_ptr){
+  
+  Tensor* x = (Tensor*) x_ptr;
+  Tensor* bias = (Tensor*) bias_ptr;
+  
+  INFO("*** TensorAdd \n");  
+  profileEvent("Add");
+    
+  float alpha = 1.0f;
+  //float beta = 0.0f;
+  hostToDeviceCopy(x);
+  hostToDeviceCopy(bias);
+
+  convertToFP32(x);
+  convertToFP32(bias);
+
+  
+  INFO("x->num_elems = %d \n", x->num_elems);
+  INFO("bias->num_elems = %d \n", bias->num_elems);
+
+  if(cudnnHandle == NULL){
+    ERROR("cudnnHandle NOT initialized!! \n");    
+  }
+  
+  // FIXIT: routine fails for 3D tensors
+  checkCUDNN(cudnnAddTensor(cudnnHandle, &alpha, bias->tensor_desc,
+			    bias->gpu_data, &alpha, x->tensor_desc, x->gpu_data));
+
+  profileEvent("Add_end", true);
+
+  #ifdef ERROR_INJECTION_ENABLED  
+  if(op_counter >= total_ops){
+    ERROR("No accuracy flag found \n");
+  }
+  
+  int op_acc = op_accuracies[op_counter];
+
+  // Forcing 0 error for (Resnet-like) equal dimension adds (Testing-only)
+  
+  //-- if (bias->dims.dim_sizes[0] > 1)
+  //--  op_acc = 0;
+  // Skip errorInjection if explicitly requested
+  //-- if (skip_tensors.find(op_counter) != skip_tensors.end()){
+  //--   op_acc = 0;  
+  //  }
+
+  void* error_norms = tensorAddError(x, op_acc);
+  add_norms(error_norms, "tensorAdd", op_acc);
+  add_bias_overheads(x, op_acc);
+  op_counter++;
+  
+  #endif
+  
+  
+  return x;
+}
+
+
+// FIXIT: Generalize all of the routines for types {half, float, double}
+void* tensorConvolution(void* input_ptr, void* filter_ptr,
+			int vertical_pad, int horizontal_pad,
+			int vertical_stride, int horizontal_stride,
+			int conv_mode, int conv_groups){  
+  
+  INFO("*** TensorConvolution \n");
+  profileEvent("Conv");
+
+  Tensor* input = (Tensor*) input_ptr;
+  Tensor* filter = (Tensor*) filter_ptr;
+  
+  cudnnConvolutionDescriptor_t convDesc;
+  cudnnConvolutionFwdAlgo_t convAlgo;
+  cudnnConvolutionMode_t mode;
+  if(conv_mode == 0)
+    mode = CUDNN_CONVOLUTION;
+  else if(conv_mode == 1)
+    mode = CUDNN_CROSS_CORRELATION;
+
+  // FIXIT: conv_mode is currently ignored - cross-correlation is used unconditionally
+  mode = CUDNN_CROSS_CORRELATION;
+  // FIXIT: Need to be more aware of the implications of alpha and beta
+  float alpha = 1.0f, beta = 0.0f;
+  
+  // TODO: Support other cases;  
+  hostToDeviceCopy(input);
+  hostToDeviceCopy(filter);
+
+  convertToFP32(input);
+  convertToFP32(filter);
+
+  
+  INFO("vertical_stride = %lu, horizontal_stride = %lu \n", vertical_stride, horizontal_stride);  
+
+  checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc));
+
+  //FIXME: Current hack to preserve backward compatibility
+  if(conv_groups == 0){
+    conv_groups = 1;
+  }
+  
+  
+  
+  cudnnDataType_t computeType = CUDNN_DATA_FLOAT;
+  // FIXIT: Decide whether the upscaling (dilation) values need to be configurable
+  // IMP-FIXIT: Either make the mode configurable OR decide whether CUDNN_CONVOLUTION mode should be used
+  checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc,
+					     vertical_pad, horizontal_pad, // conv padding
+					     vertical_stride, horizontal_stride, // conv strides
+					     1, 1, // upscaling values
+					     mode , // mode is configurable
+                                             computeType)); // defines compute precision
+
+  // NOTE: Adding support for grouped convolution
+  checkCUDNN(cudnnSetConvolutionGroupCount(convDesc, conv_groups));
+
+  int n, c, h, w; // output dimensions  
+  // Find dimension of convolution output
+
+  if(input->tensor_desc == NULL || filter->filter_desc == NULL)
+    ERROR("Input or Filter descriptor is NULL");
+    
+  checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc,
+						   input->tensor_desc,
+						   filter->filter_desc,
+						   &n, &c, &h, &w));
+
+    
+  DEBUG("**Output Tensor Dims, n = %d, c = %d, h = %d, w = %d \n", n, c, h, w);
+
+  Tensor* output;
+  if(input->data_format == CUDNN_TENSOR_NCHW)
+    output = (Tensor*) create4DTensor((cudnnDataType_t) float_type,  
+			              CUDNN_TENSOR_NCHW, n, c, h, w);
+  else if(input->data_format == CUDNN_TENSOR_NHWC){
+    DEBUG("* NHWC Format \n");
+    output = (Tensor*) create4DTensor((cudnnDataType_t) float_type, 
+			              CUDNN_TENSOR_NHWC, n, h, w, c);
+  }
+  else
+    ERROR("Unsupported Tensor Type");
+
+  // NOTE: Changing output tensor placement from host to device
+  changeTensorPlacement(output, DEVICE); 
+  // NOTE: Necessary to insert the above call for every output tensor
+    
+  DEBUG("tensor->data_type = %d, tensor->data_format = %d, N = %d, C = %d, H = %d, W = %d \n",
+	output->data_type, output->data_format, output->dims.dim_sizes[0],
+	output->dims.dim_sizes[1],
+	output->dims.dim_sizes[2], output->dims.dim_sizes[3]);
+
+  if(convDesc == NULL || input->tensor_desc == NULL ||
+     filter->filter_desc == NULL || output->tensor_desc == NULL)
+    ERROR("NULL descriptor! \n");
+
+
+  // Debugging info prints
+  printTensorDescInfo(input);
+  printTensorDescInfo(filter);
+  printTensorDescInfo(output);
+
+  // NOTE-FIXIT: function failing for NHWC formats - perhaps some CUDNN support is lacking
+  checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnnHandle,
+						 input->tensor_desc,
+						 filter->filter_desc,
+						 convDesc,
+						 output->tensor_desc,
+						 CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,	 
+						 //CUDNN_CONVOLUTION_FWD_NO_WORKSPACE,
+						 0,
+						 &convAlgo));
+
+  
+  DEBUG("ConvAlgo = %d, FFT = %d, GEMM = %d, WINOGRAD = %d \n", convAlgo,
+	 CUDNN_CONVOLUTION_FWD_ALGO_FFT, CUDNN_CONVOLUTION_FWD_ALGO_GEMM,
+	 CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD);
+	 
+
+  // FIXIT: Algo shouldn't be hardcoded
+  //convAlgo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
+  convAlgo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM;
+
+  size_t workspace_size;
+  checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle,
+						     input->tensor_desc,
+						     filter->filter_desc,
+						     convDesc,
+						     output->tensor_desc,
+						     convAlgo,
+						     &workspace_size));
+
+  // Allocating memory for the convolution workspace
+  void* workspace;
+  checkCudaErrors(cudaMalloc(&workspace, workspace_size)); 
+  DEBUG("workspace size = %d \n", workspace_size);
+
+
+  checkCUDNN(cudnnConvolutionForward(cudnnHandle, &alpha, input->tensor_desc,
+				     input->gpu_data, filter->filter_desc, filter->gpu_data,
+				     convDesc, convAlgo, workspace, workspace_size,
+				     &beta, output->tensor_desc, output->gpu_data));
+		       
+  profileEvent("Conv_end", true);
+
+
+  #ifdef ERROR_INJECTION_ENABLED
+
+  if(op_counter >= total_ops){
+    ERROR("No accuracy flag found \n");
+  }
+  
+  int op_acc = op_accuracies[op_counter];
+
+  // Ignore Error Injection for Depthwise Convolution  
+  /*if (conv_groups > 1){
+    op_acc = 0;
+  }
+  */
+
+  
+  void* error_norms = tensorAddError(output, op_acc);
+  add_norms(error_norms, "tensorConv", op_acc);
+  add_conv_overheads(input, filter, vertical_stride, horizontal_stride, op_acc);
+
+  op_counter++;
+  
+  #endif
+  
+  
+  return output;
+}
+
+
+
+// NOTE: Supports Max and Avg Pooling
+void* tensorPooling(void* input_ptr,
+		    int poolFunction,
+		    int window_height, int window_width,
+		    int vertical_pad, int horizontal_pad,
+		    int vertical_stride, int horizontal_stride){
+
+  INFO("*** TensorPooling \n");
+  profileEvent("Pool");
+
+  Tensor* input = (Tensor*) input_ptr;
+
+  cudnnPoolingDescriptor_t poolDesc;
+  // FIXIT: Need to be more aware of the implications of alpha and beta
+  float alpha = 1.0f, beta = 0.0f;
+
+  hostToDeviceCopy(input);
+
+  convertToFP32(input);
+
+  
+  checkCUDNN(cudnnCreatePoolingDescriptor(&poolDesc));            
+
+  int n = input->dims.dim_sizes[0];
+  int c = input->dims.dim_sizes[1];
+  int h = (input->dims.dim_sizes[2] + (2 * vertical_pad) - window_height) / vertical_stride;
+  h = h + 1;
+  int w = (input->dims.dim_sizes[3] + (2 * horizontal_pad) - window_width) / horizontal_stride;
+  w = w + 1;
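+
+  // Worked example: a 32x32 input with a 2x2 window, no padding and stride 2
+  // yields h = (32 + 0 - 2)/2 + 1 = 16 (and likewise w = 16).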
+
+  DEBUG("n = %d, c = %d, h = %d, w = %d \n", n, c, h, w);
+  
+  // FIXIT: Don't be specific to floats
+  Tensor* output = (Tensor*) create4DTensor(CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, n, c, h, w);
+  // Changing output tensor placement from host to device
+  changeTensorPlacement(output, DEVICE); 
+
+  // FIXIT: Fix being specific to CUDNN_DATA_FLOAT and NCHW format
+  // FIXIT: Is this setTensor even needed?
+  checkCUDNN(cudnnSetTensor4dDescriptor(output->tensor_desc,
+					CUDNN_TENSOR_NCHW,
+					CUDNN_DATA_FLOAT,
+					n, c,
+					h, w));
+
+
+  cudnnPoolingMode_t pool_mode;
+  if(poolFunction == 0)
+    pool_mode = CUDNN_POOLING_MAX;
+  else if(poolFunction == 1)
+    pool_mode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
+  else
+    ERROR("Unsupported pool function %d \n", poolFunction);
+
+  
+  
+  // NOTE: pool_mode (max/avg) is selected above; min pooling is not yet supported
+  checkCUDNN(cudnnSetPooling2dDescriptor(poolDesc,
+					 //CUDNN_POOLING_MAX,
+					 pool_mode,
+					 CUDNN_PROPAGATE_NAN,
+					 window_height, window_width,
+					 vertical_pad, horizontal_pad,
+					 vertical_stride, horizontal_stride));
+     
+  checkCUDNN(cudnnPoolingForward(cudnnHandle, poolDesc, &alpha, input->tensor_desc,
+				 input->gpu_data, &beta, output->tensor_desc, output->gpu_data));
+
+  profileEvent("Pool_end", true);
+
+
+  #ifdef ERROR_INJECTION_ENABLED
+
+  if(op_counter >= total_ops){
+    ERROR("No accuracy flag found \n");
+  }
+  
+  int op_acc = op_accuracies[op_counter];
+  void* error_norms = tensorAddError(output, op_acc);
+  add_norms(error_norms, "tensorPooling", op_acc);
+  add_pool_overheads(input, window_height, vertical_stride, op_acc);
+
+  op_counter++;
+  
+  #endif
+
+  
+  return output;
+}
+
+
+
+
+void* tensorGemmCPU(void* lhs_ptr, void* rhs_ptr){
+
+  INFO("*** TensorGemmCPU \n");
+
+  Tensor* lhs = (Tensor*) lhs_ptr;
+  Tensor* rhs = (Tensor*) rhs_ptr;
+  
+  // The operation is done on the CPU
+  deviceToHostCopy(lhs);
+  deviceToHostCopy(rhs);
+
+  if(lhs->data_type != CUDNN_DATA_FLOAT){
+    ERROR("Currently only Floating point is supported ");
+  }
+  
+  profileEvent("tensorGemmCPU");
+  
+  INFO("rhs->dims.num_dims = %d \n", rhs->dims.num_dims);
+  INFO("lhs->dims.num_dims = %d \n", lhs->dims.num_dims);
+
+  // FIXIT: Need to be more aware of the implications of alpha and beta
+  //float alpha = 1.0f;
+  // float beta = 0.0f;
+  // 'm' holds the batch dimension - assuming NCHW format Tensors
+  int m = lhs->dims.dim_sizes[0];
+  // The rhs must be a 2D tensor
+  int n = rhs->dims.dim_sizes[rhs->dims.num_dims-1]; // output neurons
+  int k = 1;
+  // Flattening the dimensions after the batch dimension
+  // NOTE: Allowing any number of dimensions > 2 for lhs
+  for (int j = 1 ; j < lhs->dims.num_dims; j++){
+    k = k * lhs->dims.dim_sizes[j]; // input neurons
+  }
+
+  int rhs_k = rhs->dims.dim_sizes[rhs->dims.num_dims-2];
+  // Dimension-note: Check if k is same across the two tensors
+  INFO("m = %d, n = %d, k = %d \n", m, n, k);
+  if(rhs_k != k){
+    ERROR("rhs=%d and lhs=%d columns/rows don't match", rhs_k, k);
+  }
+
+  // NOTE: Creating a 4D tensor to be compatible with later called cuDNN routines
+  Tensor* output = (Tensor*) create4DTensor(CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, m, n, 1, 1);
+  // NOTE: The GEMM is computed on the CPU, so the output tensor stays on the host
+  changeTensorPlacement(output, HOST); 
+
+  float* lhs_arr = (float*) lhs->host_data;
+  float* rhs_arr = (float*) rhs->host_data;
+  float* output_arr = (float*) output->host_data;
+  
+  for(int i = 0; i < m; i++){
+    for(int j = 0; j < n; j++){
+      float sum = 0.0;
+      for(int l = 0; l < k; l++){
+	float mul = lhs_arr[i*k+l] * rhs_arr[l*n+j];
+	sum = sum + mul;
+      }
+      output_arr[i*n+j] = sum;
+    }
+  }
+      
+   
+  profileEvent("tensorGemmCPU_end", true);
+  
+  return output;
+}
+
+
+
+// Reference: https://gist.github.com/peterwittek/6303527
+void* tensorGemmGPU(void* lhs_ptr, void* rhs_ptr ){ //, void* result_tensor){
+
+  INFO("*** TensorGemmGPU \n");
+  profileEvent("Mul");
+
+  Tensor* lhs = (Tensor*) lhs_ptr;
+  Tensor* rhs = (Tensor*) rhs_ptr;
+
+
+  INFO("rhs->dims.num_dims = %d \n", rhs->dims.num_dims);
+  INFO("lhs->dims.num_dims = %d \n", lhs->dims.num_dims);
+
+  // FIXIT: Need to be more aware of the implications of alpha and beta
+  float alpha = 1.0f, beta = 0.0f;
+  // 'm' holds the batch dimension - assuming NCHW format Tensors
+  int m = lhs->dims.dim_sizes[0];
+  // The rhs last dimension must contain the neurons
+  int n = rhs->dims.dim_sizes[rhs->dims.num_dims-1]; // output neurons
+  int k = 1;
+  
+  // Flattening the dimensions after the batch dimension
+  // NOTE: Allowing any number of dimensions > 2 for lhs
+  for (int j = 1 ; j < lhs->dims.num_dims; j++){
+    k = k * lhs->dims.dim_sizes[j]; // input neurons
+  }
+
+  int rhs_k = rhs->dims.dim_sizes[rhs->dims.num_dims-2];
+  // Dimension-note: Check if k is same across the two tensors
+  INFO("m = %d, n = %d, k = %d \n", m, n, k);
+  if(rhs_k != k){
+    ERROR("rhs=%d and lhs=%d columns/rows don't match", rhs_k, k);
+  }
+
+  Tensor* output = NULL;
+  DEBUG("Creating new TENSOR * \n");
+  output = (Tensor*) create4DTensor(CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, m, n, 1, 1);
+
+  
+  /* else{
+    DEBUG("Reusing TENSOR *\n");
+    // FIXIT: Add Assertion to check for null pointer and dimension matching
+    output = (Tensor*) result_tensor;
+    // FIXIT: output value is trashing - Is this deallocated?
+    INFO("output->num_elems = %lu \n", output->data_type);
+  }
+  */
+  
+  DEBUG("Changing placement *\n");
+  // Changing output tensor placement from host to device
+  changeTensorPlacement(output, DEVICE); 
+
+  DEBUG("Changed Placement * \n\n");
+
+  hostToDeviceCopy(lhs);
+  hostToDeviceCopy(rhs);
+
+  convertToFP32(lhs);
+  convertToFP32(rhs);
+
+  
+  DEBUG("CuBlasSgemm *\n");
+   
+  // INFO: cuBlas uses column-major format
+  // INFO: The leading dimension is just the FIRST Dimension
+  // IMP: output is N * M in column-major format, M*N in row-major - what cuDNN expects
+  checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N,
+			      n, m, k,
+			      &alpha,
+			      (float*) rhs->gpu_data, n,
+			      (float*) lhs->gpu_data, k,
+			      &beta,
+			      (float*) output->gpu_data, n));  
+
+  
+  profileEvent("Mul_end", true);
+
+
+
+  #ifdef ERROR_INJECTION_ENABLED
+
+  if(op_counter >= total_ops){
+    ERROR("No accuracy flag found \n");
+  }
+  
+  int op_acc = op_accuracies[op_counter];
+  
+  void* error_norms = tensorAddError(output, op_acc);
+  add_norms(error_norms, "tensorGemm", op_acc);
+  add_gemm_overheads(lhs_ptr, rhs_ptr, op_acc);
+
+  op_counter++;
+  
+  #endif
+ 
+  
+  return output;
+}
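+
+// Sketch of the row-major/column-major identity used in the cublasSgemm call above:
+//   row-major:    C[m x n]   = A[m x k] * B[k x n]
+//   column-major: C^T[n x m] = B^T[n x k] * A^T[k x m]
+// Passing (rhs, lhs) with leading dimensions (n, k) therefore writes the
+// row-major M x N result directly into output->gpu_data, which is what cuDNN expects.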
+
+
+
+
+
+
+
+void* tensorGemm(void* lhs_ptr, void* rhs_ptr){
+
+  INFO("*** TensorGemm \n");
+  profileEvent("tensorGemm");
+
+  Tensor* lhs = (Tensor*) lhs_ptr;
+  Tensor* rhs = (Tensor*) rhs_ptr;
+    
+  INFO("rhs->dims.num_dims = %d \n", rhs->dims.num_dims);
+  INFO("lhs->dims.num_dims = %d \n", lhs->dims.num_dims);
+
+  // FIXIT: Need to be more aware of the implications of alpha and beta
+  float alpha = 1.0f, beta = 0.0f;
+  // 'm' holds the batch dimension - assuming NCHW format Tensors
+  int m = lhs->dims.dim_sizes[0];
+  // The rhs last dimension must contain the neurons
+  int n = rhs->dims.dim_sizes[rhs->dims.num_dims-1]; // output neurons
+  int k = 1;
+  // Flattening the dimensions after the batch dimension
+  // NOTE: Allowing any number of dimensions > 2 for lhs
+  for (int j = 1 ; j < lhs->dims.num_dims; j++){
+    k = k * lhs->dims.dim_sizes[j]; // input neurons
+  }
+
+  int rhs_k = rhs->dims.dim_sizes[rhs->dims.num_dims-2];
+  // Dimension-note: Check if k is same across the two tensors
+  INFO("m = %d, n = %d, k = %d \n", m, n, k);
+  if(rhs_k != k){
+    ERROR("rhs=%d and lhs=%d columns/rows don't match", rhs_k, k);
+  }
+
+  // NOTE: Creating a 4D tensor to be compatible with later called cuDNN routines
+  Tensor* output = (Tensor*) create4DTensor(CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, m, n, 1, 1);
+  // Changing output tensor placement from host to device
+  changeTensorPlacement(output, DEVICE); 
+
+  hostToDeviceCopy(lhs);
+  hostToDeviceCopy(rhs);
+
+  // NOTE: cuBlas uses column-major format
+  // NOTE: The leading dimension is the FIRST Dimension
+  // NOTE: The output is N * M in column-major format, M*N in row-major - what cuDNN expects
+  checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N,
+			      n, m, k,
+			      &alpha,
+			      (float*) rhs->gpu_data, k,
+			      (float*) lhs->gpu_data, k,
+			      &beta,
+			      (float*) output->gpu_data, n));
+  
+  profileEvent("tensorGemm_end", true);
+  
+  return output;
+}
+
+
+
+
+// FIXIT: Add dimension check assertions throughout the code
+void* tensorGemmBias(void* input_ptr, void* bias_ptr){
+
+  INFO("*** TensorGemmBias \n");
+  profileEvent("tensorGemmBias");
+
+  Tensor* input = (Tensor*) input_ptr;
+  Tensor* bias = (Tensor*) bias_ptr;  
+
+  // NOTE: beta is set to 1 to append to input
+  // C = A * B + Beta * C
+  float alpha = 1.0f, beta = 1.0f;
+  // 'm' holds the batch dimension - assuming NCHW format Tensors
+  int m = input->dims.dim_sizes[0];
+  // The bias must be a 2D tensor
+  int n = bias->dims.dim_sizes[bias->dims.num_dims - 1]; // output neurons
+
+  INFO("m = %d, n = %d \n", m, n);
+  
+  hostToDeviceCopy(input);
+  hostToDeviceCopy(bias);
+
+  struct Tensor* onevec = (Tensor*) create2DTensor(CUDNN_DATA_FLOAT, m, 1);
+  fillOnes(onevec);
+  hostToDeviceCopy(onevec);
+  
+  // NOTE: cuBlas uses column-major format
+  // NOTE: The leading dimension is just the FIRST Dimension
+  checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N,
+			      n, m, 1,
+			      &alpha,
+			      (float*) bias->gpu_data, n,
+			      (float*) onevec->gpu_data, 1,
+ 			      &beta,
+			      (float*) input->gpu_data, n));
+
+  profileEvent("tensorGemmBias_end", true);
+  
+  return input;
+}
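+
+// The ones-vector trick above broadcasts a (1 x n) bias across m rows:
+// with onevec of shape (m x 1), onevec * bias is the rank-1 (m x n) matrix
+// whose every row equals bias, and beta = 1 accumulates it into the input.
+// Illustrative numbers: m = 2, bias = [b0 b1] gives
+//   [1; 1] * [b0 b1] = [b0 b1; b0 b1]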
+
+
+void* tensorRelu(void* input_ptr){
+
+  INFO("*** TensorRelu \n");
+  profileEvent("Relu");
+
+  Tensor* input = (Tensor*) input_ptr;
+  
+  cudnnActivationDescriptor_t reluDesc;
+  float alpha = 1.0f, beta = 0.0f;
+
+  hostToDeviceCopy(input);
+
+  convertToFP32(input);
+  
+  
+  checkCUDNN(cudnnCreateActivationDescriptor(&reluDesc));
+
+  checkCUDNN(cudnnSetActivationDescriptor(reluDesc, CUDNN_ACTIVATION_RELU,
+					  CUDNN_PROPAGATE_NAN, 0.0));
+
+  checkCUDNN(cudnnActivationForward(cudnnHandle, reluDesc, &alpha,
+				    input->tensor_desc, input->gpu_data, &beta,
+				    input->tensor_desc, input->gpu_data));
+
+  profileEvent("Relu_end", true);
+
+
+  #ifdef ERROR_INJECTION_ENABLED
+  
+  if(op_counter >= total_ops){
+    ERROR("No accuracy flag found \n");
+  }
+  
+  int op_acc = op_accuracies[op_counter];
+    
+  void* error_norms = tensorAddError(input, op_acc);
+  add_norms(error_norms, "tensorRelu", op_acc);
+  add_relu_overheads(input, op_acc);
+  op_counter++;  
+  #endif
+  
+
+  return input;
+}
+
+
+// Think: Should Softmax be broken into multiple IR operations?
+void* tensorSoftmax(void* input_ptr){
+
+  INFO("*** TensorSoftmax \n");
+  profileEvent("Softmax");
+
+  Tensor* input = (Tensor*) input_ptr;
+  float alpha = 1.0f, beta = 0.0f;
+
+  hostToDeviceCopy(input);
+  convertToFP32(input); 
+ 
+    
+  // IMP: CUDNN_SOFTMAX_ACCURATE can be replaced with the less accurate CUDNN_SOFTMAX_FAST
+  checkCUDNN(cudnnSoftmaxForward(cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
+				 &alpha, input->tensor_desc, input->gpu_data, &beta,
+				 input->tensor_desc, input->gpu_data));
+
+  deviceToHostCopy(input);  
+  profileEvent("Softmax_end", true);
+  
+  return input;
+}
+
+
+
+__global__ void clipValues(float* A, float min, float max, int n){
+
+  int id = blockIdx.x * blockDim.x + threadIdx.x;
+
+  if(id < n){
+    A[id] = fmaxf(min, A[id]);
+    A[id] = fminf(max, A[id]);
+  }
+}
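+
+// Example launch (sketch - 'data' and 'n' are illustrative names):
+//   int threads = 128;
+//   int blocks  = (n + threads - 1) / threads;
+//   clipValues<<<blocks, threads>>>(data, 0.0f, 6.0f, n);  // clamp values to [0, 6]
+//   cudaDeviceSynchronize();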
+
+
+
+void* tensorRelu2(void* input_ptr, float min, float max){
+
+  INFO("*** TensorClippedRelu *** \n");
+  profileEvent("Relu");
+
+  cudnnActivationDescriptor_t reluDesc;
+  float alpha = 1.0f, beta = 0.0f;
+  
+  Tensor* input = (Tensor*) input_ptr;
+
+  hostToDeviceCopy(input);
+
+  convertToFP32(input);
+  
+
+  checkCUDNN(cudnnCreateActivationDescriptor(&reluDesc));
+
+  // NOTE: CUDNN_ACTIVATION_CLIPPED_RELU clips to [0, max]; the 'min' argument is unused here
+  checkCUDNN(cudnnSetActivationDescriptor(reluDesc, CUDNN_ACTIVATION_CLIPPED_RELU,
+					  CUDNN_PROPAGATE_NAN, max));
+
+  checkCUDNN(cudnnActivationForward(cudnnHandle, reluDesc, &alpha,
+				    input->tensor_desc, input->gpu_data, &beta,
+				    input->tensor_desc, input->gpu_data));
+
+  
+  
+  profileEvent("Relu_end", true);
+
+
+  #ifdef ERROR_INJECTION_ENABLED
+  
+  if(op_counter >= total_ops){
+    ERROR("No accuracy flag found \n");
+  }
+  
+  int op_acc = op_accuracies[op_counter];
+  void* error_norms = tensorAddError(input, op_acc);
+  add_norms(error_norms, "tensorClippedRelu", op_acc);
+  add_relu_overheads(input, op_acc);
+  op_counter++;  
+  #endif
+  
+
+  return input;
+}
+
+
+void* tensorTanh(void* input_ptr){
+
+  INFO("*** TensorTanh \n");
+  profileEvent("Tanh");
+
+  Tensor* input = (Tensor*) input_ptr;
+  
+  cudnnActivationDescriptor_t tanhDesc;
+  float alpha = 1.0f, beta = 0.0f;
+
+  hostToDeviceCopy(input);
+
+  convertToFP32(input);
+
+  
+  checkCUDNN(cudnnCreateActivationDescriptor(&tanhDesc));
+
+  checkCUDNN(cudnnSetActivationDescriptor(tanhDesc, CUDNN_ACTIVATION_TANH,
+					  CUDNN_PROPAGATE_NAN, 0.0));
+
+  checkCUDNN(cudnnActivationForward(cudnnHandle, tanhDesc, &alpha,
+				    input->tensor_desc, input->gpu_data, &beta,
+				    input->tensor_desc, input->gpu_data));
+
+  profileEvent("Tanh_end", true);
+
+
+  #ifdef ERROR_INJECTION_ENABLED
+  
+  if(op_counter >= total_ops){
+    ERROR("No accuracy flag found \n");
+  }
+  
+  int op_acc = op_accuracies[op_counter];
+  void* error_norms = tensorAddError(input, op_acc);
+  add_norms(error_norms, "tensorTanh", op_acc);
+  add_relu_overheads(input, op_acc);
+  op_counter++;  
+  #endif
+  
+
+  return input;
+}
+
+
+
+
+void* tensorBatchNorm(void* input_ptr, void* gamma_ptr, void* beta_ptr,
+		      void* mean_ptr, void* variance_ptr, double epsilon){
+
+  INFO("*** TensorBatchNorm \n");
+  profileEvent("BatchNorm");
+
+  Tensor* input = (Tensor*) input_ptr;
+  Tensor* gamma = (Tensor*) gamma_ptr;
+  Tensor* beta = (Tensor*) beta_ptr;
+  Tensor* mean = (Tensor*) mean_ptr;
+  Tensor* variance = (Tensor*) variance_ptr;
+
+  if (input == NULL || gamma == NULL || beta == NULL || mean == NULL || variance == NULL){
+    ERROR("NULL Input Tensor");
+  }
+  
+  float alpha_val = 1.0f, beta_val = 0.0f;
+  hostToDeviceCopy(input);
+  hostToDeviceCopy(gamma);
+  hostToDeviceCopy(beta);
+  hostToDeviceCopy(mean);
+  hostToDeviceCopy(variance);
+
+  convertToFP32(input);
+
+ 
+  
+  checkCUDNN(cudnnBatchNormalizationForwardInference(cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
+						     &alpha_val, &beta_val,
+						     input->tensor_desc, input->gpu_data,
+						     input->tensor_desc, input->gpu_data,
+						     gamma->tensor_desc, gamma->gpu_data,
+						     beta->gpu_data, mean->gpu_data,
+						     variance->gpu_data,
+						     epsilon));
+
+  profileEvent("BatchNorm_end", true);
+
+
+  #ifdef ERROR_INJECTION_ENABLED
+  
+  if(op_counter >= total_ops){
+    ERROR("No accuracy flag found \n");
+  }
+  
+  int op_acc = op_accuracies[op_counter];
+  //op_acc = 0;  
+  void* error_norms = tensorAddError(input, op_acc);
+  add_norms(error_norms, "tensorBatchNorm", op_acc);
+  add_relu_overheads(input, op_acc);
+  op_counter++;  
+  #endif
+  
+
+  return input;
+}
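+
+// The inference-mode transform applied above is, per channel c:
+//   y = gamma[c] * (x - mean[c]) / sqrt(variance[c] + epsilon) + beta[c]
+// with mean/variance taken from the stored (training-time) running statistics.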
+
+
+
+
+/************* GPU Layer API  *************/
+
+void* ConvLayer_GPU(void* input, 
+		    void* filter, 
+		    void* bias, 
+		    int conv_pad_h, int conv_pad_w, int conv_stride_h, int conv_stride_w,
+		    int pool_id, int pool_size,
+		    int activation_id, // Relu, Tanh, ClipRelu
+		    float out_min, float out_max){ // NOTE: min_val, max_val apply to 'ClippedRelu'
+
+  void* conv_out = tensorConvolution(input, filter,
+				     conv_pad_h, conv_pad_w,
+				     conv_stride_h, conv_stride_w,
+				     1, 0);
+  void* conv_add;
+  if(bias != NULL){
+    conv_add = tensorAdd(conv_out, bias);
+  }
+  else{
+    conv_add = conv_out;
+  }
+
+  void* activation_out;  
+  switch(activation_id){
+  case -1:
+    activation_out = conv_add;
+    INFO("NO Activation Function \n");
+    break;
+  case 0:
+    activation_out = tensorTanh(conv_add);
+    break;
+  case 1:
+    activation_out = tensorRelu(conv_add);
+    break;
+  case 2:
+    activation_out = tensorRelu2(conv_add, out_min, out_max);
+    break;
+  default:
+    ERROR("Activation id %d NOT supported \n", activation_out);
+    break;
+  }
+
+
+  void* pool_out = activation_out;
+  // NOTE: Skip pooling on non-positive pool sizes
+  if(pool_size > 0){
+    //FIXME: Currently only using MaxPooling
+    pool_out = tensorPooling(activation_out, 0, pool_size, pool_size, 0, 0, pool_size, pool_size);
+  }
+
+  return pool_out;
+}
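+
+// Example (sketch - tensors and shapes are illustrative): a conv + bias + ReLU
+// + 2x2 max-pool block, the common ResNet/VGG building pattern:
+//   void* out = ConvLayer_GPU(input, filter, bias,
+//                             /*pad=*/1, 1, /*stride=*/1, 1,
+//                             /*pool_id=*/0, /*pool_size=*/2,
+//                             /*activation_id=*/1 /*ReLU*/, 0.0f, 0.0f);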
+
+
+void* FCLayer_GPU(void* input, 
+		  void* weights, 
+		  void* bias, 
+		  int activation_id,
+		  float out_min, float out_max){ // NOTE: min_val, max_val apply to 'ClippedRelu'
+
+  void* gemm_out = tensorGemmGPU(input, weights);
+
+  void* gemmbias_out;
+  if(bias != NULL){
+    gemmbias_out = tensorAdd(gemm_out, bias);
+  }
+  else{
+    gemmbias_out = gemm_out;
+  }
+ 
+  void* activation_out;
+  switch(activation_id){
+
+  case -1:
+    activation_out = gemmbias_out;
+    INFO("No Activation Function \n");
+    break;
+  case 0:
+    activation_out = tensorTanh(gemmbias_out);
+    break;
+  case 1:
+    activation_out = tensorRelu(gemmbias_out);
+    break;
+  case 2:
+    activation_out = tensorRelu2(gemmbias_out, out_min, out_max);
+    break;
+  default:
+    ERROR("Activation id %d NOT supported \n", activation_out);
+    break;
+  }
+   
+  return activation_out;
+}
+
+
+/*********** PROMISE API **************/
+
+/*
+void* ConvLayer_PROMISE(void* input, float i_min, float i_max,
+			void* filter, float w_min, float w_max,
+			void* bias, float b_min, float b_max,
+			int conv_pad_h, int conv_pad_w, int conv_stride_h, int conv_stride_w,
+			int pool_id, int pool_size,
+			int activation_id, // Relu, Tanh, ClipRelu
+			float out_min, float out_max, int swing){ 
+
+  
+  #ifdef PROMISE_TUNER_ENABLED
+
+  // NOTE: Skip reading file-based error levels for ApproxHPVM wrapper runtime
+  if(!approxhpvm_runtime_mode){
+  
+    if(op_counter >= total_ops){
+      ERROR("No accuracy flag found \n");
+    }
+  
+    swing = op_accuracies[op_counter];
+    op_counter++;
+  }
+  
+  #endif  
+
+  
+  if (swing < 0 || swing > 20){
+    ERROR("Incorrect swing value");
+  }
+
+  
+
+  if(swing < 8){
+    input = quantizeTensorPromise(input, i_min, i_max);
+    filter = quantizeTensorPromise(filter, w_min, w_max);
+    if(bias != NULL)
+      bias = quantizeTensorPromise(bias, b_min, b_max);
+    // aRead error
+    
+    input = addPromiseError(input, swing);
+  }
+
+  
+  void* conv_out;
+  if(swing == 8 || (swing >= 12 && swing <= 15) ){
+    //conv_out = tensorConvPerf(input, filter, conv_pad_h, conv_pad_w,
+    //		              conv_stride_h, conv_stride_w, 1, 1, 1, 0);
+
+    int rows = 2;
+    switch(swing){
+
+    case 12: rows = 5; break;
+    case 13: rows = 4; break;
+    case 14: rows = 3; break;
+    case 15: rows = 2; break;    
+		   
+    default: rows = 2; break;
+    }
+    
+    conv_out = tensorConvPerf2(input, filter, conv_pad_h, conv_pad_w,
+    		              conv_stride_h, conv_stride_w, 1, 1, rows, 0);
+
+    /*void* gold = tensorConvolution(input, filter,
+				   conv_pad_h, conv_pad_w,
+				   conv_stride_h, conv_stride_w,
+				   1, 0);
+
+    Norm_t* norms = calculateNormsTreeReduction((struct Tensor*) conv_out, (struct Tensor*) gold);
+
+    DEBUG("\n-------- l2_norm = %f \n", norms->l2_norm); 
+    */
+
+
+
+  /* -----
+  }
+  else if(swing == 9 || (swing >= 16 && swing <= 19) ){
+    //conv_out = tensorConvPerf(input, filter, conv_pad_h, conv_pad_w,
+    //		              conv_stride_h, conv_stride_w, 1, 1, 0, 1);
+
+
+    int cols = 2;
+    switch(swing){
+
+    case 16: cols = 5; break;
+    case 17: cols = 4; break;
+    case 18: cols = 3; break;
+    case 19: cols = 2; break;    
+		   
+    default: cols = 2; break;
+    }
+
+    
+    conv_out = tensorConvPerf2(input, filter, conv_pad_h, conv_pad_w,
+    		              conv_stride_h, conv_stride_w, 1, 1, 0, cols);
+
+
+    /*void* gold = tensorConvolution(input, filter,
+				   conv_pad_h, conv_pad_w,
+				   conv_stride_h, conv_stride_w,
+				   1, 0);
+
+    Norm_t* norms = calculateNormsTreeReduction((struct Tensor*)conv_out, (struct Tensor*) gold);
+
+    DEBUG("\n-------- l2_norm = %f \n", norms->l2_norm); 
+    */
+
+  /*------
+  }
+  else if(swing == 10){  
+    conv_out = tensorHalfConvolution(input, filter,
+				     conv_pad_h, conv_pad_w,
+				     conv_stride_h, conv_stride_w,
+				     1, 0);
+  }
+  else{
+    conv_out = tensorConvolution(input, filter,
+				 conv_pad_h, conv_pad_w,
+				 conv_stride_h, conv_stride_w,
+				 1, 0);
+  }
+  
+  void* conv_add;
+  if(bias != NULL){
+    if(swing >= 8){  
+      conv_add = tensorHalfAdd(conv_out, bias);
+    }
+    else{
+      conv_add = tensorAdd(conv_out, bias);
+    }
+  }
+  else{
+    conv_add = conv_out;
+  }
+
+  void* pool_out;
+  // NOTE: Skip pooling on negative pool sizes
+  if(pool_size > 0){
+    //FIXME: Currently only using MaxPooling
+    pool_out = tensorHalfPooling(conv_add, 0, pool_size, pool_size, 0, 0, pool_size, pool_size);
+  }
+  else{
+    pool_out = conv_add;
+  }
+  
+  void* activation_out;  
+  switch(activation_id){
+  case -1:
+    activation_out = pool_out;
+    INFO("NO Activation Function \n");
+    break;
+  case 0:
+    activation_out = tensorHalfTanh(pool_out);
+    break;
+  case 1:
+    activation_out = tensorHalfRelu(pool_out);
+    break;
+  case 2:
+    activation_out = tensorHalfRelu2(pool_out, out_min, out_max);
+    break;
+  default:
+    ERROR("Activation id %d NOT supported \n", activation_out);
+    break;
+  }
+
+
+  if(swing < 8 && activation_id != -1){
+    activation_out = quantizeTensorPromise(activation_out, out_min, out_max);
+  }
+  
+  return activation_out;
+}
+
+
+void* FCLayer_PROMISE(void* input, float i_min, float i_max,
+		      void* weights, float w_min, float w_max,
+		      void* bias, float b_min, float b_max,
+		      int activation_id,
+		      float out_min, float out_max, int swing){ //NOTE: min_val, max_val apply to 'ClippedRelu'
+
+
+  
+  #ifdef PROMISE_TUNER_ENABLED
+
+  // NOTE: Skip reading file-based error levels for ApproxHPVM wrapper runtime
+  if(!approxhpvm_runtime_mode){
+
+    if(op_counter >= total_ops){
+      ERROR("No accuracy flag found \n");
+    }
+  
+    swing = op_accuracies[op_counter];
+    op_counter++;
+  }
+  
+  #endif
+ 
+  
+  if (swing < 0 || swing > 20){
+    ERROR("Incorrect swing value");
+  }
+  
+  if(swing < 8){
+    input = quantizeTensorPromise(input, i_min, i_max);
+    weights = quantizeTensorPromise(weights, w_min, w_max);
+    if(bias != NULL)
+      bias = quantizeTensorPromise(bias, b_min, b_max);
+
+    // NOTE: Modelling aRead error in PROMISE
+    input = addPromiseError(input, swing);
+  }
+
+
+  
+  void* gemm_out;
+  if(swing >= 8 && swing < 11){
+    gemm_out = tensorHalfGemm(input, weights);
+  }
+  else{
+    gemm_out = tensorGemmGPU(input, weights);
+  }
+
+  
+  void* gemmbias_out;
+  if(bias != NULL){
+    // Swing 8 corresponds to FP32
+    if(swing >= 8 && swing < 20){
+      gemmbias_out = tensorHalfAdd(gemm_out, bias);
+    }
+    else{
+      gemmbias_out = tensorAdd(gemm_out, bias);
+    }
+  }
+  else{
+    gemmbias_out = gemm_out;
+  }
+ 
+  void* activation_out;
+  switch(activation_id){
+
+  case -1:
+    activation_out = gemmbias_out;
+    INFO("No Activation Function \n");
+    break;
+  case 0:
+    activation_out = tensorTanh(gemmbias_out);
+    break;
+  case 1:
+    activation_out = tensorRelu(gemmbias_out);
+    break;
+  case 2:
+    activation_out = tensorRelu2(gemmbias_out, out_min, out_max);
+    break;
+  default:
+    ERROR("Activation id %d NOT supported \n", activation_out);
+    break;
+  }
+  
+  
+  if(swing < 8 && activation_id != -1){
+    activation_out = quantizeTensorPromise(activation_out, out_min, out_max);
+  }
+  
+  return activation_out;
+}
+
+*****/
+
+
+
+/**** Wrapper Runtime API ***/
+  
+void* wrapper_ConvLayer(const char* hpvm_node_id,
+			void* input, 
+		        void* filter, 
+		        void* bias, 
+		        int conv_pad_h, int conv_pad_w,
+		        int conv_stride_h, int conv_stride_w,
+		        int pool_id, int pool_size,
+		        int activation_id,
+		        // NOTE: out_min, out_max are only relevant for ClippedRelu
+		        float out_min, float out_max){
+
+  NodeConfiguration *NodeConf = RC->getNodeConfiguration(hpvm_node_id);
+
+  if (NodeConf->isPROMISENodeConfiguration()) {
+    DEBUG("PROMISE Configuration for ConvLayer\n");
+    // Mapped to PROMISE - get a PROMISE node configuration
+    PROMISENodeConfiguration *PROMISEConf = (PROMISENodeConfiguration *)NodeConf;
+    std::vector<float> &QRanges = RC->getQuantizationRanges(hpvm_node_id);
+
+    std::vector<std::pair<PROMISENodeConfiguration::APPROX, int> > &approxTuples =
+      PROMISEConf->getApproxChoices();
+
+    if (approxTuples.size() == 1) {
+      enum PROMISENodeConfiguration::APPROX approx = approxTuples[0].first;
+      int param = approxTuples[0].second;
+      if (approx == PROMISENodeConfiguration::APPROX::SWING_LEVEL) {
+        DEBUG("Approximation choice for ConvLayer: swing level %d\n", param);
+
+        struct Tensor* input_tensor_cast = (struct Tensor*) input;
+        struct Tensor* filter_tensor_cast = (struct Tensor*) filter;
+        std::pair<double, double> pinfo =
+          RC->conv_profile(input_tensor_cast->dims.dim_sizes[0], //n
+                           input_tensor_cast->dims.dim_sizes[1], //c
+                           input_tensor_cast->dims.dim_sizes[2], //h
+                           input_tensor_cast->dims.dim_sizes[3], //w
+                           filter_tensor_cast->dims.dim_sizes[0], //c_out
+                           filter_tensor_cast->dims.dim_sizes[1], //c_in
+                           filter_tensor_cast->dims.dim_sizes[2], //k_h
+                           filter_tensor_cast->dims.dim_sizes[3], //k_w
+                           conv_stride_h, //s_h
+                           conv_stride_w, //s_w
+                           param, //voltage_swing
+                           filter_tensor_cast->dims.dim_sizes[2] *
+                             filter_tensor_cast->dims.dim_sizes[3] /*patch_factor: k_h*k_w*/);
+        RC->addToCurrentIterationComputeTime("ConvLayer_PROMISE", pinfo.first);
+        RC->addToCurrentIterationComputeEnergy("ConvLayer_PROMISE", pinfo.second);
+        void* t_out;
+        t_out = PROMISE_Conv(input, QRanges[0], QRanges[1],
+                            filter, QRanges[2], QRanges[3],
+                            bias, QRanges[4], QRanges[5],
+                            conv_pad_h, conv_pad_w,
+                            conv_stride_h, conv_stride_w,
+                            pool_id, pool_size,
+                            activation_id,
+                            QRanges[6], QRanges[7], param);
+
+        return t_out;
+      } else {
+        CUSTOM_ASSERT(false && "Unknown approximation type");
+        ERROR("Unknown approximation type");
+        abort();
+      }
+      // TODO additional approx methods implemented here
+
+    } else if (approxTuples.size() == 2) {
+      ERROR("Currently unsupported case");
+      abort();
+    } else {
+      ERROR("Unsupported case");
+      abort();
+    }
+  }
+  else if (NodeConf->isGPUNodeConfiguration()) {
+    DEBUG("GPU Configuration for ConvLayer\n");
+    // Mapped to GPU - get a GPU node configuration
+    GPUNodeConfiguration *GPUConf = (GPUNodeConfiguration *)NodeConf;
+
+    std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
+                            std::vector< std::pair<GPUNodeConfiguration::APPROX,
+                                                   int> > > > &ApproxChoices =
+      GPUConf->getApproxChoices();
+
+    // Check for convolution as first operation
+    CUSTOM_ASSERT((ApproxChoices.size() >= 1) &&
+           (ApproxChoices[0].first == GPUNodeConfiguration::TENSOR_OP::CONV) &&
+           "Incorrect number/type of operations in provided Conv layer configuration");
+
+    void* conv_out = handleTensorConvApproximationTuples(ApproxChoices[0].second,
+                       input, filter, conv_pad_h, conv_pad_w,
+                       conv_stride_h, conv_stride_w);
+    void* add_out;
+    if (bias != NULL) {
+      // Check for add as second operation
+      CUSTOM_ASSERT((ApproxChoices.size() >= 2) &&
+             (ApproxChoices[1].first == GPUNodeConfiguration::TENSOR_OP::ADD) &&
+             "Incorrect number/type of operations in provided Conv layer configuration");
+      add_out = handleTensorAddApproximationTuples(ApproxChoices[1].second,
+                                                   conv_out, bias);
+    } else {
+      add_out = conv_out;
+    }
+
+    void* activation_out;
+    switch (activation_id) {
+      case -1:
+        { // No activation
+          INFO("No activation Function\n");
+          activation_out = add_out;
+        }
+        break;
+      case 0:
+        { // TanH activation
+          CUSTOM_ASSERT((ApproxChoices.size() >= 3) &&
+                 (ApproxChoices[2].first == GPUNodeConfiguration::TENSOR_OP::TANH) &&
+                 "Incorrect number/type of operations in provided Conv layer configuration");
+          activation_out = handleTensorTanhApproximationTuples(ApproxChoices[2].second,
+                                                               add_out);
+        }
+        break;
+      case 1:
+        { // ReLU activation
+          CUSTOM_ASSERT((ApproxChoices.size() >= 3) &&
+                 (ApproxChoices[2].first == GPUNodeConfiguration::TENSOR_OP::RELU) &&
+                 "Incorrect number/type of operations in provided Conv layer configuration");
+          activation_out = handleTensorReluApproximationTuples(ApproxChoices[2].second,
+                                                               add_out);
+        }
+        break;
+      case 2:
+        { // Clipped ReLU activation
+          CUSTOM_ASSERT((ApproxChoices.size() >= 3) &&
+                 (ApproxChoices[2].first == GPUNodeConfiguration::TENSOR_OP::CLIPPED_RELU) &&
+                 "Incorrect number/type of operations in provided Conv layer configuration");
+          activation_out =
+            handleTensorClippedReluApproximationTuples(ApproxChoices[2].second,
+                                                       add_out, out_min, out_max);
+        }
+        break;
+      default:
+        {
+          ERROR("Activation id %d NOT supported \n", activation_id);
+        }
+        break;
+    }
+
+    void* pool_out;
+
+    if (pool_size > 0) {
+      switch (pool_id) {
+        case 0:
+          {
+            // If we remove the asserts, we can have all cases handled by a single call
+            CUSTOM_ASSERT((ApproxChoices.back().first == GPUNodeConfiguration::TENSOR_OP::POOL_MAX) &&
+                  "Expected POOL_MAX in provided Conv layer configuration");
+            pool_out =
+              handleTensorPoolingApproximationTuples(ApproxChoices.back().second,
+                                                     activation_out, pool_id,
+                                                     pool_size, pool_size, 0, 0,
+                                                     pool_size, pool_size);
+          }
+          break;
+        case 1:
+          {
+            CUSTOM_ASSERT((ApproxChoices.back().first == GPUNodeConfiguration::TENSOR_OP::POOL_MEAN) &&
+                  "Expected POOL_MEAN in provided Conv layer configuration");
+            pool_out =
+              handleTensorPoolingApproximationTuples(ApproxChoices.back().second,
+                                                     activation_out, pool_id,
+                                                     pool_size, pool_size, 0, 0,
+                                                     pool_size, pool_size);
+          }
+          break;
+        case 2:
+          {
+            CUSTOM_ASSERT((ApproxChoices.back().first == GPUNodeConfiguration::TENSOR_OP::POOL_MIN) &&
+                  "Expected POOL_MIN in provided Conv layer configuration");
+            pool_out =
+              handleTensorPoolingApproximationTuples(ApproxChoices.back().second,
+                                                     activation_out, pool_id,
+                                                     pool_size, pool_size, 0, 0,
+                                                     pool_size, pool_size);
+          }
+          break;
+        default:
+          {
+            ERROR("Pool id %d NOT supported \n", pool_id);
+          }
+          break;
+      }
+    } else {
+      pool_out = activation_out;
+    }
+    return pool_out;
+  }
+  else {
+    ERROR("Unsupported Configuration");
+    abort();
+  }
+
+  return NULL;
+}
+
+
+void* wrapper_FCLayer(const char* hpvm_node_id,
+		      void* input, 
+		      void* weights, 
+		      void* bias, 
+		      int activation_id,
+		      // NOTE: out_min and out_max are only relevant for ClippedRelu
+		      float out_min, float out_max){ 
+
+  NodeConfiguration *NodeConf = RC->getNodeConfiguration(hpvm_node_id);
+
+  if (NodeConf->isPROMISENodeConfiguration()) {
+    DEBUG("PROMISE Configuration for FCLayer\n");
+    // Mapped to PROMISE - get a PROMISE node configuration
+    PROMISENodeConfiguration *PROMISEConf = (PROMISENodeConfiguration *)NodeConf;
+    std::vector<float> &QRanges = RC->getQuantizationRanges(hpvm_node_id);
+
+    std::vector<std::pair<PROMISENodeConfiguration::APPROX, int> > &approxTuples =
+      PROMISEConf->getApproxChoices();
+
+    if (approxTuples.size() == 1) {
+      enum PROMISENodeConfiguration::APPROX approx = approxTuples[0].first;
+      int param = approxTuples[0].second;
+      if (approx == PROMISENodeConfiguration::APPROX::SWING_LEVEL) {
+        DEBUG("Approximation choice for FCLayer: swing level %d\n", param);
+
+        struct Tensor* input_tensor_cast = (struct Tensor*) input;
+        struct Tensor* weights_tensor_cast = (struct Tensor*) weights;
+        CUSTOM_ASSERT((input_tensor_cast->dims.dim_sizes[1] *
+                       input_tensor_cast->dims.dim_sizes[2] *
+                       input_tensor_cast->dims.dim_sizes[3] ==
+                         weights_tensor_cast->dims.dim_sizes[2]) &&
+                      "Dimensions for matrix multiplication do not match.");
+        std::pair<double, double> pinfo =
+          RC->fc_profile(input_tensor_cast->dims.dim_sizes[0], //num_rows_a,
+                         input_tensor_cast->dims.dim_sizes[1] *
+                           input_tensor_cast->dims.dim_sizes[2] *
+                           input_tensor_cast->dims.dim_sizes[3], //num_cols_a,
+                         weights_tensor_cast->dims.dim_sizes[2], //num_rows_b,
+                         weights_tensor_cast->dims.dim_sizes[3], //num_cols_b,
+                         param, //voltage_swing,
+                         1 /*patch_factor*/);
+        RC->addToCurrentIterationComputeTime("FCLayer_PROMISE", pinfo.first);
+        RC->addToCurrentIterationComputeEnergy("FCLayer_PROMISE", pinfo.second);
+        void* t_out;
+        t_out = PROMISE_FC(input, QRanges[0], QRanges[1],
+                           weights, QRanges[2], QRanges[3],
+                           bias, QRanges[4], QRanges[5],
+                           activation_id,
+                           QRanges[6], QRanges[7], param);
+        return t_out;
+      } else {
+        CUSTOM_ASSERT(false && "Unknown approximation type");
+        ERROR("Unknown approximation type");
+        abort();
+      }
+      // TODO additional approx methods implemented here
+
+    } else if (approxTuples.size() == 2) {
+      ERROR("Currently unsupported case");
+      abort();
+    } else {
+      ERROR("Unsupported case");
+      abort();
+    }
+  }
+  else if (NodeConf->isGPUNodeConfiguration()) {
+    DEBUG("GPU Configuration for FCLayer\n");
+    // Mapped to GPU - get a GPU node configuration
+    GPUNodeConfiguration *GPUConf = (GPUNodeConfiguration *)NodeConf;
+
+    std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
+                            std::vector< std::pair<GPUNodeConfiguration::APPROX,
+                                                   int> > > > &ApproxChoices =
+      GPUConf->getApproxChoices();
+
+    // Approximation choices must be for a FC wrapper operation
+    CUSTOM_ASSERT((ApproxChoices.size() == 2 || ApproxChoices.size() == 3) &&
+         ApproxChoices[0].first == GPUNodeConfiguration::TENSOR_OP::MUL &&
+         ApproxChoices[1].first == GPUNodeConfiguration::TENSOR_OP::ADD &&
+         "Invalid configuration generated for FC layer wrapper operation");
+
+    void* gemm_out = handleTensorMulApproximationTuples(ApproxChoices[0].second,
+                                                        input, weights);
+    void* add_out = handleTensorAddApproximationTuples(ApproxChoices[1].second,
+                                                        gemm_out, bias);
+
+    void* activation_out;
+    switch (activation_id) {
+      case -1:
+        { // No activation
+          CUSTOM_ASSERT((ApproxChoices.size() == 2) &&
+                 "Incorrect number of operations in provided FC layer configuration");
+          INFO("No activation Function\n");
+          activation_out = add_out;
+        }
+        break;
+      case 0:
+        { // TanH activation
+          CUSTOM_ASSERT((ApproxChoices.size() == 3) &&
+                 (ApproxChoices[2].first == GPUNodeConfiguration::TENSOR_OP::TANH) &&
+                 "Incorrect number/type of operations in provided FC layer configuration");
+          activation_out = handleTensorTanhApproximationTuples(ApproxChoices[2].second,
+                                                               add_out);
+        }
+        break;
+      case 1:
+        { // ReLU activation
+          CUSTOM_ASSERT((ApproxChoices.size() == 3) &&
+                 (ApproxChoices[2].first == GPUNodeConfiguration::TENSOR_OP::RELU) &&
+                 "Incorrect number/type of operations in provided FC layer configuration");
+          activation_out = handleTensorReluApproximationTuples(ApproxChoices[2].second,
+                                                               add_out);
+        }
+        break;
+      case 2:
+        { // Clipped ReLU activation
+          CUSTOM_ASSERT((ApproxChoices.size() == 3) &&
+                 (ApproxChoices[2].first == GPUNodeConfiguration::TENSOR_OP::CLIPPED_RELU) &&
+                 "Incorrect number/type of operations in provided FC layer configuration");
+          activation_out =
+            handleTensorClippedReluApproximationTuples(ApproxChoices[2].second,
+                                                       add_out, out_min, out_max);
+        }
+        break;
+      default:
+        {
+          ERROR("Activation id %d NOT supported \n", activation_id);
+          abort();
+        }
+        break;
+    }
+    return activation_out;
+  }
+  else {
+    ERROR("Unsupported Configuration");
+    abort();
+  }
+
+  return NULL;
+}
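+
+// Illustrative sketch (not part of the runtime): for an FC layer with a
+// ReLU activation, the parsed approximation choices are expected to take
+// roughly the following shape. The knob pair {APPROX::FP32, 0} is shown
+// only for illustration and stands in for whatever the configuration file
+// actually specifies:
+//
+//   ApproxChoices = {
+//     { GPUNodeConfiguration::TENSOR_OP::MUL,  { {GPUNodeConfiguration::APPROX::FP32, 0} } },
+//     { GPUNodeConfiguration::TENSOR_OP::ADD,  { {GPUNodeConfiguration::APPROX::FP32, 0} } },
+//     { GPUNodeConfiguration::TENSOR_OP::RELU, { {GPUNodeConfiguration::APPROX::FP32, 0} } },
+//   };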
+
+
+
+
+void* wrapper_tensorRelu(const char* hpvm_node_id, void* input_ptr){
+//  return tensorRelu(input_ptr);
+
+  // Only mapped to GPU - get a GPU configuration
+  GPUNodeConfiguration *GPUConf =
+    (GPUNodeConfiguration *)RC->getNodeConfiguration(hpvm_node_id);
+
+  std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
+                          std::vector< std::pair<GPUNodeConfiguration::APPROX,
+                                                 int> > > > &ApproxChoices =
+    GPUConf->getApproxChoices();
+
+  // Approximation choices must be for a relu operation
+  CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
+         ApproxChoices[0].first == GPUNodeConfiguration::TENSOR_OP::RELU &&
+         "Invalid configuration generated for tensor relu wrapper operation");
+
+  return handleTensorReluApproximationTuples(ApproxChoices[0].second,
+                                             input_ptr);
+
+}
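+
+// Usage sketch (hypothetical node id, for illustration only): the HPVM
+// compiler emits calls of this form, where "relu_node" must name a node
+// present in the configuration loaded by the runtime controller:
+//
+//   void* out = wrapper_tensorRelu("relu_node", input_tensor);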
+
+void* wrapper_tensorClippedRelu(const char* hpvm_node_id,
+                                void* input_ptr,
+                                float out_min, float out_max){
+  // Only mapped to GPU - get a GPU configuration
+  GPUNodeConfiguration *GPUConf =
+    (GPUNodeConfiguration *)RC->getNodeConfiguration(hpvm_node_id);
+
+  std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
+                          std::vector< std::pair<GPUNodeConfiguration::APPROX,
+                                                 int> > > > &ApproxChoices =
+    GPUConf->getApproxChoices();
+
+  // Approximation choices must be for a relu operation
+  CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
+         ApproxChoices[0].first == GPUNodeConfiguration::TENSOR_OP::CLIPPED_RELU &&
+         "Invalid configuration generated for tensor clipped relu wrapper operation");
+
+  return handleTensorClippedReluApproximationTuples(ApproxChoices[0].second,
+                                                    input_ptr, out_min, out_max);
+
+}
+
+void* wrapper_tensorTanh(const char* hpvm_node_id, void* input_ptr){
+//  return tensorTanh(input_ptr);
+
+  GPUNodeConfiguration *GPUConf =
+    (GPUNodeConfiguration *)RC->getNodeConfiguration(hpvm_node_id);
+
+  std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
+                          std::vector< std::pair<GPUNodeConfiguration::APPROX,
+                                                 int> > > > &ApproxChoices =
+    GPUConf->getApproxChoices();
+
+  // Approximation choices must be for a tanh operation
+  CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
+         ApproxChoices[0].first == GPUNodeConfiguration::TENSOR_OP::TANH &&
+         "Invalid configuration generated for tensor tanh wrapper operation");
+
+  return handleTensorTanhApproximationTuples(ApproxChoices[0].second,
+                                             input_ptr);
+
+}
+
+
+void* wrapper_tensorBatchNorm(const char* hpvm_node_id,
+			      void* input_ptr, void* gamma_ptr, void* beta_ptr,
+			      void* mean_ptr, void* variance_ptr, double epsilon){
+//  return tensorBatchNorm(input_ptr, gamma_ptr, beta_ptr, mean_ptr, variance_ptr, epsilon);
+
+  // Only mapped to GPU - get a GPU configuration
+  GPUNodeConfiguration *GPUConf =
+    (GPUNodeConfiguration *)RC->getNodeConfiguration(hpvm_node_id);
+
+  std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
+                          std::vector< std::pair<GPUNodeConfiguration::APPROX,
+                                                 int> > > > &ApproxChoices =
+    GPUConf->getApproxChoices();
+
+  // Approximation choices must be for a batchnorm operation
+  CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
+         ApproxChoices[0].first == GPUNodeConfiguration::TENSOR_OP::BATCHNORM &&
+         "Invalid configuration generated for tensor batchnorm wrapper operation");
+
+  return handleTensorBatchNormApproximationTuples(ApproxChoices[0].second,
+                                                  input_ptr, gamma_ptr, beta_ptr,
+                                                  mean_ptr, variance_ptr, epsilon);
+
+}
+
+
+void* wrapper_tensorAdd(const char* hpvm_node_id, void* input_ptr, void* bias_ptr){
+//  return tensorAdd(input_ptr, bias_ptr);
+
+  // Only mapped to GPU - get a GPU configuration
+  GPUNodeConfiguration *GPUConf =
+    (GPUNodeConfiguration *)RC->getNodeConfiguration(hpvm_node_id);
+
+  std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
+                          std::vector< std::pair<GPUNodeConfiguration::APPROX,
+                                                 int> > > > &ApproxChoices =
+    GPUConf->getApproxChoices();
+
+  // Approximation choices must be for an add operation
+  CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
+         ApproxChoices[0].first == GPUNodeConfiguration::TENSOR_OP::ADD &&
+         "Invalid configuration generated for tensor add wrapper operation");
+
+  return handleTensorAddApproximationTuples(ApproxChoices[0].second,
+                                            input_ptr, bias_ptr);
+
+}
+
+
+void* wrapper_tensorPooling(const char* hpvm_node_id,
+			    void* input_ptr,
+			    int poolFunction,
+			    int window_height, int window_width,
+			    int vertical_pad, int horizontal_pad,
+			    int vertical_stride, int horizontal_stride){
+
+//  return tensorPooling(input_ptr, poolFunction, window_height, window_width,
+//		       vertical_pad, horizontal_pad, vertical_stride, horizontal_stride);
+
+  // Only mapped to GPU - get a GPU configuration
+  GPUNodeConfiguration *GPUConf =
+    (GPUNodeConfiguration *)RC->getNodeConfiguration(hpvm_node_id);
+
+  std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
+                          std::vector< std::pair<GPUNodeConfiguration::APPROX,
+                                                 int> > > > &ApproxChoices =
+    GPUConf->getApproxChoices();
+
+  // Approximation choices must be for a single operation
+  CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
+                "Invalid configuration generated for tensor pool wrapper operation");
+  enum GPUNodeConfiguration::TENSOR_OP top = ApproxChoices[0].first;
+  // Approximation choices must be for a pool operation
+  CUSTOM_ASSERT((top == GPUNodeConfiguration::TENSOR_OP::POOL_MAX  ||
+                 top == GPUNodeConfiguration::TENSOR_OP::POOL_MEAN ||
+                 top == GPUNodeConfiguration::TENSOR_OP::POOL_MIN) &&
+         "Invalid configuration generated for tensor pool wrapper operation");
+
+  return handleTensorPoolingApproximationTuples(ApproxChoices[0].second,
+                                                input_ptr, poolFunction,
+                                                window_height, window_width,
+                                                vertical_pad, horizontal_pad,
+                                                vertical_stride, horizontal_stride);
+
+}
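+
+// Note: the TENSOR_OP check above only validates the configuration; the
+// pooling mode actually executed is selected by the poolFunction argument
+// (0 is max pooling, as used by the legacy ConvLayerWrapper).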
+
+
+void* wrapper_tensorGroupConvolution(const char* hpvm_node_id,
+                                     void* input, void* filter,
+                                     int vertical_pad, int horizontal_pad,
+                                     int vertical_stride, int horizontal_stride,
+                                     int conv_mode, int conv_groups){
+  // Only mapped to GPU - get a GPU configuration
+  GPUNodeConfiguration *GPUConf =
+    (GPUNodeConfiguration *)RC->getNodeConfiguration(hpvm_node_id);
+
+  std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
+                          std::vector< std::pair<GPUNodeConfiguration::APPROX,
+                                                 int> > > > &ApproxChoices =
+    GPUConf->getApproxChoices();
+
+  // Approximation choices must be for a group_conv operation
+  CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
+         ApproxChoices[0].first == GPUNodeConfiguration::TENSOR_OP::GROUP_CONV &&
+         "Invalid configuration generated for tensor group_conv wrapper operation");
+
+  return handleTensorGroupConvApproximationTuples(ApproxChoices[0].second,
+                                                  input, filter,
+                                                  vertical_pad, horizontal_pad,
+                                                  vertical_stride, horizontal_stride,
+                                                  conv_mode, conv_groups);
+
+}
+
+
+
+void* wrapper_tensorSoftmax(const char* hpvm_node_id, void* input_ptr){
+//  return tensorSoftmax(input_ptr);
+
+  // Only mapped to GPU - get a GPU configuration
+  GPUNodeConfiguration *GPUConf =
+    (GPUNodeConfiguration *)RC->getNodeConfiguration(hpvm_node_id);
+
+  std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
+                          std::vector< std::pair<GPUNodeConfiguration::APPROX,
+                                                 int> > > > &ApproxChoices =
+    GPUConf->getApproxChoices();
+
+  // Approximation choices must be for a softmax operation
+  CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
+         ApproxChoices[0].first == GPUNodeConfiguration::TENSOR_OP::SOFTMAX &&
+         "Invalid configuration generated for tensor softmax wrapper operation");
+
+  return handleTensorSoftmaxApproximationTuples(ApproxChoices[0].second, input_ptr);
+
+
+}
+
+
+
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/legacy/wrapper_runtime_back.cu b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/legacy/wrapper_runtime_back.cu
new file mode 100644
index 0000000000..f6c4fff296
--- /dev/null
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/legacy/wrapper_runtime_back.cu
@@ -0,0 +1,123 @@
+
+#include <stdio.h>
+#include <cstdio>
+#include <cstdlib>
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <cuda_runtime.h>
+#include <device_launch_parameters.h>
+
+#include <cublas_v2.h>
+#include <cudnn.h>
+#include <cublas_api.h>
+#include <cuda_fp16.h>
+#include <driver_types.h>
+
+
+// Tensor runtime header files
+#include "../include/tensor_runtime.h"
+#include "../include/tensor_utils.h"
+#include "../include/debug.h"
+#include "../include/profiling.h"
+#include "../include/fp16_conversion.h"
+#include "../include/global_data.h"
+#include "../include/error.h"
+#include "../include/tensor.h"
+#include "../include/op_overheads.h"
+#include "../include/half_precision_api.h"
+
+
+
+/*********** Generic Layer API **************/
+
+void* ConvLayerWrapper(void* input, 
+		void* filter, 
+		void* bias, 
+		int conv_pad_h, int conv_pad_w,
+		int conv_stride_h, int conv_stride_w,
+		int pool_id, int pool_size,
+		int activation_id,
+		// NOTE: out_min, out_max are only relevant for ClippedRelu
+		float out_min, float out_max){
+
+  void* conv_out = tensorConvolution(input, filter,
+				     conv_pad_h, conv_pad_w,
+				     conv_stride_h, conv_stride_w,
+				     1, 0);
+  
+  void* conv_add = tensorAdd(conv_out, bias);
+
+  void* pool_out;
+  // NOTE: Skip pooling when pool_size <= 0
+  if(pool_size > 0){
+    //FIXME: Currently only using MaxPooling
+    pool_out = tensorPooling(conv_add, 0, pool_size, pool_size, 0, 0, pool_size, pool_size);
+  }
+  else{
+    pool_out = conv_add;
+  }
+  
+  void* activation_out;  
+  switch(activation_id){
+  case -1:
+    activation_out = pool_out;
+    INFO("NO Activation Function \n");
+    break;
+  case 0:
+    activation_out = tensorTanh(pool_out);
+    break;
+  case 1:
+    activation_out = tensorRelu(pool_out);
+    break;
+  case 2:
+    activation_out = tensorRelu2(pool_out, out_min, out_max);
+    break;
+  default:
+    ERROR("Activation id %d NOT supported \n", activation_id);
+    abort();
+    break;
+  }
+
+  return activation_out;
+}
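+
+// Usage sketch (hypothetical tensors, for illustration only): a convolution
+// with padding 2 and stride 1, followed by 2x2 pooling and a ReLU
+// activation. Note that pool_id is currently ignored (max pooling is always
+// used, per the FIXME above) and out_min/out_max only matter for
+// activation_id 2 (clipped ReLU):
+//
+//   void* out = ConvLayerWrapper(input, filter, bias,
+//                                2, 2,      // conv_pad_h, conv_pad_w
+//                                1, 1,      // conv_stride_h, conv_stride_w
+//                                0, 2,      // pool_id, pool_size
+//                                1,         // activation_id: ReLU
+//                                0.0f, 0.0f);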
+
+
+void* FCLayerWrapper(void* input, 
+	      void* weights, 
+	      void* bias, 
+	      int activation_id,
+	      // NOTE: out_min and out_max are only relevant for ClippedRelu
+	      float out_min, float out_max){ 
+
+  
+  void* gemm_out = tensorGemmGPU(input, weights);
+
+  void* gemmbias_out = tensorAdd(gemm_out, bias);
+  
+  void* activation_out;
+  switch(activation_id){
+
+  case -1:
+    activation_out = gemmbias_out;
+    INFO("No Activation Function \n");
+    break;
+  case 0:
+    activation_out = tensorTanh(gemmbias_out);
+    break;
+  case 1:
+    activation_out = tensorRelu(gemmbias_out);
+    break;
+  case 2:
+    activation_out = tensorRelu2(gemmbias_out, out_min, out_max);
+    break;
+  default:
+    ERROR("Activation id %d NOT supported \n", activation_id);
+    abort();
+    break;
+  }
+  
+  return activation_out;
+}
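+
+// Usage sketch (hypothetical tensors, for illustration only): a fully
+// connected layer with a Tanh activation; out_min/out_max are unused
+// unless activation_id is 2 (clipped ReLU):
+//
+//   void* out = FCLayerWrapper(input, weights, bias,
+//                              0,             // activation_id: Tanh
+//                              0.0f, 0.0f);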
+
+
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu
index fda3037aa5..6759ab3b8e 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu
@@ -330,13 +330,20 @@ extern "C"{
 	std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
 				std::vector< std::pair<GPUNodeConfiguration::APPROX,
 						       int> > > > &ApproxChoices =
-	  GPUConf->getApproxChoices();
+	GPUConf->getApproxChoices();
+
+	DEBUG("*** Convolution: ApproxChoice = %d, BATCHNORM = %d, CONV = %d\n",
+	      ApproxChoices[0].first,
+	      GPUNodeConfiguration::TENSOR_OP::BATCHNORM,
+	      GPUNodeConfiguration::TENSOR_OP::CONV);
 
 	// Check for convolution as first operation
 	CUSTOM_ASSERT((ApproxChoices.size() >= 1) &&
 		      (ApproxChoices[0].first == GPUNodeConfiguration::TENSOR_OP::CONV) &&
 		      "Incorrect number/type of operations in provided Conv layer configuration");
 
 	void* conv_out = handleTensorConvApproximationTuples(ApproxChoices[0].second,
 							     input, filter, conv_pad_h, conv_pad_w,
 							     conv_stride_h, conv_stride_w);
@@ -547,7 +554,7 @@ extern "C"{
 	std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
 				std::vector< std::pair<GPUNodeConfiguration::APPROX,
 						       int> > > > &ApproxChoices =
-	  GPUConf->getApproxChoices();
+	GPUConf->getApproxChoices();
 
 	// Approximation choices must be for a FC wrapper operation
 	CUSTOM_ASSERT((ApproxChoices.size() == 2 || ApproxChoices.size() == 3) &&
@@ -627,7 +634,7 @@ extern "C"{
     std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
 			    std::vector< std::pair<GPUNodeConfiguration::APPROX,
 						   int> > > > &ApproxChoices =
-      GPUConf->getApproxChoices();
+    GPUConf->getApproxChoices();
 
     // Approximation choices must be for a relu operation
     CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
@@ -649,7 +656,7 @@ extern "C"{
     std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
 			    std::vector< std::pair<GPUNodeConfiguration::APPROX,
 						   int> > > > &ApproxChoices =
-      GPUConf->getApproxChoices();
+    GPUConf->getApproxChoices();
 
     // Approximation choices must be for a relu operation
     CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
@@ -670,7 +677,7 @@ extern "C"{
     std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
 			    std::vector< std::pair<GPUNodeConfiguration::APPROX,
 						   int> > > > &ApproxChoices =
-      GPUConf->getApproxChoices();
+    GPUConf->getApproxChoices();
 
     // Approximation choices must be for a tanh operation
     CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
@@ -686,7 +693,7 @@ extern "C"{
   void* wrapper_tensorBatchNorm(const char* hpvm_node_id,
 				void* input_ptr, void* gamma_ptr, void* beta_ptr,
 				void* mean_ptr, void* variance_ptr, double epsilon){
-    //  return tensorBatchNorm(input_ptr, gamma_ptr, beta_ptr, mean_ptr, variance_ptr, epsilon);
+
     // Only mapped to GPU - get a GPU configuration
     GPUNodeConfiguration *GPUConf =
@@ -695,7 +702,13 @@ extern "C"{
     std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
 			    std::vector< std::pair<GPUNodeConfiguration::APPROX,
 						   int> > > > &ApproxChoices =
-      GPUConf->getApproxChoices();
+    GPUConf->getApproxChoices();
+
+    DEBUG("*** BatchNorm: ApproxChoice = %d, BATCHNORM = %d, CONV = %d\n",
+          ApproxChoices[0].first,
+          GPUNodeConfiguration::TENSOR_OP::BATCHNORM,
+          GPUNodeConfiguration::TENSOR_OP::CONV);
 
     // Approximation choices must be for a batchnorm operation
     CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
@@ -719,7 +732,8 @@ extern "C"{
     std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
 			    std::vector< std::pair<GPUNodeConfiguration::APPROX,
 						   int> > > > &ApproxChoices =
-      GPUConf->getApproxChoices();
+    GPUConf->getApproxChoices();
 
     // Approximation choices must be for an add operation
     CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
@@ -749,7 +763,8 @@ extern "C"{
     std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
 			    std::vector< std::pair<GPUNodeConfiguration::APPROX,
 						   int> > > > &ApproxChoices =
-      GPUConf->getApproxChoices();
+    GPUConf->getApproxChoices();
 
     // Approximation choices must be for a single operation
     CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
@@ -782,7 +797,7 @@ extern "C"{
     std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
 			    std::vector< std::pair<GPUNodeConfiguration::APPROX,
 						   int> > > > &ApproxChoices =
-      GPUConf->getApproxChoices();
+    GPUConf->getApproxChoices();
 
     // Approximation choices must be for a group_conv operation
     CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
@@ -809,7 +824,7 @@ extern "C"{
     std::vector< std::pair< GPUNodeConfiguration::TENSOR_OP,
 			    std::vector< std::pair<GPUNodeConfiguration::APPROX,
 						   int> > > > &ApproxChoices =
-      GPUConf->getApproxChoices();
+    GPUConf->getApproxChoices();
 
     // Approximation choices must be for a softmax operation
     CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
@@ -822,4 +837,12 @@ extern "C"{
   }
 
 
+
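+  // Records which HPVM node is about to execute: sets the global
+  // currentTensorID (added to global_data in this patch), which is
+  // presumably read by the profiling/controller code to attribute
+  // subsequent tensor-op costs to this node.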
+  void* tensor_set_node_id(unsigned int node_id){
+
+    currentTensorID = node_id;
+
+    return NULL;
+  }
+
 }
-- 
GitLab