From 3b35b08f39974b417ddb92e8a6fe60e5c0fef390 Mon Sep 17 00:00:00 2001
From: Yifan Zhao <yifanz16@illinois.edu>
Date: Sat, 3 Apr 2021 14:23:25 -0500
Subject: [PATCH] Replace CUDNN_TARGET hints with TENSOR_TARGET in non-cuDNN benchmarks

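Every leaf node in these non-cuDNN benchmarks was still hinting
hpvm::CUDNN_TARGET; each hint now requests hpvm::TENSOR_TARGET instead.
The same one-line swap repeats in every hunk below; a representative node
after the change looks like this (the remainder of the node body is not
shown in this patch and is left elided):

  void var_23_node(void *t1, size_t bytes_t1) {
    __hpvm__hint(hpvm::TENSOR_TARGET); /* was hpvm::CUDNN_TARGET */
    __hpvm__attributes(1, t1, 0);

    void *r = __hpvm__tensor_softmax(t1);
    /* ... rest of the node body is unchanged ... */
  }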
---
 .../alexnet2_cifar10/alexnet2_cifar10.cpp     |   2 +-
 .../alexnet_cifar10/alexnet_cifar10.cpp       |   2 +-
 .../alexnet_imagenet/alexnet_imagenet.cpp     |   2 +-
 .../benchmarks/lenet_mnist/lenet_mnist.cpp    |   2 +-
 .../mobilenet_cifar10/mobilenet_cifar10.cpp   | 138 +++++++++---------
 .../resnet18_cifar10/resnet18_cifar10.cpp     |  40 ++---
 .../vgg16_cifar10/vgg16_cifar10.cpp           |   2 +-
 .../vgg16_cifar100/vgg16_cifar100.cpp         |   2 +-
 .../vgg16_imagenet/vgg16_imagenet.cpp         |   2 +-
 9 files changed, 96 insertions(+), 96 deletions(-)

diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet2_cifar10/alexnet2_cifar10.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet2_cifar10/alexnet2_cifar10.cpp
index 8a043d3644..255ec86924 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet2_cifar10/alexnet2_cifar10.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet2_cifar10/alexnet2_cifar10.cpp
@@ -196,7 +196,7 @@ void var_22_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_23_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_cifar10/alexnet_cifar10.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_cifar10/alexnet_cifar10.cpp
index 1ff20c28a6..b37fd71dea 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_cifar10/alexnet_cifar10.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_cifar10/alexnet_cifar10.cpp
@@ -172,7 +172,7 @@ void var_19_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_20_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_imagenet/alexnet_imagenet.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_imagenet/alexnet_imagenet.cpp
index b20aab8720..abdf532c7a 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_imagenet/alexnet_imagenet.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_imagenet/alexnet_imagenet.cpp
@@ -220,7 +220,7 @@ void var_25_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_26_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/lenet_mnist/lenet_mnist.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/lenet_mnist/lenet_mnist.cpp
index f9d8bc3e43..684c1bfef5 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/lenet_mnist/lenet_mnist.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/lenet_mnist/lenet_mnist.cpp
@@ -124,7 +124,7 @@ void var_13_node(void *t1, size_t bytes_t1) {
 }
 
 void var_14_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/mobilenet_cifar10/mobilenet_cifar10.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/mobilenet_cifar10/mobilenet_cifar10.cpp
index d309cd5b5e..881a9bbaa8 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/mobilenet_cifar10/mobilenet_cifar10.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/mobilenet_cifar10/mobilenet_cifar10.cpp
@@ -22,7 +22,7 @@ void var_0_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_1_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -30,7 +30,7 @@ void var_1_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_2_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -38,7 +38,7 @@ void var_2_node(void *t1, size_t bytes_t1) {
 }
 
 void var_3_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 32);
@@ -48,7 +48,7 @@ void var_3_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_4_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -56,7 +56,7 @@ void var_4_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_5_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -74,7 +74,7 @@ void var_6_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_7_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -82,7 +82,7 @@ void var_7_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_8_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -90,7 +90,7 @@ void var_8_node(void *t1, size_t bytes_t1) {
 }
 
 void var_9_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 2, 2, 1, 64);
@@ -100,7 +100,7 @@ void var_9_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_10_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -108,7 +108,7 @@ void var_10_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_11_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -126,7 +126,7 @@ void var_12_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_13_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -134,7 +134,7 @@ void var_13_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_14_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -142,7 +142,7 @@ void var_14_node(void *t1, size_t bytes_t1) {
 }
 
 void var_15_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 128);
@@ -152,7 +152,7 @@ void var_15_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_16_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -160,7 +160,7 @@ void var_16_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_17_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -178,7 +178,7 @@ void var_18_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_19_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -186,7 +186,7 @@ void var_19_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_20_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -194,7 +194,7 @@ void var_20_node(void *t1, size_t bytes_t1) {
 }
 
 void var_21_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 2, 2, 1, 128);
@@ -204,7 +204,7 @@ void var_21_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_22_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -212,7 +212,7 @@ void var_22_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_23_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -230,7 +230,7 @@ void var_24_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_25_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -238,7 +238,7 @@ void var_25_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_26_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -246,7 +246,7 @@ void var_26_node(void *t1, size_t bytes_t1) {
 }
 
 void var_27_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 256);
@@ -256,7 +256,7 @@ void var_27_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_28_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -264,7 +264,7 @@ void var_28_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_29_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -282,7 +282,7 @@ void var_30_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_31_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -290,7 +290,7 @@ void var_31_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_32_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -298,7 +298,7 @@ void var_32_node(void *t1, size_t bytes_t1) {
 }
 
 void var_33_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 2, 2, 1, 256);
@@ -308,7 +308,7 @@ void var_33_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_34_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -316,7 +316,7 @@ void var_34_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_35_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -334,7 +334,7 @@ void var_36_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_37_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -342,7 +342,7 @@ void var_37_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_38_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -350,7 +350,7 @@ void var_38_node(void *t1, size_t bytes_t1) {
 }
 
 void var_39_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 512);
@@ -360,7 +360,7 @@ void var_39_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_40_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -368,7 +368,7 @@ void var_40_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_41_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -386,7 +386,7 @@ void var_42_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_43_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -394,7 +394,7 @@ void var_43_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_44_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -402,7 +402,7 @@ void var_44_node(void *t1, size_t bytes_t1) {
 }
 
 void var_45_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 512);
@@ -412,7 +412,7 @@ void var_45_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_46_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -420,7 +420,7 @@ void var_46_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_47_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -438,7 +438,7 @@ void var_48_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_49_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -446,7 +446,7 @@ void var_49_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_50_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -454,7 +454,7 @@ void var_50_node(void *t1, size_t bytes_t1) {
 }
 
 void var_51_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 512);
@@ -464,7 +464,7 @@ void var_51_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_52_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -472,7 +472,7 @@ void var_52_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_53_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -490,7 +490,7 @@ void var_54_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_55_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -498,7 +498,7 @@ void var_55_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_56_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -506,7 +506,7 @@ void var_56_node(void *t1, size_t bytes_t1) {
 }
 
 void var_57_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 512);
@@ -516,7 +516,7 @@ void var_57_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_58_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -524,7 +524,7 @@ void var_58_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_59_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -542,7 +542,7 @@ void var_60_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_61_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -550,7 +550,7 @@ void var_61_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_62_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -558,7 +558,7 @@ void var_62_node(void *t1, size_t bytes_t1) {
 }
 
 void var_63_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 512);
@@ -568,7 +568,7 @@ void var_63_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_64_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -576,7 +576,7 @@ void var_64_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_65_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -594,7 +594,7 @@ void var_66_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_67_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -602,7 +602,7 @@ void var_67_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_68_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -610,7 +610,7 @@ void var_68_node(void *t1, size_t bytes_t1) {
 }
 
 void var_69_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 2, 2, 1, 512);
@@ -620,7 +620,7 @@ void var_69_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_70_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -628,7 +628,7 @@ void var_70_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_71_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -646,7 +646,7 @@ void var_72_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_73_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -654,7 +654,7 @@ void var_73_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_74_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -662,7 +662,7 @@ void var_74_node(void *t1, size_t bytes_t1) {
 }
 
 void var_75_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 1024);
@@ -672,7 +672,7 @@ void var_75_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_76_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -680,7 +680,7 @@ void var_76_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_77_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -698,7 +698,7 @@ void var_78_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_79_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                  size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                  size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -706,7 +706,7 @@ void var_79_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_80_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -714,7 +714,7 @@ void var_80_node(void *t1, size_t bytes_t1) {
 }
 
 void var_81_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_pool_mean(t1, 2, 2, 0, 0, 2, 2);
@@ -738,7 +738,7 @@ void var_83_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_84_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/resnet18_cifar10/resnet18_cifar10.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/resnet18_cifar10/resnet18_cifar10.cpp
index 54e93e0ff5..ce164a16f0 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/resnet18_cifar10/resnet18_cifar10.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/resnet18_cifar10/resnet18_cifar10.cpp
@@ -85,7 +85,7 @@ void var_7_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_8_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(9);
 
@@ -94,7 +94,7 @@ void var_8_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_9_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(10);
 
@@ -148,7 +148,7 @@ void var_14_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_15_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(16);
 
@@ -157,7 +157,7 @@ void var_15_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_16_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(17);
 
@@ -211,7 +211,7 @@ void var_21_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_22_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(23);
 
@@ -220,7 +220,7 @@ void var_22_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_23_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(24);
 
@@ -292,7 +292,7 @@ void var_30_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_31_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(32);
 
@@ -301,7 +301,7 @@ void var_31_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_32_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(33);
 
@@ -355,7 +355,7 @@ void var_37_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_38_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(39);
 
@@ -364,7 +364,7 @@ void var_38_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_39_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(40);
 
@@ -418,7 +418,7 @@ void var_44_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_45_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(46);
 
@@ -427,7 +427,7 @@ void var_45_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_46_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(47);
 
@@ -499,7 +499,7 @@ void var_53_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_54_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(55);
 
@@ -508,7 +508,7 @@ void var_54_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_55_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(56);
 
@@ -562,7 +562,7 @@ void var_60_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_61_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(62);
 
@@ -571,7 +571,7 @@ void var_61_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_62_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(63);
 
@@ -625,7 +625,7 @@ void var_67_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_68_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(69);
 
@@ -634,7 +634,7 @@ void var_68_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_69_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(70);
 
@@ -643,7 +643,7 @@ void var_69_node(void *t1, size_t bytes_t1) {
 }
 
 void var_70_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(71);
 
@@ -670,7 +670,7 @@ void var_72_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_73_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(74);
 
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar10/vgg16_cifar10.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar10/vgg16_cifar10.cpp
index bf1c007ff3..ddd015a63a 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar10/vgg16_cifar10.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar10/vgg16_cifar10.cpp
@@ -404,7 +404,7 @@ void var_48_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_49_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar100/vgg16_cifar100.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar100/vgg16_cifar100.cpp
index c45e115fb3..7a8fbbc9b3 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar100/vgg16_cifar100.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar100/vgg16_cifar100.cpp
@@ -404,7 +404,7 @@ void var_48_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_49_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_imagenet/vgg16_imagenet.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_imagenet/vgg16_imagenet.cpp
index dcf237bd78..2fdf36965d 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_imagenet/vgg16_imagenet.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_imagenet/vgg16_imagenet.cpp
@@ -428,7 +428,7 @@ void var_51_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_52_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
-- 
GitLab