diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet2_cifar10/alexnet2_cifar10.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet2_cifar10/alexnet2_cifar10.cpp
index 8a043d3644aa360b5104e34503f80b187204a03c..255ec86924066beb82e18cf83e7c0b4500ad7287 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet2_cifar10/alexnet2_cifar10.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet2_cifar10/alexnet2_cifar10.cpp
@@ -196,7 +196,7 @@ void var_22_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_23_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_cifar10/alexnet_cifar10.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_cifar10/alexnet_cifar10.cpp
index 1ff20c28a691ffd8c764eca6b75bbc5abba1c17d..b37fd71deaeb607545837faf09b133c14b9d8968 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_cifar10/alexnet_cifar10.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_cifar10/alexnet_cifar10.cpp
@@ -172,7 +172,7 @@ void var_19_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_20_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_imagenet/alexnet_imagenet.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_imagenet/alexnet_imagenet.cpp
index b20aab8720923e0eb3f1eb3bcf6f964ebe37a510..abdf532c7a0417e16a55f4bb7ec57471340837a4 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_imagenet/alexnet_imagenet.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/alexnet_imagenet/alexnet_imagenet.cpp
@@ -220,7 +220,7 @@ void var_25_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_26_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/lenet_mnist/lenet_mnist.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/lenet_mnist/lenet_mnist.cpp
index f9d8bc3e436630b9778dc5c4d1cfe271884d75a1..684c1bfef532c162a7981a12b54b5282c5a1b114 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/lenet_mnist/lenet_mnist.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/lenet_mnist/lenet_mnist.cpp
@@ -124,7 +124,7 @@ void var_13_node(void *t1, size_t bytes_t1) {
 }
 
 void var_14_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/mobilenet_cifar10/mobilenet_cifar10.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/mobilenet_cifar10/mobilenet_cifar10.cpp
index d309cd5b5ea74563c1ee1fd868e55b092fd6a7cb..881a9bbaa877aad6c0a4b0d7cbae79d7a60c862c 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/mobilenet_cifar10/mobilenet_cifar10.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/mobilenet_cifar10/mobilenet_cifar10.cpp
@@ -22,7 +22,7 @@ void var_0_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_1_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -30,7 +30,7 @@ void var_1_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_2_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -38,7 +38,7 @@ void var_2_node(void *t1, size_t bytes_t1) {
 }
 
 void var_3_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 32);
@@ -48,7 +48,7 @@ void var_3_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_4_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -56,7 +56,7 @@ void var_4_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_5_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -74,7 +74,7 @@ void var_6_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_7_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -82,7 +82,7 @@ void var_7_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_8_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -90,7 +90,7 @@ void var_8_node(void *t1, size_t bytes_t1) {
 }
 
 void var_9_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 2, 2, 1, 64);
@@ -100,7 +100,7 @@ void var_9_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_10_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -108,7 +108,7 @@ void var_10_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_11_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -126,7 +126,7 @@ void var_12_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_13_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -134,7 +134,7 @@ void var_13_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_14_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -142,7 +142,7 @@ void var_14_node(void *t1, size_t bytes_t1) {
 }
 
 void var_15_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 128);
@@ -152,7 +152,7 @@ void var_15_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_16_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -160,7 +160,7 @@ void var_16_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_17_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -178,7 +178,7 @@ void var_18_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_19_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -186,7 +186,7 @@ void var_19_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_20_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -194,7 +194,7 @@ void var_20_node(void *t1, size_t bytes_t1) {
 }
 
 void var_21_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 2, 2, 1, 128);
@@ -204,7 +204,7 @@ void var_21_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_22_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -212,7 +212,7 @@ void var_22_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_23_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -230,7 +230,7 @@ void var_24_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_25_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -238,7 +238,7 @@ void var_25_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_26_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -246,7 +246,7 @@ void var_26_node(void *t1, size_t bytes_t1) {
 }
 
 void var_27_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 256);
@@ -256,7 +256,7 @@ void var_27_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_28_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -264,7 +264,7 @@ void var_28_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_29_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -282,7 +282,7 @@ void var_30_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_31_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -290,7 +290,7 @@ void var_31_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_32_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -298,7 +298,7 @@ void var_32_node(void *t1, size_t bytes_t1) {
 }
 
 void var_33_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 2, 2, 1, 256);
@@ -308,7 +308,7 @@ void var_33_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_34_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -316,7 +316,7 @@ void var_34_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_35_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -334,7 +334,7 @@ void var_36_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_37_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -342,7 +342,7 @@ void var_37_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_38_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -350,7 +350,7 @@ void var_38_node(void *t1, size_t bytes_t1) {
 }
 
 void var_39_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 512);
@@ -360,7 +360,7 @@ void var_39_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_40_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -368,7 +368,7 @@ void var_40_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_41_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -386,7 +386,7 @@ void var_42_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_43_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -394,7 +394,7 @@ void var_43_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_44_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -402,7 +402,7 @@ void var_44_node(void *t1, size_t bytes_t1) {
 }
 
 void var_45_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 512);
@@ -412,7 +412,7 @@ void var_45_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_46_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -420,7 +420,7 @@ void var_46_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_47_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -438,7 +438,7 @@ void var_48_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_49_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -446,7 +446,7 @@ void var_49_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_50_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -454,7 +454,7 @@ void var_50_node(void *t1, size_t bytes_t1) {
 }
 
 void var_51_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 512);
@@ -464,7 +464,7 @@ void var_51_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_52_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -472,7 +472,7 @@ void var_52_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_53_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -490,7 +490,7 @@ void var_54_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_55_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -498,7 +498,7 @@ void var_55_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_56_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -506,7 +506,7 @@ void var_56_node(void *t1, size_t bytes_t1) {
 }
 
 void var_57_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 512);
@@ -516,7 +516,7 @@ void var_57_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_58_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -524,7 +524,7 @@ void var_58_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_59_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -542,7 +542,7 @@ void var_60_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_61_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -550,7 +550,7 @@ void var_61_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_62_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -558,7 +558,7 @@ void var_62_node(void *t1, size_t bytes_t1) {
 }
 
 void var_63_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 512);
@@ -568,7 +568,7 @@ void var_63_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_64_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -576,7 +576,7 @@ void var_64_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_65_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -594,7 +594,7 @@ void var_66_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_67_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -602,7 +602,7 @@ void var_67_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_68_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -610,7 +610,7 @@ void var_68_node(void *t1, size_t bytes_t1) {
 }
 
 void var_69_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 2, 2, 1, 512);
@@ -620,7 +620,7 @@ void var_69_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_70_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -628,7 +628,7 @@ void var_70_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_71_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -646,7 +646,7 @@ void var_72_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_73_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -654,7 +654,7 @@ void var_73_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_74_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -662,7 +662,7 @@ void var_74_node(void *t1, size_t bytes_t1) {
 }
 
 void var_75_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
 
   void *r = __hpvm__tensor_group_convolution(t1, t2, 1, 1, 1, 1, 1, 1024);
@@ -672,7 +672,7 @@ void var_75_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_76_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -680,7 +680,7 @@ void var_76_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_77_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -698,7 +698,7 @@ void var_78_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 void var_79_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
                 size_t bytes_t3, void *t4, size_t bytes_t4, void *t5,
                 size_t bytes_t5) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(5, t1, t2, t3, t4, t5, 0);
 
   void *r = __hpvm__tensor_batchnorm(t1, t2, t3, t4, t5, 0.001);
@@ -706,7 +706,7 @@ void var_79_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2, void *t3,
 }
 
 void var_80_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_relu(t1);
@@ -714,7 +714,7 @@ void var_80_node(void *t1, size_t bytes_t1) {
 }
 
 void var_81_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_pool_mean(t1, 2, 2, 0, 0, 2, 2);
@@ -738,7 +738,7 @@ void var_83_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_84_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/resnet18_cifar10/resnet18_cifar10.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/resnet18_cifar10/resnet18_cifar10.cpp
index 54e93e0ff5f9b02a811dc15aa76d0bb30d8c1d2b..ce164a16f0c95a6f6dfc141609542a9614e2c994 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/resnet18_cifar10/resnet18_cifar10.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/resnet18_cifar10/resnet18_cifar10.cpp
@@ -85,7 +85,7 @@ void var_7_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_8_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(9);
 
@@ -94,7 +94,7 @@ void var_8_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_9_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(10);
 
@@ -148,7 +148,7 @@ void var_14_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_15_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(16);
 
@@ -157,7 +157,7 @@ void var_15_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_16_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(17);
 
@@ -211,7 +211,7 @@ void var_21_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_22_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(23);
 
@@ -220,7 +220,7 @@ void var_22_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_23_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(24);
 
@@ -292,7 +292,7 @@ void var_30_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_31_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(32);
 
@@ -301,7 +301,7 @@ void var_31_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_32_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(33);
 
@@ -355,7 +355,7 @@ void var_37_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_38_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(39);
 
@@ -364,7 +364,7 @@ void var_38_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_39_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(40);
 
@@ -418,7 +418,7 @@ void var_44_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_45_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(46);
 
@@ -427,7 +427,7 @@ void var_45_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_46_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(47);
 
@@ -499,7 +499,7 @@ void var_53_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_54_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(55);
 
@@ -508,7 +508,7 @@ void var_54_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_55_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(56);
 
@@ -562,7 +562,7 @@ void var_60_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_61_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(62);
 
@@ -571,7 +571,7 @@ void var_61_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_62_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(63);
 
@@ -625,7 +625,7 @@ void var_67_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_68_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(2, t1, t2, 0);
   __hpvm__node_id(69);
 
@@ -634,7 +634,7 @@ void var_68_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_69_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(70);
 
@@ -643,7 +643,7 @@ void var_69_node(void *t1, size_t bytes_t1) {
 }
 
 void var_70_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(71);
 
@@ -670,7 +670,7 @@ void var_72_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_73_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
   __hpvm__node_id(74);
 
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar10/vgg16_cifar10.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar10/vgg16_cifar10.cpp
index bf1c007ff389fa80cd41dec1fea05f5be4f07e3a..ddd015a63a3284f2c78a57a8173544d233fd2772 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar10/vgg16_cifar10.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar10/vgg16_cifar10.cpp
@@ -404,7 +404,7 @@ void var_48_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_49_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar100/vgg16_cifar100.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar100/vgg16_cifar100.cpp
index c45e115fb31b50a428ace6a7344532ade12b1989..7a8fbbc9b3a5de110996b56e8f5ee06fc761ef41 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar100/vgg16_cifar100.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_cifar100/vgg16_cifar100.cpp
@@ -404,7 +404,7 @@ void var_48_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_49_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_imagenet/vgg16_imagenet.cpp b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_imagenet/vgg16_imagenet.cpp
index dcf237bd7897febdfd4a3972dc0a47a873f3646b..2fdf36965da100843f69e2ca6ba975bcae4a13ff 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_imagenet/vgg16_imagenet.cpp
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/benchmarks/vgg16_imagenet/vgg16_imagenet.cpp
@@ -428,7 +428,7 @@ void var_51_node(void *t1, size_t bytes_t1, void *t2, size_t bytes_t2) {
 }
 
 void var_52_node(void *t1, size_t bytes_t1) {
-  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__hint(hpvm::TENSOR_TARGET);
   __hpvm__attributes(1, t1, 0);
 
   void *r = __hpvm__tensor_softmax(t1);
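
Every hunk above makes the same one-line change: the per-node compilation hint in these HPVM-C benchmark sources moves from hpvm::CUDNN_TARGET to hpvm::TENSOR_TARGET. As a minimal sketch, a retargeted leaf node looks like the following; this is illustrative only, and the trailing __hpvm__return call is assumed to follow the usual pattern in these benchmarks rather than being part of this patch.

    // Illustrative sketch (not part of the patch): a softmax leaf node after retargeting.
    void var_49_node(void *t1, size_t bytes_t1) {
      __hpvm__hint(hpvm::TENSOR_TARGET); // previously hpvm::CUDNN_TARGET
      __hpvm__attributes(1, t1, 0);      // one input tensor, no extra pointer outputs

      void *r = __hpvm__tensor_softmax(t1);
      __hpvm__return(2, r, (size_t)0);   // assumed unchanged boilerplate
    }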