From ecacc22600ea3d5b5a8cf15225f16979907de468 Mon Sep 17 00:00:00 2001
From: Hashim Sharif <hsharif3@tyler.cs.illinois.edu>
Date: Fri, 14 Dec 2018 01:25:37 -0600
Subject: [PATCH] Adding 1st Lenet Conv1 Layer in Lenet Benchmark

---
 .../benchmarks/lenet/src/lenet.cpp            | 56 ++++++++++++++-----
 .../VISC/DNN_Benchmarks/common/include/visc.h |  1 +
 2 files changed, 43 insertions(+), 14 deletions(-)

diff --git a/llvm/test/VISC/DNN_Benchmarks/benchmarks/lenet/src/lenet.cpp b/llvm/test/VISC/DNN_Benchmarks/benchmarks/lenet/src/lenet.cpp
index e588db93bf..a245f1a208 100644
--- a/llvm/test/VISC/DNN_Benchmarks/benchmarks/lenet/src/lenet.cpp
+++ b/llvm/test/VISC/DNN_Benchmarks/benchmarks/lenet/src/lenet.cpp
@@ -9,7 +9,7 @@
 
 using namespace std;
 
-void tensorConvNode(void *t1, size_t bytes1, void *t2, size_t bytes2) {
+void tensorConvNode1(void *t1, size_t bytes1, void *t2, size_t bytes2) {
     __visc__hint(visc::CUDNN_TARGET);
     __visc__attributes(2, t1, t2, 0);
 
@@ -19,7 +19,7 @@ void tensorConvNode(void *t1, size_t bytes1, void *t2, size_t bytes2) {
 }
 
 
-void tensorAddNode(void *t1, size_t bytest1, void *t2, size_t bytest2) {
+void tensorAddNode1(void *t1, size_t bytest1, void *t2, size_t bytest2) {
     __visc__hint(visc::CUDNN_TARGET);
     __visc__attributes(2, t1, t2, 0);
 
@@ -27,6 +27,23 @@ void tensorAddNode(void *t1, size_t bytest1, void *t2, size_t bytest2) {
     __visc__return(2, r, (size_t) 0);
 }
 
+void tensorPoolNode1(void *t1, size_t bytest1) {
+    __visc__hint(visc::CUDNN_TARGET);
+    __visc__attributes(1, t1, 0);
+
+    void* r = __visc__tensor_pool_max(t1, 2, 2, 0, 0, 2, 2);
+    __visc__return(2, r, (size_t) 0);
+}
+
+void tensorTanhNode1(void *t1, size_t bytest1) {
+    __visc__hint(visc::CUDNN_TARGET);
+    __visc__attributes(1, t1, 0);
+
+    void* r = __visc__tensor_tanh(t1);
+    __visc__return(2, r, (size_t) 0);
+}
+
+
 void root(void *x, size_t x_bytes,
 	  void *conv1_w, size_t conv1_w_bytes,
 	  void *conv1_b, size_t conv1_b_bytes,
@@ -36,8 +53,10 @@ void root(void *x, size_t x_bytes,
     __visc__hint(visc::CPU_TARGET);
     __visc__attributes(5, x, conv1_w, conv1_b, conv2_w, conv2_b, 0);
 
-    void *nodeConv1 = __visc__createNodeND(0, tensorConvNode);
-    void *nodeAdd = __visc__createNodeND(0, tensorAddNode);
+    void *nodeConv1 = __visc__createNodeND(0, tensorConvNode1);
+    void *nodeAdd1 = __visc__createNodeND(0, tensorAddNode1);
+    void *nodePool1 = __visc__createNodeND(0, tensorPoolNode1);
+    void *nodeTanh1 = __visc__createNodeND(0, tensorTanhNode1);
 
     // node, src, dst, stream
     __visc__bindIn(nodeConv1, 0, 0, 0);
@@ -46,14 +65,24 @@ void root(void *x, size_t x_bytes,
     __visc__bindIn(nodeConv1, 3, 3, 0);
 
     // node, node, type, src, dst, stream
-    __visc__edge(nodeConv1, nodeAdd, 1, 0, 0, 0);
-    __visc__edge(nodeConv1, nodeAdd, 1, 1, 1, 0);
+    __visc__edge(nodeConv1, nodeAdd1, 1, 0, 0, 0);
+    __visc__edge(nodeConv1, nodeAdd1, 1, 1, 1, 0);
 
-    __visc__bindIn(nodeAdd, 4, 2, 0);
-    __visc__bindIn(nodeAdd, 5, 3, 0);
+    // node, src, dst, stream
+    __visc__bindIn(nodeAdd1, 4, 2, 0);
+    __visc__bindIn(nodeAdd1, 5, 3, 0);
 
-    __visc__bindOut(nodeAdd, 0, 0, 0);
-    __visc__bindOut(nodeAdd, 1, 1, 0);
+    // node, node, type, src, dst, stream
+    __visc__edge(nodeAdd1, nodePool1, 1, 0, 0, 0);
+    __visc__edge(nodeAdd1, nodePool1, 1, 1, 1, 0);
+
+    // node, node, type, src, dst, stream
+    __visc__edge(nodePool1, nodeTanh1, 1, 0, 0, 0);
+    __visc__edge(nodePool1, nodeTanh1, 1, 1, 1, 0);
+
+
+    __visc__bindOut(nodeTanh1, 0, 0, 0);
+    __visc__bindOut(nodeTanh1, 1, 1, 0);
 
 }
 
@@ -67,12 +96,12 @@ struct ret_t {
 typedef struct __attribute__((__packed__)) {
     void *x;
     size_t x_bytes;
-    
+    // 1st Layer parameters
     void *conv1_w;
     size_t conv1_w_bytes;
     void *conv1_b;
     size_t conv1_b_bytes;
-
+    // 2nd Layer parameters
     void *conv2_w;
     size_t conv2_w_bytes;
     void *conv2_b;
@@ -95,7 +124,7 @@ int main() {
     printf("Reading Input Data from = %s \n", input_data_path.c_str());
     
     void* x = readTrainedWeights(input_data_path.c_str(), float_type,
-                           test_batch_size, 1, 28, 28);
+				 test_batch_size, 1, 28, 28);
     void* conv1_w = readTrainedWeights(conv1_w_path.c_str(), float_type, 32, 1, 5, 5);
     void* conv1_b = readTrainedWeights(conv1_b_path.c_str(), float_type, 1, 32, 1, 1);
     void* conv2_w = readTrainedWeights(conv2_w_path.c_str(), float_type, 64, 32, 5, 5);
@@ -115,7 +144,6 @@ int main() {
     args->conv2_b = conv2_b;
     args->conv2_b_bytes = 0;
 
-
     void *dfg = __visc__launch(0, root, (void *)args);
 
     __visc__wait(dfg);
diff --git a/llvm/test/VISC/DNN_Benchmarks/common/include/visc.h b/llvm/test/VISC/DNN_Benchmarks/common/include/visc.h
index cf6180e406..49b58aa0d6 100644
--- a/llvm/test/VISC/DNN_Benchmarks/common/include/visc.h
+++ b/llvm/test/VISC/DNN_Benchmarks/common/include/visc.h
@@ -95,6 +95,7 @@ float __visc__cos(float);
 void* __visc__tensor_add(void*, void*);
 void* __visc__tensor_mul(void*, void*);
 void* __visc__tensor_convolution(void*, void*, int, int, int, int);
+void* __visc__tensor_pool_max(void*, int, int, int, int, int, int);
 void* __visc__tensor_relu(void*);
 void* __visc__tensor_tanh(void*);
 void* __visc__tensor_softmax(void*);
-- 
GitLab