From be677ed395aa45f56254279ac45c06e469d94982 Mon Sep 17 00:00:00 2001
From: Hashim Sharif <hsharif3@tyler.cs.illinois.edu>
Date: Sat, 21 Nov 2020 02:40:20 -0600
Subject: [PATCH] Adding missing wrapper_ConvLayer2 signatures

---
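Notes (not part of the commit message): wrapper_ConvLayer2 extends
wrapper_ConvLayer with explicit pooling geometry -- separate vertical and
horizontal pool size, padding, and stride. A minimal call sketch follows;
every argument value in it is an assumption chosen for illustration, not
taken from this patch or any benchmark:

  // Hypothetical use: conv (stride 1, pad 1) -> 2x2 pool -> activation.
  // pool_id and activation_id enum values are assumed, not confirmed here;
  // input, filter, and bias are tensors allocated elsewhere by the runtime.
  void *out = wrapper_ConvLayer2(
      "conv1",             // hpvm_node_id
      input, filter, bias, // tensor handles
      1, 1,                // conv_pad_h, conv_pad_w
      1, 1,                // conv_stride_h, conv_stride_w
      0,                   // pool_id
      2, 2,                // pool_size_v, pool_size_h
      0, 0,                // pool_pad_v, pool_pad_h
      2, 2,                // pool_stride_v, pool_stride_h
      1,                   // activation_id
      0.0f, 0.0f);         // out_min, out_max (only used by ClippedRelu)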
 .../hpvm-tensor-rt/lib/tensor_runtime.ll         | 10 +++++++---
 .../tensor_runtime/include/tensor_runtime.h      |  9 +++++++++
 .../tensor_runtime/include/tensor_signatures.cc  |  4 +++-
 3 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/llvm/projects/hpvm-tensor-rt/lib/tensor_runtime.ll b/llvm/projects/hpvm-tensor-rt/lib/tensor_runtime.ll
index 0a2273cfdd..42325406bd 100644
--- a/llvm/projects/hpvm-tensor-rt/lib/tensor_runtime.ll
+++ b/llvm/projects/hpvm-tensor-rt/lib/tensor_runtime.ll
@@ -65,8 +65,8 @@ entry:
   %tensorAddErrorPtr = alloca i8*, align 8
   %ConvLayer = alloca i8*, align 8
   %FCLayer = alloca i8*, align 8
+  %ConvLayer_ = alloca i8*, align 8
   %ConvLayer2 = alloca i8*, align 8
-  %ConvLayer_wrapper = alloca i8*, align 8
   %FCLayer2 = alloca i8*, align 8
   %AddWrapper = alloca i8*, align 8
   %ReluWrapper = alloca i8*, align 8
@@ -82,6 +82,7 @@ entry:
   %tensorMap3 = alloca i8*, align 8
   %tensorStencil = alloca i8*, align 8
   %tensorCosineT = alloca i8*, align 8
+  %tensorNodeID = alloca i8*, align 8
   store i8* bitcast (void (i32)* @llvm_hpvm_initTensorRt to i8*), i8** %initRT, align 8
   store i8* bitcast (void ()* @llvm_hpvm_cleanupTensorRt to i8*), i8** %cleanRT, align 8
   store i8* bitcast (void (i32)* @llvm_hpvm_initApproxhpvmRt to i8*), i8** %initApproxRT, align 8
@@ -119,8 +120,8 @@ entry:
   store i8* bitcast (i8* (i8*, i32)* @tensorAddError to i8*), i8** %tensorAddErrorPtr, align 8
   store i8* bitcast (i8* (i8*, float, float, i8*, float, float, i8*, float, float, i32, i32, i32, i32, i32, i32, i32, float, float, i32)* @ConvLayer_PROMISE to i8*), i8** %ConvLayer, align 8
   store i8* bitcast (i8* (i8*, float, float, i8*, float, float, i8*, float, float, i32, float, float, i32)* @FCLayer_PROMISE to i8*), i8** %FCLayer, align 8
-  store i8* bitcast (i8* (i8*, i8*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, float, float)* @wrapper_ConvLayer to i8*), i8** %ConvLayer2, align 8
-  store i8* bitcast (i8* (i8*, i8*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float)* @wrapper_ConvLayer2 to i8*), i8** %ConvLayer_wrapper, align 8
+  store i8* bitcast (i8* (i8*, i8*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, float, float)* @wrapper_ConvLayer to i8*), i8** %ConvLayer_, align 8
+  store i8* bitcast (i8* (i8*, i8*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float)* @wrapper_ConvLayer2 to i8*), i8** %ConvLayer2, align 8
   store i8* bitcast (i8* (i8*, i8*, i8*, i8*, i32, float, float)* @wrapper_FCLayer to i8*), i8** %FCLayer2, align 8
   store i8* bitcast (i8* (i8*, i8*, i8*)* @wrapper_tensorAdd to i8*), i8** %AddWrapper, align 8
   store i8* bitcast (i8* (i8*, i8*)* @wrapper_tensorRelu to i8*), i8** %ReluWrapper, align 8
@@ -136,6 +137,7 @@ entry:
   store i8* bitcast (i8* (i8*, i32, i8*, i8*, i8*)* @wrapper_tensorMap3 to i8*), i8** %tensorMap3, align 8
   store i8* bitcast (i8* (i8*, i8*)* @wrapper_tensorStencil to i8*), i8** %tensorStencil, align 8
   store i8* bitcast (i8* (i8*, i8*)* @wrapper_tensorCosineT to i8*), i8** %tensorCosineT, align 8
+  store i8* bitcast (i8* (i32)* @tensor_set_node_id to i8*), i8** %tensorNodeID, align 8
   ret void
 }
 
@@ -247,6 +249,8 @@ declare i8* @wrapper_tensorStencil(i8*, i8*) #1
 
 declare i8* @wrapper_tensorCosineT(i8*, i8*) #1
 
+declare i8* @tensor_set_node_id(i32) #1
+
 ; Function Attrs: noinline uwtable
 define internal void @_GLOBAL__sub_I_tensor_signatures.cc() #0 section ".text.startup" {
 entry:
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_runtime.h b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_runtime.h
index ba87a6bd40..b6d7f862fa 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_runtime.h
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_runtime.h
@@ -168,6 +168,15 @@ void *wrapper_ConvLayer(const char *hpvm_node_id, void *input, void *filter,
                         int activation_id, // Relu, Tanh, ClipRelu
                         float out_min, float out_max);
 
+void *wrapper_ConvLayer2(const char *hpvm_node_id, void *input, void *filter,
+                         void *bias, int conv_pad_h, int conv_pad_w,
+                         int conv_stride_h, int conv_stride_w, int pool_id,
+                         int pool_size_v, int pool_size_h, int pool_pad_v,
+                         int pool_pad_h, int pool_stride_v, int pool_stride_h,
+                         int activation_id,
+                         // NOTE: out_min, out_max are only relevant for ClippedRelu
+                         float out_min, float out_max);
+
 void *wrapper_FCLayer(const char *hpvm_node_id, void *input, void *weights,
                       void *bias, int activation_id, float out_min,
                       float out_max);
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_signatures.cc b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_signatures.cc
index 7da43b216e..9c4cf97908 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_signatures.cc
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensor_signatures.cc
@@ -45,7 +45,9 @@ void dummyFunction() {
   void *ConvLayer = (void *)&ConvLayer_PROMISE;
   void *FCLayer = (void *)&FCLayer_PROMISE;
 
-  void *ConvLayer2 = (void *)&wrapper_ConvLayer;
+  void *ConvLayer_ = (void *)&wrapper_ConvLayer;
+  void *ConvLayer2 = (void *)&wrapper_ConvLayer2;
+
   void *FCLayer2 = (void *)&wrapper_FCLayer;
   void *AddWrapper = (void *)&wrapper_tensorAdd;
   void *ReluWrapper = (void *)&wrapper_tensorRelu;
-- 
GitLab