From 6032f36b10fd30a4df51b7b5a296b552a8f0d901 Mon Sep 17 00:00:00 2001
From: Hashim Sharif <hsharif3@miranda.cs.illinois.edu>
Date: Thu, 1 Apr 2021 16:59:25 -0500
Subject: [PATCH] Make OpenCL code conditional on HPVM_USE_OPENCL

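Guard all OpenCL-specific declarations and runtime entry points in
hpvm-rt.cpp behind the HPVM_USE_OPENCL preprocessor flag, so that the
runtime still builds on systems without an OpenCL SDK. When the flag
is not defined, each OpenCL entry point falls back to openCLAbort(),
which reports the missing support and aborts.

Every guarded entry point follows the same pattern; llvm_hpvm_ocl_wait
is sketched below (DEBUG output elided):

    void llvm_hpvm_ocl_wait(void *graphID) {
    #ifdef HPVM_USE_OPENCL
      pthread_mutex_lock(&ocl_mtx);
      DFNodeContext_OCL *Context = (DFNodeContext_OCL *)graphID;
      clFinish(Context->clCommandQue);
      pthread_mutex_unlock(&ocl_mtx);
    #else
      openCLAbort(); // fail loudly in builds without OpenCL
    #endif
    }

HPVM_USE_OPENCL is expected to be supplied by the build system (e.g.,
as a compile definition) when OpenCL is available.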
---
 hpvm/projects/hpvm-rt/hpvm-rt.cpp | 241 +++++++++++++++++++++++++++++-
 1 file changed, 238 insertions(+), 3 deletions(-)

diff --git a/hpvm/projects/hpvm-rt/hpvm-rt.cpp b/hpvm/projects/hpvm-rt/hpvm-rt.cpp
index dff4f0e4a2..f0716378fe 100644
--- a/hpvm/projects/hpvm-rt/hpvm-rt.cpp
+++ b/hpvm/projects/hpvm-rt/hpvm-rt.cpp
@@ -1,4 +1,6 @@
-#include <CL/cl.h>
+
+// HPVM_USE_OPENCL is expected to be defined by the build system when building with OpenCL support.
+
 #include <algorithm>
 #include <cassert>
 #include <cstdio>
@@ -8,9 +10,14 @@
 #include <map>
 #include <pthread.h>
 #include <string>
-
 #include <unistd.h>
 
+#ifdef HPVM_USE_OPENCL
+
+#include <CL/cl.h>
+
+#endif
+
 #if _POSIX_VERSION >= 200112L
 #include <sys/time.h>
 #endif
@@ -40,6 +47,9 @@ typedef struct {
   std::vector<CircularBuffer<uint64_t> *> *isLastInputBuffers;
 } DFNodeContext_CPU;
 
+
+#ifdef HPVM_USE_OPENCL
+
 typedef struct {
   cl_context clOCLContext;
   cl_command_queue clCommandQue;
@@ -51,6 +61,9 @@ cl_context globalOCLContext;
 cl_device_id *clDevices;
 cl_command_queue globalCommandQue;
 
+#endif
+
+
 MemTracker MTracker;
 vector<DFGDepth> DStack;
 // Mutex to prevent concurrent access by multiple thereads in pipeline
@@ -59,6 +72,8 @@ pthread_mutex_t ocl_mtx;
 #define NUM_TESTS 1
 hpvm_TimerSet kernel_timer;
 
+#ifdef HPVM_USE_OPENCL
+
 static const char *getErrorString(cl_int error) {
   switch (error) {
   // run-time and JIT compiler errors
@@ -209,6 +224,15 @@ static inline void checkErr(cl_int err, cl_int success, const char *name) {
   }
 }
 
+#endif
+
+
+void openCLAbort() {
+  cout << "ERROR: HPVM runtime was built without OpenCL support. Recompile with HPVM_USE_OPENCL defined and ensure OpenCL is available on this system.\n";
+  abort();
+}
+
+
 /************************* Depth Stack Routines ***************************/
 
 void llvm_hpvm_cpu_dstack_push(unsigned n, uint64_t limitX, uint64_t iX,
@@ -260,6 +284,9 @@ uint64_t llvm_hpvm_cpu_getDimInstance(unsigned level, unsigned dim) {
 /********************** Memory Tracking Routines **************************/
 
 void llvm_hpvm_track_mem(void *ptr, size_t size) {
+
+#ifdef HPVM_USE_OPENCL
+
   DEBUG(cout << "Start tracking memory: " << ptr << flush << "\n");
   MemTrackerEntry *MTE = MTracker.lookup(ptr);
   if (MTE != NULL) {
@@ -269,9 +296,19 @@ void llvm_hpvm_track_mem(void *ptr, size_t size) {
   DEBUG(cout << "Inserting ID " << ptr << " in the MemTracker Table\n");
   MTracker.insert(ptr, size, MemTrackerEntry::HOST, ptr);
   DEBUG(MTracker.print());
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 void llvm_hpvm_untrack_mem(void *ptr) {
+
+#ifdef HPVM_USE_OPENCL
+
   DEBUG(cout << "Stop tracking memory: " << ptr << flush << "\n");
   MemTrackerEntry *MTE = MTracker.lookup(ptr);
   if (MTE == NULL) {
@@ -284,11 +321,22 @@ void llvm_hpvm_untrack_mem(void *ptr) {
     clReleaseMemObject((cl_mem)MTE->getAddress());
   MTracker.remove(ptr);
   DEBUG(MTracker.print());
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
+
+#ifdef HPVM_USE_OPENCL
+
 static void *llvm_hpvm_ocl_request_mem(void *ptr, size_t size,
                                        DFNodeContext_OCL *Context, bool isInput,
                                        bool isOutput) {
+
   pthread_mutex_lock(&ocl_mtx);
   DEBUG(cout << "[OCL] Request memory: " << ptr
              << " for context: " << Context->clOCLContext << flush << "\n");
@@ -343,13 +391,20 @@ static void *llvm_hpvm_ocl_request_mem(void *ptr, size_t size,
   DEBUG(MTracker.print());
   pthread_mutex_unlock(&ocl_mtx);
   return d_input;
+
 }
 
+#endif
+
+
 void *llvm_hpvm_cpu_argument_ptr(void *ptr, size_t size) {
   return llvm_hpvm_request_mem(ptr, size);
 }
 
 void *llvm_hpvm_request_mem(void *ptr, size_t size) {
+
+#ifdef HPVM_USE_OPENCL
+
   pthread_mutex_lock(&ocl_mtx);
   DEBUG(cout << "[CPU] Request memory: " << ptr << flush << "\n");
   MemTrackerEntry *MTE = MTracker.lookup(ptr);
@@ -386,6 +441,13 @@ void *llvm_hpvm_request_mem(void *ptr, size_t size) {
   DEBUG(MTracker.print());
   pthread_mutex_unlock(&ocl_mtx);
   return ptr;
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 /*************************** Timer Routines **********************************/
@@ -419,6 +481,9 @@ get_last_async(struct hpvm_TimerSet *timers) {
 }
 
 static void insert_marker(struct hpvm_TimerSet *tset, enum hpvm_TimerID timer) {
+
+#ifdef HPVM_USE_OPENCL
+
   cl_int ciErrNum = CL_SUCCESS;
   struct hpvm_async_time_marker_list **new_event = &(tset->async_markers);
 
@@ -441,10 +506,20 @@ static void insert_marker(struct hpvm_TimerSet *tset, enum hpvm_TimerID timer) {
   if (ciErrNum != CL_SUCCESS) {
     fprintf(stderr, "Error Enqueueing Marker!\n");
   }
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 static void insert_submarker(struct hpvm_TimerSet *tset, char *label,
                              enum hpvm_TimerID timer) {
+
+#ifdef HPVM_USE_OPENCL
+
   cl_int ciErrNum = CL_SUCCESS;
   struct hpvm_async_time_marker_list **new_event = &(tset->async_markers);
 
@@ -467,10 +542,20 @@ static void insert_submarker(struct hpvm_TimerSet *tset, char *label,
   if (ciErrNum != CL_SUCCESS) {
     fprintf(stderr, "Error Enqueueing Marker!\n");
   }
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 /* Assumes that all recorded events have completed */
 static hpvm_Timestamp record_async_times(struct hpvm_TimerSet *tset) {
+
+#ifdef HPVM_USE_OPENCL
+
   struct hpvm_async_time_marker_list *next_interval = NULL;
   struct hpvm_async_time_marker_list *last_marker = get_last_async(tset);
   hpvm_Timestamp total_async_time = 0;
@@ -517,6 +602,13 @@ static hpvm_Timestamp record_async_times(struct hpvm_TimerSet *tset) {
     next_interval->timerID = INVALID_TIMERID;
 
   return total_async_time;
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 static void accumulate_time(hpvm_Timestamp *accum, hpvm_Timestamp start,
@@ -733,6 +825,9 @@ void hpvm_AddSubTimer(struct hpvm_TimerSet *timers, char *label,
 }
 
 void hpvm_SwitchToTimer(struct hpvm_TimerSet *timers, enum hpvm_TimerID timer) {
+
+#ifdef HPVM_USE_OPENCL
+
   // cerr << "Switch to timer: " << timer << flush << "\n";
   /* Stop the currently running timer */
   if (timers->current != hpvm_TimerID_NONE) {
@@ -849,10 +944,21 @@ void hpvm_SwitchToTimer(struct hpvm_TimerSet *timers, enum hpvm_TimerID timer) {
     }
   }
   timers->current = timer;
+
+#else
+
+  openCLAbort();
+
+#endif
+
+
 }
 
 void hpvm_SwitchToSubTimer(struct hpvm_TimerSet *timers, char *label,
                            enum hpvm_TimerID category) {
+
+#ifdef HPVM_USE_OPENCL
+
   struct hpvm_SubTimerList *subtimerlist =
       timers->sub_timer_list[timers->current];
   struct hpvm_SubTimer *curr =
@@ -1001,6 +1107,13 @@ void hpvm_SwitchToSubTimer(struct hpvm_TimerSet *timers, char *label,
   }
 
   timers->current = category;
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 void hpvm_PrintTimerSet(struct hpvm_TimerSet *timers) {
@@ -1069,6 +1182,9 @@ void hpvm_PrintTimerSet(struct hpvm_TimerSet *timers) {
 }
 
 void hpvm_DestroyTimerSet(struct hpvm_TimerSet *timers) {
+
+#ifdef HPVM_USE_OPENCL
+
   /* clean up all of the async event markers */
   struct hpvm_async_time_marker_list *event = timers->async_markers;
   while (event != NULL) {
@@ -1106,6 +1222,13 @@ void hpvm_DestroyTimerSet(struct hpvm_TimerSet *timers) {
       free(timers->sub_timer_list[i]);
     }
   }
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 /**************************** Pipeline API ************************************/
@@ -1304,10 +1427,13 @@ void llvm_hpvm_cpu_wait(void *graphID) {
   DEBUG(cout << "\t... pthread Done!\n");
 }
 
+
+#ifdef HPVM_USE_OPENCL
+
 // Returns the platform name.
 std::string getPlatformName(cl_platform_id pid) {
+
   cl_int status;
-
   size_t sz;
   status = clGetPlatformInfo(pid, CL_PLATFORM_NAME, 0, NULL, &sz);
   checkErr(status, CL_SUCCESS, "Query for platform name size failed");
@@ -1318,12 +1444,18 @@ std::string getPlatformName(cl_platform_id pid) {
 
   const auto &tmp = std::string(name, name + sz);
   delete[] name;
   return tmp;
 }
 
+#endif
+
+
+#ifdef HPVM_USE_OPENCL
+
 // Searches all platforms for the first platform whose name
 // contains the search string (case-insensitive).
 cl_platform_id findPlatform(const char *platform_name_search) {
+
   cl_int status;
 
   std::string search = platform_name_search;
@@ -1360,7 +1492,13 @@ cl_platform_id findPlatform(const char *platform_name_search) {
   assert(false && "No matching platform found!");
 }
 
+#endif
+
+
 void *llvm_hpvm_ocl_initContext(enum hpvm::Target T) {
+
+#ifdef HPVM_USE_OPENCL
+
   pthread_mutex_lock(&ocl_mtx);
   DEBUG(std::string Target = T == hpvm::GPU_TARGET ? "GPU" : "SPIR");
   DEBUG(cout << "Initializing Context for " << Target << " device\n");
@@ -1450,9 +1588,19 @@ void *llvm_hpvm_ocl_initContext(enum hpvm::Target T) {
 
   pthread_mutex_unlock(&ocl_mtx);
   return globalOCLContext;
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 void llvm_hpvm_ocl_clearContext(void *graphID) {
+
+#ifdef HPVM_USE_OPENCL
+
   pthread_mutex_lock(&ocl_mtx);
   DEBUG(cout << "Clear Context\n");
   DFNodeContext_OCL *Context = (DFNodeContext_OCL *)graphID;
@@ -1464,9 +1612,19 @@ void llvm_hpvm_ocl_clearContext(void *graphID) {
   cout << "Printing HPVM Timer: KernelTimer\n";
   hpvm_PrintTimerSet(&kernel_timer);
   pthread_mutex_unlock(&ocl_mtx);
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 void llvm_hpvm_ocl_argument_shared(void *graphID, int arg_index, size_t size) {
+
+#ifdef HPVM_USE_OPENCL
+
   pthread_mutex_lock(&ocl_mtx);
   DEBUG(cout << "Set Shared Memory Input:");
   DEBUG(cout << "\tArgument Index = " << arg_index << ", Size = " << size
@@ -1477,10 +1635,20 @@ void llvm_hpvm_ocl_argument_shared(void *graphID, int arg_index, size_t size) {
   cl_int errcode = clSetKernelArg(Context->clKernel, arg_index, size, NULL);
   checkErr(errcode, CL_SUCCESS, "Failure to set shared memory argument");
   pthread_mutex_unlock(&ocl_mtx);
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 void llvm_hpvm_ocl_argument_scalar(void *graphID, void *input, int arg_index,
                                    size_t size) {
+
+#ifdef HPVM_USE_OPENCL
+
   pthread_mutex_lock(&ocl_mtx);
   DEBUG(cout << "Set Scalar Input:");
   DEBUG(cout << "\tArgument Index = " << arg_index << ", Size = " << size
@@ -1491,10 +1659,20 @@ void llvm_hpvm_ocl_argument_scalar(void *graphID, void *input, int arg_index,
   cl_int errcode = clSetKernelArg(Context->clKernel, arg_index, size, input);
   checkErr(errcode, CL_SUCCESS, "Failure to set constant input argument");
   pthread_mutex_unlock(&ocl_mtx);
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 void *llvm_hpvm_ocl_argument_ptr(void *graphID, void *input, int arg_index,
                                  size_t size, bool isInput, bool isOutput) {
+
+#ifdef HPVM_USE_OPENCL
+
   pthread_mutex_lock(&ocl_mtx);
   DEBUG(cout << "Set Pointer Input:");
   DEBUG(cout << "\tArgument Index = " << arg_index << ", Ptr = " << input
@@ -1518,9 +1696,19 @@ void *llvm_hpvm_ocl_argument_ptr(void *graphID, void *input, int arg_index,
   DEBUG(cout << "\tDevicePtr = " << d_input << flush << "\n");
   pthread_mutex_unlock(&ocl_mtx);
   return d_input;
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 void *llvm_hpvm_ocl_output_ptr(void *graphID, int arg_index, size_t size) {
+
+#ifdef HPVM_USE_OPENCL
+
   pthread_mutex_lock(&ocl_mtx);
   DEBUG(cout << "Set device memory for Output Struct:");
   DEBUG(cout << "\tArgument Index = " << arg_index << ", Size = " << size
@@ -1536,12 +1724,22 @@ void *llvm_hpvm_ocl_output_ptr(void *graphID, int arg_index, size_t size) {
   DEBUG(cout << "\tDevicePtr = " << d_output << flush << "\n");
   pthread_mutex_unlock(&ocl_mtx);
   return d_output;
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 void llvm_hpvm_ocl_free(void *ptr) {}
 
 void *llvm_hpvm_ocl_getOutput(void *graphID, void *h_output, void *d_output,
                               size_t size) {
+
+#ifdef HPVM_USE_OPENCL
+
   pthread_mutex_lock(&ocl_mtx);
   DEBUG(cout << "Get Output:\n");
   DEBUG(cout << "\tHostPtr = " << h_output << ", DevicePtr = " << d_output
@@ -1555,11 +1753,21 @@ void *llvm_hpvm_ocl_getOutput(void *graphID, void *h_output, void *d_output,
   checkErr(errcode, CL_SUCCESS, "[getOutput] Failure to read output");
   pthread_mutex_unlock(&ocl_mtx);
   return h_output;
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 void *llvm_hpvm_ocl_executeNode(void *graphID, unsigned workDim,
                                 const size_t *localWorkSize,
                                 const size_t *globalWorkSize) {
+
+#ifdef HPVM_USE_OPENCL
+
   pthread_mutex_lock(&ocl_mtx);
 
   size_t GlobalWG[3];
@@ -1610,6 +1818,13 @@ void *llvm_hpvm_ocl_executeNode(void *graphID, unsigned workDim,
 
   pthread_mutex_unlock(&ocl_mtx);
   return NULL;
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 //////////////////////////////////////////////////////////////////////////////
@@ -1656,6 +1871,9 @@ static char *LoadProgSource(const char *Filename, size_t *szFinalLength) {
 }
 
 void *llvm_hpvm_ocl_launch(const char *FileName, const char *KernelName) {
+
+#ifdef HPVM_USE_OPENCL
+
   pthread_mutex_lock(&ocl_mtx);
   DEBUG(cout << "Launch OCL Kernel\n");
   // Initialize OpenCL
@@ -1717,14 +1935,31 @@ void *llvm_hpvm_ocl_launch(const char *FileName, const char *KernelName) {
 
   pthread_mutex_unlock(&ocl_mtx);
   return Context;
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 void llvm_hpvm_ocl_wait(void *graphID) {
+
+#ifdef HPVM_USE_OPENCL
+
   pthread_mutex_lock(&ocl_mtx);
   DEBUG(cout << "Wait\n");
   DFNodeContext_OCL *Context = (DFNodeContext_OCL *)graphID;
   clFinish(Context->clCommandQue);
   pthread_mutex_unlock(&ocl_mtx);
+
+#else
+
+  openCLAbort();
+
+#endif
+
 }
 
 void llvm_hpvm_switchToTimer(void **timerSet, enum hpvm_TimerID timer) {
-- 
GitLab