diff --git a/hpvm/projects/hpvm-tensor-rt/CMakeLists.txt b/hpvm/projects/hpvm-tensor-rt/CMakeLists.txt
index 4c92c5534160221038f157ec274638f435e5f288..f6fed2ac296f93bc060fe09b3b889b42ee8c4a1a 100644
--- a/hpvm/projects/hpvm-tensor-rt/CMakeLists.txt
+++ b/hpvm/projects/hpvm-tensor-rt/CMakeLists.txt
@@ -22,7 +22,11 @@ configure_file(
 )
 
 # -- Default include directories
-set(INCLUDES ./tensor_runtime/include ${CMAKE_CURRENT_BINARY_DIR}/tensor_runtime/include)
+set(
+  INCLUDES
+  ./tensor_runtime/include ${CMAKE_CURRENT_BINARY_DIR}/tensor_runtime/include
+  ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES} ${CUDNN_INCLUDE_PATH}
+)
 
 # Build gpu_profiler and soc_simulator (dependencies)
 add_library(gpu_profiler SHARED gpu_profiler/profiler.cpp)
@@ -163,7 +167,7 @@ if(CLANG_NAME)
   message(STATUS "Creating tensor_runtime.ll in ${TENSOR_RT_LL_PREFIX}")
   # Manually add cuda includes because add_custom_command doesn't handle them
   # (unlike add_library which has CUDA-lang support).
-  foreach(dir ${INCLUDES} ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES} ${CUDNN_INCLUDE_PATH})
+  foreach(dir ${INCLUDES})
     list(APPEND INCLUDE_COMPILER_STRINGS "-I${dir}")
   endforeach()
   add_custom_command(