diff --git a/hpvm/CMakeLists.txt b/hpvm/CMakeLists.txt
index d63675b34275c3f83c10ca83005bbfe563777554..53cd456ad96108226c5aec0bad3aed70f0555f1f 100644
--- a/hpvm/CMakeLists.txt
+++ b/hpvm/CMakeLists.txt
@@ -1,4 +1,6 @@
 include_directories(./include/)
+# find_package() will use the auxiliary cmake/Find*.cmake modules we provide
+list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
 
 # Generate TENSOR_RT_PREFIX into config.h
 set(TENSOR_RT_PREFIX ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
diff --git a/hpvm/cmake/FindCUDNN.cmake b/hpvm/cmake/FindCUDNN.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..e5a427f0317a6f3b8f7e7b2cc89fd176fd4362dc
--- /dev/null
+++ b/hpvm/cmake/FindCUDNN.cmake
@@ -0,0 +1,90 @@
+# Obtained from PyTorch repo: https://github.com/pytorch/pytorch/blob/master/cmake/Modules_CUDA_fix/FindCUDNN.cmake
+# Find the CUDNN libraries
+#
+# The following variables are optionally searched for defaults
+#  CUDNN_ROOT: Base directory where CUDNN is found
+#  CUDNN_INCLUDE_DIR: Directory where CUDNN header is searched for
+#  CUDNN_LIBRARY: Directory where CUDNN library is searched for
+#  CUDNN_STATIC: Are we looking for a static library? (default: no)
+#
+# The following are set after configuration is done:
+#  CUDNN_FOUND
+#  CUDNN_INCLUDE_PATH
+#  CUDNN_LIBRARY_PATH
+#
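+# Example usage from a consuming CMakeLists.txt (a minimal sketch; the target
+# name `my_target` is hypothetical):
+#   list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
+#   find_package(CUDNN 7 REQUIRED)
+#   target_include_directories(my_target PUBLIC ${CUDNN_INCLUDE_PATH})
+#   target_link_directories(my_target PUBLIC ${CUDNN_LIBRARY_PATH})
+#   target_link_libraries(my_target PUBLIC cudnn)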
+
+include(FindPackageHandleStandardArgs)
+
+set(CUDNN_ROOT $ENV{CUDNN_ROOT_DIR} CACHE PATH "Folder containing NVIDIA cuDNN")
+if (DEFINED ENV{CUDNN_ROOT_DIR})
+  message(WARNING "CUDNN_ROOT_DIR is deprecated. Please set CUDNN_ROOT instead.")
+endif()
+list(APPEND CUDNN_ROOT $ENV{CUDNN_ROOT_DIR} ${CUDA_TOOLKIT_ROOT_DIR})
+
+# Compatibility layer for CMake <3.12. On CMake >=3.12, CUDNN_ROOT is automatically taken into account when searching for paths and libraries.
+list(APPEND CMAKE_PREFIX_PATH ${CUDNN_ROOT})
+
+set(CUDNN_INCLUDE_DIR $ENV{CUDNN_INCLUDE_DIR} CACHE PATH "Folder containing NVIDIA cuDNN header files")
+
+find_path(CUDNN_INCLUDE_PATH cudnn.h
+  HINTS ${CUDNN_INCLUDE_DIR}
+  PATH_SUFFIXES cuda/include cuda include)
+
+option(CUDNN_STATIC "Look for static CUDNN" OFF)
+if (CUDNN_STATIC)
+  set(CUDNN_LIBNAME "libcudnn_static.a")
+else()
+  set(CUDNN_LIBNAME "cudnn")
+endif()
+
+set(CUDNN_LIBRARY $ENV{CUDNN_LIBRARY} CACHE PATH "Path to the cudnn library file (e.g., libcudnn.so)")
+if (CUDNN_LIBRARY MATCHES ".*cudnn_static.a" AND NOT CUDNN_STATIC)
+  message(WARNING "CUDNN_LIBRARY points to a static library (${CUDNN_LIBRARY}) but CUDNN_STATIC is OFF.")
+endif()
+
+find_library(CUDNN_LIBRARY_PATH ${CUDNN_LIBNAME}
+  PATHS ${CUDNN_LIBRARY}
+  PATH_SUFFIXES lib lib64 cuda/lib cuda/lib64 lib/x64)
+# Reduce ${CUDNN_LIBRARY_PATH} from the library file path to its parent directory
+get_filename_component(
+  CUDNN_LIBRARY_PATH
+  "${CUDNN_LIBRARY_PATH}/.." ABSOLUTE
+)
+
+# This version check is from OpenCV repo: https://github.com/opencv/opencv/blob/master/cmake/FindCUDNN.cmake
+# extract version from the include
+if(CUDNN_INCLUDE_PATH)
+  if(EXISTS "${CUDNN_INCLUDE_PATH}/cudnn_version.h")
+    file(READ "${CUDNN_INCLUDE_PATH}/cudnn_version.h" CUDNN_H_CONTENTS)
+  else()
+    file(READ "${CUDNN_INCLUDE_PATH}/cudnn.h" CUDNN_H_CONTENTS)
+  endif()
+
+  string(REGEX MATCH "define CUDNN_MAJOR ([0-9]+)" _ "${CUDNN_H_CONTENTS}")
+  set(CUDNN_VERSION_MAJOR ${CMAKE_MATCH_1})
+  string(REGEX MATCH "define CUDNN_MINOR ([0-9]+)" _ "${CUDNN_H_CONTENTS}")
+  set(CUDNN_VERSION_MINOR ${CMAKE_MATCH_1})
+  string(REGEX MATCH "define CUDNN_PATCHLEVEL ([0-9]+)" _ "${CUDNN_H_CONTENTS}")
+  set(CUDNN_VERSION_PATCH ${CMAKE_MATCH_1})
+
+  set(CUDNN_VERSION "${CUDNN_VERSION_MAJOR}.${CUDNN_VERSION_MINOR}.${CUDNN_VERSION_PATCH}")
+  unset(CUDNN_H_CONTENTS)
+endif()
+
+find_package_handle_standard_args(
+  CUDNN
+  FOUND_VAR CUDNN_FOUND
+  REQUIRED_VARS
+    CUDNN_LIBRARY_PATH
+    CUDNN_INCLUDE_PATH
+  VERSION_VAR CUDNN_VERSION
+)
+
+mark_as_advanced(CUDNN_ROOT CUDNN_INCLUDE_DIR CUDNN_LIBRARY)
diff --git a/hpvm/projects/hpvm-tensor-rt/CMakeLists.txt b/hpvm/projects/hpvm-tensor-rt/CMakeLists.txt
index cd865b11305ec3f2eec7fe80af71c0587b1b41f6..f863158ff2a6fdecce38fe42bd7510ab8187d809 100644
--- a/hpvm/projects/hpvm-tensor-rt/CMakeLists.txt
+++ b/hpvm/projects/hpvm-tensor-rt/CMakeLists.txt
@@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.17)
 project(hpvm-tensor-rt CUDA CXX)
 set(CMAKE_CXX_STANDARD 14)
 
-# -- Config path configuration file
+# -- Configure the path configuration file
 if(NOT EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/global_knobs.txt)
   message(FATAL_ERROR "global_knobs.txt not found")
 endif()
@@ -15,15 +15,18 @@ configure_file(
 )
 
 # -- Default include directories
+find_package(CUDNN 7 EXACT REQUIRED)  # Defines CUDNN_INCLUDE_PATH (for includes) and CUDNN_LIBRARY_PATH (for linking)
 set(
   INCLUDES
   ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}
+  ${CUDNN_INCLUDE_PATH}
   ./tensor_runtime/include ${CMAKE_CURRENT_BINARY_DIR}/tensor_runtime/include
   ./dnn_sources/include
   ../gpu_profiler/include ../soc_simulator/include
 )
 
 # -- Link libraries
+find_package(OpenMP REQUIRED)  # Provides ${OpenMP_CXX_FLAGS}
 # Configure gpu_profiler and soc_simulator, and setup all libs to link to
 # Conditionally add gpu_profiler project if we're building independently
 # (not building the whole hpvm)
@@ -33,11 +36,11 @@ if(NOT LLVM_BUILD_DIR)  # Defined in ../CMakeLists.txt. This means we're compili
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../gpu_profiler ${CMAKE_CURRENT_BINARY_DIR}/gpu_profiler)
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../soc_simulator ${CMAKE_CURRENT_BINARY_DIR}/soc_simulator)
 endif()
+set(LINK_DIR ${CUDNN_LIBRARY_PATH})
 set(LINK_LIBS gpu_profiler promise_profiler stdc++fs cudnn curand cublas)
 if(USE_GFLAGS)
   list(APPEND LINK_LIBS gflags)
 endif()
-find_package(OpenMP REQUIRED)  # Provides ${OpenMP_CXX_FLAGS}
 
 # -- Definitions
 set(DEFS -DPROMISE_TUNER_ENABLED -DSIMULATION_MODE=true)
@@ -78,6 +81,7 @@ function(add_tensor_runtime target_name)
     $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=${OpenMP_CXX_FLAGS}>
   )
   target_include_directories(${target_name} PUBLIC ${INCLUDES})
+  target_link_directories(${target_name} PUBLIC ${LINK_DIR})
   target_link_libraries(${target_name} PUBLIC ${LINK_LIBS})
   target_compile_definitions(${target_name} PRIVATE ${DEFS} ${ARGN})
 endfunction(add_tensor_runtime)
diff --git a/hpvm/scripts/llvm_installer.sh b/hpvm/scripts/llvm_installer.sh
index b6b71383d56d4d3bb95c829ef7628f422b3b0f27..a8fa022047fb7983c466b618863a7b2a66a50f92 100755
--- a/hpvm/scripts/llvm_installer.sh
+++ b/hpvm/scripts/llvm_installer.sh
@@ -184,6 +184,7 @@ if [ ! -d $HPVM_DIR ]; then
   echo Adding HPVM sources to tree
   mkdir -p $HPVM_DIR
   ln -s $CURRENT_DIR/CMakeLists.txt $HPVM_DIR
+  ln -s $CURRENT_DIR/cmake $HPVM_DIR/
   ln -s $CURRENT_DIR/include $HPVM_DIR/
   ln -s $CURRENT_DIR/lib $HPVM_DIR/
   ln -s $CURRENT_DIR/projects $HPVM_DIR/
diff --git a/hpvm/tools/py-approxhpvm/CMakeLists.txt b/hpvm/tools/py-approxhpvm/CMakeLists.txt
index e46c45623f13034e1cb4c5b1ed2434ec40d4c12c..d35ae6ac24b6b1f59bc06d1365d0dda7903ba017 100644
--- a/hpvm/tools/py-approxhpvm/CMakeLists.txt
+++ b/hpvm/tools/py-approxhpvm/CMakeLists.txt
@@ -26,7 +26,9 @@ set(
     LLVMClearDFG
     LLVMGenHPVM
 )
-# CUDA_TOOLKIT_ROOT_DIR is already defined
+find_package(CUDA REQUIRED)  # Defines CUDA_TOOLKIT_ROOT_DIR
+find_package(CUDNN 7 REQUIRED)  # Defines CUDNN_LIBRARY_PATH
+set(CUDNN_DIR ${CUDNN_LIBRARY_PATH})
 # First resolve all `@symbol@` by configuring the file
 configure_file(main.py.in ${CMAKE_CURRENT_BINARY_DIR}/main.py.conf)
 # Then resolve all generator expressions we configured into the previous file
diff --git a/hpvm/tools/py-approxhpvm/main.py.in b/hpvm/tools/py-approxhpvm/main.py.in
index 752a7609ca0831838949b037ac7b8c0323ac8871..fdbbaec1ccc070f87bedcd0f0c646e12531d99fe 100644
--- a/hpvm/tools/py-approxhpvm/main.py.in
+++ b/hpvm/tools/py-approxhpvm/main.py.in
@@ -12,6 +12,7 @@ CUDA_TOOLKIT_ROOT_DIR = Path("@CUDA_TOOLKIT_ROOT_DIR@")
 TENSOR_RUNTIME_LIBS = "@TENSOR_RUNTIME_LIBS@".split(";")
 AVAILABLE_PASSES = "@AVAILABLE_PASSES@".split(";")
 HPVM_RT_PATH = "@HPVM_RT_PATH@"
+CUDNN_DIR = Path("@CUDNN_DIR@")
 
 # Directories to include
 INCLUDE_DIRS = [
@@ -21,7 +22,7 @@ INCLUDE_DIRS = [
     HPVM_PROJECT_DIR / "test/dnn_benchmarks/hpvm-c/include",  # hpvm-c intrinsics decl dir
     CUDA_TOOLKIT_ROOT_DIR / "include",  # CUDA include dir
 ]
-LINK_DIRS = [CUDA_TOOLKIT_ROOT_DIR / "lib64"]
+LINK_DIRS = [CUDA_TOOLKIT_ROOT_DIR / "lib64", CUDNN_DIR]
 LINK_LIBS = [
     "pthread", "cudart", "curand", "cudnn", "cublas", "cufft", "OpenCL", "stdc++fs", "omp", "m"
 ]