Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
H
hpvm-release
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Wiki
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Model registry
Operate
Environments
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
llvm
hpvm-release
Commits
a8aa6473
Commit
a8aa6473
authored
4 years ago
by
Yifan Zhao
Browse files
Options
Downloads
Patches
Plain Diff
Moved hpvm-c test to use approxhpvm.py to compile as well
parent
b2812cbf
No related branches found
No related tags found
No related merge requests found
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
hpvm/test/dnn_benchmarks/hpvm-c/CMakeLists.txt
+26
-110
26 additions, 110 deletions
hpvm/test/dnn_benchmarks/hpvm-c/CMakeLists.txt
with
26 additions
and
110 deletions
hpvm/test/dnn_benchmarks/hpvm-c/CMakeLists.txt
+
26
−
110
View file @
a8aa6473
# First get approxhpvm.py which we then use to compile benchmarks.
# This is not the best practice,
# but easier than having the tensor runtime tell us which CUDA it used.
get_filename_component(APPROXHPVM_PY ${PROJECT_BINARY_DIR}/bin/approxhpvm.py REALPATH)
# Look for CUDA again (already done in hpvm-tensor-rt) so we can include its header.
find_package(CUDA REQUIRED)
# Paths to the LLVM tools built in this project's binary dir.
get_filename_component(LLVM_BIN_DIR ${PROJECT_BINARY_DIR}/bin REALPATH)
set(LLVM_OPT "${LLVM_BIN_DIR}/opt")
set(LLVM_LINK "${LLVM_BIN_DIR}/llvm-link")
set(CMAKE_CXX_COMPILER "${LLVM_BIN_DIR}/clang++")
# Configure config.h which tells the benchmarks where the model parameter
# directory is. We can also use the one in tensor_runtime, but we're avoiding
# that trying to decouple things.
set(MODEL_PARAMS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../model_params/")
configure_file("include/config.h.in" "${CMAKE_CURRENT_BINARY_DIR}/include/config.h")
# This will be an extra include directory (specific to these benchmarks)
# and we'll give this to approxhpvm.py.
set(CONFIG_INCLUDE_DIR "${CMAKE_CURRENT_BINARY_DIR}/include")
# Directories to include when compiling the benchmark sources.
set(HPVM_PROJECTS ${PROJECT_SOURCE_DIR}/tools/hpvm/projects)
set(HPVM_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/tools/hpvm/include)
set(BENCHMARK_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../dnn_benchmarks/include)
set(TENSOR_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include)
set(TENSOR_RT_INCLUDE_DIR ${HPVM_PROJECTS}/hpvm-tensor-rt/tensor_runtime/include)
set(INCLUDES
  ${CONFIG_INCLUDE_DIR}
  ${HPVM_INCLUDE_DIR}
  ${BENCHMARK_INCLUDE_DIR}
  ${TENSOR_INCLUDE_DIR}
  ${TENSOR_RT_INCLUDE_DIR}
  ${CUDA_INCLUDE_DIRS}
)
# Turn each include dir into a -I flag for the clang++ command line.
foreach(dir ${INCLUDES})
  list(APPEND INCLUDE_COMPILER_STRINGS "-I${dir}")
endforeach()
# Built-in libraries to link
list(APPEND LINKER_FLAGS
  "-L${CUDA_TOOLKIT_ROOT_DIR}/lib64"
  -lpthread -lcudart -lcurand -lcudnn -lcublas -lcufft -lOpenCL -lstdc++fs -lomp -lm
)
# The hpvm-rt runtime
# This has to be explicitly set as hpvm-rt.bc is created in a custom_target
# and does not export its file location.
# Keep this in sync with hpvm/projects/hpvm-rt/CMakeLists.txt.
set(HPVM_RT_PATH ${PROJECT_BINARY_DIR}/tools/hpvm/projects/hpvm-rt/hpvm-rt.bc)
# Compile flags (clang++)
set(CLANG_FLAGS -fno-exceptions -std=c++11 -O3)
# All compilation uses HPVM_DEFAULT_PASSES.
set(HPVM_DEFAULT_PASSES
  LLVMBuildDFG
  LLVMInPlaceDFGAnalysis
  LLVMDFG2LLVM_CPU
  LLVMFuseHPVMTensorNodes
  LLVMClearDFG
  LLVMGenHPVM
)
set(WORK_DIR ${CMAKE_CURRENT_BINARY_DIR})
# Accumulated list of per-benchmark compile targets (filled by the
# compile functions below via PARENT_SCOPE).
set(test_compile_targets "")
# approxhpvm_py_codegen(bin_filename src_filepath codegen_target [extra flags...])
#   Compile one benchmark source with approxhpvm.py into an executable at
#   ${CMAKE_CURRENT_BINARY_DIR}/${bin_filename}, generating code for
#   `codegen_target` (e.g. "tensor" or "cudnn"). Any extra arguments are
#   forwarded to approxhpvm.py. Creates a target named ${bin_filename} and
#   appends it to `test_compile_targets` in the caller's scope.
function(approxhpvm_py_codegen bin_filename src_filepath codegen_target)
  add_custom_command(
    OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${bin_filename}
    DEPENDS ${src_filepath} approxhpvm.py
    COMMAND ${APPROXHPVM_PY}
      ${src_filepath} ${CMAKE_CURRENT_BINARY_DIR}/${bin_filename}
      -t ${codegen_target} -I ${CONFIG_INCLUDE_DIR}
      # NOTE(review): the scraped source had ${ARGV} here, which would re-pass
      # bin_filename/src_filepath/codegen_target to approxhpvm.py as trailing
      # arguments; ${ARGN} forwards only the extra flags the call sites supply.
      ${ARGN}
  )
  add_custom_target(${bin_filename} DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${bin_filename})
  set(test_compile_targets ${test_compile_targets} ${bin_filename} PARENT_SCOPE)
endfunction(approxhpvm_py_codegen)

# compile_single_benchmark(target src_file extra_passes extra_dfg_flags)
#   Legacy manual pipeline (superseded by approxhpvm_py_codegen above):
#   clang++ -> opt (genhpvm) -> opt (HPVM lowering passes) -> llvm-link
#   (hpvm-rt.bc) -> clang++ link against the tensor runtime. Creates a target
#   named ${target} and appends it to `test_compile_targets` in the caller's
#   scope. Relies on LLVM_OPT/LLVM_LINK/CLANG_FLAGS/HPVM_* set above.
function(compile_single_benchmark target src_file extra_passes extra_dfg_flags)
  # Build "-load <pass>.so" flags for every default and extra HPVM pass.
  foreach(pass ${HPVM_DEFAULT_PASSES} ${extra_passes})
    list(APPEND LOAD_FILE_FLAGS "-load" "${pass}.so")
  endforeach()
  set(HPVM_PASSES
    ${LOAD_FILE_FLAGS}
    -buildDFG -inplace -hpvm-fuse ${extra_dfg_flags}
    -dfg2llvm-cpu -clearDFG
  )
  # 1. clang++: C++ source -> textual LLVM IR (.ll)
  add_custom_command(
    OUTPUT "${WORK_DIR}/${target}.ll"
    DEPENDS ${src_file} clang
    COMMAND ${CMAKE_CXX_COMPILER}
      ${INCLUDE_COMPILER_STRINGS} ${CLANG_FLAGS} -emit-llvm -S ${src_file}
      -o ${WORK_DIR}/${target}.ll
  )
  # 2. opt: raise to HPVM IR (.hpvm.ll)
  add_custom_command(
    OUTPUT "${WORK_DIR}/${target}.hpvm.ll"
    DEPENDS "${WORK_DIR}/${target}.ll" opt LLVMGenHPVM
    COMMAND ${LLVM_OPT}
      -load LLVMGenHPVM.so -genhpvm -globaldce -S ${WORK_DIR}/${target}.ll
      -o ${WORK_DIR}/${target}.hpvm.ll
  )
  # 3. opt: run the HPVM lowering passes -> plain LLVM IR (.llvm.ll)
  add_custom_command(
    OUTPUT "${WORK_DIR}/${target}.llvm.ll"
    DEPENDS "${WORK_DIR}/${target}.hpvm.ll" opt
      ${HPVM_DEFAULT_PASSES} ${extra_passes}
    COMMAND ${LLVM_OPT}
      ${HPVM_PASSES} -S ${WORK_DIR}/${target}.hpvm.ll
      -o ${WORK_DIR}/${target}.llvm.ll
  )
  # 4. llvm-link: link in the hpvm-rt runtime bitcode (.linked.bc)
  add_custom_command(
    OUTPUT "${WORK_DIR}/${target}.linked.bc"
    DEPENDS "${WORK_DIR}/${target}.llvm.ll" hpvm-rt.bc llvm-link
    COMMAND ${LLVM_LINK}
      ${WORK_DIR}/${target}.llvm.ll ${HPVM_RT_PATH}
      -o ${WORK_DIR}/${target}.linked.bc
  )
  # 5. clang++: link against the tensor runtime and profilers -> executable
  add_custom_command(
    OUTPUT "${WORK_DIR}/${target}"
    DEPENDS "${WORK_DIR}/${target}.linked.bc"
      tensor_runtime gpu_profiler promise_profiler
    COMMAND ${CMAKE_CXX_COMPILER}
      ${WORK_DIR}/${target}.linked.bc
      $<TARGET_FILE:tensor_runtime> $<TARGET_FILE:gpu_profiler> $<TARGET_FILE:promise_profiler>
      -o ${WORK_DIR}/${target}
      ${LINKER_FLAGS}
  )
  add_custom_target(${target} DEPENDS "${WORK_DIR}/${target}")
  set(test_compile_targets ${test_compile_targets} ${target} PARENT_SCOPE)
endfunction(compile_single_benchmark)
# Accumulated list of per-benchmark run targets (filled by
# run_single_benchmark via PARENT_SCOPE).
set(test_run_targets "")

# run_single_benchmark(run_target benchmark)
#   Add a target named ${run_target} that executes the compiled `benchmark`
#   binary from the current binary dir, depending on the benchmark's compile
#   target. Appends ${run_target} to `test_run_targets` in the caller's scope.
function(run_single_benchmark run_target benchmark)
  add_custom_target(${run_target} COMMAND ${CMAKE_CURRENT_BINARY_DIR}/${benchmark})
  add_dependencies(${run_target} ${benchmark})
  set(test_run_targets ${test_run_targets} ${run_target} PARENT_SCOPE)
endfunction(run_single_benchmark)
# One subdirectory of ./benchmarks per DNN; each contains <name>.cpp and
# <name>_cudnn.cpp plus quantization/configuration data under data/.
file(GLOB entries ./benchmarks/*)
foreach(dir ${entries})
  get_filename_component(dirname "${dir}" NAME)
  # Generate "tensor"-targeted code
  approxhpvm_py_codegen(
    ${dirname} ${dir}/${dirname}.cpp tensor
    --quant-file ${dir}/data/quant_ranges_rt.txt
    --config-file ${dir}/data/tuner_confs.txt
  )
  # Run tensor binary
  run_single_benchmark(run_${dirname} ${dirname})
  # Generate "cudnn"-targeted code
  approxhpvm_py_codegen(${dirname}_cudnn ${dir}/${dirname}_cudnn.cpp cudnn)
  # Run cudnn binary
  run_single_benchmark(run_${dirname}_cudnn ${dirname}_cudnn)
endforeach(dir)
# Summarize what was registered and expose a single umbrella target that
# compiles every benchmark.
message(STATUS "List of test dnn benchmarks: ${test_compile_targets}")
add_custom_target(dnn_benchmarks DEPENDS ${test_compile_targets})
message(STATUS "Target name for compiling all dnn benchmarks: dnn_benchmarks")
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment