diff --git a/hpvm/test/CMakeLists.txt b/hpvm/test/CMakeLists.txt
index a9098bb01d7ef174a3abe49ae4a6631195bb6005..660003538fe72d45e3dbfc1178fd296cdc7156b5 100644
--- a/hpvm/test/CMakeLists.txt
+++ b/hpvm/test/CMakeLists.txt
@@ -1,3 +1,4 @@
 include(../cmake/TestFile.cmake) # Generation of `.test` files in CMake
 add_subdirectory(hpvm_pass) # Passes test suite
 add_subdirectory(dnn_benchmarks/hpvm-c) # DNN accuracy test suite
+add_subdirectory(dnn_benchmarks/profiling) # hpvm-profiler test suite
diff --git a/hpvm/test/README.md b/hpvm/test/README.md
index 1bfa919c1dd1da5e47e5dea71155ed6084f972a6..18cb05b833434fcffc7e4c50b5f38150c924fb19 100644
--- a/hpvm/test/README.md
+++ b/hpvm/test/README.md
@@ -38,10 +38,17 @@ The following targets runs these tests respectively:
 * `make -j check-hpvm-dnn` runs all 20 DNN benchmarks under
   `dnn_benchmarks/hpvm-c` (10 DNNs x 2 versions) and validates their accuracy.
 
-  *Note* that this is quite time-consuming due to the size of DNNs and datasets.
+  *Note* that this can take a long time due to the size of the DNNs and datasets.
   Depending on your hardware capability, this test can take 5-30 minutes.
   Also, this is set to run sequentially out of GPU memory concerns.
 
+* `make -j check-hpvm-profiler` runs `hpvm-profiler` on some smaller networks
+  (profiling every network would be extremely time-consuming) and presents the tradeoff curve with the profiled speedup.
+
+  *Note* that if you're on an NVIDIA Jetson TX2, you may want to run
+  `bash dnn_benchmarks/profiling/jetson_clocks.sh`
+  to ensure that the clocks are running at their maximum frequency.
+
 Underneath, `llvm-lit` is used to discover and run the tests.
 `benchmarks/` can only be compiled in-source with `make`.
 
diff --git a/hpvm/test/dnn_benchmarks/README.md b/hpvm/test/dnn_benchmarks/README.md
deleted file mode 100644
index fc9819d3cccab87c1a60b35d643b2f7b6b28bb7d..0000000000000000000000000000000000000000
--- a/hpvm/test/dnn_benchmarks/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Build DNN benchmarks
-
-Move to your HPVM build directory
-
-`cd hpvm/hpvm/build`
-
-## Build All Benchmarks
-
-`make dnn_benchmarks -j ${NUM_THREADS}`
-
-
-## Build a Specific CNN
-
-`make ${BENCH} -j ${NUM_THREADS}`
-
-The ${BENCH} name should match the directory name of the respective benchmark under `hpvm-c` directory
-
-# Run Benchmarks
-
-The benchmarks are built under: `/hpvm/hpvm/build/tools/hpvm/test/dnn_benchmarks/hpvm-c`
-
-Before running benchmarks set up CUDA/CuDNN paths using `hpvm/hpvm/set_paths.sh`
-
-Modify the setup script to point to your local installations.
-
-The accuracy of the benchmark (averaged across batches) is dumped to a file named `final_accuracy`
-
-Ensure that this matches the accuracy of corresponding benchmarks here: https://gitlab.engr.illinois.edu/llvm/hpvm/-/tree/approx_hpvm_reorg/hpvm/projects/keras
-
-
-
diff --git a/hpvm/test/dnn_benchmarks/profiling/CMakeLists.txt b/hpvm/test/dnn_benchmarks/profiling/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..712741c0e347acfc84e37bc2c91d998f549c7077
--- /dev/null
+++ b/hpvm/test/dnn_benchmarks/profiling/CMakeLists.txt
@@ -0,0 +1,14 @@
+# --[ llvm-lit test setup
+# lit.cfg.py looks for tests in CMAKE_CURRENT_BINARY_DIR (see lit.cfg.py),
+# as most of the tests require some kind of compilation / generation,
+# which is best done over there.
+configure_lit_site_cfg(
+  ../../lit.site.cfg.py.in
+  ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg.py
+  MAIN_CONFIG
+  ${CMAKE_CURRENT_SOURCE_DIR}/lit.cfg.py
+)
+add_lit_testsuite(check-hpvm-profiler "Run tests for package hpvm-profiler"
+  ${CMAKE_CURRENT_BINARY_DIR}
+  DEPENDS dnn_benchmarks # Requires all dnn benchmarks
+)
diff --git a/hpvm/test/dnn_benchmarks/profiling/README.md b/hpvm/test/dnn_benchmarks/profiling/README.md
deleted file mode 100644
index 3a4e115b47d846cd8bbb28247a61545a9b02537f..0000000000000000000000000000000000000000
--- a/hpvm/test/dnn_benchmarks/profiling/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
-## Running CNN benchmarks
-
-NOTE: Before running the CNN benchmarks on NVIDIA Jetson TX2, the following script must be executed in order to ensure that the clocks are running at the maximum frequency:
-```
-bash jetson_clocks.sh
-```
-
-We provide two scripts to run the CNN benchmarks automatically: run_dnn.py and run_dnns.py.
-
-In order to run all CNN benchmarks, execute the following:
-```
-python run_dnns.py
-```
-
-And to run a particular benchmark, one must specify the name of the benchmark. The valid names of the benchmarks are: alexnet_cifar10, alexnet2_cifar10, alexnet_imagenet, lenet_mnist, mobilenet_cifar10, resnet18_cifar10, resnet50_imagenet, vgg16_cifar10, vgg16_cifar100 and vgg16_imagenet.
-```
-python run_dnn.py <dnn_name>
-```
-
-These scripts not only automate the execution of the benchmarks, but also produce trade-off curves between accuracy and performance for executed benchmarks. These trade-off curves are named as <dnn_name>-tradeoff-curves.txt under ./hpvm/hpvm/docs/tradeoff-curves. Trade-off curves for the CNN benchmarks suit are already provided as examples under ./hpvm/hpvm/docs/tradeoff-curves.
diff --git a/hpvm/test/dnn_benchmarks/profiling/alexnet2_cifar10.test b/hpvm/test/dnn_benchmarks/profiling/alexnet2_cifar10.test
new file mode 100644
index 0000000000000000000000000000000000000000..455a3e75a7aff4ac76123cb62e860701e8397713
--- /dev/null
+++ b/hpvm/test/dnn_benchmarks/profiling/alexnet2_cifar10.test
@@ -0,0 +1 @@
+RUN: test_hpvm_c_profiling.py alexnet2_cifar10
\ No newline at end of file
diff --git a/hpvm/test/dnn_benchmarks/profiling/alexnet_cifar10.test b/hpvm/test/dnn_benchmarks/profiling/alexnet_cifar10.test
new file mode 100644
index 0000000000000000000000000000000000000000..62c667a249e514a17f8ea809f364c4e65c3332dd
--- /dev/null
+++ b/hpvm/test/dnn_benchmarks/profiling/alexnet_cifar10.test
@@ -0,0 +1 @@
+RUN: test_hpvm_c_profiling.py alexnet_cifar10
\ No newline at end of file
diff --git a/hpvm/test/dnn_benchmarks/profiling/lenet_mnist.test b/hpvm/test/dnn_benchmarks/profiling/lenet_mnist.test
new file mode 100644
index 0000000000000000000000000000000000000000..88856a8913f2c9fb275187d65d443c50aa8bf583
--- /dev/null
+++ b/hpvm/test/dnn_benchmarks/profiling/lenet_mnist.test
@@ -0,0 +1 @@
+RUN: test_hpvm_c_profiling.py lenet_mnist
\ No newline at end of file
diff --git a/hpvm/test/dnn_benchmarks/profiling/lit.cfg.py b/hpvm/test/dnn_benchmarks/profiling/lit.cfg.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3584478209402a308ed17ba2c3e5994a49dab76
--- /dev/null
+++ b/hpvm/test/dnn_benchmarks/profiling/lit.cfg.py
@@ -0,0 +1,34 @@
+# -*- Python -*-
+
+# Configuration file for the 'lit' test runner.
+
+import os
+
+import lit.formats
+from lit.llvm import llvm_config
+
+# name: The name of this test suite.
+config.name = "HPVM-Profiler"
+
+# testFormat: The test format to use to interpret tests.
+config.test_format = lit.formats.ShTest(False)
+
+# suffixes: A list of file extensions to treat as test files. This is overridden
+# by individual lit.local.cfg files in the test subdirectories.
+config.suffixes = [".test"]
+
+# test_source_root: The root path where tests are located.
+config.test_source_root = os.path.dirname(__file__)
+
+# test_exec_root: The root path where tests should be run.
+current_source_dir = os.path.dirname(os.path.relpath(__file__, config.llvm_src_root))
+current_binary_dir = os.path.join(config.llvm_obj_root, current_source_dir)
+config.test_exec_root = current_binary_dir
+
+# Tweak the PATH to include the tools dir.
+llvm_config.with_environment("PATH", config.llvm_tools_dir, append_path=True)
+
+llvm_config.use_default_substitutions()
+
+# Add substitution for our main script in this directory.
+llvm_config.add_tool_substitutions(["test_hpvm_c_profiling.py"], config.test_source_root)
diff --git a/hpvm/test/dnn_benchmarks/profiling/mobilenet_cifar10.test b/hpvm/test/dnn_benchmarks/profiling/mobilenet_cifar10.test
new file mode 100644
index 0000000000000000000000000000000000000000..a40981c9408b52f45ae9a58ab3895e12889bf665
--- /dev/null
+++ b/hpvm/test/dnn_benchmarks/profiling/mobilenet_cifar10.test
@@ -0,0 +1 @@
+RUN: test_hpvm_c_profiling.py mobilenet_cifar10
\ No newline at end of file
diff --git a/hpvm/test/dnn_benchmarks/profiling/resnet18_cifar10.test b/hpvm/test/dnn_benchmarks/profiling/resnet18_cifar10.test
new file mode 100644
index 0000000000000000000000000000000000000000..5d09297309e6f2ac48c23e0c529021144d6734e7
--- /dev/null
+++ b/hpvm/test/dnn_benchmarks/profiling/resnet18_cifar10.test
@@ -0,0 +1 @@
+RUN: test_hpvm_c_profiling.py resnet18_cifar10
\ No newline at end of file
diff --git a/hpvm/test/dnn_benchmarks/profiling/test_hpvm_c_profiling.py b/hpvm/test/dnn_benchmarks/profiling/test_hpvm_c_profiling.py
new file mode 100755
index 0000000000000000000000000000000000000000..5f4a96740cedb05295e4fcde0c5dfa65a0be34cc
--- /dev/null
+++ b/hpvm/test/dnn_benchmarks/profiling/test_hpvm_c_profiling.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python3
+from pathlib import Path
+from sys import argv
+
+from hpvm_profiler import profile_configs
+
+# Benchmark binaries, relative to the cwd we are invoked in.
+benchmarks_bindir = Path("../hpvm-c")
+# Benchmark sources, relative to the location of this file.
+benchmarks_srcdir = Path(__file__).parent / "../hpvm-c/benchmarks"
+# lit runs us in the "current" binary directory, for example (depending on
+# where the build dir is) "hpvm/build/tools/hpvm/test/dnn_benchmarks/profiling".
+# Because the binary tree mirrors the source tree, the benchmark binaries can
+# be located from the source directory structure instead of through
+# hardcoded paths.
+dnn = argv[1]
+bench_bin_file = benchmarks_bindir / dnn
+config_file = benchmarks_srcdir / dnn / "data/tuner_confs.txt"
+out_config_file = f"./{dnn}.txt"
+profile_configs(bench_bin_file, config_file, out_config_file)
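
For reference, the profiling run that each `.test` file triggers through `test_hpvm_c_profiling.py` can also be reproduced by hand. The sketch below is not part of the patch: the build-tree and source-tree paths are hypothetical examples, and `profile_configs` is assumed to take the same three arguments (benchmark binary, input configuration file, output path) with which the patch calls it.

```python
# Minimal sketch: profile one benchmark outside llvm-lit, the way the
# check-hpvm-profiler tests do. Adjust the paths to your own checkout/build.
from pathlib import Path

from hpvm_profiler import profile_configs

build_bin_dir = Path("hpvm/build/tools/hpvm/test/dnn_benchmarks/hpvm-c")  # hypothetical build tree
src_bench_dir = Path("hpvm/test/dnn_benchmarks/hpvm-c/benchmarks")        # source layout used above

dnn = "lenet_mnist"  # any benchmark that has a .test file in this patch
profile_configs(
    build_bin_dir / dnn,                               # compiled HPVM-C benchmark binary
    src_bench_dir / dnn / "data" / "tuner_confs.txt",  # configurations to profile
    f"{dnn}.txt",                                      # profiled configurations written here
)
```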