Skip to content
Snippets Groups Projects
Commit aede13c7 authored by Yifan Zhao's avatar Yifan Zhao
Browse files

Added tests for Keras frontend

parent eb8a6c69
No related branches found
No related tags found
No related merge requests found
......@@ -26,6 +26,7 @@ build-and-test:
- cd build
- make -j32 check-hpvm-pass
- make -j32 check-hpvm-dnn
- make -j32 check-hpvm-keras-acc
- make -j32 check-hpvm-torch-acc
- make -j32 check-hpvm-torch-profiling
- make -j32 check-hpvm-torch-tuning
......
......@@ -6,7 +6,8 @@ set(CLANG_C ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/clang)
set(CLANG_CXX ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/clang++)
add_subdirectory(hpvm_pass) # Passes test suite
add_subdirectory(benchmarks)
add_subdirectory(benchmarks) # HPVM benchmarks (no DNN)
add_subdirectory(dnn_benchmarks/hpvm-c) # HPVM-C DNN accuracy test suite
add_subdirectory(dnn_benchmarks/keras) # Keras frontend test suite
add_subdirectory(dnn_benchmarks/pytorch) # Torch frontend test suites (3 of them)
add_subdirectory(dnn_benchmarks/tensor-rt-src) # tensor_runtime DNN (build only, no tests)
......@@ -61,15 +61,18 @@ The following targets runs these tests respectively:
Depending on your hardware capability, this test can take 5-30 minutes.
Also, this is set to run sequentially out of GPU memory concerns.
* ``make -j check-hpvm-torch-acc`` generates all 10 DNNs with torch frontend,
runs them and checks their accuracy. This tests the torch frontend in isolation.
* ``make -j check-hpvm-keras-acc`` generates all 10 DNNs with Keras frontend,
runs them and checks their accuracy. This tests the Keras frontend in isolation.
* ``make -j check-hpvm-torch-tuning`` runs `predtuner` with binaries from torch frontend
* Similarly, ``make -j check-hpvm-torch-acc`` generates all 10 DNNs with PyTorch frontend,
runs them and checks their accuracy, to test the PyTorch frontend in isolation.
* ``make -j check-hpvm-torch-tuning`` runs `predtuner` with binaries from PyTorch frontend
to exercise both empirical and predictive autotuning.
This is only done for a few smaller networks for 5 iterations,
as it is extremely time-consuming.
* ``make -j check-hpvm-torch-profiling`` runs `hpvm-profiler` with binaries from torch frontend,
* ``make -j check-hpvm-torch-profiling`` runs `hpvm-profiler` with binaries from PyTorch frontend,
and presents the tradeoff curve with profiled speedup.
This is only done for a few smaller networks.
......
# --[ llvm-lit test setup
# Generate this directory's lit.site.cfg.py from the shared template two
# levels up, pointing lit at the local lit.cfg.py as the main config.
configure_lit_site_cfg(
  ../../lit.site.cfg.py.in
  ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg.py
  MAIN_CONFIG
  ${CMAKE_CURRENT_SOURCE_DIR}/lit.cfg.py
)
# Register the `check-hpvm-keras-acc` target, which runs every lit test
# found under this directory's binary dir.
add_lit_testsuite(check-hpvm-keras-acc "Run accuracy tests for HPVM Keras frontend"
  ${CMAKE_CURRENT_BINARY_DIR}
  # We depend on check_dnn_acc.py defined in ../hpvm-c/
  # to compare the inference accuracy of our frontend-generated binary
  # to that of the baseline.
  DEPENDS check_dnn_acc
  ARGS "-j1" # Run frontend generation sequentially
)
# Accuracy test for alexnet_imagenet via the Keras frontend.
# Build in a clean scratch directory so reruns start fresh.
RUN: rm -rf alexnet_imagenet && mkdir alexnet_imagenet && cd alexnet_imagenet
# Run the generator script (found via $DIR_PREFIX, set in lit.cfg.py):
# reload stored weights, run the frontend, compile plain + tuner binaries.
RUN: python3 $DIR_PREFIX/alexnet_imagenet.py hpvm_reload frontend compile compile_tuner
# Execute the compiled binary, then compare its final accuracy to baseline.
RUN: src/alexnet_imagenet_src/HPVM_binary
RUN: check_dnn_acc.py final_accuracy alexnet_imagenet
# Clean up the scratch directory on success.
RUN: cd ..
RUN: rm -r alexnet_imagenet
# -*- Python -*-
# Configuration for the 'lit' test runner: HPVM Keras frontend accuracy suite.
import os
import lit.formats
from lit.llvm import llvm_config

# name: The name of this test suite.
config.name = "HPVM-Keras"

# testFormat: run each test as a shell-script (ShTest) test, executing
# its RUN: lines through an external shell.
config.test_format = lit.formats.ShTest(True)

# suffixes: file extensions treated as tests. This is overridden by
# individual lit.local.cfg files in the test subdirectories.
config.suffixes = [".test"]

# test_source_root: the directory holding this file and the .test files.
config.test_source_root = os.path.dirname(__file__)

# test_exec_root: mirror this directory's path relative to the source
# root into the object (build) tree, and run the tests there.
rel_dir = os.path.dirname(os.path.relpath(__file__, config.llvm_src_root))
config.test_exec_root = os.path.join(config.llvm_obj_root, rel_dir)

# Every .test file uses $DIR_PREFIX (this source dir) to locate its .py
# generator script. PATH cannot be used because the scripts are not
# executables; they are invoked as "python3 $DIR_PREFIX/<network_name> ...".
llvm_config.with_environment("DIR_PREFIX", config.test_source_root)

# check_dnn_acc.py is placed under build/bin; substitute its full path.
llvm_config.add_tool_substitutions(["check_dnn_acc.py"], config.llvm_tools_dir)
# Accuracy test for mobilenet_cifar10 via the Keras frontend.
# Build in a clean scratch directory so reruns start fresh.
RUN: rm -rf mobilenet_cifar10 && mkdir mobilenet_cifar10 && cd mobilenet_cifar10
# Run the generator script (found via $DIR_PREFIX, set in lit.cfg.py):
# reload stored weights, run the frontend, compile plain + tuner binaries.
RUN: python3 $DIR_PREFIX/mobilenet_cifar10.py hpvm_reload frontend compile compile_tuner
# Execute the compiled binary, then compare its final accuracy to baseline.
RUN: src/mobilenet_cifar10_src/HPVM_binary
RUN: check_dnn_acc.py final_accuracy mobilenet_cifar10
# Clean up the scratch directory on success.
RUN: cd ..
RUN: rm -r mobilenet_cifar10
# Accuracy test for resnet18_cifar10 via the Keras frontend.
# Build in a clean scratch directory so reruns start fresh.
RUN: rm -rf resnet18_cifar10 && mkdir resnet18_cifar10 && cd resnet18_cifar10
# NOTE(review): this test passes `keras_reload` where the sibling tests
# (alexnet, mobilenet, vgg16) pass `hpvm_reload` — confirm whether this
# deliberately exercises the Keras weight-loading path or is a typo.
RUN: python3 $DIR_PREFIX/resnet18_cifar10.py keras_reload frontend compile compile_tuner
# Execute the compiled binary, then compare its final accuracy to baseline.
RUN: src/resnet18_cifar10_src/HPVM_binary
RUN: check_dnn_acc.py final_accuracy resnet18_cifar10
# Clean up the scratch directory on success.
RUN: cd ..
RUN: rm -r resnet18_cifar10
# Accuracy test for vgg16_cifar100 via the Keras frontend.
# Build in a clean scratch directory so reruns start fresh.
RUN: rm -rf vgg16_cifar100 && mkdir vgg16_cifar100 && cd vgg16_cifar100
# Run the generator script (found via $DIR_PREFIX, set in lit.cfg.py):
# reload stored weights, run the frontend, compile plain + tuner binaries.
RUN: python3 $DIR_PREFIX/vgg16_cifar100.py hpvm_reload frontend compile compile_tuner
# Execute the compiled binary, then compare its final accuracy to baseline.
RUN: src/vgg16_cifar100_src/HPVM_binary
RUN: check_dnn_acc.py final_accuracy vgg16_cifar100
# Clean up the scratch directory on success.
RUN: cd ..
RUN: rm -r vgg16_cifar100
......@@ -29,8 +29,6 @@ config.test_exec_root = current_binary_dir
llvm_config.with_environment("PATH", config.llvm_tools_dir, append_path=True)
# Add substitution for check_dnn_acc.py which goes under build/bin.
llvm_config.add_tool_substitutions(
["check_dnn_acc.py"], os.path.join(config.llvm_obj_root, "bin")
)
llvm_config.add_tool_substitutions(["check_dnn_acc.py"], config.llvm_tools_dir)
# Add substitution for our main script in this directory.
llvm_config.add_tool_substitutions(["test_frontend.py"], config.test_source_root)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment