Skip to content
Snippets Groups Projects
Commit 1475e91f authored by Yifan Zhao's avatar Yifan Zhao
Browse files

Make frontend actually work on pytorch source

parent a6309ad6
No related branches found
No related tags found
No related merge requests found
...@@ -7,6 +7,6 @@ setup( ...@@ -7,6 +7,6 @@ setup(
author="Yuanjing Shi, Yifan Zhao", author="Yuanjing Shi, Yifan Zhao",
author_email="ys26@illinois.edu, yifanz16@illinois.edu", author_email="ys26@illinois.edu, yifanz16@illinois.edu",
packages=["torch2hpvm"], packages=["torch2hpvm"],
install_requires=["jinja2>=2.11", "networkx>=2.5", "onnx>=1.8.0"], install_requires=["jinja2>=2.11", "networkx>=2.5", "onnx>=1.8.0", "torch"],
entry_points={"console_scripts": ["torch2hpvm=torch2hpvm:main"]}, entry_points={"console_scripts": ["torch2hpvm=torch2hpvm:main"]},
) )
from .compile import compile from .compile import compile_onnx_model, compile_torch_module
from .__main__ import main from .__main__ import main
import os import os
from pathlib import Path from pathlib import Path
from .compile import compile from .compile import compile_onnx_model
def parse_args(): def parse_args():
...@@ -15,7 +15,7 @@ def parse_args(): ...@@ -15,7 +15,7 @@ def parse_args():
help="Output folder where source file and weight files are generated", help="Output folder where source file and weight files are generated",
) )
parser.add_argument( parser.add_argument(
"input_size", type=int, help="Size of input dataset", "dataset_size", type=int, help="Size of input dataset",
) )
parser.add_argument( parser.add_argument(
"-p", "-p",
...@@ -51,5 +51,4 @@ hpvmc: HPVM C Interface. Default value is hpvmc.""", ...@@ -51,5 +51,4 @@ hpvmc: HPVM C Interface. Default value is hpvmc.""",
def main():
    """Entry point for the ``torch2hpvm`` command-line tool.

    Parses CLI arguments and forwards them verbatim (as keyword
    arguments) to ``compile_onnx_model``.
    """
    cli_args = parse_args()
    compile_onnx_model(**vars(cli_args))
import os
from pathlib import Path from pathlib import Path
from typing import Optional, Union from tempfile import NamedTemporaryFile
from typing import IO, Optional, Sequence, Union
import onnx import onnx
import torch
from onnx import version_converter from onnx import version_converter
from torch.nn import Module
from .codegen_hpvm import HpvmCodeGen from .codegen_hpvm import HpvmCodeGen
from .codegen_tensor import TensorCodeGen from .codegen_tensor import TensorCodeGen
...@@ -11,7 +15,7 @@ from .graph_builder import DFG ...@@ -11,7 +15,7 @@ from .graph_builder import DFG
PathLike = Union[Path, str] PathLike = Union[Path, str]
def check_version(model, new_version): def check_onnx_version(model, new_version):
try: try:
opset = model.opset_import[0].version if model.opset_import else 1 opset = model.opset_import[0].version if model.opset_import else 1
except AttributeError: except AttributeError:
...@@ -29,24 +33,73 @@ def check_version(model, new_version): ...@@ -29,24 +33,73 @@ def check_version(model, new_version):
return model return model
def torch_to_onnx(
    module_cpu: Module,
    model_args_cpu: tuple,
    output_obj: Union[IO, PathLike],
    opset_version: int = 10,
):
    """Serialize a CPU-resident PyTorch module to ONNX.

    The exported model is written to ``output_obj``, which may be a
    filesystem path or an open binary file object.
    """
    export_options = dict(
        export_params=True,  # store the trained weights inside the model file
        opset_version=opset_version,  # the ONNX opset version to target
        do_constant_folding=True,  # fold constants as an export-time optimization
        input_names=["input"],
        output_names=["output"],
        # Keep the batch dimension symbolic so any batch size is accepted.
        dynamic_axes={
            "input": {0: "batch_size"},
            "output": {0: "batch_size"},
        },
        # NOTE(review): strip_doc_string was removed from torch.onnx.export in
        # newer torch releases — confirm against the torch version pinned here.
        strip_doc_string=False,
    )
    # Export must happen on CPU; some models only support CPU-side export.
    torch.onnx.export(module_cpu.eval(), model_args_cpu, output_obj, **export_options)
def compile_onnx_model(
    file_or_model: Union[PathLike, onnx.ModelProto],
    output_dir: PathLike,
    dataset_size: int,
    hpvmc: bool,
    prefix: Optional[str] = None,
    batch_size: Optional[int] = None,
    opset: Optional[int] = None,
):
    """Compile an ONNX model into generated source files plus dumped weights.

    ``file_or_model`` is either a path to an ONNX file or an in-memory
    ``onnx.ModelProto``. When ``hpvmc`` is true the HPVM-C generator is
    used, otherwise the tensor-runtime generator. Output lands in
    ``output_dir`` (created if missing).
    """
    # Accept either an already-loaded model or a path to one on disk.
    if isinstance(file_or_model, onnx.ModelProto):
        model = file_or_model
    else:
        model = onnx.load(Path(file_or_model).as_posix())
    # Optionally convert the model to the requested opset before lowering.
    if opset is not None:
        model = check_onnx_version(model, opset)
    model = onnx.shape_inference.infer_shapes(model)
    dfg = DFG(model.graph)
    output_dir = Path(output_dir)
    os.makedirs(output_dir, exist_ok=True)
    # Both generators share the same constructor/compile interface.
    codegen_cls = HpvmCodeGen if hpvmc else TensorCodeGen
    codegen_cls(dfg, output_dir, dataset_size, batch_size, prefix).compile()
    dfg.dump_weights(output_dir)
def compile_torch_module(
    module: Module,
    input_shape: Sequence[int],
    output_dir: PathLike,
    hpvmc: bool,
    prefix: Optional[str] = None,
    batch_size: Optional[int] = None,
):
    """Export a PyTorch module to ONNX via a temp file, then compile it.

    ``input_shape`` is the shape of the whole dataset; its leading
    dimension is taken as the dataset size and a single random sample
    (batch of 1) is used to trace the module for export.
    """
    dataset_size = input_shape[0]
    # One sample suffices for tracing; the batch axis is dynamic on export.
    sample_input = torch.rand((1, *input_shape[1:]))
    with NamedTemporaryFile("w+b") as tmp:
        torch_to_onnx(module, (sample_input,), tmp)
        tmp.seek(0)  # rewind so the ONNX loader reads from the start
        onnx_model = onnx.load_model(tmp)
    compile_onnx_model(onnx_model, output_dir, dataset_size, hpvmc, prefix, batch_size)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment