Skip to content
Snippets Groups Projects
Commit e960a7f1 authored by Yifan Zhao's avatar Yifan Zhao
Browse files

Changed pytorch version requirement

parent 466d5930
No related branches found
No related tags found
No related merge requests found
......@@ -11,7 +11,7 @@ dependencies:
- pandas=1.1
- python==3.6.13
- pip
- pytorch=1.7
- pytorch==1.6.0
- torchvision=0.8
- tqdm=4.59
- scipy==1.1.0
......
Subproject commit fd00663b145998da06ef861ffafa6c99ac2c0a47
Subproject commit 2fbd6f876c34bfdbcbddc71cd73646e71bde5748
......@@ -12,7 +12,8 @@ setup(
"jinja2>=2.11",
"networkx>=2.5",
"onnx>=1.8.0",
"torch>=1.5",
# Starting from 1.7.0 PyTorch starts to do some weird optimizations.
"torch>=1.4,<=1.6",
"onnx-simplifier>=0.2.27",
],
)
......@@ -81,7 +81,7 @@ class ModelExporter:
"tune_labels_path": (self.weight_dir / self.tuneset_name[1]).as_posix(),
"conf_path": config_file.as_posix(),
"fifo_path_r": (output_dir / self.fifo_file_name_r).as_posix(),
"fifo_path_w": (output_dir / self.fifo_file_name_w).as_posix()
"fifo_path_w": (output_dir / self.fifo_file_name_w).as_posix(),
}
self.compile_args = ["-t", "tensor", "--conf-file", str(config_file)]
self.codegen = HpvmCodeGen(*args3, "tensor", self.path_params)
......@@ -161,7 +161,7 @@ class ModelExporter:
"knob_speedup": knob_speedup,
"op_knobs": op_knobs,
"baseline_knob": baseline_knob,
**self.path_params
**self.path_params,
},
f,
indent=2,
......@@ -290,7 +290,9 @@ class ModelExporter:
raise ValueError(f"Cannot accept model of type {type(model)}")
if opset is not None:
onnx_model = check_onnx_version(onnx_model, opset)
onnx_model, check = simplify(onnx_model)
onnx_model, check = simplify(
onnx_model, skip_fuse_bn=True, skipped_optimizers=["fuse_bn_into_conv"]
)
assert check, "Simplified ONNX model could not be validated"
return onnx.shape_inference.infer_shapes(onnx_model)
......@@ -318,17 +320,18 @@ def torch_to_onnx(
output_obj: Union[IO, PathLike],
opset_version: int = 10,
):
from torch.onnx import export
# Export the model (must be on CPU, some model only supports this)
torch.onnx.export(
export(
module_cpu.eval(),
model_args_cpu,
output_obj,
export_params=True, # store the trained parameter weights inside the model file
do_constant_folding=False,
opset_version=opset_version, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=["input"], # the model's input names
output_names=["output"], # the model's output names
strip_doc_string=False,
)
......
0% Loading or loading failed.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment