Commit b18b5f12 authored by Yifan Zhao

Finished generating model metadata

parent 89a1cc7c
@@ -2,206 +2,203 @@
   {
     "name": "11",
     "speedup": 1.0,
-    "applies_to": [
-      "Conv2D",
-      "MatMul"
-    ]
+    "applies_to": null
   },
   {
     "name": "12",
     "speedup": 1.5,
     "applies_to": [
-      "Conv2D",
-      "MatMul"
+      "convolution",
+      "linear"
     ]
   },
   {
     "name": "151",
     "speedup": 3.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "152",
     "speedup": 3.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "153",
     "speedup": 3.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "154",
     "speedup": 3.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "155",
     "speedup": 2.25,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "156",
     "speedup": 2.25,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "157",
     "speedup": 2.25,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "158",
     "speedup": 2.25,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "159",
     "speedup": 2.25,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "160",
     "speedup": 2.25,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "161",
     "speedup": 2.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "162",
     "speedup": 2.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "163",
     "speedup": 2.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "164",
     "speedup": 2.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "165",
     "speedup": 2.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "166",
     "speedup": 2.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "167",
     "speedup": 2.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "168",
     "speedup": 2.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "261",
     "speedup": 3.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "262",
     "speedup": 3.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "263",
     "speedup": 2.25,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "264",
     "speedup": 2.25,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "265",
     "speedup": 2.25,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "266",
     "speedup": 2.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "267",
     "speedup": 2.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "268",
     "speedup": 2.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   },
   {
     "name": "269",
     "speedup": 2.0,
     "applies_to": [
-      "Conv2D"
+      "convolution"
     ]
   }
 ]
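
The knob list above drops the frontend op-type names ("Conv2D", "MatMul") in favor of the HPVM op-type names ("convolution", "linear") introduced further down in this commit, and uses `"applies_to": null` to mark a knob that applies to every operator. A minimal sketch of how this file is interpreted, mirroring the `export_metadata` logic in the exporter diff below (the file path is hypothetical):

```python
import json
from collections import defaultdict

with open("approx_knobs.json") as f:  # hypothetical path to the knobs file
    knobs = json.load(f)

ty_knobs = defaultdict(list)  # knobs keyed by HPVM op type
default_knobs = []            # knobs with "applies_to": null apply to every op
for k in knobs:
    applies_to = k.pop("applies_to")
    info = (k["name"], k["speedup"])
    if applies_to is None:
        default_knobs.append(info)
    else:
        for ty in applies_to:
            ty_knobs[ty].append(info)

# e.g. knob "12" lands in both ty_knobs["convolution"] and ty_knobs["linear"];
# knob "11" (the speedup-1.0 baseline) lands in default_knobs.
```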
 import os
 from pathlib import Path
 from tempfile import NamedTemporaryFile
-from typing import IO, NamedTuple, Optional, Sequence, Tuple, Union
+from typing import Dict, IO, List, NamedTuple, Optional, Sequence, Tuple, Union
 import onnx
 import torch
@@ -33,6 +33,7 @@ class ModelExporter:
     testset_name = "test_input.bin", "test_labels.bin"
     weight_dir_name = "weights"
     source_file_name = "hpvm_c.cpp"
+    metadata_file_name = "ops.json"
 
     def __init__(
         self,
@@ -72,34 +73,64 @@ class ModelExporter:
     def export_datasets(self):
         input_, labels = self.tuneset_name
-        self._dump_dataset(self.tune_dataset, self.weight_dir / input_, self.weight_dir / labels)
+        self._dump_dataset(
+            self.tune_dataset, self.weight_dir / input_, self.weight_dir / labels
+        )
         input_, labels = self.testset_name
-        self._dump_dataset(self.test_dataset, self.weight_dir / input_, self.weight_dir / labels)
+        self._dump_dataset(
+            self.test_dataset, self.weight_dir / input_, self.weight_dir / labels
+        )
 
     def export_metadata(
-        self,
-        output: PathLike = None, approx_knobs_file: PathLike = def_approx_knobs_file
+        self, output: PathLike, approx_knobs_file: PathLike = def_approx_knobs_file
     ):
         import json
         from collections import defaultdict
 
         with Path(approx_knobs_file).open() as f:
             knobs = json.load(f)
-        ty_knobs = defaultdict(list)
+        KnobInfoT = Tuple[str, float]
+        ty_knobs: Dict[str, List[KnobInfoT]] = defaultdict(list)
+        default_knobs: List[KnobInfoT] = []
         for k in knobs:
-            for ty in k.pop("applies_to"):
-                ty_knobs[ty].append((k["name"], k["speedup"]))
-        knobs_used = set()
+            applies_to = k.pop("applies_to")
+            k = k["name"], k["speedup"]
+            if applies_to is None:
+                default_knobs.append(k)
+                continue
+            for ty in applies_to:
+                ty_knobs[ty].append(k)
+        idx = 0
+        op_cost: Dict[str, int] = {}
+        op_knobs: Dict[str, List[str]] = {}
+        knob_speedup: Dict[str, float] = {}
         for node in self.dfg.traverse_order:
-            knobs = ty_knobs.get(node.op_type, [])
-            flops = node.get_flops()
-            knobs_used.update(knobs)
-            print(f"{node.name} ({node.op_type}) -> {knobs}, {flops}")
+            if not node.hpvm_op_type:
+                continue
+            hpvm_op_name = f"{node.hpvm_op_type}_{idx}"
+            type_knobs = ty_knobs.get(node.hpvm_op_type, [])
+            this_op_knobs = type_knobs + default_knobs
+            knobs_speedup = dict(this_op_knobs)
+            op_cost[hpvm_op_name] = int(node.get_flops())  # May get np.int64
+            op_knobs[hpvm_op_name] = list(knobs_speedup.keys())
+            knob_speedup.update(knobs_speedup)
+            idx += 1
+        with Path(output).open("w") as f:
+            json.dump(
+                {
+                    "op_cost": op_cost,
+                    "knob_speedup": knob_speedup,
+                    "op_knobs": op_knobs,
+                },
+                f,
+                indent=2,
+            )
 
     def export_all(self, output: PathLike = None, batch_size: Optional[int] = None):
         default_codefile = self.output_dir / self.source_file_name
         self.export_source_code(output or default_codefile, batch_size)
-        self.export_metadata()
+        default_metafile = self.output_dir / self.metadata_file_name
+        self.export_metadata(default_metafile)
         self.export_weights()
         self.export_datasets()
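
With this hunk, `export_metadata` walks the DFG in traversal order, names each HPVM op `f"{hpvm_op_type}_{idx}"`, and writes three maps to ops.json. A hedged sketch of driving it and of the output shape (the constructor arguments, model, and FLOP counts are hypothetical, and the knob lists are abridged):

```python
import json

# Hypothetical driver; see the class above for the real constructor signature:
#   exporter = ModelExporter(model, tune_dataset, test_dataset, output_dir)
#   exporter.export_all()  # now also writes <output_dir>/ops.json
#
# For a model with one convolution followed by one linear layer, ops.json
# would take roughly this shape:
expected = {
    "op_cost": {"convolution_0": 1152000, "linear_1": 64000},  # FLOPs per op
    "knob_speedup": {"11": 1.0, "12": 1.5, "151": 3.0},  # knob name -> speedup
    "op_knobs": {
        "convolution_0": ["12", "151", "11"],  # type-specific knobs, then defaults
        "linear_1": ["12", "11"],
    },
}
print(json.dumps(expected, indent=2))
```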
@@ -121,7 +152,9 @@ class ModelExporter:
         inputs.tofile(labels_filename)
 
     @classmethod
-    def _check_datasets(cls, tune_dataset: DatasetTy, test_dataset: DatasetTy) -> Tuple[int, int, int, int]:
+    def _check_datasets(
+        cls, tune_dataset: DatasetTy, test_dataset: DatasetTy
+    ) -> Tuple[int, int, int, int]:
         tune_shape = cls._check_dataset_get_shape(tune_dataset)
         test_shape = cls._check_dataset_get_shape(test_dataset)
         if tune_shape != test_shape:
@@ -137,9 +170,14 @@ class ModelExporter:
         if isinstance(dataset, Dataset):
             size = len(dataset)
             sample = dataset[0]
-            if not isinstance(sample, (np.ndarray, torch.Tensor)) or len(sample.shape) != 4:
-                raise ValueError("Dataset must be a 4D tensor due to backend limitation")
-            return size, *sample.shape
+            if (
+                not isinstance(sample, (np.ndarray, torch.Tensor))
+                or len(sample.shape) != 4
+            ):
+                raise ValueError(
+                    "Dataset must be a 4D tensor due to backend limitation"
+                )
+            return [size, *sample.shape]
         if not isinstance(dataset, BinDataset):
             raise TypeError("Only BinDataset or PyTorch Dataset are supported")
         input_file = Path(dataset.input_file)
@@ -15,6 +15,7 @@ class DFGNode(abc.ABC):
     """
 
     op_type = ""
+    hpvm_op_type = ""
 
     def __init__(
         self, name: str, input_shapes: Sequence[ShapeT], output_shape: ShapeT, **kwargs
@@ -114,6 +115,7 @@ class WeightTensor(TensorNode):
 
 class Conv2DNode(DFGNode):
     op_type = "Conv2D"
+    hpvm_op_type = "convolution"
 
     def __init__(
         self,
@@ -205,15 +207,18 @@ class _Pool2DNode(DFGNode, abc.ABC):
 class MaxPool2DNode(_Pool2DNode):
     pool_type = "0"
     op_type = "MaxPool2D"
+    hpvm_op_type = "maxpool"
 
 
 class AveragePool2DNode(_Pool2DNode):
     pool_type = "1"
     op_type = "AveragePool2D"
+    hpvm_op_type = "avgpool"
 
 
 class BiasAddNode(DFGNode):
     op_type = "BiasAdd"
+    hpvm_op_type = "add"
 
     def codegen(self):
         return "tensorAdd", []
@@ -224,6 +229,7 @@ class BiasAddNode(DFGNode):
 
 class MatMulNode(DFGNode):
     op_type = "MatMul"
+    hpvm_op_type = "linear"
 
     def codegen(self):
         return "tensorGemmGPU", []
@@ -265,6 +271,7 @@ class MatMulNode(DFGNode):
 
 class SoftMaxNode(DFGNode):
     op_type = "SoftMax"
+    hpvm_op_type = "softmax"
 
     def codegen(self):
         return "tensorSoftmax", []
@@ -275,6 +282,7 @@ class SoftMaxNode(DFGNode):
 
 class AddNode(DFGNode):
     op_type = "Add"
+    hpvm_op_type = "add"
 
     def codegen(self):
         return "tensorAdd", []
@@ -285,6 +293,7 @@ class AddNode(DFGNode):
 
 class ReluNode(DFGNode):
     op_type = "ReLU"
+    hpvm_op_type = "relu"
 
     def codegen(self):
         return "tensorRelu", []
@@ -295,6 +304,7 @@ class ReluNode(DFGNode):
 
 class TanhNode(DFGNode):
     op_type = "Tanh"
+    hpvm_op_type = "tanh"
 
     def codegen(self):
         return "tensorTanh", []
@@ -304,7 +314,8 @@ class TanhNode(DFGNode):
 
 
 class BatchNormalizationNode(DFGNode):
-    op_type = "BN"
+    op_type = "BatchNorm"
+    hpvm_op_type = "batchnorm"
 
     def __init__(
         self,
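
Assembled from the class attributes added above, the commit's full mapping from frontend op_type names to the HPVM op types referenced by the knobs file's "applies_to" entries (the dict name is illustrative, not part of the source):

```python
# op_type -> hpvm_op_type, per the DFGNode subclasses in this diff.
OP_TYPE_TO_HPVM = {
    "Conv2D": "convolution",
    "MaxPool2D": "maxpool",
    "AveragePool2D": "avgpool",
    "BiasAdd": "add",       # BiasAdd and Add share the "add" HPVM op type
    "MatMul": "linear",
    "SoftMax": "softmax",
    "Add": "add",
    "ReLU": "relu",
    "Tanh": "tanh",
    "BatchNorm": "batchnorm",  # renamed from "BN" in this commit
}
```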