Commit b3135926 authored by Yifan Zhao

Moved miniera model files together

parent 060bc6cf
@@ -10,7 +10,8 @@ from torch2hpvm import BinDataset, ModelExporter
 self_folder = Path(__file__).parent.absolute()
 site.addsitedir(self_folder.as_posix())
-from torch_dnn import CIFAR, MiniERA, quantize
+from torch_dnn import quantize
+from torch_dnn.miniera import CIFAR, MiniERA
 
 # Consts (don't change)
 BUFFER_NAME = "hpvm-mod.nvdla"
...
+from .datasets import CIFAR
+from .miniera import MiniERA
 from .quantizer import quantize
-from .dataset import CIFAR
-from .model import MiniERA
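
In short, the model and dataset classes now come from torch_dnn.miniera (which, per the added imports above, re-exports CIFAR and MiniERA), while quantize stays at the package top level. A minimal import sketch for downstream code after this commit:

# Imports after this commit: CIFAR and MiniERA live under the miniera
# subpackage; quantize is still exposed by torch_dnn itself.
from torch_dnn import quantize
from torch_dnn.miniera import CIFAR, MiniERA
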
@@ -7,39 +7,16 @@ import torch
 from torch.utils.data.dataset import Dataset
 
 RetT = Tuple[torch.Tensor, torch.Tensor]
-msg_logger = logging.getLogger(__name__)
 PathLike = Union[Path, str]
+msg_logger = logging.getLogger(__name__)
 
 
 class SingleFileDataset(Dataset):
-    image_shape = None
-
     def __init__(self, inputs: torch.Tensor, outputs: torch.Tensor):
         self.inputs, self.outputs = inputs, outputs
 
-    @classmethod
-    def from_file(cls, *args, **kwargs):
-        pass
-
-    @property
-    def sample_input(self):
-        inputs, outputs = next(iter(self))
-        return inputs
-
-    def __len__(self) -> int:
-        return len(self.inputs)
-
-    def __getitem__(self, idx) -> RetT:
-        return self.inputs[idx], self.outputs[idx]
-
-    def __iter__(self) -> Iterator[RetT]:
-        for i in range(len(self)):
-            yield self[i]
-
-
-class DNNDataset(SingleFileDataset):
-    image_shape = None
-    label_ty = np.int32
-
     @classmethod
     def from_file(
         cls,
@@ -61,8 +38,8 @@ class DNNDataset(SingleFileDataset):
         labels = read_tensor_from_file(
             labels_file,
             -1,
-            read_ty=cls.label_ty,
-            cast_ty=np.long,
+            read_ty=np.int32,
+            cast_ty=np.int64,
             count=count,
             offset=offset,
         )
@@ -71,16 +48,31 @@ class DNNDataset(SingleFileDataset):
         msg_logger.info(f"%d entries loaded from dataset.", inputs.shape[0])
         return cls(inputs, labels)
 
+    @property
+    def sample_input(self):
+        inputs, outputs = next(iter(self))
+        return inputs
+
+    def __len__(self) -> int:
+        return len(self.inputs)
+
+    def __getitem__(self, idx) -> RetT:
+        return self.inputs[idx], self.outputs[idx]
+
+    def __iter__(self) -> Iterator[RetT]:
+        for i in range(len(self)):
+            yield self[i]
+
 
-class MNIST(DNNDataset):
+class MNIST(SingleFileDataset):
     image_shape = 1, 28, 28
 
 
-class CIFAR(DNNDataset):
+class CIFAR(SingleFileDataset):
     image_shape = 3, 32, 32
 
 
-class ImageNet(DNNDataset):
+class ImageNet(SingleFileDataset):
     image_shape = 3, 224, 224
...
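
With DNNDataset folded into SingleFileDataset, the container protocol (__len__, __getitem__, __iter__, sample_input) now lives on the base class that MNIST, CIFAR, and ImageNet extend. A minimal usage sketch, assuming the post-commit torch_dnn.miniera import path; the in-memory tensors below stand in for the files that from_file (whose full signature is not shown in these hunks) would normally load:

import torch

from torch_dnn.miniera import CIFAR  # assumed import path after this commit

# Build a CIFAR dataset directly from in-memory tensors via the inherited
# __init__(inputs, outputs); shapes follow CIFAR.image_shape = (3, 32, 32).
inputs = torch.rand(8, 3, 32, 32)
labels = torch.zeros(8, dtype=torch.int64)
dataset = CIFAR(inputs, labels)

print(len(dataset))                # __len__ -> 8
image, label = dataset[0]          # __getitem__ returns an (input, output) pair
print(dataset.sample_input.shape)  # sample_input property -> torch.Size([3, 32, 32])
for image, label in dataset:       # __iter__ yields (input, output) pairs
    pass
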
@@ -36,7 +36,9 @@ class MiniERA(Module):
         for conv in self.convs:
             if not isinstance(conv, Conv2d):
                 continue
-            weight_np = np.fromfile(prefix / f"conv2d_{count+1}_w.bin", dtype=np.float32)
+            weight_np = np.fromfile(
+                prefix / f"conv2d_{count+1}_w.bin", dtype=np.float32
+            )
             bias_np = np.fromfile(prefix / f"conv2d_{count+1}_b.bin", dtype=np.float32)
             conv.weight.data = torch.tensor(weight_np).reshape(conv.weight.shape)
             conv.bias.data = torch.tensor(bias_np).reshape(conv.bias.shape)
...
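
The reflowed np.fromfile call above is purely cosmetic; the loading pattern itself is unchanged. A standalone sketch of that pattern, with a hypothetical weights directory and layer shape (the conv2d_{i}_{w,b}.bin naming mirrors the hunk; MiniERA loops over self.convs doing this for each Conv2d):

from pathlib import Path

import numpy as np
import torch
from torch.nn import Conv2d

# Hypothetical layer and weight directory for illustration only.
prefix = Path("weights")
conv = Conv2d(3, 32, kernel_size=3)

# Raw float32 blobs on disk are reshaped into the layer's parameter tensors.
weight_np = np.fromfile(prefix / "conv2d_1_w.bin", dtype=np.float32)
bias_np = np.fromfile(prefix / "conv2d_1_b.bin", dtype=np.float32)
conv.weight.data = torch.tensor(weight_np).reshape(conv.weight.shape)
conv.bias.data = torch.tensor(bias_np).reshape(conv.bias.shape)
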
 import os
 from copy import deepcopy
 from pathlib import Path
-from typing import Union
 from shutil import move
+from typing import Union
 
 import distiller
 import torch
-from torch.utils.data.dataset import Dataset
 import yaml
 from distiller.data_loggers import collect_quant_stats
 from distiller.quantization import PostTrainLinearQuantizer
 from torch import nn
 from torch.utils.data import DataLoader
+from torch.utils.data.dataset import Dataset
 
 PathLike = Union[str, Path]
 STATS_FILENAME = "acts_quantization_stats.yaml"
@@ -40,7 +40,7 @@ def quantize(
     strat: str = "NONE",
     working_dir: PathLike = ".",
     output_name: str = "calib.txt",
-    eval_batchsize: int = 128
+    eval_batchsize: int = 128,
 ):
     # possible quant strats ['NONE', 'AVG', 'N_STD', 'GAUSS', 'LAPLACE']
     print("Quantizing...")
...
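
The only signature change here is the trailing comma after eval_batchsize. For context, a hypothetical call site: the leading positional arguments of quantize are outside the hunk shown above, so model and dataset below are placeholders, while the keyword names match the visible fragment:

from torch_dnn import quantize

# Placeholders -- the actual positional parameters of quantize() are not
# visible in the hunk above; substitute a real model and calibration set.
model = ...      # e.g. a trained MiniERA instance
dataset = ...    # e.g. a CIFAR calibration dataset

# Keyword arguments as they appear in the signature fragment; per the comment
# in quantize(), strat may be 'NONE', 'AVG', 'N_STD', 'GAUSS', or 'LAPLACE'.
quantize(
    model,
    dataset,
    strat="AVG",
    working_dir=".",
    output_name="calib.txt",
    eval_batchsize=128,
)
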