Commit afa2ddf8 authored by Yifan Zhao
Added dnn PyTorch sources and simplest frontend test

parent 1475e91f
# dnn/__init__.py
from .alexnet import AlexNet, AlexNet2, AlexNetImageNet
from .datasets import CIFAR, MNIST, ImageNet
from .lenet import LeNet
from .vgg16 import VGG16Cifar10, VGG16Cifar100
from .mobilenet import MobileNet
from .resnet import ResNet18, ResNet50
# dnn/_container.py
from typing import Callable, Optional

import torch
from torch.nn import Conv2d, MaxPool2d, Module, Sequential, Softmax

ActivT = Optional[Callable[[], Module]]


def make_conv_pool_activ(
    in_channels: int,
    out_channels: int,
    kernel_size: int,
    activation: ActivT = None,
    pool_size: Optional[int] = None,
    pool_stride: Optional[int] = None,
    **conv_kwargs
):
    """Build a [Conv2d, optional MaxPool2d, optional activation] layer list."""
    layers = [Conv2d(in_channels, out_channels, kernel_size, **conv_kwargs)]
    if pool_size is not None:
        layers.append(MaxPool2d(pool_size, stride=pool_stride))
    if activation:
        layers.append(activation())
    return layers


class Classifier(Module):
    """A convolutional feature extractor followed by a flatten and a linear head."""

    def __init__(
        self, convs: Sequential, linears: Sequential, use_softmax: bool = False
    ):
        super().__init__()
        self.convs = convs
        self.linears = linears
        self.softmax = Softmax(1) if use_softmax else Sequential()

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        outputs = self.convs(inputs)
        return self.softmax(self.linears(outputs.view(outputs.shape[0], -1)))
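A minimal usage sketch of the two helpers above (not part of this commit): the tiny single-block model and the 1x28x28 input size are illustrative assumptions, chosen so the flattened feature size works out to 8 * 14 * 14.

import torch
from torch.nn import Linear, ReLU, Sequential

# One block (Conv2d -> MaxPool2d -> ReLU): 3x3 conv with padding 1 keeps 28x28,
# the 2x2 pool halves it to 14x14, so the flattened size is 8 * 14 * 14.
tiny = Classifier(
    convs=Sequential(*make_conv_pool_activ(1, 8, 3, ReLU, pool_size=2, padding=1)),
    linears=Sequential(Linear(8 * 14 * 14, 10)),
)
logits = tiny(torch.randn(4, 1, 28, 28))  # shape: (4, 10)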
# dnn/alexnet.py
from torch.nn import Linear, ReLU, Sequential, Tanh

from ._container import Classifier, make_conv_pool_activ


class AlexNet(Classifier):
    def __init__(self):
        convs = Sequential(
            *make_conv_pool_activ(3, 64, 11, Tanh, pool_size=2, padding=5),
            *make_conv_pool_activ(64, 192, 5, Tanh, pool_size=2, padding=2),
            *make_conv_pool_activ(192, 384, 3, Tanh, padding=1),
            *make_conv_pool_activ(384, 256, 3, Tanh, padding=1),
            *make_conv_pool_activ(256, 256, 3, Tanh, pool_size=2, padding=1)
        )
        linears = Sequential(Linear(4096, 10))
        super().__init__(convs, linears)


class AlexNet2(Classifier):
    def __init__(self):
        convs = Sequential(
            *make_conv_pool_activ(3, 32, 3, Tanh, padding=1),
            *make_conv_pool_activ(32, 32, 3, Tanh, pool_size=2, padding=1),
            *make_conv_pool_activ(32, 64, 3, Tanh, padding=1),
            *make_conv_pool_activ(64, 64, 3, Tanh, pool_size=2, padding=1),
            *make_conv_pool_activ(64, 128, 3, Tanh, padding=1),
            *make_conv_pool_activ(128, 128, 3, Tanh, pool_size=2, padding=1)
        )
        linears = Sequential(Linear(2048, 10))
        super().__init__(convs, linears)


class AlexNetImageNet(Classifier):
    def __init__(self):
        convs = Sequential(
            *make_conv_pool_activ(
                3, 64, 11, ReLU, padding=2, stride=4, pool_size=3, pool_stride=2
            ),
            *make_conv_pool_activ(
                64, 192, 5, ReLU, padding=2, pool_size=3, pool_stride=2
            ),
            *make_conv_pool_activ(192, 384, 3, ReLU, padding=1),
            *make_conv_pool_activ(384, 256, 3, ReLU, padding=1),
            *make_conv_pool_activ(
                256, 256, 3, ReLU, padding=1, pool_size=3, pool_stride=2
            )
        )
        linears = Sequential(
            Linear(9216, 4096), ReLU(), Linear(4096, 4096), ReLU(), Linear(4096, 1000),
        )
        super().__init__(convs, linears)
# dnn/datasets.py
import logging
from pathlib import Path
from typing import Iterator, Tuple, Union

import numpy as np
import torch
from torch.utils.data.dataset import Dataset

RetT = Tuple[torch.Tensor, torch.Tensor]
msg_logger = logging.getLogger(__name__)

PathLike = Union[Path, str]


class SingleFileDataset(Dataset):
    """A dataset held entirely in memory as one input tensor and one label tensor."""

    def __init__(self, inputs: torch.Tensor, outputs: torch.Tensor):
        self.inputs, self.outputs = inputs, outputs

    @classmethod
    def from_file(cls, *args, **kwargs):
        pass

    @property
    def sample_input(self):
        inputs, outputs = next(iter(self))
        return inputs

    def __len__(self) -> int:
        return len(self.inputs)

    def __getitem__(self, idx) -> RetT:
        return self.inputs[idx], self.outputs[idx]

    def __iter__(self) -> Iterator[RetT]:
        for i in range(len(self)):
            yield self[i]


class DNNDataset(SingleFileDataset):
    image_shape = None
    label_ty = np.int32

    @classmethod
    def from_file(
        cls,
        input_file: PathLike,
        labels_file: PathLike,
        count: int = -1,
        offset: int = 0,
    ):
        # NOTE: assuming (N, *) ordering of inputs (such as NCHW, NHWC)
        channel_size = np.prod(np.array(cls.image_shape))
        inputs_count_byte = -1 if count == -1 else count * channel_size
        inputs = read_tensor_from_file(
            input_file,
            -1,
            *cls.image_shape,
            count=inputs_count_byte,
            offset=offset * channel_size,
        )
        labels = read_tensor_from_file(
            labels_file,
            -1,
            read_ty=cls.label_ty,
            cast_ty=np.int64,  # int64 matches torch's long/label dtype
            count=count,
            offset=offset,
        )
        if inputs.shape[0] != labels.shape[0]:
            raise ValueError("Input and output have different number of data points")
        msg_logger.info("%d entries loaded from dataset.", inputs.shape[0])
        return cls(inputs, labels)


class MNIST(DNNDataset):
    image_shape = 1, 28, 28


class CIFAR(DNNDataset):
    image_shape = 3, 32, 32


class ImageNet(DNNDataset):
    image_shape = 3, 224, 224


def read_tensor_from_file(
    filename: Union[str, Path],
    *shape: int,
    read_ty=np.float32,
    cast_ty=np.float32,
    count: int = -1,
    offset: int = 0,
) -> torch.Tensor:
    # Memory-map the flat binary file, take the first `count` elements
    # (all if count == -1), reshape, cast, and copy into a torch tensor.
    offset = offset * read_ty().itemsize
    mmap = np.memmap(filename, dtype=read_ty, mode="r", offset=offset)
    n_entries = min(mmap.shape[0], count) if count != -1 else mmap.shape[0]
    np_array = mmap[:n_entries].reshape(shape).astype(cast_ty)
    return torch.from_numpy(np_array).clone()
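A hedged loading sketch (not part of this commit): the two file paths are hypothetical placeholders. Per read_tensor_from_file above, the image file is read as flat float32 values and the label file as int32 values.

from torch.utils.data import DataLoader

# Hypothetical paths; each MNIST image is 1 * 28 * 28 float32 values,
# each label a single int32.
dataset = MNIST.from_file("mnist_input.bin", "mnist_labels.bin", count=100)
loader = DataLoader(dataset, batch_size=10)
images, labels = next(iter(loader))  # images: (10, 1, 28, 28), labels: (10,)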
# dnn/lenet.py
from torch.nn import Linear, Sequential, Tanh

from ._container import Classifier, make_conv_pool_activ


class LeNet(Classifier):
    def __init__(self):
        convs = Sequential(
            *make_conv_pool_activ(1, 32, 5, Tanh, 2, padding=2),
            *make_conv_pool_activ(32, 64, 5, Tanh, 2, padding=2)
        )
        linears = Sequential(Linear(7 * 7 * 64, 1024), Tanh(), Linear(1024, 10), Tanh())
        super().__init__(convs, linears)
# dnn/mobilenet.py
from torch.nn import AvgPool2d, BatchNorm2d, Conv2d, Linear, ReLU, Sequential

from ._container import Classifier, make_conv_pool_activ


def _make_seq(in_channels, out_channels, c_kernel_size, gc_stride, gc_kernel_size=3):
    # A standard conv (kernel c_kernel_size) followed by a depthwise conv
    # (groups=out_channels), each with batch norm and ReLU.
    return Sequential(
        *make_conv_pool_activ(
            in_channels,
            out_channels,
            c_kernel_size,
            bias=False,
            padding=(c_kernel_size - 1) // 2,
        ),
        BatchNorm2d(out_channels, eps=0.001),
        ReLU(),
        Conv2d(
            out_channels,
            out_channels,
            gc_kernel_size,
            bias=False,
            stride=gc_stride,
            padding=(gc_kernel_size - 1) // 2,
            groups=out_channels,
        ),
        BatchNorm2d(out_channels, eps=0.001),
        ReLU()
    )


class MobileNet(Classifier):
    def __init__(self):
        convs = Sequential(
            _make_seq(3, 32, 3, 1),
            _make_seq(32, 64, 1, 2),
            _make_seq(64, 128, 1, 1),
            _make_seq(128, 128, 1, 2),
            _make_seq(128, 256, 1, 1),
            _make_seq(256, 256, 1, 2),
            _make_seq(256, 512, 1, 1),
            _make_seq(512, 512, 1, 1),
            _make_seq(512, 512, 1, 1),
            _make_seq(512, 512, 1, 1),
            _make_seq(512, 512, 1, 1),
            _make_seq(512, 512, 1, 2),
            _make_seq(512, 1024, 1, 1),
            *make_conv_pool_activ(1024, 1024, 1, padding=0, bias=False),
            BatchNorm2d(1024, eps=0.001),
            ReLU(),
            AvgPool2d(2)
        )
        linears = Sequential(Linear(1024, 10))
        super().__init__(convs, linears)
# dnn/resnet.py
from torch.nn import AvgPool2d, BatchNorm2d, Linear, Module, ReLU, Sequential

from ._container import Classifier, make_conv_pool_activ


class BasicBlock(Module):
    def __init__(self, ins, outs, shortcut=False):
        super().__init__()
        stride = 2 if shortcut else 1
        self.mainline = Sequential(
            *make_conv_pool_activ(ins, outs, 3, ReLU, padding=1, stride=stride),
            *make_conv_pool_activ(outs, outs, 3, padding=1)
        )
        self.relu1 = ReLU()
        self.shortcut = (
            Sequential(*make_conv_pool_activ(ins, outs, 1, stride=stride))
            if shortcut
            else Sequential()
        )

    def forward(self, input_):
        return self.relu1(self.mainline(input_) + self.shortcut(input_))


class ResNet18(Classifier):
    def __init__(self):
        convs = Sequential(
            *make_conv_pool_activ(3, 16, 3, ReLU, padding=1),
            BasicBlock(16, 16),
            BasicBlock(16, 16),
            BasicBlock(16, 16),
            BasicBlock(16, 32, True),
            BasicBlock(32, 32),
            BasicBlock(32, 32),
            BasicBlock(32, 64, True),
            BasicBlock(64, 64),
            BasicBlock(64, 64),
            AvgPool2d(8)
        )
        linears = Sequential(Linear(64, 10))
        super().__init__(convs, linears)


class Bottleneck(Module):
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.mainline = Sequential(
            *make_conv_pool_activ(in_planes, planes, 1, stride=stride),
            BatchNorm2d(planes, eps=0.001),
            ReLU(),
            *make_conv_pool_activ(planes, planes, 3, padding=1),
            BatchNorm2d(planes, eps=0.001),
            ReLU(),
            *make_conv_pool_activ(planes, self.expansion * planes, 1),
            BatchNorm2d(self.expansion * planes, eps=0.001)
        )
        self.relu1 = ReLU()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = Sequential(
                *make_conv_pool_activ(
                    in_planes, self.expansion * planes, 1, stride=stride
                ),
                BatchNorm2d(self.expansion * planes, eps=0.001)
            )
        else:
            self.shortcut = Sequential()

    def forward(self, input_):
        return self.relu1(self.mainline(input_) + self.shortcut(input_))


class ResNet50(Classifier):
    def __init__(self):
        convs = Sequential(
            *make_conv_pool_activ(
                3, 64, 7, ReLU, pool_size=3, pool_stride=2, padding=3, stride=2
            ),
            BatchNorm2d(64, eps=0.001),
            Bottleneck(64, 64),
            Bottleneck(256, 64),
            Bottleneck(256, 64),
            Bottleneck(256, 128, stride=2),
            Bottleneck(512, 128),
            Bottleneck(512, 128),
            Bottleneck(512, 128),
            Bottleneck(512, 256, stride=2),
            Bottleneck(1024, 256),
            Bottleneck(1024, 256),
            Bottleneck(1024, 256),
            Bottleneck(1024, 256),
            Bottleneck(1024, 256),
            Bottleneck(1024, 512, stride=2),
            Bottleneck(2048, 512),
            Bottleneck(2048, 512),
            AvgPool2d(7)
        )
        linears = Sequential(Linear(2048, 1000))
        super().__init__(convs, linears)
# dnn/vgg16.py
from typing import Sequence

from torch.nn import Linear, ReLU, Sequential

from ._container import Classifier, make_conv_pool_activ


class _VGG16(Classifier):
    def __init__(self, linear_inouts: Sequence[int]):
        convs = Sequential(
            *make_conv_pool_activ(3, 64, 3, ReLU, padding=1),
            *make_conv_pool_activ(64, 64, 3, ReLU, 2, padding=1),
            *make_conv_pool_activ(64, 128, 3, ReLU, padding=1),
            *make_conv_pool_activ(128, 128, 3, ReLU, 2, padding=1),
            *make_conv_pool_activ(128, 256, 3, ReLU, padding=1),
            *make_conv_pool_activ(256, 256, 3, ReLU, padding=1),
            *make_conv_pool_activ(256, 256, 3, ReLU, 2, padding=1),
            *make_conv_pool_activ(256, 512, 3, ReLU, padding=1),
            *make_conv_pool_activ(512, 512, 3, ReLU, padding=1),
            *make_conv_pool_activ(512, 512, 3, ReLU, 2, padding=1),
            *make_conv_pool_activ(512, 512, 3, ReLU, padding=1),
            *make_conv_pool_activ(512, 512, 3, ReLU, padding=1),
            *make_conv_pool_activ(512, 512, 3, ReLU, 2, padding=1)
        )
        # Interleave Linear layers with ReLU, leaving the last Linear un-activated.
        linear_layers = [
            Linear(in_, out) for in_, out in zip(linear_inouts, linear_inouts[1:])
        ]
        linear_relus = [ReLU() for _ in range(2 * len(linear_layers) - 1)]
        linear_relus[::2] = linear_layers
        linears = Sequential(*linear_relus)
        super().__init__(convs, linears)


class VGG16Cifar10(_VGG16):
    def __init__(self):
        super().__init__([512, 512, 10])


class VGG16Cifar100(_VGG16):
    def __init__(self):
        super().__init__([512, 512, 100])
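As a sanity check on the input resolutions implied by the hard-coded linear heads above (28x28 grayscale for LeNet, 32x32 RGB for the CIFAR models, 224x224 RGB for the ImageNet models), here is a hedged forward-pass smoke-test sketch. It is not part of this commit and assumes the dnn package is importable, as in the frontend test below.

import torch

from dnn import (
    AlexNet, AlexNet2, AlexNetImageNet, LeNet, MobileNet,
    ResNet18, ResNet50, VGG16Cifar10, VGG16Cifar100,
)

cases = [
    (LeNet(), (1, 1, 28, 28), 10),
    (AlexNet(), (1, 3, 32, 32), 10),
    (AlexNet2(), (1, 3, 32, 32), 10),
    (MobileNet(), (1, 3, 32, 32), 10),
    (ResNet18(), (1, 3, 32, 32), 10),
    (VGG16Cifar10(), (1, 3, 32, 32), 10),
    (VGG16Cifar100(), (1, 3, 32, 32), 100),
    (AlexNetImageNet(), (1, 3, 224, 224), 1000),
    (ResNet50(), (1, 3, 224, 224), 1000),
]
with torch.no_grad():
    for model, in_shape, n_classes in cases:
        # Each model maps a random input at its expected resolution to logits.
        out = model.eval()(torch.randn(*in_shape))
        assert out.shape == (1, n_classes)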
# Frontend test (the "simplest frontend test" from the commit message)
from torch2hpvm import compile_torch_module

from dnn import AlexNet

compile_torch_module(AlexNet(), (1, 3, 32, 32), "/tmp/alexnet", True)