Commit d425031b authored by Yifan Zhao

Added all pretrained models back and adjusted the model parameter path (model_data/ -> model_params/)

parent 0a40ff88
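
For context, a minimal usage sketch (not part of this commit) of loading one of the restored pretrained models from the new model_params/ layout; the paths, batch size, and API calls mirror the test code added below, and the app name string is arbitrary:

import torch
from torch.utils.data.dataloader import DataLoader
import predtuner.model_zoo as net
from predtuner import TorchApp, accuracy, get_knobs_from_file

# Weights and tune inputs now live under model_params/ (previously model_data/).
network = net.ResNet18()
network.load_state_dict(torch.load("model_params/resnet18_cifar10.pth.tar"))
dataset = net.CIFAR.from_file(
    "model_params/resnet18_cifar10/tune_input.bin",
    "model_params/resnet18_cifar10/tune_labels.bin",
)
loader = DataLoader(dataset, batch_size=250)
app = TorchApp("example", network, loader, loader, get_knobs_from_file(), accuracy)
qos, _ = app.measure_qos_perf({}, False)  # baseline accuracy with no approximations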
@@ -26,7 +26,7 @@ doc/build
# Custom
.idea/
.vscode/
-model_data
+model_params
results/
tuner_results
tuner_results/
...
import site
from pathlib import Path
import torch
from torch.nn.modules.module import Module
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Subset
site.addsitedir(Path(__file__).absolute().parent.parent)
import model_zoo as net
from predtuner import TorchApp, accuracy, get_knobs_from_file
def load_from_default_path(cls, prefix: str):
return cls.from_file(f"{prefix}/input.bin", f"{prefix}/labels.bin")
mnist = load_from_default_path(net.MNIST, "model_data/mnist")
cifar10 = load_from_default_path(net.CIFAR, "model_data/cifar10")
cifar100 = load_from_default_path(net.CIFAR, "model_data/cifar100")
imagenet = load_from_default_path(net.ImageNet, "model_data/imagenet")
networks_in_folder = {
"lenet_mnist": (net.LeNet, mnist),
"alexnet_cifar10": (net.AlexNet, cifar10),
"alexnet2_cifar10": (net.AlexNet2, cifar10),
"vgg16_cifar10": (net.VGG16Cifar10, cifar10),
"vgg16_cifar100": (net.VGG16Cifar100, cifar100),
}
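# Measure and print the baseline (no-approximation) accuracy of each pretrained network.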
for name, (cls, dataset) in networks_in_folder.items():
network: Module = cls()
network.load_state_dict(torch.load(f"model_data/{name}.pth.tar"))
d1, d2 = DataLoader(Subset(dataset, range(5000, 10000)), 1), DataLoader(dataset, 1)
app = TorchApp("", network, d1, d2, get_knobs_from_file(), accuracy)
qos, _ = app.measure_qos_perf({}, False)
print(f"{name} -> {qos}")
-from .alexnet import AlexNet, AlexNet2
+from .alexnet import AlexNet, AlexNet2, AlexNetImageNet
from .datasets import CIFAR, MNIST, ImageNet
from .lenet import LeNet
-from .vgg16 import VGG16Cifar10, VGG16Cifar100
+from .mobilenet import MobileNet
+from .resnet import ResNet18, ResNet50
+from .vgg16 import VGG16Cifar10, VGG16Cifar100, VGG16ImageNet
@@ -28,3 +28,28 @@ class AlexNet2(Classifier):
        )
        linears = Sequential(Linear(2048, 10))
        super().__init__(convs, linears)
class AlexNetImageNet(Classifier):
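    # AlexNet for 224x224 ImageNet inputs; the conv stack flattens to 256 * 6 * 6 = 9216 features.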
def __init__(self):
convs = Sequential(
*make_conv_pool_activ(
3, 64, 11, ReLU, padding=2, stride=4, pool_size=3, pool_stride=2
),
*make_conv_pool_activ(
64, 192, 5, ReLU, padding=2, pool_size=3, pool_stride=2
),
*make_conv_pool_activ(192, 384, 3, ReLU, padding=1),
*make_conv_pool_activ(384, 256, 3, ReLU, padding=1),
*make_conv_pool_activ(
256, 256, 3, ReLU, padding=1, pool_size=3, pool_stride=2
)
)
linears = Sequential(
Linear(9216, 4096),
ReLU(),
Linear(4096, 4096),
ReLU(),
Linear(4096, 1000),
)
super().__init__(convs, linears)
@@ -9,8 +9,5 @@ class LeNet(Classifier):
            *make_conv_pool_activ(1, 32, 5, Tanh, 2, padding=2),
            *make_conv_pool_activ(32, 64, 5, Tanh, 2, padding=2)
        )
-        linears = Sequential(
-            Linear(7 * 7 * 64, 1024), Tanh(),
-            Linear(1024, 10), Tanh()
-        )
+        linears = Sequential(Linear(7 * 7 * 64, 1024), Tanh(), Linear(1024, 10), Tanh())
        super().__init__(convs, linears)
from torch.nn import AvgPool2d, BatchNorm2d, Conv2d, Linear, ReLU, Sequential
from ._container import Classifier, make_conv_pool_activ
def _make_seq(in_channels, out_channels, c_kernel_size, gc_stride, gc_kernel_size=3):
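    # One MobileNet-style block: a conv (kernel c_kernel_size) + BN + ReLU, followed by a
    # depthwise conv (groups=out_channels, stride gc_stride) + BN + ReLU.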
return Sequential(
*make_conv_pool_activ(
in_channels,
out_channels,
c_kernel_size,
bias=False,
padding=(c_kernel_size - 1) // 2,
),
BatchNorm2d(out_channels, eps=0.001),
ReLU(),
Conv2d(
out_channels,
out_channels,
gc_kernel_size,
bias=False,
stride=gc_stride,
padding=(gc_kernel_size - 1) // 2,
groups=out_channels,
),
BatchNorm2d(out_channels, eps=0.001),
ReLU()
)
class MobileNet(Classifier):
def __init__(self):
convs = Sequential(
_make_seq(3, 32, 3, 1),
_make_seq(32, 64, 1, 2),
_make_seq(64, 128, 1, 1),
_make_seq(128, 128, 1, 2),
_make_seq(128, 256, 1, 1),
_make_seq(256, 256, 1, 2),
_make_seq(256, 512, 1, 1),
_make_seq(512, 512, 1, 1),
_make_seq(512, 512, 1, 1),
_make_seq(512, 512, 1, 1),
_make_seq(512, 512, 1, 1),
_make_seq(512, 512, 1, 2),
_make_seq(512, 1024, 1, 1),
*make_conv_pool_activ(1024, 1024, 1, padding=0, bias=False),
BatchNorm2d(1024, eps=0.001),
ReLU(),
AvgPool2d(2)
)
linears = Sequential(Linear(1024, 10))
super().__init__(convs, linears)
from torch.nn import AvgPool2d, BatchNorm2d, Linear, Module, ReLU, Sequential
from ._container import Classifier, make_conv_pool_activ
class BasicBlock(Module):
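    # Residual block: two 3x3 convs on the main path; the shortcut is the identity,
    # or a strided 1x1 conv when downsampling (shortcut=True).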
def __init__(self, ins, outs, shortcut=False):
super().__init__()
stride = 2 if shortcut else 1
self.mainline = Sequential(
*make_conv_pool_activ(ins, outs, 3, ReLU, padding=1, stride=stride),
*make_conv_pool_activ(outs, outs, 3, padding=1)
)
self.relu1 = ReLU()
self.shortcut = (
Sequential(*make_conv_pool_activ(ins, outs, 1, stride=stride))
if shortcut
else Sequential()
)
def forward(self, input_):
return self.relu1(self.mainline(input_) + self.shortcut(input_))
class ResNet18(Classifier):
def __init__(self):
convs = Sequential(
*make_conv_pool_activ(3, 16, 3, ReLU, padding=1),
BasicBlock(16, 16),
BasicBlock(16, 16),
BasicBlock(16, 16),
BasicBlock(16, 32, True),
BasicBlock(32, 32),
BasicBlock(32, 32),
BasicBlock(32, 64, True),
BasicBlock(64, 64),
BasicBlock(64, 64),
AvgPool2d(8)
)
linears = Sequential(Linear(64, 10))
super().__init__(convs, linears)
class Bottleneck(Module):
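    # Bottleneck residual block: 1x1 reduce, 3x3, then 1x1 expand (x4), each with BatchNorm;
    # a projected 1x1 shortcut is used when the stride or channel count changes.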
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.mainline = Sequential(
*make_conv_pool_activ(in_planes, planes, 1, stride=stride),
BatchNorm2d(planes, eps=0.001),
ReLU(),
*make_conv_pool_activ(planes, planes, 3, padding=1),
BatchNorm2d(planes, eps=0.001),
ReLU(),
*make_conv_pool_activ(planes, self.expansion * planes, 1),
BatchNorm2d(self.expansion * planes, eps=0.001)
)
self.relu1 = ReLU()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = Sequential(
*make_conv_pool_activ(
in_planes, self.expansion * planes, 1, stride=stride
),
BatchNorm2d(self.expansion * planes, eps=0.001)
)
else:
self.shortcut = Sequential()
def forward(self, input_):
return self.relu1(self.mainline(input_) + self.shortcut(input_))
class ResNet50(Classifier):
def __init__(self):
convs = Sequential(
*make_conv_pool_activ(
3, 64, 7, ReLU, pool_size=3, pool_stride=2, padding=3, stride=2
),
BatchNorm2d(64, eps=0.001),
Bottleneck(64, 64),
Bottleneck(256, 64),
Bottleneck(256, 64),
Bottleneck(256, 128, stride=2),
Bottleneck(512, 128),
Bottleneck(512, 128),
Bottleneck(512, 128),
Bottleneck(512, 256, stride=2),
Bottleneck(1024, 256),
Bottleneck(1024, 256),
Bottleneck(1024, 256),
Bottleneck(1024, 256),
Bottleneck(1024, 256),
Bottleneck(1024, 512, stride=2),
Bottleneck(2048, 512),
Bottleneck(2048, 512),
AvgPool2d(7)
)
linears = Sequential(Linear(2048, 1000))
super().__init__(convs, linears)
@@ -22,7 +22,9 @@ class _VGG16(Classifier):
            *make_conv_pool_activ(512, 512, 3, ReLU, padding=1),
            *make_conv_pool_activ(512, 512, 3, ReLU, 2, padding=1)
        )
-        linear_layers = [Linear(in_, out) for in_, out in zip(linear_inouts, linear_inouts[1:])]
+        linear_layers = [
+            Linear(in_, out) for in_, out in zip(linear_inouts, linear_inouts[1:])
+        ]
        linear_relus = [ReLU() for _ in range(2 * len(linear_layers) - 1)]
        linear_relus[::2] = linear_layers
        linears = Sequential(*linear_relus)
@@ -37,3 +39,8 @@ class VGG16Cifar10(_VGG16):
class VGG16Cifar100(_VGG16):
    def __init__(self):
        super().__init__([512, 512, 100])
class VGG16ImageNet(_VGG16):
def __init__(self):
super().__init__([25088, 4096, 4096, 1000])
@@ -6,21 +6,31 @@ from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Subset

site.addsitedir(Path(__file__).absolute().parent.parent)

-from model_zoo import CIFAR, VGG16Cifar10
+from predtuner.model_zoo import CIFAR, VGG16Cifar10
from predtuner import TorchApp, accuracy, config_pylogger, get_knobs_from_file

msg_logger = config_pylogger(output_dir="tuner_results/logs", verbose=True)

-dataset = CIFAR.from_file(
-    "model_data/cifar10/input.bin", "model_data/cifar10/labels.bin"
-)
-tune_loader = DataLoader(Subset(dataset, range(500)), batch_size=500)
-test_loader = DataLoader(Subset(dataset, range(5000, 5500)), batch_size=500)
+tune_set = CIFAR.from_file(
+    "model_params/vgg16_cifar10/tune_input.bin",
+    "model_params/vgg16_cifar10/tune_labels.bin",
+)
+tune_loader = DataLoader(Subset(tune_set, range(500)), batch_size=500)
+test_set = CIFAR.from_file(
+    "model_params/vgg16_cifar10/test_input.bin",
+    "model_params/vgg16_cifar10/test_labels.bin",
+)
+test_loader = DataLoader(Subset(test_set, range(500)), batch_size=500)

module = VGG16Cifar10()
-module.load_state_dict(torch.load("model_data/vgg16_cifar10.pth.tar"))
+module.load_state_dict(torch.load("model_params/vgg16_cifar10.pth.tar"))
app = TorchApp(
-    "TestTorchApp", module, tune_loader, test_loader, get_knobs_from_file(), accuracy,
-    model_storage_folder="tuner_results/vgg16_cifar10"
+    "TestTorchApp",
+    module,
+    tune_loader,
+    test_loader,
+    get_knobs_from_file(),
+    accuracy,
+    model_storage_folder="tuner_results/vgg16_cifar10",
)
baseline, _ = app.measure_qos_perf({}, False)
tuner = app.get_tuner()
...
import unittest
import predtuner.model_zoo as net
import torch
from predtuner import TorchApp, accuracy, config_pylogger, get_knobs_from_file
from torch.nn import Module
from torch.utils.data.dataloader import DataLoader
msg_logger = config_pylogger(output_dir="/tmp", verbose=True)
class TestModelZooAcc(unittest.TestCase):
networks = {
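        # name -> (model class, dataset class, batch size, expected baseline accuracy in %)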
"lenet_mnist": (net.LeNet, net.MNIST, 2000, 99.65),
"alexnet_cifar10": (net.AlexNet, net.CIFAR, 500, 78.78),
"alexnet2_cifar10": (net.AlexNet2, net.CIFAR, 500, 84.75),
"vgg16_cifar10": (net.VGG16Cifar10, net.CIFAR, 250, 89.22),
"vgg16_cifar100": (net.VGG16Cifar100, net.CIFAR, 250, 68.42),
"resnet18_cifar10": (net.ResNet18, net.CIFAR, 250, 89.41),
"mobilenet": (net.MobileNet, net.CIFAR, 250, 84.9),
"alexnet_imagenet": (net.AlexNetImageNet, net.ImageNet, 20, 55.86),
# "resnet50_imagenet": (net.ResNet50, net.ImageNet, 10, 71.72),
"vgg16_imagenet": (net.VGG16ImageNet, net.ImageNet, 5, 68.82),
}
def test_all_accuracy(self):
for name, netinfo in self.networks.items():
model_cls, dataset_cls, batchsize, target_acc = netinfo
network: Module = model_cls()
network.load_state_dict(torch.load(f"model_params/{name}.pth.tar"))
dataset = dataset_cls.from_file(
f"model_params/{name}/tune_input.bin",
f"model_params/{name}/tune_labels.bin",
)
tune = DataLoader(dataset, batchsize)
app = TorchApp("", network, tune, tune, get_knobs_from_file(), accuracy)
qos, _ = app.measure_qos_perf({}, False)
self.assertAlmostEqual(qos, target_acc)
@@ -14,11 +14,12 @@ class TorchAppSetUp(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        dataset = CIFAR.from_file(
-            "model_data/cifar10/input.bin", "model_data/cifar10/labels.bin"
+            "model_params/vgg16_cifar10/tune_input.bin",
+            "model_params/vgg16_cifar10/tune_labels.bin",
        )
        cls.dataset = Subset(dataset, range(100))
        cls.module = VGG16Cifar10()
-        cls.module.load_state_dict(torch.load("model_data/vgg16_cifar10.pth.tar"))
+        cls.module.load_state_dict(torch.load("model_params/vgg16_cifar10.pth.tar"))
        cls.app = TorchApp(
            "TestTorchApp",
            cls.module,
@@ -47,7 +48,7 @@ class TestTorchAppTuning(TorchAppSetUp):
    def test_baseline_qos(self):
        qos, _ = self.app.measure_qos_perf({}, False)
-        self.assertAlmostEqual(qos, 88.0)
+        self.assertAlmostEqual(qos, 93.0)

    def test_tuning_relative_thres(self):
        baseline, _ = self.app.measure_qos_perf({}, False)
@@ -138,7 +139,7 @@ class TestModelSaving(TorchAppSetUp):
            DataLoader(cls.dataset, batch_size=500),
            get_knobs_from_file(),
            accuracy,
-            model_storage_folder=cls.model_path
+            model_storage_folder=cls.model_path,
        )

    def test_loading_p1(self):
...