From d425031b435e5bd1fff75bee0d6d22a21054977b Mon Sep 17 00:00:00 2001
From: Yifan Zhao <yifanz16@illinois.edu>
Date: Thu, 28 Jan 2021 20:11:56 -0600
Subject: [PATCH] Added all pretrained models back and adjusted model parameter path

---
 .gitignore                       |   2 +-
 bin/show_baseline_acc.py         |  37 ------------
 predtuner/model_zoo/__init__.py  |   6 +-
 predtuner/model_zoo/alexnet.py   |  25 ++++++++
 predtuner/model_zoo/lenet.py     |   5 +-
 predtuner/model_zoo/mobilenet.py |  53 ++++++++++++++++
 predtuner/model_zoo/resnet.py    | 100 +++++++++++++++++++++++++++++++
 predtuner/model_zoo/vgg16.py     |   9 ++-
 test/integrated_tuning.py        |  26 +++++---
 test/test_model_zoo_acc.py       |  38 ++++++++++++
 test/test_torchapp.py            |   9 +--
 11 files changed, 253 insertions(+), 57 deletions(-)
 delete mode 100644 bin/show_baseline_acc.py
 create mode 100644 predtuner/model_zoo/mobilenet.py
 create mode 100644 predtuner/model_zoo/resnet.py
 create mode 100644 test/test_model_zoo_acc.py

diff --git a/.gitignore b/.gitignore
index 72f0845..bfa59bf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,7 +26,7 @@ doc/build
 # Custom
 .idea/
 .vscode/
-model_data
+model_params
 results/
 tuner_results
 tuner_results/
diff --git a/bin/show_baseline_acc.py b/bin/show_baseline_acc.py
deleted file mode 100644
index 08a90fd..0000000
--- a/bin/show_baseline_acc.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import site
-from pathlib import Path
-
-import torch
-from torch.nn.modules.module import Module
-from torch.utils.data.dataloader import DataLoader
-from torch.utils.data.dataset import Subset
-
-site.addsitedir(Path(__file__).absolute().parent.parent)
-import model_zoo as net
-from predtuner import TorchApp, accuracy, get_knobs_from_file
-
-
-def load_from_default_path(cls, prefix: str):
-    return cls.from_file(f"{prefix}/input.bin", f"{prefix}/labels.bin")
-
-
-mnist = load_from_default_path(net.MNIST, "model_data/mnist")
-cifar10 = load_from_default_path(net.CIFAR, "model_data/cifar10")
-cifar100 = load_from_default_path(net.CIFAR, "model_data/cifar100")
-imagenet = load_from_default_path(net.ImageNet, "model_data/imagenet")
-
-networks_in_folder = {
-    "lenet_mnist": (net.LeNet, mnist),
-    "alexnet_cifar10": (net.AlexNet, cifar10),
-    "alexnet2_cifar10": (net.AlexNet2, cifar10),
-    "vgg16_cifar10": (net.VGG16Cifar10, cifar10),
-    "vgg16_cifar100": (net.VGG16Cifar100, cifar100),
-}
-
-for name, (cls, dataset) in networks_in_folder.items():
-    network: Module = cls()
-    network.load_state_dict(torch.load(f"model_data/{name}.pth.tar"))
-    d1, d2 = DataLoader(Subset(dataset, range(5000, 10000)), 1), DataLoader(dataset, 1)
-    app = TorchApp("", network, d1, d2, get_knobs_from_file(), accuracy)
-    qos, _ = app.measure_qos_perf({}, False)
-    print(f"{name} -> {qos}")
diff --git a/predtuner/model_zoo/__init__.py b/predtuner/model_zoo/__init__.py
index 375e4ac..01c6dbf 100644
--- a/predtuner/model_zoo/__init__.py
+++ b/predtuner/model_zoo/__init__.py
@@ -1,4 +1,6 @@
-from .alexnet import AlexNet, AlexNet2
+from .alexnet import AlexNet, AlexNet2, AlexNetImageNet
 from .datasets import CIFAR, MNIST, ImageNet
 from .lenet import LeNet
-from .vgg16 import VGG16Cifar10, VGG16Cifar100
+from .mobilenet import MobileNet
+from .resnet import ResNet18, ResNet50
+from .vgg16 import VGG16Cifar10, VGG16Cifar100, VGG16ImageNet
diff --git a/predtuner/model_zoo/alexnet.py b/predtuner/model_zoo/alexnet.py
index 9256139..8e62285 100644
--- a/predtuner/model_zoo/alexnet.py
+++ b/predtuner/model_zoo/alexnet.py
@@ -28,3 +28,28 @@ class AlexNet2(Classifier):
         )
         linears = Sequential(Linear(2048, 10))
         super().__init__(convs, linears)
+
+
+class AlexNetImageNet(Classifier):
+    def __init__(self):
+        convs = Sequential(
+            *make_conv_pool_activ(
+                3, 64, 11, ReLU, padding=2, stride=4, pool_size=3, pool_stride=2
+            ),
+            *make_conv_pool_activ(
+                64, 192, 5, ReLU, padding=2, pool_size=3, pool_stride=2
+            ),
+            *make_conv_pool_activ(192, 384, 3, ReLU, padding=1),
+            *make_conv_pool_activ(384, 256, 3, ReLU, padding=1),
+            *make_conv_pool_activ(
+                256, 256, 3, ReLU, padding=1, pool_size=3, pool_stride=2
+            )
+        )
+        linears = Sequential(
+            Linear(9216, 4096),
+            ReLU(),
+            Linear(4096, 4096),
+            ReLU(),
+            Linear(4096, 1000),
+        )
+        super().__init__(convs, linears)
diff --git a/predtuner/model_zoo/lenet.py b/predtuner/model_zoo/lenet.py
index bf0a69a..f9deb43 100644
--- a/predtuner/model_zoo/lenet.py
+++ b/predtuner/model_zoo/lenet.py
@@ -9,8 +9,5 @@ class LeNet(Classifier):
             *make_conv_pool_activ(1, 32, 5, Tanh, 2, padding=2),
             *make_conv_pool_activ(32, 64, 5, Tanh, 2, padding=2)
         )
-        linears = Sequential(
-            Linear(7 * 7 * 64, 1024), Tanh(),
-            Linear(1024, 10), Tanh()
-        )
+        linears = Sequential(Linear(7 * 7 * 64, 1024), Tanh(), Linear(1024, 10), Tanh())
         super().__init__(convs, linears)
diff --git a/predtuner/model_zoo/mobilenet.py b/predtuner/model_zoo/mobilenet.py
new file mode 100644
index 0000000..34e4f79
--- /dev/null
+++ b/predtuner/model_zoo/mobilenet.py
@@ -0,0 +1,53 @@
+from torch.nn import AvgPool2d, BatchNorm2d, Conv2d, Linear, ReLU, Sequential
+
+from ._container import Classifier, make_conv_pool_activ
+
+
+def _make_seq(in_channels, out_channels, c_kernel_size, gc_stride, gc_kernel_size=3):
+    return Sequential(
+        *make_conv_pool_activ(
+            in_channels,
+            out_channels,
+            c_kernel_size,
+            bias=False,
+            padding=(c_kernel_size - 1) // 2,
+        ),
+        BatchNorm2d(out_channels, eps=0.001),
+        ReLU(),
+        Conv2d(
+            out_channels,
+            out_channels,
+            gc_kernel_size,
+            bias=False,
+            stride=gc_stride,
+            padding=(gc_kernel_size - 1) // 2,
+            groups=out_channels,
+        ),
+        BatchNorm2d(out_channels, eps=0.001),
+        ReLU()
+    )
+
+
+class MobileNet(Classifier):
+    def __init__(self):
+        convs = Sequential(
+            _make_seq(3, 32, 3, 1),
+            _make_seq(32, 64, 1, 2),
+            _make_seq(64, 128, 1, 1),
+            _make_seq(128, 128, 1, 2),
+            _make_seq(128, 256, 1, 1),
+            _make_seq(256, 256, 1, 2),
+            _make_seq(256, 512, 1, 1),
+            _make_seq(512, 512, 1, 1),
+            _make_seq(512, 512, 1, 1),
+            _make_seq(512, 512, 1, 1),
+            _make_seq(512, 512, 1, 1),
+            _make_seq(512, 512, 1, 2),
+            _make_seq(512, 1024, 1, 1),
+            *make_conv_pool_activ(1024, 1024, 1, padding=0, bias=False),
+            BatchNorm2d(1024, eps=0.001),
+            ReLU(),
+            AvgPool2d(2)
+        )
+        linears = Sequential(Linear(1024, 10))
+        super().__init__(convs, linears)
diff --git a/predtuner/model_zoo/resnet.py b/predtuner/model_zoo/resnet.py
new file mode 100644
index 0000000..c0273c4
--- /dev/null
+++ b/predtuner/model_zoo/resnet.py
@@ -0,0 +1,100 @@
+from torch.nn import AvgPool2d, BatchNorm2d, Linear, Module, ReLU, Sequential
+
+from ._container import Classifier, make_conv_pool_activ
+
+
+class BasicBlock(Module):
+    def __init__(self, ins, outs, shortcut=False):
+        super().__init__()
+        stride = 2 if shortcut else 1
+        self.mainline = Sequential(
+            *make_conv_pool_activ(ins, outs, 3, ReLU, padding=1, stride=stride),
+            *make_conv_pool_activ(outs, outs, 3, padding=1)
+        )
+        self.relu1 = ReLU()
+        self.shortcut = (
+            Sequential(*make_conv_pool_activ(ins, outs, 1, stride=stride))
+            if shortcut
+            else Sequential()
+        )
+
+    def forward(self, input_):
+        return self.relu1(self.mainline(input_) + self.shortcut(input_))
+
+
+class ResNet18(Classifier):
+    def __init__(self):
+        convs = Sequential(
+            *make_conv_pool_activ(3, 16, 3, ReLU, padding=1),
+            BasicBlock(16, 16),
+            BasicBlock(16, 16),
+            BasicBlock(16, 16),
+            BasicBlock(16, 32, True),
+            BasicBlock(32, 32),
+            BasicBlock(32, 32),
+            BasicBlock(32, 64, True),
+            BasicBlock(64, 64),
+            BasicBlock(64, 64),
+            AvgPool2d(8)
+        )
+        linears = Sequential(Linear(64, 10))
+        super().__init__(convs, linears)
+
+
+class Bottleneck(Module):
+    expansion = 4
+
+    def __init__(self, in_planes, planes, stride=1):
+        super(Bottleneck, self).__init__()
+        self.mainline = Sequential(
+            *make_conv_pool_activ(in_planes, planes, 1, stride=stride),
+            BatchNorm2d(planes, eps=0.001),
+            ReLU(),
+            *make_conv_pool_activ(planes, planes, 3, padding=1),
+            BatchNorm2d(planes, eps=0.001),
+            ReLU(),
+            *make_conv_pool_activ(planes, self.expansion * planes, 1),
+            BatchNorm2d(self.expansion * planes, eps=0.001)
+        )
+        self.relu1 = ReLU()
+        if stride != 1 or in_planes != self.expansion * planes:
+            self.shortcut = Sequential(
+                *make_conv_pool_activ(
+                    in_planes, self.expansion * planes, 1, stride=stride
+                ),
+                BatchNorm2d(self.expansion * planes, eps=0.001)
+            )
+        else:
+            self.shortcut = Sequential()
+
+    def forward(self, input_):
+        return self.relu1(self.mainline(input_) + self.shortcut(input_))
+
+
+class ResNet50(Classifier):
+    def __init__(self):
+        convs = Sequential(
+            *make_conv_pool_activ(
+                3, 64, 7, ReLU, pool_size=3, pool_stride=2, padding=3, stride=2
+            ),
+            BatchNorm2d(64, eps=0.001),
+            Bottleneck(64, 64),
+            Bottleneck(256, 64),
+            Bottleneck(256, 64),
+            Bottleneck(256, 128, stride=2),
+            Bottleneck(512, 128),
+            Bottleneck(512, 128),
+            Bottleneck(512, 128),
+            Bottleneck(512, 256, stride=2),
+            Bottleneck(1024, 256),
+            Bottleneck(1024, 256),
+            Bottleneck(1024, 256),
+            Bottleneck(1024, 256),
+            Bottleneck(1024, 256),
+            Bottleneck(1024, 512, stride=2),
+            Bottleneck(2048, 512),
+            Bottleneck(2048, 512),
+            AvgPool2d(7)
+        )
+        linears = Sequential(Linear(2048, 1000))
+        super().__init__(convs, linears)
diff --git a/predtuner/model_zoo/vgg16.py b/predtuner/model_zoo/vgg16.py
index 1a33d31..43ac8f9 100644
--- a/predtuner/model_zoo/vgg16.py
+++ b/predtuner/model_zoo/vgg16.py
@@ -22,7 +22,9 @@ class _VGG16(Classifier):
             *make_conv_pool_activ(512, 512, 3, ReLU, padding=1),
             *make_conv_pool_activ(512, 512, 3, ReLU, 2, padding=1)
         )
-        linear_layers = [Linear(in_, out) for in_, out in zip(linear_inouts, linear_inouts[1:])]
+        linear_layers = [
+            Linear(in_, out) for in_, out in zip(linear_inouts, linear_inouts[1:])
+        ]
         linear_relus = [ReLU() for _ in range(2 * len(linear_layers) - 1)]
         linear_relus[::2] = linear_layers
         linears = Sequential(*linear_relus)
@@ -37,3 +39,8 @@ class VGG16Cifar10(_VGG16):
 class VGG16Cifar100(_VGG16):
     def __init__(self):
         super().__init__([512, 512, 100])
+
+
+class VGG16ImageNet(_VGG16):
+    def __init__(self):
+        super().__init__([25088, 4096, 4096, 1000])
diff --git a/test/integrated_tuning.py b/test/integrated_tuning.py
index 2068214..4ba0d55 100644
--- a/test/integrated_tuning.py
+++ b/test/integrated_tuning.py
@@ -6,21 +6,31 @@ from torch.utils.data.dataloader import DataLoader
 from torch.utils.data.dataset import Subset
 
 site.addsitedir(Path(__file__).absolute().parent.parent)
-from model_zoo import CIFAR, VGG16Cifar10
+from predtuner.model_zoo import CIFAR, VGG16Cifar10
 from predtuner import TorchApp, accuracy, config_pylogger, get_knobs_from_file
 
 msg_logger = config_pylogger(output_dir="tuner_results/logs", verbose=True)
 
-dataset = CIFAR.from_file(
-    "model_data/cifar10/input.bin", "model_data/cifar10/labels.bin"
+tune_set = CIFAR.from_file(
+    "model_params/vgg16_cifar10/tune_input.bin",
+    "model_params/vgg16_cifar10/tune_labels.bin",
 )
-tune_loader = DataLoader(Subset(dataset, range(500)), batch_size=500)
-test_loader = DataLoader(Subset(dataset, range(5000, 5500)), batch_size=500)
+tune_loader = DataLoader(Subset(tune_set, range(500)), batch_size=500)
+test_set = CIFAR.from_file(
+    "model_params/vgg16_cifar10/test_input.bin",
+    "model_params/vgg16_cifar10/test_labels.bin",
+)
+test_loader = DataLoader(Subset(test_set, range(500)), batch_size=500)
 module = VGG16Cifar10()
-module.load_state_dict(torch.load("model_data/vgg16_cifar10.pth.tar"))
+module.load_state_dict(torch.load("model_params/vgg16_cifar10.pth.tar"))
 app = TorchApp(
-    "TestTorchApp", module, tune_loader, test_loader, get_knobs_from_file(), accuracy,
-    model_storage_folder="tuner_results/vgg16_cifar10"
+    "TestTorchApp",
+    module,
+    tune_loader,
+    test_loader,
+    get_knobs_from_file(),
+    accuracy,
+    model_storage_folder="tuner_results/vgg16_cifar10",
 )
 baseline, _ = app.measure_qos_perf({}, False)
 tuner = app.get_tuner()
diff --git a/test/test_model_zoo_acc.py b/test/test_model_zoo_acc.py
new file mode 100644
index 0000000..55f770d
--- /dev/null
+++ b/test/test_model_zoo_acc.py
@@ -0,0 +1,38 @@
+import unittest
+
+import predtuner.model_zoo as net
+import torch
+from predtuner import TorchApp, accuracy, config_pylogger, get_knobs_from_file
+from torch.nn import Module
+from torch.utils.data.dataloader import DataLoader
+
+msg_logger = config_pylogger(output_dir="/tmp", verbose=True)
+
+
+class TestModelZooAcc(unittest.TestCase):
+    networks = {
+        "lenet_mnist": (net.LeNet, net.MNIST, 2000, 99.65),
+        "alexnet_cifar10": (net.AlexNet, net.CIFAR, 500, 78.78),
+        "alexnet2_cifar10": (net.AlexNet2, net.CIFAR, 500, 84.75),
+        "vgg16_cifar10": (net.VGG16Cifar10, net.CIFAR, 250, 89.22),
+        "vgg16_cifar100": (net.VGG16Cifar100, net.CIFAR, 250, 68.42),
+        "resnet18_cifar10": (net.ResNet18, net.CIFAR, 250, 89.41),
+        "mobilenet": (net.MobileNet, net.CIFAR, 250, 84.9),
+        "alexnet_imagenet": (net.AlexNetImageNet, net.ImageNet, 20, 55.86),
+        # "resnet50_imagenet": (net.ResNet50, net.ImageNet, 10, 71.72),
+        "vgg16_imagenet": (net.VGG16ImageNet, net.ImageNet, 5, 68.82),
+    }
+
+    def test_all_accuracy(self):
+        for name, netinfo in self.networks.items():
+            model_cls, dataset_cls, batchsize, target_acc = netinfo
+            network: Module = model_cls()
+            network.load_state_dict(torch.load(f"model_params/{name}.pth.tar"))
+            dataset = dataset_cls.from_file(
+                f"model_params/{name}/tune_input.bin",
+                f"model_params/{name}/tune_labels.bin",
+            )
+            tune = DataLoader(dataset, batchsize)
+            app = TorchApp("", network, tune, tune, get_knobs_from_file(), accuracy)
+            qos, _ = app.measure_qos_perf({}, False)
+            self.assertAlmostEqual(qos, target_acc)
diff --git a/test/test_torchapp.py b/test/test_torchapp.py
index 74267ef..841c657 100644
--- a/test/test_torchapp.py
+++ b/test/test_torchapp.py
@@ -14,11 +14,12 @@ class TorchAppSetUp(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
         dataset = CIFAR.from_file(
-            "model_data/cifar10/input.bin", "model_data/cifar10/labels.bin"
+            "model_params/vgg16_cifar10/tune_input.bin",
+            "model_params/vgg16_cifar10/tune_labels.bin",
         )
         cls.dataset = Subset(dataset, range(100))
         cls.module = VGG16Cifar10()
-        cls.module.load_state_dict(torch.load("model_data/vgg16_cifar10.pth.tar"))
+        cls.module.load_state_dict(torch.load("model_params/vgg16_cifar10.pth.tar"))
         cls.app = TorchApp(
             "TestTorchApp",
             cls.module,
@@ -47,7 +48,7 @@ class TestTorchAppTuning(TorchAppSetUp):
 
     def test_baseline_qos(self):
         qos, _ = self.app.measure_qos_perf({}, False)
-        self.assertAlmostEqual(qos, 88.0)
+        self.assertAlmostEqual(qos, 93.0)
 
     def test_tuning_relative_thres(self):
         baseline, _ = self.app.measure_qos_perf({}, False)
@@ -138,7 +139,7 @@ class TestModelSaving(TorchAppSetUp):
             DataLoader(cls.dataset, batch_size=500),
             get_knobs_from_file(),
             accuracy,
-            model_storage_folder=cls.model_path
+            model_storage_folder=cls.model_path,
         )
 
     def test_loading_p1(self):
-- 
GitLab