diff --git a/distiller/data_loggers/collector.py b/distiller/data_loggers/collector.py index 999373c52d961c4fcc5492d96f0c05d27833136f..5aeba01677a5f0b890af7499337655b8ac6ea308 100755 --- a/distiller/data_loggers/collector.py +++ b/distiller/data_loggers/collector.py @@ -147,14 +147,18 @@ class SummaryActivationStatsCollector(ActivationStatsCollector): """ try: getattr(module, self.stat_name).add(self.summary_fn(output.data)) - except RuntimeError: - raise ValueError("ActivationStatsCollector: a module was encountered twice during model.apply().\n" - "This is an indication that your model is using the same module instance, " - "in multiple nodes in the graph. This usually occurs with ReLU modules: \n" - "For example in TorchVision's ResNet model, self.relu = nn.ReLU(inplace=True) is " - "instantiated once, but used multiple times. This is not permissible when using " - "instances of ActivationStatsCollector.") - + except RuntimeError as e: + if "The expanded size of the tensor" in e.args[0]: + raise ValueError("ActivationStatsCollector: a module ({} - {}) was encountered twice during model.apply().\n" + "This is an indication that your model is using the same module instance, " + "in multiple nodes in the graph. This usually occurs with ReLU modules: \n" + "For example in TorchVision's ResNet model, self.relu = nn.ReLU(inplace=True) is " + "instantiated once, but used multiple times. This is not permissible when using " + "instances of ActivationStatsCollector.". + format(module.distiller_name, type(module))) + else: + msglogger.info("Exception in _activation_stats_cb: {} {}".format(module.distiller_name, type(module))) + raise def _start_counter(self, module): if not hasattr(module, self.stat_name): diff --git a/distiller/utils.py b/distiller/utils.py index 25ccc08e2aeeab6f1940048a190641bea7d477df..cb000b301a8d88ad18c0662850818272289ac4ea 100755 --- a/distiller/utils.py +++ b/distiller/utils.py @@ -144,10 +144,10 @@ def density(tensor): Returns: density (float) """ - nonzero = torch.nonzero(tensor) - if nonzero.dim() == 0: - return 0.0 - return nonzero.size(0) / float(torch.numel(tensor)) + # Using torch.nonzero(tensor) can lead to memory exhaustion on + # very large tensors, so we count zeros "manually". + nonzero = tensor.abs().gt(0).sum() + return float(nonzero.item()) / torch.numel(tensor) def sparsity(tensor): @@ -252,14 +252,14 @@ def sparsity_matrix(tensor, dim): return 1 - nonzero_structs/num_structs -def sparsity_cols(tensor, trasposed=True): +def sparsity_cols(tensor, transposed=True): """Column-wise sparsity for 2D tensors PyTorch GEMM matrices are transposed before they are used in the GEMM operation. In other words the matrices are stored in memory transposed. So by default we compute the sparsity of the transposed dimension. """ - if trasposed: + if transposed: return sparsity_matrix(tensor, 0) return sparsity_matrix(tensor, 1) @@ -269,14 +269,14 @@ def density_cols(tensor, transposed=True): return 1 - sparsity_cols(tensor, transposed) -def sparsity_rows(tensor, trasposed=True): +def sparsity_rows(tensor, transposed=True): """Row-wise sparsity for 2D matrices PyTorch GEMM matrices are transposed before they are used in the GEMM operation. In other words the matrices are stored in memory transposed. So by default we compute the sparsity of the transposed dimension. 
""" - if trasposed: + if transposed: return sparsity_matrix(tensor, 1) return sparsity_matrix(tensor, 0) @@ -339,9 +339,14 @@ def activation_channels_l1(activation): Returns - for each channel: the batch-mean of its L1 magnitudes (i.e. over all of the activations in the mini-batch, compute the mean of the L! magnitude of each channel). """ - view_2d = activation.view(-1, activation.size(2) * activation.size(3)) # (batch*channel) x (h*w) - featuremap_norms = view_2d.norm(p=1, dim=1) - featuremap_norms_mat = featuremap_norms.view(activation.size(0), activation.size(1)) # batch x channel + if activation.dim() == 4: + view_2d = activation.view(-1, activation.size(2) * activation.size(3)) # (batch*channels) x (h*w) + featuremap_norms = view_2d.norm(p=1, dim=1) # (batch*channels) x 1 + featuremap_norms_mat = featuremap_norms.view(activation.size(0), activation.size(1)) # batch x channels + elif activation.dim() == 2: + featuremap_norms_mat = activation.norm(p=1, dim=1) # batch x 1 + else: + raise ValueError("activation_channels_l1: Unsupported shape: ".format(activation.shape)) # We need to move the results back to the CPU return featuremap_norms_mat.mean(dim=0).cpu() @@ -357,9 +362,14 @@ def activation_channels_means(activation): Returns - for each channel: the batch-mean of its L1 magnitudes (i.e. over all of the activations in the mini-batch, compute the mean of the L1 magnitude of each channel). """ - view_2d = activation.view(-1, activation.size(2) * activation.size(3)) # (batch*channel) x (h*w) - featuremap_means = sparsity_rows(view_2d) - featuremap_means_mat = featuremap_means.view(activation.size(0), activation.size(1)) # batch x channel + if activation.dim() == 4: + view_2d = activation.view(-1, activation.size(2) * activation.size(3)) # (batch*channels) x (h*w) + featuremap_means = view_2d.mean(dim=1) # (batch*channels) x 1 + featuremap_means_mat = featuremap_means.view(activation.size(0), activation.size(1)) # batch x channels + elif activation.dim() == 2: + featuremap_means_mat = activation.mean(dim=1) # batch x 1 + else: + raise ValueError("activation_channels_means: Unsupported shape: ".format(activation.shape)) # We need to move the results back to the CPU return featuremap_means_mat.mean(dim=0).cpu() @@ -377,11 +387,15 @@ def activation_channels_apoz(activation): Returns - for each channel: the batch-mean of its sparsity. 
""" - view_2d = activation.view(-1, activation.size(2) * activation.size(3)) # (batch*channel) x (h*w) - featuremap_means = view_2d.mean(dim=1) # global average pooling - featuremap_means_mat = featuremap_means.view(activation.size(0), activation.size(1)) # batch x channel - # We need to move the results back to the CPU - return featuremap_means_mat.mean(dim=0).cpu() + if activation.dim() == 4: + view_2d = activation.view(-1, activation.size(2) * activation.size(3)) # (batch*channels) x (h*w) + featuremap_apoz = view_2d.abs().gt(0).sum(dim=1).float() / (activation.size(2) * activation.size(3)) # (batch*channels) x 1 + featuremap_apoz_mat = featuremap_apoz.view(activation.size(0), activation.size(1)) # batch x channels + elif activation.dim() == 2: + featuremap_apoz_mat = activation.abs().gt(0).sum(dim=1).float() / activation.size(1) # batch x 1 + else: + raise ValueError("activation_channels_apoz: Unsupported shape: ".format(activation.shape)) + return featuremap_apoz_mat.mean(dim=0).cpu() def log_training_progress(stats_dict, params_dict, epoch, steps_completed, total_steps, log_freq, loggers): diff --git a/docs-src/docs/usage.md b/docs-src/docs/usage.md index 9fbfe083fd9976d4fb9c0b0dbf006a09e7882c7b..264c55a9af0f4f7c317197cf4e15459ccdc4b6ce 100755 --- a/docs-src/docs/usage.md +++ b/docs-src/docs/usage.md @@ -225,7 +225,7 @@ $ tensorboard --logdir=logs Distillers's setup (requirements.txt) installs TensorFlow for CPU. If you want a different installation, please follow the [TensorFlow installation instructions](https://www.tensorflow.org/install/install_linux). -## Collecting feature-maps statistics +## Collecting activations statistics In CNNs with ReLU layers, ReLU activations (feature-maps) also exhibit a nice level of sparsity (50-60% sparsity is typical). <br> You can collect activation statistics using the ```--act_stats``` command-line flag.<br> For example: @@ -258,6 +258,96 @@ You can use a utility function, ```distiller.log_activation_statsitics```, to lo distiller.log_activation_statsitics(epoch, "train", loggers=[tflogger], collector=collectors["sparsity"]) ``` + +### Caveats +Distiller collects activations statistics using PyTorch's forward-hooks mechanism. Collectors iteratively register the modules' forward-hooks, and collectors are called during the forward traversal and get exposed to activation data. Registering for forward callbacks is performed like this: +``` +module.register_forward_hook +``` +This makes apparent two limitations of this mechanism: + +1. We can only register on PyTorch modules. This means that we can't register on the forward hook of a functionals such as ```torch.nn.functional.relu``` and ```torch.nn.functional.max_pool2d```. + Therefore, you may need to replace functionals with their module alternative. For example: +``` +class MadeUpNet(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return x +``` + Can be changed to: +``` +class MadeUpNet(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.relu(self.conv1(x)) + return x +``` + +2. We can only use a module instance once in our models. If we use the same module several times, then we can't determine which node in the graph has invoked the callback, because the PyTorch callback signature ```def hook(module, input, output)``` doesn't provide enough contextual information. 
+TorchVision's [ResNet](https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py) is an example of a model that uses the same instance of nn.ReLU multiple times: +``` +class BasicBlock(nn.Module): + expansion = 1 + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) # <================ + out = self.conv2(out) + out = self.bn2(out) + if self.downsample is not None: + residual = self.downsample(x) + out += residual + out = self.relu(out) # <================ + return out +``` +In Distiller we changed [ResNet](https://github.com/NervanaSystems/distiller/blob/master/models/imagenet/resnet.py) to use multiple instances of nn.ReLU, and each instance is used only once: +``` +class BasicBlock(nn.Module): + expansion = 1 + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu1 = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.relu2 = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + out = self.conv1(x) + out = self.bn1(out) + out = self.relu1(out) # <================ + out = self.conv2(out) + out = self.bn2(out) + if self.downsample is not None: + residual = self.downsample(x) + out += residual + out = self.relu2(out) # <================ + return out +``` + # Using the Jupyter notebooks The Jupyter notebooks contain many examples of how to use the statistics summaries generated by Distiller. They are explained in a separate page. diff --git a/docs/index.html b/docs/index.html index 00f798d9ef616cb9e1bc74396a35aea98d4e0a16..5c1c08925ca10929101d11abe40851ce677ad261 100644 --- a/docs/index.html +++ b/docs/index.html @@ -258,5 +258,5 @@ And of course, if we used a sparse or compressed representation, then we are red <!-- MkDocs version : 0.17.2 -Build Date UTC : 2018-11-21 21:54:00 +Build Date UTC : 2018-11-24 09:47:02 --> diff --git a/docs/search/search_index.json b/docs/search/search_index.json index 050d1383e2d368e22dbfdaaf80b3e325d88bece1..2d40acc210c49e90da97f089b22eddd766a222ec 100644 --- a/docs/search/search_index.json +++ b/docs/search/search_index.json @@ -77,7 +77,7 @@ }, { "location": "/usage/index.html", - "text": "Using the sample application\n\n\nThe Distiller repository contains a sample application, \ndistiller/examples/classifier_compression/compress_classifier.py\n, and a set of scheduling files which demonstrate Distiller's features. Following is a brief discussion of how to use this application and the accompanying schedules.\n\n\nYou might also want to refer to the following resources:\n\n\n\n\nAn \nexplanation\n of the scheduler file format.\n\n\nAn in-depth \ndiscussion\n of how we used these schedule files to implement several state-of-the-art DNN compression research papers.\n\n\n\n\nThe sample application supports various features for compression of image classification DNNs, and gives an example of how to integrate distiller in your own application. 
The code is documented and should be considered the best source of documentation, but we provide some elaboration here.\n\n\nThis diagram shows how where \ncompress_classifier.py\n fits in the compression workflow, and how we integrate the Jupyter notebooks as part of our research work.\n\n\n\nCommand line arguments\n\n\nTo get help on the command line arguments, invoke:\n\n\n$ python3 compress_classifier.py --help\n\n\n\n\nFor example:\n\n\n$ time python3 compress_classifier.py -a alexnet --lr 0.005 -p 50 ../../../data.imagenet -j 44 --epochs 90 --pretrained --compress=../sensitivity-pruning/alexnet.schedule_sensitivity.yaml\n\nParameters:\n +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+\n | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean |\n |----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------|\n | 0 | features.module.0.weight | (64, 3, 11, 11) | 23232 | 13411 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 42.27359 | 0.14391 | -0.00002 | 0.08805 |\n | 1 | features.module.3.weight | (192, 64, 5, 5) | 307200 | 115560 | 0.00000 | 0.00000 | 0.00000 | 1.91243 | 0.00000 | 62.38281 | 0.04703 | -0.00250 | 0.02289 |\n | 2 | features.module.6.weight | (384, 192, 3, 3) | 663552 | 256565 | 0.00000 | 0.00000 | 0.00000 | 6.18490 | 0.00000 | 61.33445 | 0.03354 | -0.00184 | 0.01803 |\n | 3 | features.module.8.weight | (256, 384, 3, 3) | 884736 | 315065 | 0.00000 | 0.00000 | 0.00000 | 6.96411 | 0.00000 | 64.38881 | 0.02646 | -0.00168 | 0.01422 |\n | 4 | features.module.10.weight | (256, 256, 3, 3) | 589824 | 186938 | 0.00000 | 0.00000 | 0.00000 | 15.49225 | 0.00000 | 68.30614 | 0.02714 | -0.00246 | 0.01409 |\n | 5 | classifier.1.weight | (4096, 9216) | 37748736 | 3398881 | 0.00000 | 0.21973 | 0.00000 | 0.21973 | 0.00000 | 90.99604 | 0.00589 | -0.00020 | 0.00168 |\n | 6 | classifier.4.weight | (4096, 4096) | 16777216 | 1782769 | 0.21973 | 3.46680 | 0.00000 | 3.46680 | 0.00000 | 89.37387 | 0.00849 | -0.00066 | 0.00263 |\n | 7 | classifier.6.weight | (1000, 4096) | 4096000 | 994738 | 3.36914 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 75.71440 | 0.01718 | 0.00030 | 0.00778 |\n | 8 | Total sparsity: | - | 61090496 | 7063928 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 88.43694 | 0.00000 | 0.00000 | 0.00000 |\n +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+\n 2018-04-04 21:30:52,499 - Total sparsity: 88.44\n\n 2018-04-04 21:30:52,499 - --- validate (epoch=89)-----------\n 2018-04-04 21:30:52,499 - 128116 samples (256 per mini-batch)\n 2018-04-04 21:31:04,646 - Epoch: [89][ 50/ 500] Loss 2.175988 Top1 51.289063 Top5 74.023438\n 2018-04-04 21:31:06,427 - Epoch: [89][ 100/ 500] Loss 2.171564 Top1 51.175781 Top5 74.308594\n 2018-04-04 21:31:11,432 - Epoch: [89][ 150/ 500] Loss 2.159347 Top1 51.546875 Top5 74.473958\n 2018-04-04 21:31:14,364 - Epoch: [89][ 200/ 500] Loss 2.156857 Top1 51.585938 Top5 74.568359\n 2018-04-04 21:31:18,381 - Epoch: [89][ 250/ 500] Loss 2.152790 Top1 51.707813 Top5 74.681250\n 2018-04-04 21:31:22,195 - Epoch: [89][ 300/ 500] Loss 2.149962 Top1 51.791667 Top5 74.755208\n 
2018-04-04 21:31:25,508 - Epoch: [89][ 350/ 500] Loss 2.150936 Top1 51.827009 Top5 74.767857\n 2018-04-04 21:31:29,538 - Epoch: [89][ 400/ 500] Loss 2.150853 Top1 51.781250 Top5 74.763672\n 2018-04-04 21:31:32,842 - Epoch: [89][ 450/ 500] Loss 2.150156 Top1 51.828125 Top5 74.821181\n 2018-04-04 21:31:35,338 - Epoch: [89][ 500/ 500] Loss 2.150417 Top1 51.833594 Top5 74.817187\n 2018-04-04 21:31:35,357 - ==> Top1: 51.838 Top5: 74.817 Loss: 2.150\n\n 2018-04-04 21:31:35,364 - Saving checkpoint\n 2018-04-04 21:31:39,251 - --- test ---------------------\n 2018-04-04 21:31:39,252 - 50000 samples (256 per mini-batch)\n 2018-04-04 21:31:51,512 - Test: [ 50/ 195] Loss 1.487607 Top1 63.273438 Top5 85.695312\n 2018-04-04 21:31:55,015 - Test: [ 100/ 195] Loss 1.638043 Top1 60.636719 Top5 83.664062\n 2018-04-04 21:31:58,732 - Test: [ 150/ 195] Loss 1.833214 Top1 57.619792 Top5 80.447917\n 2018-04-04 21:32:01,274 - ==> Top1: 56.606 Top5: 79.446 Loss: 1.893\n\n\n\n\nLet's look at the command line again:\n\n\n$ time python3 compress_classifier.py -a alexnet --lr 0.005 -p 50 ../../../data.imagenet -j 44 --epochs 90 --pretrained --compress=../sensitivity-pruning/alexnet.schedule_sensitivity.yaml\n\n\n\n\nIn this example, we prune a TorchVision pre-trained AlexNet network, using the following configuration:\n\n\n\n\nLearning-rate of 0.005\n\n\nPrint progress every 50 mini-batches.\n\n\nUse 44 worker threads to load data (make sure to use something suitable for your machine).\n\n\nRun for 90 epochs. Torchvision's pre-trained models did not store the epoch metadata, so pruning starts at epoch 0. When you train and prune your own networks, the last training epoch is saved as a metadata with the model. Therefore, when you load such models, the first epoch is not 0, but it is the last training epoch.\n\n\nThe pruning schedule is provided in \nalexnet.schedule_sensitivity.yaml\n\n\nLog files are written to directory \nlogs\n.\n\n\n\n\nExamples\n\n\nDistiller comes with several example schedules which can be used together with \ncompress_classifier.py\n.\nThese example schedules (YAML) files, contain the command line that is used in order to invoke the schedule (so that you can easily recreate the results in your environment), together with the results of the pruning or regularization. 
The results usually contain a table showing the sparsity of each of the model parameters, together with the validation and test top1, top5 and loss scores.\n\n\nFor more details on the example schedules, you can refer to the coverage of the \nModel Zoo\n.\n\n\n\n\nexamples/agp-pruning\n:\n\n\nAutomated Gradual Pruning (AGP) on MobileNet and ResNet18 (ImageNet dataset)\n\n\n\n\n\n\n\nexamples/hybrid\n:\n\n\nAlexNet AGP with 2D (kernel) regularization (ImageNet dataset)\n\n\nAlexNet sensitivity pruning with 2D regularization\n\n\n\n\n\n\n\nexamples/network_slimming\n:\n\n\nResNet20 Network Slimming (this is work-in-progress)\n\n\n\n\n\n\n\nexamples/pruning_filters_for_efficient_convnets\n:\n\n\nResNet56 baseline training (CIFAR10 dataset)\n\n\nResNet56 filter removal using filter ranking\n\n\n\n\n\n\n\nexamples/sensitivity_analysis\n:\n\n\nElement-wise pruning sensitivity-analysis:\n\n\nAlexNet (ImageNet)\n\n\nMobileNet (ImageNet)\n\n\nResNet18 (ImageNet)\n\n\nResNet20 (CIFAR10)\n\n\nResNet34 (ImageNet)\n\n\nFilter-wise pruning sensitivity-analysis:\n\n\nResNet20 (CIFAR10)\n\n\nResNet56 (CIFAR10)\n\n\n\n\n\n\n\nexamples/sensitivity-pruning\n:\n\n\nAlexNet sensitivity pruning with Iterative Pruning\n\n\nAlexNet sensitivity pruning with One-Shot Pruning\n\n\n\n\n\n\n\nexamples/ssl\n:\n\n\nResNet20 baseline training (CIFAR10 dataset)\n\n\nStructured Sparsity Learning (SSL) with layer removal on ResNet20\n\n\nSSL with channels removal on ResNet20\n\n\n\n\n\n\n\nexamples/quantization\n:\n\n\nAlexNet w. Batch-Norm (base FP32 + DoReFa)\n\n\nPre-activation ResNet20 on CIFAR10 (base FP32 + DoReFa)\n\n\nPre-activation ResNet18 on ImageNEt (base FP32 + DoReFa)\n\n\n\n\n\n\n\n\nExperiment reproducibility\n\n\nExperiment reproducibility is sometimes important. Pete Warden recently expounded about this in his \nblog\n.\n\nPyTorch's support for deterministic execution requires us to use only one thread for loading data (other wise the multi-threaded execution of the data loaders can create random order and change the results), and to set the seed of the CPU and GPU PRNGs. Using the \n--deterministic\n command-line flag and setting \nj=1\n will produce reproducible results (for the same PyTorch version).\n\n\nPerforming pruning sensitivity analysis\n\n\nDistiller supports element-wise and filter-wise pruning sensitivity analysis. In both cases, L1-norm is used to rank which elements or filters to prune. For example, when running filter-pruning sensitivity analysis, the L1-norm of the filters of each layer's weights tensor are calculated, and the bottom x% are set to zero. \n\nThe analysis process is quite long, because currently we use the entire test dataset to assess the accuracy performance at each pruning level of each weights tensor. Using a small dataset for this would save much time and we plan on assessing if this will provide sufficient results.\n\nResults are output as a CSV file (\nsensitivity.csv\n) and PNG file (\nsensitivity.png\n). 
The implementation is in \ndistiller/sensitivity.py\n and it contains further details about process and the format of the CSV file.\n\n\nThe example below performs element-wise pruning sensitivity analysis on ResNet20 for CIFAR10:\n\n\n$ python3 compress_classifier.py -a resnet20_cifar ../../../data.cifar10/ -j=1 --resume=../cifar10/resnet20/checkpoint_trained_dense.pth.tar --sense=element\n\n\n\n\nThe \nsense\n command-line argument can be set to either \nelement\n or \nfilter\n, depending on the type of analysis you want done.\n\n\nThere is also a \nJupyter notebook\n with example invocations, outputs and explanations.\n\n\n\"Direct\" Quantization Without Training\n\n\nDistiller supports 8-bit quantization of trained modules without re-training (using \nSymmetric Linear Quantization\n). So, any model (whether pruned or not) can be quantized.\n\nUse the \n--quantize\n command-line flag, together with \n--evaluate\n to evaluate the accuracy of your model after quantization. The following example qunatizes ResNet18 for ImageNet:\n\n\n$ python3 compress_classifier.py -a resnet18 ../../../data.imagenet --pretrained --quantize --evaluate\n\n\n\n\nGenerates:\n\n\nPreparing model for quantization\n--- test ---------------------\n50000 samples (256 per mini-batch)\nTest: [ 10/ 195] Loss 0.856354 Top1 79.257812 Top5 92.500000\nTest: [ 20/ 195] Loss 0.923131 Top1 76.953125 Top5 92.246094\nTest: [ 30/ 195] Loss 0.885186 Top1 77.955729 Top5 92.486979\nTest: [ 40/ 195] Loss 0.930263 Top1 76.181641 Top5 92.597656\nTest: [ 50/ 195] Loss 0.931062 Top1 75.726562 Top5 92.906250\nTest: [ 60/ 195] Loss 0.932019 Top1 75.651042 Top5 93.151042\nTest: [ 70/ 195] Loss 0.921287 Top1 76.060268 Top5 93.270089\nTest: [ 80/ 195] Loss 0.932539 Top1 75.986328 Top5 93.100586\nTest: [ 90/ 195] Loss 0.996000 Top1 74.700521 Top5 92.330729\nTest: [ 100/ 195] Loss 1.066699 Top1 73.289062 Top5 91.437500\nTest: [ 110/ 195] Loss 1.100970 Top1 72.574574 Top5 91.001420\nTest: [ 120/ 195] Loss 1.122376 Top1 72.268880 Top5 90.696615\nTest: [ 130/ 195] Loss 1.171726 Top1 71.198918 Top5 90.120192\nTest: [ 140/ 195] Loss 1.191500 Top1 70.797991 Top5 89.902344\nTest: [ 150/ 195] Loss 1.219954 Top1 70.210938 Top5 89.453125\nTest: [ 160/ 195] Loss 1.240942 Top1 69.855957 Top5 89.162598\nTest: [ 170/ 195] Loss 1.265741 Top1 69.342831 Top5 88.807445\nTest: [ 180/ 195] Loss 1.281185 Top1 69.051649 Top5 88.589410\nTest: [ 190/ 195] Loss 1.279682 Top1 69.019326 Top5 88.632812\n==> Top1: 69.130 Top5: 88.732 Loss: 1.276\n\n\n\n\nSummaries\n\n\nYou can use the sample compression application to generate model summary reports, such as the attributes and compute summary report (see screen capture below).\nYou can log sparsity statistics (written to console and CSV file), performance, optimizer and model information, and also create a PNG image of the DNN.\nCreating a PNG image is an experimental feature (it relies on features which are not available on PyTorch 3.1 and that we hope will be available in PyTorch's next release), so to use it you will need to compile the PyTorch master branch, and hope for the best ;-).\n\n\n$ python3 compress_classifier.py --resume=../ssl/checkpoints/checkpoint_trained_ch_regularized_dense.pth.tar -a=resnet20_cifar ../../../data.cifar10 --summary=compute\n\n\n\n\nGenerates:\n\n\n+----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------+\n| | Name | Type | Attrs | IFM | IFM volume | OFM | OFM volume | Weights volume | MACs 
|\n|----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------|\n| 0 | module.conv1 | Conv2d | k=(3, 3) | (1, 3, 32, 32) | 3072 | (1, 16, 32, 32) | 16384 | 432 | 442368 |\n| 1 | module.layer1.0.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 |\n| 2 | module.layer1.0.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 |\n| 3 | module.layer1.1.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 |\n| 4 | module.layer1.1.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 |\n| 5 | module.layer1.2.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 |\n| 6 | module.layer1.2.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 |\n| 7 | module.layer2.0.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 32, 16, 16) | 8192 | 4608 | 1179648 |\n| 8 | module.layer2.0.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 |\n| 9 | module.layer2.0.downsample.0 | Conv2d | k=(1, 1) | (1, 16, 32, 32) | 16384 | (1, 32, 16, 16) | 8192 | 512 | 131072 |\n| 10 | module.layer2.1.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 |\n| 11 | module.layer2.1.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 |\n| 12 | module.layer2.2.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 |\n| 13 | module.layer2.2.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 |\n| 14 | module.layer3.0.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 64, 8, 8) | 4096 | 18432 | 1179648 |\n| 15 | module.layer3.0.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 |\n| 16 | module.layer3.0.downsample.0 | Conv2d | k=(1, 1) | (1, 32, 16, 16) | 8192 | (1, 64, 8, 8) | 4096 | 2048 | 131072 |\n| 17 | module.layer3.1.conv1 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 |\n| 18 | module.layer3.1.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 |\n| 19 | module.layer3.2.conv1 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 |\n| 20 | module.layer3.2.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 |\n| 21 | module.fc | Linear | | (1, 64) | 64 | (1, 10) | 10 | 640 | 640 |\n+----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------+\nTotal MACs: 40,813,184\n\n\n\n\nUsing TensorBoard\n\n\nGoogle's \nTensorBoard\n is an excellent tool for visualizing the progress of DNN training. Distiller's logger supports writing performance indicators and parameter statistics in a file format that can be read by TensorBoard (Distiller uses TensorFlow's APIs in order to do this, which is why Distiller requires the installation of TensorFlow).\n\nTo view the graphs, invoke the TensorBoard server. For example:\n\n\n$ tensorboard --logdir=logs\n\n\n\n\nDistillers's setup (requirements.txt) installs TensorFlow for CPU. 
If you want a different installation, please follow the \nTensorFlow installation instructions\n.\n\n\nCollecting feature-maps statistics\n\n\nIn CNNs with ReLU layers, ReLU activations (feature-maps) also exhibit a nice level of sparsity (50-60% sparsity is typical). \n\nYou can collect activation statistics using the \n--act_stats\n command-line flag.\n\nFor example:\n\n\n$ python3 compress_classifier.py -a=resnet56_cifar -p=50 ../../../data.cifar10 --resume=checkpoint.resnet56_cifar_baseline.pth.tar --act-stats=test -e\n\n\n\n\nThe \ntest\n parameter indicates that, in this example, we want to collect activation statistics during the \ntest\n phase. Note that we also used the \n-e\n command-line argument to indicate that we want to run a \ntest\n phase. The other two legal parameter values are \ntrain\n and \nvalid\n which collect activation statistics during the \ntraining\n and \nvalidation\n phases, respectively. \n\n\nCollectors and their collaterals\n\n\nAn instance of a subclass of \nActivationStatsCollector\n can be used to collect activation statistics. Currently, \nActivationStatsCollector\n has two types of subclasses: \nSummaryActivationStatsCollector\n and \nRecordsActivationStatsCollector\n.\n\nInstances of \nSummaryActivationStatsCollector\n compute the mean of some statistic of the activation. It is rather\nlight-weight and quicker than collecting a record per activation. The statistic function is configured in the constructor.\n\nIn \ncompress_classifier.py\n we create a dictionary of collectors. For example, this collector collects statistics that is stored in each relevant module, in a variable named \nsparsity\n. The lambda expression is invoked per activation encountered during forward passes, and the value it returns (in this case, the sparsity of the activation tensors) is stored in \nmodule.sparsity\n (\n\"sparsity\"\n is this collector's name).\n\n\nSummaryActivationStatsCollector(model,\n \"sparsity\",\n lambda t: 100 * distiller.utils.sparsity(t))\n\n\n\n\nTo access the statistics, you can invoke \ncollector.value()\n, or you can access each module's data directly.\n\nYou can add other statistics collectors and use a different function to compute your new statistic.\n\nAnother type of collector is \nRecordsActivationStatsCollector\n which computes a hard-coded set of activations statistics and collects a\n\nrecord per activation\n. For obvious reasons, this is slower than instances of \nSummaryActivationStatsCollector\n.\nActivationStatsCollector\n default to collecting activations statistics only on the output activations of ReLU layers, but we can choose any layer type we want. In the example below we collect statistics from outputs of \ntorch.nn.Conv2d\n layers.\n\n\nRecordsActivationStatsCollector(model, classes=[torch.nn.Conv2d])\n\n\n\n\nCollectors can write their data to Excel workbooks, by invoking \ncollector.to_xlsx(path_to_workbook)\n, which are named using the collector's name.\n\nYou can use a utility function, \ndistiller.log_activation_statsitics\n, to log the data of an \nActivationStatsCollector\n instance to one of the loggers. For an example, the code below logs the \n\"sparsity\"\n collector to a TensorBoard log file.\n\n\ndistiller.log_activation_statsitics(epoch, \"train\", loggers=[tflogger],\n collector=collectors[\"sparsity\"])\n\n\n\n\nUsing the Jupyter notebooks\n\n\nThe Jupyter notebooks contain many examples of how to use the statistics summaries generated by Distiller. 
They are explained in a separate page.\n\n\nGenerating this documentation\n\n\nInstall mkdocs and the required packages by executing:\n\n\n$ pip3 install -r doc-requirements.txt\n\n\n\n\nTo build the project documentation run:\n\n\n$ cd distiller/docs-src\n$ mkdocs build --clean\n\n\n\n\nThis will create a folder named 'site' which contains the documentation website.\nOpen distiller/docs/site/index.html to view the documentation home page.", + "text": "Using the sample application\n\n\nThe Distiller repository contains a sample application, \ndistiller/examples/classifier_compression/compress_classifier.py\n, and a set of scheduling files which demonstrate Distiller's features. Following is a brief discussion of how to use this application and the accompanying schedules.\n\n\nYou might also want to refer to the following resources:\n\n\n\n\nAn \nexplanation\n of the scheduler file format.\n\n\nAn in-depth \ndiscussion\n of how we used these schedule files to implement several state-of-the-art DNN compression research papers.\n\n\n\n\nThe sample application supports various features for compression of image classification DNNs, and gives an example of how to integrate distiller in your own application. The code is documented and should be considered the best source of documentation, but we provide some elaboration here.\n\n\nThis diagram shows how where \ncompress_classifier.py\n fits in the compression workflow, and how we integrate the Jupyter notebooks as part of our research work.\n\n\n\nCommand line arguments\n\n\nTo get help on the command line arguments, invoke:\n\n\n$ python3 compress_classifier.py --help\n\n\n\n\nFor example:\n\n\n$ time python3 compress_classifier.py -a alexnet --lr 0.005 -p 50 ../../../data.imagenet -j 44 --epochs 90 --pretrained --compress=../sensitivity-pruning/alexnet.schedule_sensitivity.yaml\n\nParameters:\n +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+\n | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean |\n |----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------|\n | 0 | features.module.0.weight | (64, 3, 11, 11) | 23232 | 13411 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 42.27359 | 0.14391 | -0.00002 | 0.08805 |\n | 1 | features.module.3.weight | (192, 64, 5, 5) | 307200 | 115560 | 0.00000 | 0.00000 | 0.00000 | 1.91243 | 0.00000 | 62.38281 | 0.04703 | -0.00250 | 0.02289 |\n | 2 | features.module.6.weight | (384, 192, 3, 3) | 663552 | 256565 | 0.00000 | 0.00000 | 0.00000 | 6.18490 | 0.00000 | 61.33445 | 0.03354 | -0.00184 | 0.01803 |\n | 3 | features.module.8.weight | (256, 384, 3, 3) | 884736 | 315065 | 0.00000 | 0.00000 | 0.00000 | 6.96411 | 0.00000 | 64.38881 | 0.02646 | -0.00168 | 0.01422 |\n | 4 | features.module.10.weight | (256, 256, 3, 3) | 589824 | 186938 | 0.00000 | 0.00000 | 0.00000 | 15.49225 | 0.00000 | 68.30614 | 0.02714 | -0.00246 | 0.01409 |\n | 5 | classifier.1.weight | (4096, 9216) | 37748736 | 3398881 | 0.00000 | 0.21973 | 0.00000 | 0.21973 | 0.00000 | 90.99604 | 0.00589 | -0.00020 | 0.00168 |\n | 6 | classifier.4.weight | (4096, 4096) | 16777216 | 1782769 | 0.21973 | 3.46680 | 0.00000 | 3.46680 | 0.00000 | 89.37387 | 0.00849 | -0.00066 | 0.00263 |\n | 7 | 
classifier.6.weight | (1000, 4096) | 4096000 | 994738 | 3.36914 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 75.71440 | 0.01718 | 0.00030 | 0.00778 |\n | 8 | Total sparsity: | - | 61090496 | 7063928 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 88.43694 | 0.00000 | 0.00000 | 0.00000 |\n +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+\n 2018-04-04 21:30:52,499 - Total sparsity: 88.44\n\n 2018-04-04 21:30:52,499 - --- validate (epoch=89)-----------\n 2018-04-04 21:30:52,499 - 128116 samples (256 per mini-batch)\n 2018-04-04 21:31:04,646 - Epoch: [89][ 50/ 500] Loss 2.175988 Top1 51.289063 Top5 74.023438\n 2018-04-04 21:31:06,427 - Epoch: [89][ 100/ 500] Loss 2.171564 Top1 51.175781 Top5 74.308594\n 2018-04-04 21:31:11,432 - Epoch: [89][ 150/ 500] Loss 2.159347 Top1 51.546875 Top5 74.473958\n 2018-04-04 21:31:14,364 - Epoch: [89][ 200/ 500] Loss 2.156857 Top1 51.585938 Top5 74.568359\n 2018-04-04 21:31:18,381 - Epoch: [89][ 250/ 500] Loss 2.152790 Top1 51.707813 Top5 74.681250\n 2018-04-04 21:31:22,195 - Epoch: [89][ 300/ 500] Loss 2.149962 Top1 51.791667 Top5 74.755208\n 2018-04-04 21:31:25,508 - Epoch: [89][ 350/ 500] Loss 2.150936 Top1 51.827009 Top5 74.767857\n 2018-04-04 21:31:29,538 - Epoch: [89][ 400/ 500] Loss 2.150853 Top1 51.781250 Top5 74.763672\n 2018-04-04 21:31:32,842 - Epoch: [89][ 450/ 500] Loss 2.150156 Top1 51.828125 Top5 74.821181\n 2018-04-04 21:31:35,338 - Epoch: [89][ 500/ 500] Loss 2.150417 Top1 51.833594 Top5 74.817187\n 2018-04-04 21:31:35,357 - ==> Top1: 51.838 Top5: 74.817 Loss: 2.150\n\n 2018-04-04 21:31:35,364 - Saving checkpoint\n 2018-04-04 21:31:39,251 - --- test ---------------------\n 2018-04-04 21:31:39,252 - 50000 samples (256 per mini-batch)\n 2018-04-04 21:31:51,512 - Test: [ 50/ 195] Loss 1.487607 Top1 63.273438 Top5 85.695312\n 2018-04-04 21:31:55,015 - Test: [ 100/ 195] Loss 1.638043 Top1 60.636719 Top5 83.664062\n 2018-04-04 21:31:58,732 - Test: [ 150/ 195] Loss 1.833214 Top1 57.619792 Top5 80.447917\n 2018-04-04 21:32:01,274 - ==> Top1: 56.606 Top5: 79.446 Loss: 1.893\n\n\n\n\nLet's look at the command line again:\n\n\n$ time python3 compress_classifier.py -a alexnet --lr 0.005 -p 50 ../../../data.imagenet -j 44 --epochs 90 --pretrained --compress=../sensitivity-pruning/alexnet.schedule_sensitivity.yaml\n\n\n\n\nIn this example, we prune a TorchVision pre-trained AlexNet network, using the following configuration:\n\n\n\n\nLearning-rate of 0.005\n\n\nPrint progress every 50 mini-batches.\n\n\nUse 44 worker threads to load data (make sure to use something suitable for your machine).\n\n\nRun for 90 epochs. Torchvision's pre-trained models did not store the epoch metadata, so pruning starts at epoch 0. When you train and prune your own networks, the last training epoch is saved as a metadata with the model. Therefore, when you load such models, the first epoch is not 0, but it is the last training epoch.\n\n\nThe pruning schedule is provided in \nalexnet.schedule_sensitivity.yaml\n\n\nLog files are written to directory \nlogs\n.\n\n\n\n\nExamples\n\n\nDistiller comes with several example schedules which can be used together with \ncompress_classifier.py\n.\nThese example schedules (YAML) files, contain the command line that is used in order to invoke the schedule (so that you can easily recreate the results in your environment), together with the results of the pruning or regularization. 
The results usually contain a table showing the sparsity of each of the model parameters, together with the validation and test top1, top5 and loss scores.\n\n\nFor more details on the example schedules, you can refer to the coverage of the \nModel Zoo\n.\n\n\n\n\nexamples/agp-pruning\n:\n\n\nAutomated Gradual Pruning (AGP) on MobileNet and ResNet18 (ImageNet dataset)\n\n\n\n\n\n\n\nexamples/hybrid\n:\n\n\nAlexNet AGP with 2D (kernel) regularization (ImageNet dataset)\n\n\nAlexNet sensitivity pruning with 2D regularization\n\n\n\n\n\n\n\nexamples/network_slimming\n:\n\n\nResNet20 Network Slimming (this is work-in-progress)\n\n\n\n\n\n\n\nexamples/pruning_filters_for_efficient_convnets\n:\n\n\nResNet56 baseline training (CIFAR10 dataset)\n\n\nResNet56 filter removal using filter ranking\n\n\n\n\n\n\n\nexamples/sensitivity_analysis\n:\n\n\nElement-wise pruning sensitivity-analysis:\n\n\nAlexNet (ImageNet)\n\n\nMobileNet (ImageNet)\n\n\nResNet18 (ImageNet)\n\n\nResNet20 (CIFAR10)\n\n\nResNet34 (ImageNet)\n\n\nFilter-wise pruning sensitivity-analysis:\n\n\nResNet20 (CIFAR10)\n\n\nResNet56 (CIFAR10)\n\n\n\n\n\n\n\nexamples/sensitivity-pruning\n:\n\n\nAlexNet sensitivity pruning with Iterative Pruning\n\n\nAlexNet sensitivity pruning with One-Shot Pruning\n\n\n\n\n\n\n\nexamples/ssl\n:\n\n\nResNet20 baseline training (CIFAR10 dataset)\n\n\nStructured Sparsity Learning (SSL) with layer removal on ResNet20\n\n\nSSL with channels removal on ResNet20\n\n\n\n\n\n\n\nexamples/quantization\n:\n\n\nAlexNet w. Batch-Norm (base FP32 + DoReFa)\n\n\nPre-activation ResNet20 on CIFAR10 (base FP32 + DoReFa)\n\n\nPre-activation ResNet18 on ImageNEt (base FP32 + DoReFa)\n\n\n\n\n\n\n\n\nExperiment reproducibility\n\n\nExperiment reproducibility is sometimes important. Pete Warden recently expounded about this in his \nblog\n.\n\nPyTorch's support for deterministic execution requires us to use only one thread for loading data (other wise the multi-threaded execution of the data loaders can create random order and change the results), and to set the seed of the CPU and GPU PRNGs. Using the \n--deterministic\n command-line flag and setting \nj=1\n will produce reproducible results (for the same PyTorch version).\n\n\nPerforming pruning sensitivity analysis\n\n\nDistiller supports element-wise and filter-wise pruning sensitivity analysis. In both cases, L1-norm is used to rank which elements or filters to prune. For example, when running filter-pruning sensitivity analysis, the L1-norm of the filters of each layer's weights tensor are calculated, and the bottom x% are set to zero. \n\nThe analysis process is quite long, because currently we use the entire test dataset to assess the accuracy performance at each pruning level of each weights tensor. Using a small dataset for this would save much time and we plan on assessing if this will provide sufficient results.\n\nResults are output as a CSV file (\nsensitivity.csv\n) and PNG file (\nsensitivity.png\n). 
The implementation is in \ndistiller/sensitivity.py\n and it contains further details about process and the format of the CSV file.\n\n\nThe example below performs element-wise pruning sensitivity analysis on ResNet20 for CIFAR10:\n\n\n$ python3 compress_classifier.py -a resnet20_cifar ../../../data.cifar10/ -j=1 --resume=../cifar10/resnet20/checkpoint_trained_dense.pth.tar --sense=element\n\n\n\n\nThe \nsense\n command-line argument can be set to either \nelement\n or \nfilter\n, depending on the type of analysis you want done.\n\n\nThere is also a \nJupyter notebook\n with example invocations, outputs and explanations.\n\n\n\"Direct\" Quantization Without Training\n\n\nDistiller supports 8-bit quantization of trained modules without re-training (using \nSymmetric Linear Quantization\n). So, any model (whether pruned or not) can be quantized.\n\nUse the \n--quantize\n command-line flag, together with \n--evaluate\n to evaluate the accuracy of your model after quantization. The following example qunatizes ResNet18 for ImageNet:\n\n\n$ python3 compress_classifier.py -a resnet18 ../../../data.imagenet --pretrained --quantize --evaluate\n\n\n\n\nGenerates:\n\n\nPreparing model for quantization\n--- test ---------------------\n50000 samples (256 per mini-batch)\nTest: [ 10/ 195] Loss 0.856354 Top1 79.257812 Top5 92.500000\nTest: [ 20/ 195] Loss 0.923131 Top1 76.953125 Top5 92.246094\nTest: [ 30/ 195] Loss 0.885186 Top1 77.955729 Top5 92.486979\nTest: [ 40/ 195] Loss 0.930263 Top1 76.181641 Top5 92.597656\nTest: [ 50/ 195] Loss 0.931062 Top1 75.726562 Top5 92.906250\nTest: [ 60/ 195] Loss 0.932019 Top1 75.651042 Top5 93.151042\nTest: [ 70/ 195] Loss 0.921287 Top1 76.060268 Top5 93.270089\nTest: [ 80/ 195] Loss 0.932539 Top1 75.986328 Top5 93.100586\nTest: [ 90/ 195] Loss 0.996000 Top1 74.700521 Top5 92.330729\nTest: [ 100/ 195] Loss 1.066699 Top1 73.289062 Top5 91.437500\nTest: [ 110/ 195] Loss 1.100970 Top1 72.574574 Top5 91.001420\nTest: [ 120/ 195] Loss 1.122376 Top1 72.268880 Top5 90.696615\nTest: [ 130/ 195] Loss 1.171726 Top1 71.198918 Top5 90.120192\nTest: [ 140/ 195] Loss 1.191500 Top1 70.797991 Top5 89.902344\nTest: [ 150/ 195] Loss 1.219954 Top1 70.210938 Top5 89.453125\nTest: [ 160/ 195] Loss 1.240942 Top1 69.855957 Top5 89.162598\nTest: [ 170/ 195] Loss 1.265741 Top1 69.342831 Top5 88.807445\nTest: [ 180/ 195] Loss 1.281185 Top1 69.051649 Top5 88.589410\nTest: [ 190/ 195] Loss 1.279682 Top1 69.019326 Top5 88.632812\n==> Top1: 69.130 Top5: 88.732 Loss: 1.276\n\n\n\n\nSummaries\n\n\nYou can use the sample compression application to generate model summary reports, such as the attributes and compute summary report (see screen capture below).\nYou can log sparsity statistics (written to console and CSV file), performance, optimizer and model information, and also create a PNG image of the DNN.\nCreating a PNG image is an experimental feature (it relies on features which are not available on PyTorch 3.1 and that we hope will be available in PyTorch's next release), so to use it you will need to compile the PyTorch master branch, and hope for the best ;-).\n\n\n$ python3 compress_classifier.py --resume=../ssl/checkpoints/checkpoint_trained_ch_regularized_dense.pth.tar -a=resnet20_cifar ../../../data.cifar10 --summary=compute\n\n\n\n\nGenerates:\n\n\n+----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------+\n| | Name | Type | Attrs | IFM | IFM volume | OFM | OFM volume | Weights volume | MACs 
|\n|----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------|\n| 0 | module.conv1 | Conv2d | k=(3, 3) | (1, 3, 32, 32) | 3072 | (1, 16, 32, 32) | 16384 | 432 | 442368 |\n| 1 | module.layer1.0.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 |\n| 2 | module.layer1.0.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 |\n| 3 | module.layer1.1.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 |\n| 4 | module.layer1.1.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 |\n| 5 | module.layer1.2.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 |\n| 6 | module.layer1.2.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 |\n| 7 | module.layer2.0.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 32, 16, 16) | 8192 | 4608 | 1179648 |\n| 8 | module.layer2.0.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 |\n| 9 | module.layer2.0.downsample.0 | Conv2d | k=(1, 1) | (1, 16, 32, 32) | 16384 | (1, 32, 16, 16) | 8192 | 512 | 131072 |\n| 10 | module.layer2.1.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 |\n| 11 | module.layer2.1.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 |\n| 12 | module.layer2.2.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 |\n| 13 | module.layer2.2.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 |\n| 14 | module.layer3.0.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 64, 8, 8) | 4096 | 18432 | 1179648 |\n| 15 | module.layer3.0.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 |\n| 16 | module.layer3.0.downsample.0 | Conv2d | k=(1, 1) | (1, 32, 16, 16) | 8192 | (1, 64, 8, 8) | 4096 | 2048 | 131072 |\n| 17 | module.layer3.1.conv1 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 |\n| 18 | module.layer3.1.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 |\n| 19 | module.layer3.2.conv1 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 |\n| 20 | module.layer3.2.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 |\n| 21 | module.fc | Linear | | (1, 64) | 64 | (1, 10) | 10 | 640 | 640 |\n+----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------+\nTotal MACs: 40,813,184\n\n\n\n\nUsing TensorBoard\n\n\nGoogle's \nTensorBoard\n is an excellent tool for visualizing the progress of DNN training. Distiller's logger supports writing performance indicators and parameter statistics in a file format that can be read by TensorBoard (Distiller uses TensorFlow's APIs in order to do this, which is why Distiller requires the installation of TensorFlow).\n\nTo view the graphs, invoke the TensorBoard server. For example:\n\n\n$ tensorboard --logdir=logs\n\n\n\n\nDistillers's setup (requirements.txt) installs TensorFlow for CPU. 
If you want a different installation, please follow the \nTensorFlow installation instructions\n.\n\n\nCollecting activations statistics\n\n\nIn CNNs with ReLU layers, ReLU activations (feature-maps) also exhibit a nice level of sparsity (50-60% sparsity is typical). \n\nYou can collect activation statistics using the \n--act_stats\n command-line flag.\n\nFor example:\n\n\n$ python3 compress_classifier.py -a=resnet56_cifar -p=50 ../../../data.cifar10 --resume=checkpoint.resnet56_cifar_baseline.pth.tar --act-stats=test -e\n\n\n\n\nThe \ntest\n parameter indicates that, in this example, we want to collect activation statistics during the \ntest\n phase. Note that we also used the \n-e\n command-line argument to indicate that we want to run a \ntest\n phase. The other two legal parameter values are \ntrain\n and \nvalid\n which collect activation statistics during the \ntraining\n and \nvalidation\n phases, respectively. \n\n\nCollectors and their collaterals\n\n\nAn instance of a subclass of \nActivationStatsCollector\n can be used to collect activation statistics. Currently, \nActivationStatsCollector\n has two types of subclasses: \nSummaryActivationStatsCollector\n and \nRecordsActivationStatsCollector\n.\n\nInstances of \nSummaryActivationStatsCollector\n compute the mean of some statistic of the activation. It is rather\nlight-weight and quicker than collecting a record per activation. The statistic function is configured in the constructor.\n\nIn \ncompress_classifier.py\n we create a dictionary of collectors. For example, this collector collects statistics that is stored in each relevant module, in a variable named \nsparsity\n. The lambda expression is invoked per activation encountered during forward passes, and the value it returns (in this case, the sparsity of the activation tensors) is stored in \nmodule.sparsity\n (\n\"sparsity\"\n is this collector's name).\n\n\nSummaryActivationStatsCollector(model,\n \"sparsity\",\n lambda t: 100 * distiller.utils.sparsity(t))\n\n\n\n\nTo access the statistics, you can invoke \ncollector.value()\n, or you can access each module's data directly.\n\nYou can add other statistics collectors and use a different function to compute your new statistic.\n\nAnother type of collector is \nRecordsActivationStatsCollector\n which computes a hard-coded set of activations statistics and collects a\n\nrecord per activation\n. For obvious reasons, this is slower than instances of \nSummaryActivationStatsCollector\n.\nActivationStatsCollector\n default to collecting activations statistics only on the output activations of ReLU layers, but we can choose any layer type we want. In the example below we collect statistics from outputs of \ntorch.nn.Conv2d\n layers.\n\n\nRecordsActivationStatsCollector(model, classes=[torch.nn.Conv2d])\n\n\n\n\nCollectors can write their data to Excel workbooks, by invoking \ncollector.to_xlsx(path_to_workbook)\n, which are named using the collector's name.\n\nYou can use a utility function, \ndistiller.log_activation_statsitics\n, to log the data of an \nActivationStatsCollector\n instance to one of the loggers. For an example, the code below logs the \n\"sparsity\"\n collector to a TensorBoard log file.\n\n\ndistiller.log_activation_statsitics(epoch, \"train\", loggers=[tflogger],\n collector=collectors[\"sparsity\"])\n\n\n\n\nCaveats\n\n\nDistiller collects activations statistics using PyTorch's forward-hooks mechanism. 
Collectors iteratively register the modules' forward-hooks, and collectors are called during the forward traversal and get exposed to activation data. Registering for forward callbacks is performed like this:\n\n\nmodule.register_forward_hook\n\n\n\n\nThis makes apparent two limitations of this mechanism:\n\n\n\n\nWe can only register on PyTorch modules. This means that we can't register on the forward hook of a functionals such as \ntorch.nn.functional.relu\n and \ntorch.nn.functional.max_pool2d\n.\n\n Therefore, you may need to replace functionals with their module alternative. For example: \n\n\n\n\nclass MadeUpNet(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return x\n\n\n\n\nCan be changed to: \n\n\nclass MadeUpNet(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x = self.relu(self.conv1(x))\n return x\n\n\n\n\n\n\nWe can only use a module instance once in our models. If we use the same module several times, then we can't determine which node in the graph has invoked the callback, because the PyTorch callback signature \ndef hook(module, input, output)\n doesn't provide enough contextual information.\n\nTorchVision's \nResNet\n is an example of a model that uses the same instance of nn.ReLU multiple times: \n\n\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out) # <================\n out = self.conv2(out)\n out = self.bn2(out)\n if self.downsample is not None:\n residual = self.downsample(x)\n out += residual\n out = self.relu(out) # <================\n return out\n\n\n\n\nIn Distiller we changed \nResNet\n to use multiple instances of nn.ReLU, and each instance is used only once: \n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.relu2 = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out) # <================\n out = self.conv2(out)\n out = self.bn2(out)\n if self.downsample is not None:\n residual = self.downsample(x)\n out += residual\n out = self.relu2(out) # <================\n return out\n\n\n\n\nUsing the Jupyter notebooks\n\n\nThe Jupyter notebooks contain many examples of how to use the statistics summaries generated by Distiller. 
They are explained in a separate page.\n\n\nGenerating this documentation\n\n\nInstall mkdocs and the required packages by executing:\n\n\n$ pip3 install -r doc-requirements.txt\n\n\n\n\nTo build the project documentation run:\n\n\n$ cd distiller/docs-src\n$ mkdocs build --clean\n\n\n\n\nThis will create a folder named 'site' which contains the documentation website.\nOpen distiller/docs/site/index.html to view the documentation home page.", "title": "Usage" }, { @@ -121,15 +121,20 @@ "title": "Using TensorBoard" }, { - "location": "/usage/index.html#collecting-feature-maps-statistics", + "location": "/usage/index.html#collecting-activations-statistics", "text": "In CNNs with ReLU layers, ReLU activations (feature-maps) also exhibit a nice level of sparsity (50-60% sparsity is typical). \nYou can collect activation statistics using the --act_stats command-line flag. \nFor example: $ python3 compress_classifier.py -a=resnet56_cifar -p=50 ../../../data.cifar10 --resume=checkpoint.resnet56_cifar_baseline.pth.tar --act-stats=test -e The test parameter indicates that, in this example, we want to collect activation statistics during the test phase. Note that we also used the -e command-line argument to indicate that we want to run a test phase. The other two legal parameter values are train and valid which collect activation statistics during the training and validation phases, respectively.", - "title": "Collecting feature-maps statistics" + "title": "Collecting activations statistics" }, { "location": "/usage/index.html#collectors-and-their-collaterals", "text": "An instance of a subclass of ActivationStatsCollector can be used to collect activation statistics. Currently, ActivationStatsCollector has two types of subclasses: SummaryActivationStatsCollector and RecordsActivationStatsCollector . \nInstances of SummaryActivationStatsCollector compute the mean of some statistic of the activation. It is rather\nlight-weight and quicker than collecting a record per activation. The statistic function is configured in the constructor. \nIn compress_classifier.py we create a dictionary of collectors. For example, this collector collects statistics that is stored in each relevant module, in a variable named sparsity . The lambda expression is invoked per activation encountered during forward passes, and the value it returns (in this case, the sparsity of the activation tensors) is stored in module.sparsity ( \"sparsity\" is this collector's name). SummaryActivationStatsCollector(model,\n \"sparsity\",\n lambda t: 100 * distiller.utils.sparsity(t)) To access the statistics, you can invoke collector.value() , or you can access each module's data directly. \nYou can add other statistics collectors and use a different function to compute your new statistic. \nAnother type of collector is RecordsActivationStatsCollector which computes a hard-coded set of activations statistics and collects a record per activation . For obvious reasons, this is slower than instances of SummaryActivationStatsCollector . ActivationStatsCollector default to collecting activations statistics only on the output activations of ReLU layers, but we can choose any layer type we want. In the example below we collect statistics from outputs of torch.nn.Conv2d layers. RecordsActivationStatsCollector(model, classes=[torch.nn.Conv2d]) Collectors can write their data to Excel workbooks, by invoking collector.to_xlsx(path_to_workbook) , which are named using the collector's name. 
\nYou can use a utility function, distiller.log_activation_statsitics , to log the data of an ActivationStatsCollector instance to one of the loggers. For an example, the code below logs the \"sparsity\" collector to a TensorBoard log file. distiller.log_activation_statsitics(epoch, \"train\", loggers=[tflogger],\n collector=collectors[\"sparsity\"])", "title": "Collectors and their collaterals" }, + { + "location": "/usage/index.html#caveats", + "text": "Distiller collects activations statistics using PyTorch's forward-hooks mechanism. Collectors iteratively register the modules' forward-hooks, and collectors are called during the forward traversal and get exposed to activation data. Registering for forward callbacks is performed like this: module.register_forward_hook This makes apparent two limitations of this mechanism: We can only register on PyTorch modules. This means that we can't register on the forward hook of a functionals such as torch.nn.functional.relu and torch.nn.functional.max_pool2d . \n Therefore, you may need to replace functionals with their module alternative. For example: class MadeUpNet(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return x Can be changed to: class MadeUpNet(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x = self.relu(self.conv1(x))\n return x We can only use a module instance once in our models. If we use the same module several times, then we can't determine which node in the graph has invoked the callback, because the PyTorch callback signature def hook(module, input, output) doesn't provide enough contextual information. 
\nTorchVision's ResNet is an example of a model that uses the same instance of nn.ReLU multiple times: class BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out) # <================\n out = self.conv2(out)\n out = self.bn2(out)\n if self.downsample is not None:\n residual = self.downsample(x)\n out += residual\n out = self.relu(out) # <================\n return out In Distiller we changed ResNet to use multiple instances of nn.ReLU, and each instance is used only once: class BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.relu2 = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out) # <================\n out = self.conv2(out)\n out = self.bn2(out)\n if self.downsample is not None:\n residual = self.downsample(x)\n out += residual\n out = self.relu2(out) # <================\n return out", + "title": "Caveats" + }, { "location": "/usage/index.html#using-the-jupyter-notebooks", "text": "The Jupyter notebooks contain many examples of how to use the statistics summaries generated by Distiller. 
They are explained in a separate page.", diff --git a/docs/sitemap.xml b/docs/sitemap.xml index 91a80849001f8b27ffa6bb31ecc635eb5ae9b486..535bd4a64546047d5888efed3f54bc8f83ce617f 100644 --- a/docs/sitemap.xml +++ b/docs/sitemap.xml @@ -4,7 +4,7 @@ <url> <loc>/index.html</loc> - <lastmod>2018-11-21</lastmod> + <lastmod>2018-11-24</lastmod> <changefreq>daily</changefreq> </url> @@ -12,7 +12,7 @@ <url> <loc>/install/index.html</loc> - <lastmod>2018-11-21</lastmod> + <lastmod>2018-11-24</lastmod> <changefreq>daily</changefreq> </url> @@ -20,7 +20,7 @@ <url> <loc>/usage/index.html</loc> - <lastmod>2018-11-21</lastmod> + <lastmod>2018-11-24</lastmod> <changefreq>daily</changefreq> </url> @@ -28,7 +28,7 @@ <url> <loc>/schedule/index.html</loc> - <lastmod>2018-11-21</lastmod> + <lastmod>2018-11-24</lastmod> <changefreq>daily</changefreq> </url> @@ -37,31 +37,31 @@ <url> <loc>/pruning/index.html</loc> - <lastmod>2018-11-21</lastmod> + <lastmod>2018-11-24</lastmod> <changefreq>daily</changefreq> </url> <url> <loc>/regularization/index.html</loc> - <lastmod>2018-11-21</lastmod> + <lastmod>2018-11-24</lastmod> <changefreq>daily</changefreq> </url> <url> <loc>/quantization/index.html</loc> - <lastmod>2018-11-21</lastmod> + <lastmod>2018-11-24</lastmod> <changefreq>daily</changefreq> </url> <url> <loc>/knowledge_distillation/index.html</loc> - <lastmod>2018-11-21</lastmod> + <lastmod>2018-11-24</lastmod> <changefreq>daily</changefreq> </url> <url> <loc>/conditional_computation/index.html</loc> - <lastmod>2018-11-21</lastmod> + <lastmod>2018-11-24</lastmod> <changefreq>daily</changefreq> </url> @@ -71,19 +71,19 @@ <url> <loc>/algo_pruning/index.html</loc> - <lastmod>2018-11-21</lastmod> + <lastmod>2018-11-24</lastmod> <changefreq>daily</changefreq> </url> <url> <loc>/algo_quantization/index.html</loc> - <lastmod>2018-11-21</lastmod> + <lastmod>2018-11-24</lastmod> <changefreq>daily</changefreq> </url> <url> <loc>/algo_earlyexit/index.html</loc> - <lastmod>2018-11-21</lastmod> + <lastmod>2018-11-24</lastmod> <changefreq>daily</changefreq> </url> @@ -92,7 +92,7 @@ <url> <loc>/model_zoo/index.html</loc> - <lastmod>2018-11-21</lastmod> + <lastmod>2018-11-24</lastmod> <changefreq>daily</changefreq> </url> @@ -100,7 +100,7 @@ <url> <loc>/jupyter/index.html</loc> - <lastmod>2018-11-21</lastmod> + <lastmod>2018-11-24</lastmod> <changefreq>daily</changefreq> </url> @@ -108,7 +108,7 @@ <url> <loc>/design/index.html</loc> - <lastmod>2018-11-21</lastmod> + <lastmod>2018-11-24</lastmod> <changefreq>daily</changefreq> </url> diff --git a/docs/usage/index.html b/docs/usage/index.html index e08bf5a51cdfd594f6ee493c828f399a3a3a6a16..c51abfbdab02bcca85fb0f7d90fcbb9dc8275f57 100644 --- a/docs/usage/index.html +++ b/docs/usage/index.html @@ -81,7 +81,7 @@ <li><a class="toctree-l3" href="#using-tensorboard">Using TensorBoard</a></li> - <li><a class="toctree-l3" href="#collecting-feature-maps-statistics">Collecting feature-maps statistics</a></li> + <li><a class="toctree-l3" href="#collecting-activations-statistics">Collecting activations statistics</a></li> </ul> @@ -412,7 +412,7 @@ To view the graphs, invoke the TensorBoard server. For example:</p> </code></pre> <p>Distillers's setup (requirements.txt) installs TensorFlow for CPU. 
If you want a different installation, please follow the <a href="https://www.tensorflow.org/install/install_linux">TensorFlow installation instructions</a>.</p>
-<h2 id="collecting-feature-maps-statistics">Collecting feature-maps statistics</h2>
+<h2 id="collecting-activations-statistics">Collecting activations statistics</h2>
<p>In CNNs with ReLU layers, ReLU activations (feature-maps) also exhibit a nice level of sparsity (50-60% sparsity is typical). <br>
You can collect activation statistics using the <code>--act_stats</code> command-line flag.<br>
For example:</p>
@@ -443,6 +443,96 @@ You can use a utility function, <code>distiller.log_activation_statsitics</code>
                                    collector=collectors["sparsity"])
</code></pre>

+<h3 id="caveats">Caveats</h3>
+<p>Distiller collects activations statistics using PyTorch's forward-hooks mechanism. Collectors iteratively register the modules' forward-hooks, and collectors are called during the forward traversal and get exposed to activation data. Registering for forward callbacks is performed like this:</p>
+<pre><code>module.register_forward_hook
+</code></pre>
+
+<p>This makes apparent two limitations of this mechanism:</p>
+<ol>
+<li>We can only register on PyTorch modules. This means that we can't register on the forward hook of functionals such as <code>torch.nn.functional.relu</code> and <code>torch.nn.functional.max_pool2d</code>.<br />
+ Therefore, you may need to replace functionals with their module alternative. For example: </li>
+</ol>
+<pre><code>class MadeUpNet(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(3, 6, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return x
+</code></pre>
+
+<p>Can be changed to: </p>
+<pre><code>class MadeUpNet(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(3, 6, 5)
+        self.relu = nn.ReLU(inplace=True)
+
+    def forward(self, x):
+        x = self.relu(self.conv1(x))
+        return x
+</code></pre>
+
+<ol>
+<li>We can only use a module instance once in our models.
If we use the same module several times, then we can't determine which node in the graph has invoked the callback, because the PyTorch callback signature <code>def hook(module, input, output)</code> doesn't provide enough contextual information.<br /> +TorchVision's <a href="https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py">ResNet</a> is an example of a model that uses the same instance of nn.ReLU multiple times: </li> +</ol> +<pre><code>class BasicBlock(nn.Module): + expansion = 1 + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) # <================ + out = self.conv2(out) + out = self.bn2(out) + if self.downsample is not None: + residual = self.downsample(x) + out += residual + out = self.relu(out) # <================ + return out +</code></pre> + +<p>In Distiller we changed <a href="https://github.com/NervanaSystems/distiller/blob/master/models/imagenet/resnet.py">ResNet</a> to use multiple instances of nn.ReLU, and each instance is used only once: </p> +<pre><code>class BasicBlock(nn.Module): + expansion = 1 + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu1 = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.relu2 = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + out = self.conv1(x) + out = self.bn1(out) + out = self.relu1(out) # <================ + out = self.conv2(out) + out = self.bn2(out) + if self.downsample is not None: + residual = self.downsample(x) + out += residual + out = self.relu2(out) # <================ + return out +</code></pre> + <h1 id="using-the-jupyter-notebooks">Using the Jupyter notebooks</h1> <p>The Jupyter notebooks contain many examples of how to use the statistics summaries generated by Distiller. 
They are explained in a separate page.</p> <h1 id="generating-this-documentation">Generating this documentation</h1> diff --git a/tests/test_basic.py b/tests/test_basic.py index 108de0887865e4990d12c5fce514e435ded91d62..5d57a350ff13ab4ceb239ef7a683da9377b0a6c4 100755 --- a/tests/test_basic.py +++ b/tests/test_basic.py @@ -17,23 +17,58 @@ import torch import os import sys +import common module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) import distiller import models + def test_sparsity(): - zeros = torch.zeros(2,3,5,6) + zeros = torch.zeros(2, 3, 5, 6) print(distiller.sparsity(zeros)) assert distiller.sparsity(zeros) == 1.0 assert distiller.sparsity_3D(zeros) == 1.0 assert distiller.density_3D(zeros) == 0.0 + ones = torch.ones(12, 43, 4, 6) + assert distiller.sparsity(ones) == 0.0 + x = torch.tensor([[1., 2., 0, 4., 0], + [1., 2., 0, 4., 0]]) + assert distiller.density(x) == 0.6 + assert distiller.density_cols(x, transposed=False) == 0.6 + assert distiller.sparsity_rows(x, transposed=False) == 0 + x = torch.tensor([[0., 0., 0], + [1., 4., 0], + [1., 2., 0], + [0., 0., 0]]) + assert distiller.density(x) == 4/12 + assert distiller.sparsity_rows(x, transposed=False) == 0.5 + assert common.almost_equal(distiller.sparsity_cols(x, transposed=False), 1/3) + assert common.almost_equal(distiller.sparsity_rows(x), 1/3) - ones = torch.zeros(12,43,4,6) - ones.fill_(1) - assert distiller.sparsity(ones) == 0.0 +def test_activations(): + x = torch.tensor([[[[1., 0., 0.], + [0., 2., 0.], + [0., 0., 3.]], + + [[1., 0., 2.], + [0., 3., 0.], + [4., 0., 5.]]], + + + [[[4., 0., 0.], + [0., 5., 0.], + [0., 0., 6.]], + + [[0., 6., 0.], + [7., 0., 8.], + [0., 9., 0.]]]]) + assert all(distiller.activation_channels_l1(x) == torch.tensor([21/2, 45/2])) + assert all(distiller.activation_channels_apoz(x) == torch.tensor([6/18, 9/18])) + assert all(distiller.activation_channels_means(x) == torch.tensor([21/18, 45/18])) + def test_utils(): model = models.create_model(False, 'cifar10', 'resnet20_cifar', parallel=False)
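
For reference, below is a minimal, self-contained sketch (not part of this patch) of the forward-hook mechanism that the new "Caveats" documentation describes and that the collectors rely on. It registers a hook on every leaf module of a throw-away model and records the sparsity of each module's output via distiller.sparsity(), in the spirit of the documented SummaryActivationStatsCollector. TinyNet, attach_sparsity_hooks and the input shape are illustrative assumptions, not Distiller APIs; only register_forward_hook and distiller.sparsity are real calls.

import torch
import torch.nn as nn
import distiller


class TinyNet(nn.Module):
    """Throw-away model with one module instance per graph node,
    as the caveats section recommends (relu1 and relu2 are distinct)."""
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(6, 6, 3)
        self.relu2 = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.relu1(self.conv1(x))
        x = self.relu2(self.conv2(x))
        return x


def attach_sparsity_hooks(model):
    """Register a forward-hook on every leaf module and record the sparsity
    (percentage of zeros) of its output activations after each forward pass."""
    stats = {}

    def make_hook(name):
        def hook(module, input, output):
            # distiller.sparsity() returns the fraction of zero elements in a tensor.
            stats.setdefault(name, []).append(100 * distiller.sparsity(output.data))
        return hook

    handles = [m.register_forward_hook(make_hook(name))
               for name, m in model.named_modules()
               if len(list(m.children())) == 0]   # leaf modules only
    return stats, handles


if __name__ == "__main__":
    model = TinyNet()
    stats, handles = attach_sparsity_hooks(model)
    model(torch.randn(8, 3, 32, 32))   # one forward pass triggers every hook
    for name, values in stats.items():
        print(name, values)              # e.g. relu1/relu2 typically show ~50% zeros
    for h in handles:
        h.remove()                       # always detach hooks when done

Because each nn.ReLU instance appears exactly once in the graph, the hook's module argument identifies the node unambiguously, which is exactly the property the ResNet change above restores.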