From 816a943d7b566bbe63523247009f13d9f1309677 Mon Sep 17 00:00:00 2001
From: Neta Zmora <31280975+nzmora@users.noreply.github.com>
Date: Mon, 8 Apr 2019 15:40:13 +0300
Subject: [PATCH] Refine pruning logic (#222)

Add finer control over the pruning logic to accommodate more pruning
use-cases.
The full description of the new logic is available in the updated [documentation
of the CompressionScheduler](https://nervanasystems.github.io/distiller/schedule.html#pruning-fine-control), which is also part of this PR.

In this PR:

* Added a new callback to the CompressionScheduler:
compression_scheduler.before_parameter_optimization, which is invoked
after the gradients are computed, but before the weights are updated
by the optimizer (see the training-loop sketch below).

* We provide an option to mask the gradients before the weights are updated by the optimizer.
We register a hook on each parameter's backward pass in order to mask the gradients (sketched below).
This gives us finer control over the parameter updates.

* Added several DropFilter schedules.
DropFilter is a method to regularize networks, and it can also be
used to "prepare" a network for permanent filter pruning (illustrated in the last sketch below).

* Added documentation of pruning fine-control
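
To make the ordering concrete, here is a minimal sketch of where the new
callback sits in a training loop, modeled on the flow in
examples/classifier_compression/compress_classifier.py. The toy model, data,
and schedule wiring are placeholder assumptions, not part of this PR (in
practice the scheduler's policies come from a YAML schedule):

```python
import torch
import torch.nn as nn
import distiller

model = nn.Linear(10, 2)                                   # toy stand-in model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
criterion = nn.CrossEntropyLoss()
# Placeholder: in practice, policies are attached via a YAML schedule
compression_scheduler = distiller.CompressionScheduler(model)

num_epochs, steps_per_epoch = 1, 100
for epoch in range(num_epochs):
    compression_scheduler.on_epoch_begin(epoch)
    for minibatch_id in range(steps_per_epoch):
        inputs, target = torch.randn(32, 10), torch.randint(0, 2, (32,))
        compression_scheduler.on_minibatch_begin(epoch, minibatch_id, steps_per_epoch)
        loss = criterion(model(inputs), target)
        optimizer.zero_grad()
        loss.backward()
        # New callback: the gradients exist, but the optimizer has not yet stepped
        compression_scheduler.before_parameter_optimization(epoch, minibatch_id,
                                                            steps_per_epoch, optimizer)
        optimizer.step()
        compression_scheduler.on_minibatch_end(epoch, minibatch_id, steps_per_epoch)
    compression_scheduler.on_epoch_end(epoch, optimizer)
```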
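
The gradient masking relies on PyTorch's per-tensor backward hooks: a hook
registered on a parameter receives its gradient, and the tensor the hook
returns replaces that gradient. A standalone illustration, assuming only
PyTorch (the mask here is an arbitrary example, not a real pruning mask):

```python
import torch

param = torch.nn.Parameter(torch.randn(4, 4))
mask = (torch.rand(4, 4) > 0.5).float()        # hypothetical 0/1 pruning mask

# Equivalent in spirit to ParameterMasker.mask_gradient
handle = param.register_hook(lambda grad: grad * mask)

loss = (param ** 2).sum()
loss.backward()
# Gradient entries under a zero mask are zero, so the optimizer
# will not update those (pruned) weights
assert torch.all(param.grad[mask == 0] == 0)

handle.remove()        # PruningPolicy.on_epoch_end removes the hook similarly
```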
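
Finally, the essence of DropFilter is sampling a per-filter Bernoulli
keep/drop decision and rescaling the surviving filters to compensate, as the
BernoulliFilterPruner does. A minimal sketch, where keep_prob is a
hypothetical hyper-parameter (the real schedules configure this via YAML):

```python
import torch

def drop_filters(conv_weights, keep_prob=0.9):
    """Randomly zero whole filters and rescale the survivors.

    conv_weights: 4-D weight tensor of shape (num_filters, channels, k, k).
    """
    num_filters = conv_weights.size(0)
    binary_map = torch.bernoulli(torch.full((num_filters,), keep_prob))
    mask = binary_map.view(-1, 1, 1, 1).to(conv_weights.device)
    # Compensate for the dropped filters (cf. "Compensate for dropping filters")
    pruning_factor = binary_map.sum() / num_filters
    return conv_weights * mask / pruning_factor

dropped = drop_filters(torch.randn(16, 3, 3, 3))
```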
---
 distiller/policy.py                           | 105 +++++---
 distiller/pruning/ranked_structures_pruner.py |   1 -
 distiller/scheduler.py                        |  55 ++--
 distiller/sensitivity.py                      |   2 +-
 docs-src/README.md                            |   2 +-
 docs-src/docs/earlyexit.md                    |  45 ----
 .../imgs/pruning_algorithm_pseudo_code.png    | Bin 0 -> 140746 bytes
 docs-src/docs/imgs/pruning_masking.png        | Bin 0 -> 194351 bytes
 docs-src/docs/schedule.md                     | 122 +++++++++
 docs-src/mkdocs.yml                           |   2 +-
 docs/earlyexit.html                           | 253 ------------------
 docs/index.html                               |   2 +-
 docs/schedule.html                            | 123 +++++++++
 docs/search/search_index.json                 |   2 +-
 docs/sitemap.xml                              |  34 +--
 docs/sitemap.xml.gz                           | Bin 205 -> 205 bytes
 docs/tutorial-lang_model.html                 |   4 +-
 .../compress_classifier.py                    |   3 +
 .../plain20_cifar_dropfilter_training.yaml    |  71 +++++
 ...ar_dropfilter_training_regularization.yaml |  71 +++++
 .../resnet20_cifar_randomlevel_training.yaml  |  87 ++++++
 21 files changed, 605 insertions(+), 379 deletions(-)
 delete mode 100644 docs-src/docs/earlyexit.md
 create mode 100755 docs-src/docs/imgs/pruning_algorithm_pseudo_code.png
 create mode 100755 docs-src/docs/imgs/pruning_masking.png
 delete mode 100644 docs/earlyexit.html
 create mode 100755 examples/drop_filter/plain20_cifar_dropfilter_training.yaml
 create mode 100755 examples/drop_filter/plain20_cifar_dropfilter_training_regularization.yaml
 create mode 100755 examples/drop_filter/resnet20_cifar_randomlevel_training.yaml

diff --git a/distiller/policy.py b/distiller/policy.py
index db45e6f..b918ffa 100755
--- a/distiller/policy.py
+++ b/distiller/policy.py
@@ -23,7 +23,7 @@
 import torch
 import torch.optim.lr_scheduler
 from collections import namedtuple
-
+#from functools import partial
 import logging
 msglogger = logging.getLogger()
 
@@ -65,6 +65,12 @@ class ScheduledTrainingPolicy(object):
         """
         pass
 
+    def before_parameter_optimization(self, model, epoch, minibatch_id, minibatches_per_epoch,
+                                      zeros_mask_dict, meta, optimizer):
+        """The mini-batch training pass has completed the backward-pass,
+        and the optimizer is about to update the weights."""
+        pass
+
     def on_minibatch_end(self, model, epoch, minibatch_id, minibatches_per_epoch, zeros_mask_dict, optimizer):
         """The mini-batch training pass has ended"""
         pass
@@ -100,64 +106,89 @@ class PruningPolicy(ScheduledTrainingPolicy):
         """
         super(PruningPolicy, self).__init__(classes, layers)
         self.pruner = pruner
-        self.levels = None
-        self.keep_mask = False
-        self.mini_batch_pruning_frequency = 0
-        self.mask_on_forward_only = False
-        self.use_double_copies = False
-        if pruner_args is not None:
-            if 'levels' in pruner_args:
-                self.levels = pruner_args['levels']
-            self.keep_mask = pruner_args.get('keep_mask', False)
-            self.mini_batch_pruning_frequency = pruner_args.get('mini_batch_pruning_frequency', 0)
-            self.mask_on_forward_only = pruner_args.get('mask_on_forward_only', False)
-            self.use_double_copies = pruner_args.get('use_double_copies', False)
+        # Copy external policy configuration, if available
+        if pruner_args is None:
+            pruner_args = {}
+        self.levels = pruner_args.get('levels', None)
+        self.keep_mask = pruner_args.get('keep_mask', False)
+        self.mini_batch_pruning_frequency = pruner_args.get('mini_batch_pruning_frequency', 0)
+        self.mask_on_forward_only = pruner_args.get('mask_on_forward_only', False)
+        self.mask_gradients = pruner_args.get('mask_gradients', False)
+        if self.mask_gradients and not self.mask_on_forward_only:
+            raise ValueError("mask_gradients requires mask_on_forward_only to be set")
+        self.backward_hook_handle = None   # The backward-callback handle
+        self.use_double_copies = pruner_args.get('use_double_copies', False)
+        self.discard_masks_at_minibatch_end = pruner_args.get('discard_masks_at_minibatch_end', False)
+        self.skip_first_minibatch = pruner_args.get('skip_first_minibatch', False)
+        # Initialize state
         self.is_last_epoch = False
-        self.mini_batch_id = 0          # The ID of the mini_batch within the present epoch
-        self.global_mini_batch_id = 0   # The ID of the mini_batch within the present training session
+        self.is_initialized = False
 
     def on_epoch_begin(self, model, zeros_mask_dict, meta, **kwargs):
         msglogger.debug("Pruner {} is about to prune".format(self.pruner.name))
-        self.mini_batch_id = 0
         self.is_last_epoch = meta['current_epoch'] == (meta['ending_epoch'] - 1)
-        self.is_first_epoch = meta['current_epoch'] == meta['starting_epoch']
         if self.levels is not None:
             self.pruner.levels = self.levels
 
-        if self.is_first_epoch:
-            self.global_mini_batch_id = 0
-
         meta['model'] = model
+        is_initialized = self.is_initialized
         for param_name, param in model.named_parameters():
-            if self.mask_on_forward_only and self.is_first_epoch:
-                zeros_mask_dict[param_name].use_double_copies = self.use_double_copies
-                zeros_mask_dict[param_name].mask_on_forward_only = self.mask_on_forward_only
-            self.pruner.set_param_mask(param, param_name, zeros_mask_dict, meta)
+            if not is_initialized:
+                # Initialize the maskers
+                masker = zeros_mask_dict[param_name]
+                masker.use_double_copies = self.use_double_copies
+                masker.mask_on_forward_only = self.mask_on_forward_only
+                # Register a backward hook on the parameter to mask its gradients
+                if self.mask_gradients:
+                    masker.backward_hook_handle = param.register_hook(masker.mask_gradient)
+                self.is_initialized = True
+                if not self.skip_first_minibatch:
+                    self.pruner.set_param_mask(param, param_name, zeros_mask_dict, meta)
+            else:
+                self.pruner.set_param_mask(param, param_name, zeros_mask_dict, meta)
 
     def on_minibatch_begin(self, model, epoch, minibatch_id, minibatches_per_epoch,
                            zeros_mask_dict, meta, optimizer=None):
-        self.mini_batch_id += 1
-        self.global_mini_batch_id += 1
-        if (self.mini_batch_pruning_frequency != 0 and
-           self.global_mini_batch_id % self.mini_batch_pruning_frequency == 0):
-            for param_name, param in model.named_parameters():
-                self.pruner.set_param_mask(param, param_name, zeros_mask_dict, meta)
+        set_masks = False
+        global_mini_batch_id = epoch * minibatches_per_epoch + minibatch_id
+        if ((minibatch_id > 0) and
+            (self.mini_batch_pruning_frequency != 0) and
+            (global_mini_batch_id % self.mini_batch_pruning_frequency == 0)):
+            # This is _not_ the first mini-batch of a new epoch (performed in on_epoch_begin)
+            # and a pruning step is scheduled
+            set_masks = True
+
+        if self.skip_first_minibatch and global_mini_batch_id == 1:
+            # Because we skipped the first mini-batch of the first epoch (global_mini_batch_id == 0)
+            set_masks = True
 
         for param_name, param in model.named_parameters():
+            if set_masks:
+                self.pruner.set_param_mask(param, param_name, zeros_mask_dict, meta)
             zeros_mask_dict[param_name].apply_mask(param)
 
-    def on_minibatch_end(self, model, epoch, minibatch_id, minibatches_per_epoch, zeros_mask_dict, optimizer):
+    def before_parameter_optimization(self, model, epoch, minibatch_id, minibatches_per_epoch,
+                                      zeros_mask_dict, meta, optimizer):
         for param_name, param in model.named_parameters():
-            zeros_mask_dict[param_name].remove_mask(param)
+            zeros_mask_dict[param_name].revert_weights(param)
+
+    def on_minibatch_end(self, model, epoch, minibatch_id, minibatches_per_epoch, zeros_mask_dict, optimizer):
+        if self.discard_masks_at_minibatch_end:
+            for param_name, param in model.named_parameters():
+                zeros_mask_dict[param_name].mask = None
 
     def on_epoch_end(self, model, zeros_mask_dict, meta):
         """The current epoch has ended"""
-        is_last_epoch = meta['current_epoch'] == (meta['ending_epoch'] - 1)
-        if self.keep_mask and is_last_epoch:
+        if self.is_last_epoch:
             for param_name, param in model.named_parameters():
-                zeros_mask_dict[param_name].use_double_copies = False
-                zeros_mask_dict[param_name].mask_on_forward_only = False
-                zeros_mask_dict[param_name].apply_mask(param)
+                masker = zeros_mask_dict[param_name]
+                if self.keep_mask:
+                    masker.use_double_copies = False
+                    masker.mask_on_forward_only = False
+                    masker.mask_tensor(param)
+                if masker.backward_hook_handle is not None:
+                    masker.backward_hook_handle.remove()
+                    masker.backward_hook_handle = None
 
 
 class RegularizationPolicy(ScheduledTrainingPolicy):
diff --git a/distiller/pruning/ranked_structures_pruner.py b/distiller/pruning/ranked_structures_pruner.py
index f813b21..39e8e99 100755
--- a/distiller/pruning/ranked_structures_pruner.py
+++ b/distiller/pruning/ranked_structures_pruner.py
@@ -505,7 +505,6 @@ class BernoulliFilterPruner(RankedStructureParameterPruner):
         if binary_map is None:
             binary_map = torch.bernoulli(torch.as_tensor([keep_prob] * num_filters))
         mask, _ = mask_from_filter_order(None, param, num_filters, binary_map)
-        # mask = mask.detach()
         mask = mask.to(param.device)
         # Compensate for dropping filters
         pruning_factor = binary_map.sum() / num_filters
diff --git a/distiller/scheduler.py b/distiller/scheduler.py
index 7104ac1..bf0b937 100755
--- a/distiller/scheduler.py
+++ b/distiller/scheduler.py
@@ -19,14 +19,13 @@
 This implements the scheduling of the compression policies.
 """
 import contextlib
-from functools import partial
 import logging
-
 import torch
 from .quantization.quantizer import FP_BKP_PREFIX
 from .policy import PolicyLoss, LossComponent
 from .utils import model_device, normalize_module_name
 msglogger = logging.getLogger()
+import distiller
 
 
 class ParameterMasker(object):
@@ -38,28 +37,36 @@ class ParameterMasker(object):
         self.use_double_copies = False
         self.mask_on_forward_only = False
         self.unmasked_copy = None
+        self.backward_hook_handle = None
 
-    def apply_mask(self, tensor):
-        """Apply a mask on the weights tensor."""
+    def apply_mask(self, parameter):
+        """Apply a mask on the weights tensor (parameter)."""
         if self.mask is None:
             msglogger.debug('No mask for parameter {0}'.format(self.param_name))
             return
-        msglogger.debug('Masking parameter {0}'.format(self.param_name))
         if self.use_double_copies:
-            self.unmasked_copy = tensor.clone()
-        tensor.data.mul_(self.mask)
+            self.unmasked_copy = parameter.clone().detach()
+        self.mask_tensor(parameter)
         if self.is_regularization_mask:
             self.mask = None
-        return tensor
+        return parameter
 
-    def remove_mask(self, tensor):
-        if self.mask is None:
-            msglogger.debug('No mask for parameter {0}'.format(self.param_name))
-            return
-        if not self.use_double_copies:
+    def mask_tensor(self, tensor):
+        if self.mask is not None:
+            tensor.data.mul_(self.mask)
+
+    def mask_gradient(self, gradient):
+        if self.mask is not None:
+            return gradient.mul(self.mask)
+
+    def revert_weights(self, parameter):
+        if not self.use_double_copies or self.unmasked_copy is None:
             msglogger.debug('Parameter {0} does not maintain double copies'.format(self.param_name))
             return
-        tensor.data = self.unmasked_copy.data
+        #msglogger.info('Parameter {} before {}'.format(self.param_name, distiller.sparsity(parameter)))
+        parameter.data.copy_(self.unmasked_copy)
+        #msglogger.info('Parameter {} after {}'.format(self.param_name, distiller.sparsity(parameter)))
+        self.unmasked_copy = None
 
 
 def create_model_masks_dict(model):
@@ -81,6 +88,7 @@ class CompressionScheduler(object):
         self.policies = {}
         self.sched_metadata = {}
         self.zeros_mask_dict = {}
+        # Create the masker objects and place them in a dictionary indexed by the parameter name
         for name, param in self.model.named_parameters():
             masker = ParameterMasker(name)
             self.zeros_mask_dict[name] = masker
@@ -140,6 +148,14 @@ class CompressionScheduler(object):
 
         return overall_loss
 
+    def before_parameter_optimization(self, epoch, minibatch_id, minibatches_per_epoch, optimizer):
+        if epoch in self.policies:
+            for policy in self.policies[epoch]:
+                meta = self.sched_metadata[policy]
+                meta['current_epoch'] = epoch
+                policy.before_parameter_optimization(self.model, epoch, minibatch_id, minibatches_per_epoch,
+                                                     self.zeros_mask_dict, meta, optimizer)
+
     def on_minibatch_end(self, epoch, minibatch_id, minibatches_per_epoch, optimizer=None):
         # When we get to this point, the weights are no longer masked.  This is because during the backward
         # pass, the weights may have been updated.  This is true even when the gradients are zero, for some
@@ -148,7 +164,7 @@ class CompressionScheduler(object):
         #
         # Therefore we choose to always apply the pruning mask.  In the future we may optimize this by applying
         #  the mask only if some policy is actually using the mask.
-        self.apply_mask(is_forward=False)
+        self.mask_all_weights(is_forward=False)
         if epoch in self.policies:
             for policy in self.policies[epoch]:
                 policy.on_minibatch_end(self.model, epoch, minibatch_id, minibatches_per_epoch,
@@ -162,13 +178,14 @@ class CompressionScheduler(object):
                 meta['optimizer'] = optimizer
                 policy.on_epoch_end(self.model, self.zeros_mask_dict, meta)
 
-    def apply_mask(self, is_forward=True):
+    def mask_all_weights(self, is_forward=True):
         for name, param in self.model.named_parameters():
             try:
-                if is_forward or not self.zeros_mask_dict[name].mask_on_forward_only:
+                masker = self.zeros_mask_dict[name]
+                if is_forward or not masker.mask_on_forward_only:
                     # When we mask on forward-pass only, we allow the gradients to change
                     # the weights.
-                    self.zeros_mask_dict[name].apply_mask(param)
+                    masker.mask_tensor(param)
             except KeyError:
                 # Quantizers for training might modify model parameters in a couple of ways:
                 #   1. By adding a prefix to the parameter tensor name
@@ -220,7 +237,7 @@ class CompressionScheduler(object):
             loaded_masks = state['masks_dict']
         except KeyError as exception:
             msglogger.error('could not load the CompressionScheduler state.'
-                ' masks_dict is missing from state')
+                            ' masks_dict is missing from state')
             with contextlib.suppress(TypeError):
                 msglogger.debug('Scheduler state keys are: {}'.format(', '.join(state)))
             raise
diff --git a/distiller/sensitivity.py b/distiller/sensitivity.py
index 591f509..20d2ccb 100755
--- a/distiller/sensitivity.py
+++ b/distiller/sensitivity.py
@@ -111,7 +111,7 @@ def perform_sensitivity_analysis(model, net_params, sparsities, test_func, group
 
             # Compute the pruning mask per the pruner and apply the mask on the weights
             scheduler.on_epoch_begin(0)
-            scheduler.apply_mask()
+            scheduler.mask_all_weights()
 
             # Test and record the performance of the pruned model
             prec1, prec5, loss = test_func(model=model_cpy)
diff --git a/docs-src/README.md b/docs-src/README.md
index e1b6bb5..32683ec 100755
--- a/docs-src/README.md
+++ b/docs-src/README.md
@@ -11,5 +11,5 @@ $ pip3 install -r doc-requirements.txt
 $ cd distiller/docs-src
 $ mkdocs build --clean
 ```
-This will create a folder named 'site' which contains the documentation website.
+This will create a folder named '../docs/site' which contains the documentation website.
 Open distiller/docs/site/index.html to view the documentation home page.
diff --git a/docs-src/docs/earlyexit.md b/docs-src/docs/earlyexit.md
deleted file mode 100644
index e28d21f..0000000
--- a/docs-src/docs/earlyexit.md
+++ /dev/null
@@ -1,45 +0,0 @@
-# Early Exit Inference
-While Deep Neural Networks benefit from a large number of layers, it's often the case that many datapoints in classification tasks can be classified accurately with much less work. There have been several studies recently regarding the idea of exiting before the normal endpoint of the neural network. Panda et al in [Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition](#panda) points out that a lot of data points can be classified easily and require less processing than some more difficult points and they view this in terms of power savings. Surat et al in [BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks](#branchynet) look at a selective approach to exit placement and criteria for exiting early.
-
-## Why Does Early Exit Work?
-Early Exit is a strategy with a straightforward and easy to understand concept Figure #fig(boundaries) shows a simple example in a 2-D feature space. While deep networks can representative more complex and expressive boundaries between classes (assuming we’re confident of avoiding over-fitting the data), it’s also clear that much of the data can be properly classified with even the simplest of classification boundaries.
-
-![Figure !fig(boundaries): Simple and more expressive classification boundaries](/docs-src/docs/imgs/decision_boundary.png)
-
-Data points far from the boundary can be considered "easy to classify" and achieve a high degree of confidence quicker than do data points close to the boundary. In fact, we can think of the area between the outer straight lines as being the region that is "difficult to classify" and require the full expressiveness of the neural network to accurately classify it.
-
-## Example code for Early Exit
-Both CIFAR10 and Imagenet code comes directly from publically available examples from Pytorch. The only edits are the exits that are inserted in a methodology similar to BranchyNet work.
-
-Deeper networks can benefit from multiple exits. Our examples illustrate both a single and a pair of early exits for CIFAR10 and Imagenet, respectively.
-
-Note that this code does not actually take exits. What it does is to compute statistics of loss and accuracy assuming exits were taken when criteria are met. Actually implementing exits can be tricky and architecture dependent and we plan to address these issues.
-
-### Heuristics
-The insertion of the exits are ad-hoc, but there are some heuristic principals guiding their placement and parameters. The earlier exits are placed, the more agressive the exit as it essentially prunes the rest of the network at a very early stage, thus saving a lot of work. However, a diminishing percentage of data will be directed through the exit if we are to preserve accuracy.
-
-There are other benefits to adding exits in that training the modified network now has backpropagation losses coming from the exits that affect the earlier layers more substantially than the last exit. This effect mitigates problems such as vanishing gradient.
-
-### Early Exit Hyperparameters
-There are two parameters that are required to enable early exit. Leave them undefined if you are not enabling Early Exit:
-
-1. **--earlyexit_thresholds** defines the
-thresholds for each of the early exits. The cross entropy measure must be **less than** the specified threshold to take a specific exit, otherwise the data continues along the regular path. For example, you could specify "--earlyexit_thresholds 0.9 1.2" and this implies two early exits with corresponding thresholds of 0.9 and 1.2, respectively to take those exits.
-
-1. **--earlyexit_lossweights** provide the weights for the linear combination of losses during training to compute a signle, overall loss. We only specify weights for the early exits and assume that the sum of the weights (including final exit) are equal to 1.0. So an example of "--earlyexit_lossweights 0.2 0.3" implies two early exits weighted with values of 0.2 and 0.3, respectively and that the final exit has a value of 1.0-(0.2+0.3) = 0.5. Studies have shown that weighting the early exits more heavily will create more agressive early exits, but perhaps with a slight negative effect on accuracy.
-
-### Output Stats
-The example code outputs various statistics regarding the loss and accuracy at each of the exits. During training, the Top1 and Top5 stats represent the accuracy should all of the data be forced out that exit (in order to compute the loss at that exit). During inference (i.e. validation and test stages), the Top1 and Top5 stats represent the accuracy for those data points that could exit because the calculated entropy at that exit was lower than the specified threshold for that exit.
-
-### CIFAR10
-In the case of CIFAR10, we have inserted a single exit after the first full layer grouping. The layers on the exit path itself includes a convolutional layer and a fully connected layer. If you move the exit, be sure to match the proper sizes for inputs and outputs to the exit layers.
-
-### Imagenet
-This supports training and inference of the imagenet dataset via several well known deep architectures. ResNet-50 is the architecture of interest in this study, however the exit is defined in the generic resnet code and could be used with other size resnets. There are two exits inserted in this example. Again, exit layers must have their sizes match properly.
-
-## References
-<div id="panda"></div> **Priyadarshini Panda, Abhronil Sengupta, Kaushik Roy**.
-    [*Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition*](https://arxiv.org/abs/1509.08971v6), arXiv:1509.08971v6, 2017.
-
-<div id="branchynet"></div> **Surat Teerapittayanon, Bradley McDanel, H. T. Kung**.
-    [*BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks*](http://arxiv.org/abs/1709.01686), arXiv:1709.01686, 2017.
diff --git a/docs-src/docs/imgs/pruning_algorithm_pseudo_code.png b/docs-src/docs/imgs/pruning_algorithm_pseudo_code.png
new file mode 100755
index 0000000000000000000000000000000000000000..23cdc8e068d7647619e06f5076587b6a704ec5b9
GIT binary patch
literal 140746
[... base85-encoded image data omitted ...]
zl{Ngk3awSsBR-4ridXt>Iwws1F2pj$f$gExmzH>n0?qdsycew=3;%VDR?YCmP-FaX
zdC}ZZsmoFkFsQd*_L?HeJ^D67^Qc<cq6I;<cwt_>HShusyu@XYD|M3g{}IQb)k6XN
z2oWZM3NW{h8Uhf_E_XU?e|~-bWqFO_8ma6{uj$Eh5W^_KHD$LawMN^(IQc9v<f-C-
zFfaAY@nXl@GhcIoCVrCE15EeXa?dna@z^a7HbFZ?&n^-CkTfi%_C4G%NcCfAZ;tkt
zw?B&`%ui>>z7lb1AFF%Rb^qVhl5(LmOMyfYtgA5dVM-?R2Y^?Vreq2TR^SYtE^N6@
z%TT+4g{B=a=g*D>l(c#7xPo<U13>ULfG02wLJ1LSmd8)$`cW1)7SomTrc{B9cZ)nT
zsw*)ZP{s=e0gXl-WnLZJm`xJvt@H%=d5I>61k<5%Vh%zwxf`e{wkv4ORsEXvJ`)?j
zwErUOV6wkQQU`F4RRZ&ZR>S`6E=E*l6wwvTw4D<Xq9QPy^<yejY_uuS7Lak;fd1sX
z_9$IJ7i$Af^uX&(BK9+PF-_`$l$l6|!L9y}=7eIuZMW6w1ec@wxOfN{x#Av%T?=<w
zbqFB<@}oCTS)kr%x-y;nY}(8C48i8e@_=58a;30_B+Z*7ls`a3pBW}@n68xrr2n|&
zFdAx@HBcKi-Ifq*l>Hot!<-+&n@=!{UentJQ*BzwU~4_4-vtH-i_|4MJ$#CL-^HsB
zGPLc$25vUCr3H-U#f$s(;>Owji-7f?zFywVSi4m}UhoL=(PPM|^x~$qw5H7Zhl_wt
zfXoZ7(*q9Qt)XGym;gfd&A6&cu=d$6v`NH4Ynsd4-RF55SAZkh22k;q@R&it!0i@}
zCybTaBpTwYEigRF*6K|3^|JBc?_UtbZQYU`IjcHGdL){?p$Ap%$}ysYp79pF{%WJh
ze%s-7oU>)D2yPV&i4p^0S>c_Clyamle!NuIJv-TZI?l**duYz@1F~x6o;$#r+d83w
z%l1;hcE1P^{gblV{nOx}KU=}Z{$xC;vt~z8wI*ZnP@5$jB78NsjnaHIw}9<mr&JL3
zoZX7<NxNbbfGcu8YKB43XbpCKDoJ2i>Z;ieMq<qmCxo1mY}yLv1{zSO0i)mEI5p(h
zRJu?&WbaAN6Tosys{;Ee<B%~j6<&fVpK~J6TsGu0bNj(15rTAh+1k68of05vTLfg4
znsI?OS9wEx{&*kI&)oXbqwNziy_2;=ElM1XVkQ7wE$MiL5+1y!_|)17@W&g?Q7~bc
zo(x~lE6q@v4n_}53C&|%I1Q6^!j-8?XD3#yMtTPMKVC=fPY_s5O80ZS%ADOgOwx-<
z5e<YALLu`p!}6M!zxN?@mQlw0jq7u+?`Fv5DGk|wFh!Eta5e}7xjTosNmox#WCI!T
z#hWn5-j<emP@<pw1+QoMFj)X8QS`hD#yKVCNBA*WXZw3P`+Nycd2!<6FUDvtR79?J
z`dbV)gd_8)DWI=O45e|TrF>dICz<}z0edASd(1g>cMZk9JpvZSURIjl2Uoo~7!HV7
zb{1mksq@}5Ni*lES_%!9#PT$#RLIyZv?ec=_Bt0nP`yX{u3L8PQpjgzHgBt6dAku+
z08AY4%YX3nzh3HLN(aSM6>4*l6%7}LQk5W;&_|dL6g2N<m&X9Gi<v0y3Vh#%ETa@A
zr`(8VWSUc)koCQ#bg^P6?vNPok`;TXLAg@XwtVN<{@y0xdYW~c(_9#(-qRl`5Zxe?
zeJ%)1iNo~{&{s%3la*S){#mbjDga)kw7bKp%P=1E8hZsIV6xizqtR-(v4ruu>4mkp
z_HXT7bA+tMu5|Dmro=T%N0?sYs70e=EiS2~XM2W1A^u9C!J$m9`Kjsb9++j3#)1Wg
zUHv4W$+qnrd5SE`k)*Zv8ZLRhQJXc}xt`^|sf?T-)i3WGVsA5oMp2A8GXZ5hp{?An
zS6lgcA9PaauUQZONVrU~a#5$MuoCB;Vr79viU_xZhUrl~kc2JRiJg=ctX|W(1$0u5
zvW!Dv`@dM7GL+$@r(yobBN44B=5-xI__ev()<$+xo)e*JRm0yZGT46am=r?q^xS&C
zMTcYd^W*2ZpRL&2`q4^lW+jE|HTD#?GKlNn=%BkybSfm})NMXJd_)K?BDoBP8Ffse
zQ=QV;QZ$G1S!V1tZ_7Ulyy9?E-f(+I&?gcN>6XIzB;K@q;iR#Y3psSP6W8%DA98J$
z?o2=uqnU@c-M+9;{YGu|hOk*sf+P^It<a-;W%Gb#dt59*WJkQXyJd@HYvDZYq;&rG
zwQ--kHb?=h%Go>fq$&zup2Wurqu#87RZCwxku*=re+1q(;?$nVVqQ9e&=x$jQu>3T
zOz-m#*4%8%SSZpk^u|W^g{>c&0l##f#Z18;BrDBLtI|~5p{ilnS`7(c_g~XC@U)MQ
zo|nd}ey{La1d}Zb<PA3qyi31=NdSXwM>*3;(65_58-uor#H~2;h!DMuLuHUh!o~7I
zaXQ4>x^H)!osXa+_*dz%HEl%~;lCwVQ(sWVdyMOc=TxeExe|Cgx3Xry8p2Tq;Hp~s
zOzJb#)G5G4>lAI?>N#=s0gk$(I5c7RP+VpEbz+c_6h$Xeb6z+7#YhJ4`N*1gO(Pu^
zeXq>N21Xj_{3_>*6HAQB+jYY$Q(##8v}wk39?`LZBW2Oe-6@=&;P{PG-+h{k9oO`V
z7~EQ=GSiLU#HjDF<Ps_rVpQ?_3SrTGB_kD{wEEFd!q3hZR{yNNKR>0~P#X8CskU@#
zQH9X&y3crHs-kI{Mu7@r19XiDUAjq!H^C}EpgzH7-w#6=qlIes)=}>Gv-v$$g#?p9
zm#XE4aaUh2Wr#RAl~@4}nIezT)yt}zfB2HsMtH_fuRHx`z_@;p;(oz=jz6uys`7FL
zrxPurWn?s^;)_|fwU@a@VA6_x0(e6V4?LoF!K~H!^IXR5Gnw~1)UO=JLWkVjC6w8{
zqX+_C-;3BVo2crl=-ZC(7yVY&18+xL3$E;!J<M*oN(_?tJ#C|>MqXW+`LW`jc&rE|
zlB0=CA^c#EY=3=@XPV_wPQ)w7PFJ`>P><I0<3a(C?}qR4znTwI+g%P1WXYTRXc=~V
zD%!QQl%EvKF}PBunG`5Nfg+<KXWxVDir>{St+?X0X76Oe+wVu|*i1!*_LCpeO=@Ds
z!^1pFJ!3_LV(0}Pe?Oyq8<k4``_eZ^RnZhlHSb9$&?N4J?+meFu6_dqRHne<T(dx6
zljHVC#8`R{wC6VGiS-8Wgf)e$)h9=}WlEA7ECC8*^OikF+X&B(!u?}4?2Z~b%4p|_
z_~=#Xk%$QL{8^)_C$2_!ceSz3kFhDV+XI&2>b*z3Dk$)sV9d^KqR8{m-8tp>uM+NI
z{|@1WVL{xGU;3>1jR9cpm<`}-I;@qzAcyjt*6tRwwLrEfqoPlC1L@s{e%CEZ=?X{(
zkDV~Tj|T4YQDQ3~JZnGL08RrKoQfA;ZM_wOBk#h^8%nz8uBgx)bEvt$zYKs_B3}&z
zkGX|7Y7a4(z&horpTo=w9^DzWKcpa~cU3o!yzcwsTZeh6*W5&+rxGmDS`}npOPK1V
z6#G1&FDYUK?tiWW(k{2&`RL^%cvS74Pp^&or`D6H^UpLPq&05W(ZnnNRzuWP$kN~t
zjd&Q)qqe+v=CW_~oE%VJYpPLd<kN!7ePB<;6jM1r=pz7!w)S>VKZx0muC`!P-zk;h
z>NRP$0``exr5LJ|yjv<{bYQcPlU$@Fap(k-@KwDG?Ev6S(?;tnX6Y5VCCz|<D)?#T
zp7;aQ8SnMEp2n?oM^#T#;9lgs0p;6PD@;G}S!if~#o&N7mEFi>?bTMZ{()E$V?{3A
z?}!w#ld!9I#C62`>v5h7R3C<wo6v}pPqtT0VZ{)!zgPecdBHiU@C_9qEJ&7(xU~rQ
zo|oNU1T_Z194Hy&8<9mvQ=RvH>U*3Mgo=BDmFe7`3oqFN%DUBGNt=VSP*Rh4mKSn0
zFN!=0bF!SlWHx*fro6l}<Fz^B#UuG{@5axpV&%CCPs2ysor-iPS(OB7Rpb2>he}m8
zZKZyz4#^1sL7~LXUH2S^HA}q}_9%^=*+4xr=QuW2uc%<-BHIqwTeq6y=DMt9_ItbA
z^5cC1&>ZW1ZTD)qR`%_gQUnr-OszxL21fdUi<8x)dPDLyz;w8-o+ch;%0)<oOmsI#
z^A{PEf0+Y=yTM6owE<>NDp&_7k*e&vK4}$LXxdpHvDzKCt>G2hji`-Zxn#c*QoQG%
zQp{HJo6H!5W3cNEGNek0xo#F;<hA?YUQ3Lxjjj0PdqoAJ%*KEva4zkN=fOZwC(*_1
zU>U;D7(+gJ9+eG*>UgEcT;vZTZT!(Mf!ZI<zeitT5E2qo@!u1b6dU`M=-$38)-nk)
z$4)&Ud~gRjbKB<Tg7+mxT<y9&7t8ven+nq{gCVHzU)>EaX{1sGz7|W~H};1J$la~}
z-uIWl1#Dg1r!vheG8KHe8lEH^`0F*#3sHYC9!$X-(t|K5)#@UbWoXG7P6HtDDa1y|
zep>Va2A`V>oK@?f4&^3I$r^r0>{uOPv|6h7wU1>L@iJ(i1NwNd({9&enk&3ov)ofa
zzD|xyt}}~;0<5J=n3;c@NdI?a5x+CYidJ6m<9j0Y==-ZCxrAVZq$a}9L<B7_(Ss-*
zWy%{ZQFougW;`9|iZy-o;uI?rz5XM%f#Nnk*JRx+wIA^`it@gpXWOG4-)pW?LO<k>
zm=pGPi}M=kmhu)Fm|c36x$XH*j41Ef8BOUUwF{idgGCb%Z>+2Dx2w}w4bAHWW<*3w
z-FZsU*r!9BV<9b`E|vA#_NpZ&C5V8W>#3>VSx$0zTH(1^D9EXa&dO7T0zZoiNw2od
z{<iN0%~uDHj7T<~ei&gGPD79F$^*lsXb`lpds``%VG`Uh)#`D6i|*k#pF~rL=v+Yn
zQQ`WKQuOv57$5vD3EnqW^05k=s*OUTus<Os_&!A4<8ly=MoVf|r{;qhFPD;m{~m9f
z?9Ht=Z8Hot_)>4G(y!HPn$P5kc$zQ8n2$rJTa$dh$uxjD_s+Wz)3SMH2cy6zOHqMl
zIAhQ3$WC@D;kFQUp-$BmH!f1ax0AP_Z1%+JNGRm@bd;RaapAc$C<1MWV=3?FUq`8&
z%7dQ=yvG4YuM~bO1q%kEJZ=fBk}Yy=4OZ$71#N#&1cflcG{J~joC=fxcQ7HKhC`y*
zDx%SAv9=B%s-H`Rg|n7cf!X0@`nfULP6n@6^$xiMN#hdrx(jmK+>^DOQ$$fp>Y}>Q
zrKqxuGo^dNlrWRcol@(a-r?ft3ttx4Zp{@2r|cc`3bgS04(}tEC8eoHn!@+ixb`;h
z<tY3vuL7k<U2=(>$fcHtn+9o9<p?Wi$_qp6t%k^&e@F(*r22;$u_8#1s%9k<=qEN#
zZa{UgiJs!ql2aBuygD<nw%H|t%{>d0{BhKERdY@u>nQ$1IUEC6G*lf32Z~NhAsrx3
za00P;3ubO^k_;`j2JcK<b3KmzM(cXF5S4DMpaq-6^QZ3?L?9qPuX>)C8{1sH^p)1g
zeRD@@cBi}gtVndwh^T!n_egu6iQRr>`V%MCjn%p%bj5GK%#f1@&11j@la-YkB7v4+
zCVob|_|oqpbPjKj4FCzA;ucN1@lZisL>k}V;M^c!QbOtC+n*z<u9#Yv$6owe3OW5_
zcQ!rzo&T+h>9=d_U45cV`i4!G_4Nzq`jy&rbJ>$pg2QPZkP(kxEZj0Sn%XPi>gB=d
z8@n_G+WWX{t$pFPX8&Z@nH)yv!8tCg?71TgcMb6AUAbf*uhnyeyP(v6JfOle*K~cB
zZRJQNK~}p(g(xxylT=Dn?_G9^+c8(sJu*w=XutEt$_sahRKU^y_M7kwLhAApAbM}S
z_xr8wUyk$iAIXRF0?c2^Gu&usns~Kv)T&ViNV&Ju)veKPRF=Os_-#_kD(*?tmPz(p
zHI!>f61!!e^n8FGWV@X1_T&%G7_~|`uv00o>pI+nBQr?x9#Tn$H$_!yY3Z9_*|bl<
zG}%YW8#pQ(=S~q^y>;fs5#qo5F`vqL9uuC+J(`zJYez+KthcZZ&~up_0(yln!9qa3
zaj<@T70ez!aggDd0L7KqCp(e1<PQZMJ>BVXg&^I$I@3Xy#q-MH4N*v_Z9TE!?nKUn
z){Ea<(366oD)i(SwSIpvkQ>P!T{ad8G(psOxvZ|!q3BCmN{6e5TUpz70G;<Em}d4Y
ztPE6}Xw`JaPCbWl|DGh#7X(?}yv0w=Vx2V{*V;JesqS2%WP<;hxN!H;b}tP8GqjuX
z`-9D!@BCIR8q`3}5;qK$-bslY9oKM>rf|X*A5Ur}J5=66bHArC^49*J(->seL_JyB
zyHvye>$Bsb^R40uk(4+b?jCk*to*%LCy^MbsVwJ@%{M9<=SgD9rTWbmJ}p<rNM0pc
z-(MM8yzt|YOD$eDJGN#&F|XVHd%<j;;Zq57c&Urs#echZ|KrN`<^Y+O2`?W1*M!Y~
zdAJyUptt)FU&I`g$_G+wC@4QhbcLl4{_E%d{8FnL<RbrX{_~xMJ_$-K{}B!5umu6$
zb%F@AkMg7gCNFvp1CO;IO>{kh=~3(w82tP&#0X>;KfBeEHg3uIti2k2uk)9$$6*az
zJl+r;R*L7xdcn|eAeT`=o-Z_(M4x#=#Fw^#QEqKn>IrH95*E5=>jXxgX90OU+FtHr
zn2^h;bP;gVS|!K@fr&s;v=-NaMTd6XB3|Q_PrY%N@b+s}_i#n$q9T<Uac%CL!`D6H
zq6?_dW#i-2t&)=<yo}~IjQ<is{gTNO1n>!<0EhoSq}Qu2!7^~m(cz)+rd)&gl9TFo
za1gf*{~^{YWj&-?gV>NY{+u&RT(|7IGqj8lR`j!*C@Hkrm;TXe-&m3`Q-bZ_7je0$
z)8jREADypH6PKtVTk-~$K%nW)7ovH?c(svqTBO3WFW<;!XKfDLsc5}ejLcsmK@cT{
zG;{Q!*XpP5>OKI1cElGHsCmrxzJR+Yp9%)A8lj7UI|9!)pb@U9xTI^j>UdW*Y~%@L
znIKb^AT3E2Ov%VD2YG}dCf|AnL#t?Qtv_zN4`ISg<pIi=QucZUFaKSym;owoL_Br)
z{wX21Q=<r%Ub)*h=Ld?;!1U6Xd!FY_eer0cljMTv!oR1N9;WOD2-WzKtLvBEE6Z`3
z{}p~{Nl}nhw<0EH{TTEaD*a2R#gf9NKm60HI0gEdjWx`0N*oFt1bictgyNqjCkS`;
zeyMX3h!r&pnMB+so50h`GvsinI9x3k^>@|(2VIasjn<8=IP>ZZC=LttUh7g0VH9&3
zslc#+k`b~BannTiuga#ATuWqGl+}kUH^&EEi^g3q+g)i1HP4&ko?5J!zh3(H)IvE9
zeXuTb-E=Ne{Gc>0akOujUTw8vE@Jr8VhDe%8}S-T@?0P@@8R!)apy1GcpztW1hO@L
zqzB9tZ2Eg{=^=_tU=G$POI^mC_m*BlK4vHTG|w{L<RS^dfD;}+5hm3>+m^FUjzro}
zfh6cEx$4moRM`zwfkcuGD4$dc&s@h3^-uaHAV-slz`ebIR-Svs8P@bJAB+sX1&$Ii
z5DJ~b8ID-&bjP-W;x=`V9Sg>f?9WQG1#JCP^YxGgMa^j-2k^eq$Zk-krrpTNxhG5g
zbLjL{vqG!aaT4w}O|vO`b17=Wg6I;rQuFBY89*ccrIzF{0VWDP3Or*VL#|hAwFIEz
zQNLAn6aU8=*-5Z?iIpbKRi>dN5UQSEE5!?XZ_M|1BzknKOy{JB3DHDO->Y9h$@+o4
zBh;^)t&!9UvJHtjZBsQhpLPuUo59eM^*XNDJLP>0mJ6j^p}s?iTLRg_C~Gv?H9k?p
zyx{LT@bG0EIM^sRZE6--H+LsNn#aYuCd4DOp)DetB<eWuU^H-_^uX8O)?>a;V1?Ku
zqtrd66WYBqs)9AIG|Sy|K*F*;NrNR>+i^C5ncIuufnk{&;T9<?m>rpOm_D86?gdNq
zH<B3#0xE@AR9v3};(kJX=9oximT-0aii|c6eMu@*ZzDUjbFm3HxSH4`$G@AJ>92G;
z0;BXLA%I)$qgH2FsFNkt*LJDJDvs2+`C(y5FHjgRAF*OOvs}ICFW66r^Yn?rfqP2?
z!5)oAdF|{xWBpW1EV+sqfNn}lx4{6ni^RLbAU)I;3#K}0$-tzD+hA6Z@=@hqVYMKK
z$8pSQGkoVATz9Ae18-a)4=L61+(=Y>z8(iusd&Ke9}{eEhC|#!`Ser0)@Hkmh}%@I
z{`$>(+B3eS_5PXN`r5Po{UGz^)Gj%HErlb>Fr^Bl>|XC3iWtm&_FU&3{-J7lY5LAw
zm=xg#aBziz&8w2R-Dj(lU5vCA;TWsV6%_!v-`j0myI*Mqt*G?Z8_(|tYD^HQNF9%R
z+5J5<E7M<nl3q32w7-zKTpfk^Awkn!VK{*`bK1;R)o$Nw_6miwmhX`Y@M`ctNB^Dt
z>BWnsN%q%bi^6*<N><JIpZl@nrc{A}Yx@L-B6xlZq-~jtUX;en8dvgs+Dp2-IyN$o
ztZdJb6`LEY2XRq@z~2*xPhT~904%9IYnv)E_l(|WU>`xu++QVcn3OjiyuvQ1+bO>v
zk|;cz=TnRbD1WC4sy>e)bj)k7@rft$+h55q>A9OM>%TL3q9QLD_*=|LwJD#%0zoQm
z>SP60Qe;Pf?+ym8%^A?CPu##^`1PJEz4HKLg0(yKrT9K0_J6*<$An0@GMnd*F~nJl
zMOW0C7}iYPGq)%YMml6uxXSo$w5~LHovP2*{8rZME0s3_3V2#BdSGO?6T7QoJf*4l
zh4~<cItibGk*>t8&6JWJzwghlgwpeiX4>mDZ)#k^YZA>v9ZGA);B&HQ*(m}_c#ot~
z8MshxIKTPM$-+2giZD@(#244nF<!fWS|z6|s$8eMI=)*bqM^331O?JNj~H*I?6CIk
z%Lo=lQvG!jr``_YW`BYaG^S?t(2{@W(>ki|b8Un*35RS!=Uc1^%g9A$?2u|Z8}ZqA
zpjNV!O2Vao*x9mLaP~xU$4#m-xqk8yp%LV>uTB6$Il<<f0Mu#$Og&Z@a=7w2;I-hD
z^4j;Y6@ix;F(nw7f3AAzSU{z0b4>RQjfv6g>y#6T-4!tyS)(c1@6kSNCkq1Om7IyR
zq*2$+wo+4qFZz8Tb#GtUyx^EOeNl}mDYGPFeNj_lCp*5@_RU<QDe*&Pc+_54x@d#h
z(A1VJxvHV)zOaZ^jh-_~IDiNGZu9mq`XNzO@K)%(6#oq)y_<iR9?AWnP?uebpJx;%
z(N(aGbVSqluCh0xhoJjls5FNyD^pqp=((v>4e@iJN3lhzN!gpXoAS1|5z_YuOH|U?
z(<7CZcu((vmYDjZStMRoHlNwFoV;Mt`rR~&Up^UFb;9t+Ou}Mcf=jtE3Z=03QgBQ{
z3GwIKH+vp)M<UZ(04`%_?rojcmbZiN5p~lnd%wu)RqNM+d4f_h3*2r`|F}N2fO>>Y
z_umZ+Y{o!<>C(+N84keoHM;i0Bx>l$bqLpDz^0agw+iHl3ZfCY!{=Q4U*Oq^H53V@
zDeM7TX=amumo(hf|0AcS!$n{eZabVUp)X>A+gYD)1HCS{mOwDqdW|SAQ~4{1n(f#|
zil~#ZRwIadE!kH|dz-Z`UaD)k4<_C)@V~>U3AyPC(&#?!m!aO51ER;u+efRCH54D*
zx`rRR&H+muYQkWGe44!g2F|d>9}&YzGtl=ZnlcrnS!AqJq~-9^dfbrlv5Q*R&6uMs
z4g_`XArib8lq3X%mp0Dh=w1@Za;91A`+8=CG|s~8mgHtxM#P<|dznnxt}ZiDr5PG~
zdTjRQEjuBv_a2{(_7I$7>gxa^2pd`j6O(EGVe-mEjaTFXv^00EI(}`3f5zXTqdJnL
zzfcRAnoV)oOD^z_>a*n{{vY<<I<D#U|NmDM0|OP1BS<I>0xBgSWuP=jNlrm>45THt
ziHaD6Al*na7!sqyqB}<o6c`;6qsF%HHP6w{`<&zZ_wVnY_xa=8@EqJ;ab3^rnfFK8
z0JQS#SU^q>iR(R~S5LG24$qGrvp)2lf^#P}BV(J<KA>xH@qE%xr(Q9Knqgp(c%@W9
zSHB}_#R43F6EE&4_RF*6GgEHx(5`5L293@fV95iXAeWl2DaX&@&FLgdpxE}Tb^+at
zKS<|Uq8LAP?-tRWojdD7+dV$_uw`+DDOs=kh&yB2YgHLhQV4J>OXiTlR#KizBdxD9
zc6C@vKq{cJL&RF1a6TzZr<q#nvu3Tn-Kga}%~{EydF5QOS2i&~uqD(kYhF!sUYjx(
z+g08G<Uc_RVRP3`w?Txx>0EU}KB0m>Or|{5w!0>Bf*k!Wbi*;3I;&BzX;<T#x4u&x
zw2Dz!^5GjpEbXX{q5Mse_$n<c?IyC|##oXIK?{TED8pK_v_vdTq>bf~F=oNb*Ed7Z
zXe;zKT4*tS?C=p%LhkA1m)B}GN06R4L<{CD)4dLi-2BhD!7q*a{BpVib!KJ9wl(#f
ze>}XJbRtMV29Jm=`D^abnkx-jyj7MM0R?8GN{D-(zLAw+r>!2yc8AYvEG&8Gv^Y0l
zMR!?JSR%p>m^0a~4cpJZp5vp9n=p59u0LZlU?!F%c^6HikcGZX`;zHF5RroyW~rM^
z9QIy?8?+Gcwdx{}%AUKB9Oqt%R1co&QrCp*Bk(t01ai#MO<SC*2?h^zsnYbhF1Spc
zy^FcWSj#wJsiv84=Y9CF2y%t$C@S3)iKy&pG|Z_TZ<lQX>uN43%QedGI<h0JE8me#
zXWFQFGt#8A@+`JNrN%^Gi>H>}wK~2irJ7}e>RNm=w^2cgx^Le9sf}`i|7Nn4fGl>W
z2GqhH_6y7)d5ufd>rY8oaAGNeGxs2r9c<AQy?!}6VHsChdfKuF==}as?l{>P8j1(G
z%XYyK*6L8q{ToUnE0}B=E60aDOX!Am@yk+%-CSPV?YJ=r7Xu@|{_5B+n*{E$?B!YD
zs;#1qtmgC_$dG8(rL3KG7Y?sLPJZN~L*aKoJg0wSzS%@C$2oh`16}mGlKz#uAl^8b
z#XY@&e}eoXWjWJWJ^hcm1gA@5vd-@MtL7TSo!7FNcbZ$eAkNcy9#h24OjDAs+L&1S
zx=n9Q!~%7}MC{6|1Aw2QiYQxA0gl0W6PZHdWF=|lAdmF%Ioc1G<i%P2dLp+qIE|iN
z!hDuMzcL5WM=rza)5qikAUsThFiuV=J5{jWB5C<ojzan&1hvD(I|RH8S?M6#q!at3
zgjN>YxwGX;@cT98-S5cL2JYLn;thSvQS+dn2&up=GCt}HQRdf*6>l=LQpG9g83n6j
zAQ0(T^VJ;HGt51mrkhGNJ7ABb&dq}nxf*Ld!cLa!ou{v>$0jr-$2K)b5IO+gYp7OE
zem7)pZ<M|xg!P&RcN|92&B??mV7G^0f&`;u@c2p7)S<x>V*xsg+Qc66@fP<W@e-f&
z*mB--$z9kxgkcCv?$_m8zj|8ul{q)HzgXDkuE14Ebp!Af1IC-ZYEO&JdK;UwNGQw&
z$1nC_YpO)%Iesoml!Kf_?k(&pPqmv3h<S|SA^`u!|L~F6ZbC!}^}T%(5hPlZF$)|Q
z$0Wv`#{O8_4s>hBlSiAd^g)ENCl=^TZc+ql)PhVBFr<h_F2|UT*5wLeLS9Q}Y&2mz
z>iXj*L-2L|c_dZBc;X8zHu+f=Uuv1>MBCk;DW>$p7svb7)J_Gic^tM{BNdK-`d8<M
zr0x>uV>!)oc?t-9ZWttVaJlg1UtB`QFIe`J2jAZ$)rlfb=IC?z5w6iy>#2!4G!l-K
zB9xNAXqT<yz@1HciHyhUXjgrSpxGVOVc;)SK6ZSC>opG{AE+~L(IMa~m3J)-P2}g|
zd+RNNSmt(D3D+&l4N)nzueN`zx?^Kvc96Bc&IXlsft-Zx9ch(no#6|ARsQVt9p0CQ
zjolw%2sOKY?q@QnlN01;<TkQupZLT*D2!F>(%RlIOcoVn1C2;Jb^7{|P%_FTc$8Y=
z>aQx>Niw0;RMVhC%p7ZGm2|jRI^y!W90YaeQN6|cbVPjx-6@Sv;pPLq==>|Miv!ni
zan21|MOPQ0(m6FQi8kov$HnpQ+6D56a(R+F1aZ6%<_;-heyC-0S&N658NZo?#OVt{
zvgJhxzVC*N?2%upU)TYase-m3`&-gApUe0xDjzfbI+OH4?mqj#IPW~RD$Q9%^mTX0
z9X(D>Jl%L*`Z8j(G8DC1{aDZ?laATGrU%#xWp)XXduPg>{rrLSWYf@fiWt+T!VM!<
zKK@RV$6(mh0^}tu-;w#`?74$XVA;e<_d$Gy2YhfXn0Vm})q6!BOOMQi3*YL?l&psb
z*v$pr2~$SgO20GjJ|8HPS-kAI`Di<m0IzkyH-F`8#|Vs}BIfe%{8(jr^PB25(27n%
z#N5QSVZ0MZpC*jKbBuR;RB+0(D9a;!LN?InQ!Pyxo~pW4{tdLGakU8i%1x+Ji*494
zW-~Ye1qpKNiwms;s50Q(p2`i0dqDvO<d6~MV4+p$n(8solKrT~eo|B4V4&K^liHuD
zjRGQ&C19kaTPD9O>c-Xe&g&c6v~a;kpwv0l2p8vD1T)p=v4fxPY?cxxi4L!@Q4#`u
z;Esn&xE4E#^YN*{GJ=IKZ4%rK49b^!xc#2zvL(>qSP>^I0ilBjJAd|58f>!PY^IO~
zz0=Nd_0hxcJwSgpmDo*QY3s*^FZZc#7#2{-;V5Jd!KdCl$_{Q&7@}t9T3$n+ni2BC
z$1idSq9VHV@>70v2w;%AWR?-7T`YgG2K;EKO(TSP2*sN_Vv2YcMD{w#dV6KybF^w3
z`QJR+-&2d-E=XFw^{kj<O9C&_LM^B*2a_9A9iazan9~~zgJM@WXz%fA#@nv{fzYb}
zAz<pOBO<c1X>ZSOf*!nhYL{uz6Hb=Hs1gCrG|D#vNX4uWWcyVYYg^wf4&U#7m4bKs
zrMH_l%A3`2WBIZoV|w}d;(=<_r4u`8y{q6MQEOj?DlBH{%NJ;mOH{v%oLK5<q?<bu
za%KEQqduMdTuq#mlleoZ2$og?eOO5-1bNO2-f$xwYwjbgH$r_#n}(NuJ|tc@=ZrVm
zZXC?RlP++MM3-g(BaLPPqk4LE8DC)qpN-N;FE{8`X({Ir`jtMjpp9KWo6mxK^pTc1
znyPHqqV*A5rkD+9G!vzCrJC#b1%MIEgCDDNUscmoJDgkM5gN_VkEjM}S8F9UpbX+w
zs#2&VVzUhdcj$uA#+YVmV3A_q7sZk?rEob*zL3jk{Yf6k+MS8GnV#h;;7-Y!D6=YW
zuPlH-`cv*s+*yYC>=rAy66`Kv3(>P6xW8#$N>^i_W?cY0I~8lNzd|cfd3aU8%j(vo
zRDS7Cf4l)&s~q|#?OiQv4P6s_E@9mcNC4eC3VG7rQh=nJHzp=M^U64)CEpZos(!tN
z0DlCsUN#bZOPh4CffME<3`S;*ChtN2P_Fw<^(cky5|oSXm(z6~JdqcMF*x6~HrgJ6
z<omkWdtYQh_P}tM-EB~J+D};v?U$Z(?WgR|hK$qjILf|L&f))%M%f#p1_Cx8YyE7Z
zjiBoLH_KIP`Z_4AAxnMw=<v3~YqO+$ts~`;eb1@Wb9ItD(}-i+2UCiJToP?h3YEf!
zBQb_Gk~WK9nR@4{b9x#*m;B9p*fl;5W+9QQm`h_y<2#wgwKl5>S}F5u+xitD6GM>T
z<<GU``P$&CBcR3L4<qy^`?c}Jfnj}}o*N(o>#+PwzH$$Zz!CB74=Cl_FLEA?HgKR3
zZ*5@r;OVF%>SUBZH_07NnCqjq(fcnLtd`i-yb(lS@=l|s7K2U9^t=k%WukZL*_s}L
zZVu+&=l(~SpYLS%l8#6}Dws}~BIv(_)=Q~SWA$Mt-5TFJnHEI7hRuAd2#{51pJCAm
z6JK`ehUt%iKJUb|p|4GTS=mR&%`L+6yMYVMChAxXulNX|q>I9vMCr(#y>BR|xvR;A
zEQ!~YFzZp8JWFFq7Q@lYmBrxp@67aTGu7>6hh;72Y2&>v`<qf@*+6fj$k<)bfKyje
zb*_3>Sz^lP-5cn25$~NQwyXE^#?8ZRuxr02TEfqFPVUS9)FV0<%OE~>h*qu}1sSLh
zJi~C=P}^y?J6)NJZk6^PLBV;*aeEb?a?d>L(@tw+C>QD7K*eye9Yc8@t3xqsR;#?Z
zQ_K1w=+>8FEeAkj&uEa{|AW@)U0C}fsC-=pd9V;;ws0mFa|BPn7ceFTr<dgCj%e&#
zX;2_{2}^=1{Uw>GvgB8|yFJ?cSC-`O<hrLX<;&1Hzn{iwW21sOtnlqVC|6an^!vFR
z;oeTnLs}{c4oQ*T!X~a}sUUN>L+&Doc`li{0>#!L+~p)^*I|LL$<h|CeJCFUka&!d
zelHf$FS;cfBhafmhO+yE$;Gsk1bSLS;*LcUNP{5Zn(M+efY6vX7f!<+b55!_v@y~%
zBU)-+8DC$kP!eQE2!WY^FlJyQonS}mo|$klr#|l9Wh9~kr1f{-Y0GQmYu6>&u*}Qp
zv_r4==zuM5sSLaIt8Anls8|HXL6`f38F<zCttraoA?B~r_oR+KtN>@sy(Mj0J@F^!
zRJvP=85{fJbTPGez~Hw!NQ1f@Fd<bCqI`*1UlL7{=-^^syDtngoYS6FLAcD0)O)O~
zF_4_Yj)8-c^b;=~3HwqoTdMR%2*QeyA1ip<n*y7x0?N>-4WwLGYGq_}6;;z>b8r43
zQI$CFZ%Tzb_8zUxKQdT*_aG(O-WFp9dHMa6<7AT4xq9-sSLLNg7WpdSVZ_c7(IDtx
zf4Yw@a-d5t#Yb1EFYBZU%eL-qR+2Z<hoA~Az5F}9o3EPlw9v5PcCB<4CLdGG^=b43
z)XlP&qhhyYs0z85Q`PB%>GT+0JThxCJSwiBHk<E4CQc+;vXCbFZK6=%fGZKr>$tOX
z7xo^T6FE^MTHMZC2|tG|p;YTHY<>d`=UkMsG@T`tA+#%h9V#2gP?C?2{DO5%S{^D}
zEEx_&M7h63)M9I6e&NqKVSYf)`JjG@dgXY%c9L@bOy)ouMH)*nozDypUuM2tyL7EY
z^nnTGO0&?G_QKgW>FmQLx7z9WZ_-9ANN&rkEu!vhi1w$1TUN09v)6Vm;w&Qy<NLRB
zj@69}`zLy)45Q3s8t&v<^`)ly^l+qC+h}ans*r8^aa?)cuUB+rRV4&B%49sweKdeM
z>9SxQQx-+se1`jr71S2<O&105V;rTU;yVp_9U?!|a_Jk+DOmfI;rXjQJEJmF(U#{2
zsnN`hvC^S(z5K=lzUBi1OE0exWo9?LWj<?BXRaYPe#HZ~^*JdQoAu~#fxK1c!2X5X
zZ;Rzw<LIZKDq;?$xrYpS=isYs8_f3(OWDDf^2+T*XX*99;IYT)PoSYDR$p1koz9lq
zyZ6Z54v?IlbGVrIv(Tc<1uDAAJrVJTXd;+9_2}z@eS@KS8SnW$p4~T12<PhTr7bgL
zr7$USM3sg}Y;J)n?6reB2w+lPW=PU4-p&}Op_g0N?!P2qBUS74pg{ZQbd&%<2y2`P
zpF3fzN&`|#>LfR&tDWL(qE$sLzS*9xJ%u+PIpU38dqzV<hEAT{<al0s*@hm<WWR5I
z<=akUjlx3p(9#R1=#5+bwO76y@&zo#SzO*Q7q*6Ms6ak!ILAu#H26pmmcyp@5pHP!
zddHU^r^53K`ss?jN8D1)lkY|ZW$OGay7mC49$G<c#%{Un(wlA@tF;~DNsf|=;35rk
z4Y$Vg$hOu7jy9NHaaq2U=;fZd{Vf+3bDjLKU!(_fT}*7eQ4V62+Ym6%wL|W#u5emr
z(8#PUzvN2Mo2Q|W8L?vc9eEM7M38I#V3AE6gVxncZZ1*g-q}VKZgyE07LRV;uzLpt
zz*}<^mDY=s2AX6J(f;z;tM|B8xtzH{M;)Ru!h?i+t6*xf-I4H#lXBJ3Q_BZRa*=L5
z(XwFrKliQ)Gbb1_Zxwb`I1))&aWY{g@n@^g-wmP1-r6M<bjZV6_g>FGwiFX2O1AlK
zG>266I&6iuTwB%>N!W4+*BGo|(PJV8I3!|@BUfC(?1v2k4f+rZuRqiZS02eTkxI3r
zb*Q!T>UwuGUB7ws*EL=P5FXxV_V#f>915c|w?t>OyICz&4sFo@QzH)g+6k{;w+~A(
zfO)cLu@E~egucQYVVHvHGbLdpry@K(W0>aW)RRQo+7yziv>pa#lB>k{E8(Mwj~S>h
z`$eU~hJ^5@0(TkGT8@m{Szcd04NtPa<&z(&r_$QKOJvyhHnYrhtikK}@T37~+a~E<
zcGe2d$Wu@SWjV2Q{ZIdQrJ*#!%E4#7syD0<Ip=9vw;l1jN%P8@*aw;&=3I7g<Fbk4
zPIl(z<t<eO745A^=S;6}qLokdb=zT<HJuo*i|020IIOKI-KW2Rox_aNlmiruu2*0h
z_iL(^J1tflqqT-hT>8fTn8#S#!6lVbjpg}<^D(~1i`jm5CD;nmyk-_Xt{L$Gsypl4
z&fv$wV5_5WvN<c&BnCcj!?^XR#jz(H?z)!ncH<IK!FIW&c&GQ^y-E*TNBM#G!*14c
z@}?ZLI1Y)82u0VQuI!Hw_lrIG-Vghu)hCfB3AFmOi6=h)SI5j>O(cE%7vVeW$wpPC
zC>Ec9-(n&`uYv~Hq<OPTsVhHU_1{}iw1cR<3I&F?3_qjepS%0d-|sC3HS*Xcd6r*A
z<o_dHP=D$Jd`1k5kN^L^{x{F>e-HnEOYeUh@9zfXuVL!{nVtU`-T&W?9AQ*}PO(7g
zhsZpWR<xO{MjU)Y0<_2<hgB8T_(DnE>Bkp0>GQaLX1@P%az;>5nrEhagD)ey7^X56
zYNytA=2}xJ+sF*iCmOR;yOGZqQvr*6vfG$?D|Xr3L-%(>_dj-p_L;pIEc_p~^J=T^
zlefBEr_v3Rq}<)zX%}KfGo);upzGrwt{YwhQYrW+wY3*q-_wec+kH0%OVks8mG%D)
zaL5V-=k>F8*Ca%*=_Nv(^=bk`YhRD3m?5R1?(n9%Q<34QvGvGH+8Wo<%_2d{&`XGz
z&-G7jt)D|kp8b-He?Q!R-ngO{z;5ualh%v0uA3|kSHSooFI(xCYh+xb9ob~#HWp>A
zS3Od!UPoFFbfr4<U)}m5XAPR!V*ZPS1gAE_@4(?I`^l-9<)admiz_!=m%fBrgE$wj
z&~qx}J<R9I_X>-))l<+54x0p4`OB<Qx$O(<c|G+uktBbogbgiCog-^g!$^<S->Ha!
zt5z(hW#Rt5lZhqUuFGTz6GZ)ns$i(?hl@EbNj6P{q*c7G#&ob;D*`q|t6*qmIv8cp
zMqcD<mtK%;Aom%@`#vh@@T+dq%$z~8Cw!Z{`ztB^GtKs=x8`!B_`>GicXLiX-ecO%
zEvNIYZQHlHEe$k{P?l@$-e<_Ie?i8<<rTb3gUwpyiN!BrYG7QrwTo2iVD3d4ycb9D
zfjfV{{C=v9mAjpNSkvIQkH7OS?TasciXDhTJ4@b~oT<cBDM-igTv3e+x=6!yEh@t4
zir4Q+>WaznG)!zO6|Pv#o_AhHimXY*6j$TTm4FPNO>=8Q99q5H{1iR|VDGFwmWHg1
zF%DM7peN4=vs%q$jVCTgOA>&j1VO&_tTv@PyKEiIJjY1P8^j%BRgjF6M4k#{S?n(l
znC&8b&`xkB0>LgeS<mQ5VdF2sSAw9K?Ih7<5h-Z@lC&pVQO8I4{5)NfO>aSx7db|I
zC5qy`+){K+D84nB{4~%!J6(V+oSlSI7yI^Z{;3uDzid4`QzHjKZFY;gymzs!0-+d<
zSaL${DiL`F3wm_q_$d!b#;#qMOy8O|$F+e3ip_dqwv#!$a3x?tQ-<O#X0bJ+4hI{|
zy8UfPbP{P!p|GUQL9h0^KYKv53A#d5GPxq7<`^Z~?&&46vtHC#`z?yA%EK3wvQ6cp
zdrkRIttbkq&l4@sWwwQov&|il-K@9yVz>&n$;nja;f10QyS|L-K_i6_bT_4+L&5N?
z`%U;BpT!-JoJ93ofB3eKf%aKyh`PXUd_~}v>s|Q1Gi#G{I<4BWVIz8RzI7Ps)f1TH
zZT^50qc0@OkaPG+5p(ZBn~2>#d5~?0{G*0=f@nirjFjB{q0;4<{Pw`J7$NyxkPHgk
z&K2Xx^&0b?AFHu;IoAPMjrMZIw@hs`(Kqy3w$3kKby~PkhLiD-P?@%0#I0Q{szQv(
zWv36OpKLhE<p6o)o?Y&-!xJ-63M~rpvK{*LU+oRQIw$06;rk&Mhw5edZ_M`i0r9(i
zIfzLsH~ag%y$8(}xz@>&^YZ3z1-uuGJkw?p$3@p?Y&1D8^~W1fdeSXBT1$LR$Hp9%
z3MMqaYAd-o)=pQkBxxqQ(U|i+f!-k6=CoX*KAJ|usq>RjPo@uqFUn_SONmd0Ra^{;
zRX+H7%zIPAM%Gf&iP*a^n)j0K-^25N8rncb^vCGBUgpXUJHB{(O*ZFOcKx|A+k*?0
zt?_(h*`mc_%Cw{FRtkiUmRM{?E*@4-xcK?34#jr2eur6`v|Lnsi4U>uW+P?N;OJTx
z4Q(^5@l~BUxzkdwDc%B1Y7OzNlhwxd!_VGD_dm-3jg}~NquC>sZLqh9`#Xpfq;>=l
zqKNek-hS|TRB?ulo^nO}<`cQ80FJyPLH%-bo##CCM>{?rm%mWlCc9j7-+TSa6}=2~
zj~5sbDC@7}{?7pPPjED6xndnnzJ1uSjHR)q$J$3^i-*^2Yt1-}&%_PF*ta&l2z$Hb
zM0@6X3*MgbnJc|!5^}~JeWsJBEjF2TzaV6q{)*dU>9yq=HoE;zD2|mF^C2sQD{;J&
zi|j2uAc|NQ67wD@i0R5@u>w39>z)!=WoHU=^*T`-@%7~O>)WO=`>M<DjhU=PHMUi9
zytfg-RSS9A?ASY<QTusr{5{lsqnG^751SUGT8KL~RxZKd7Bp<Wx4oAAk?F{s`t}(<
zUtv(Hccazex-KTmZQgjiV#rKFYrZkXCe~kV%GbQN)nR1I+2CuRqmu&c&Crx9d}yV{
zBMxp{TJSohgCY3L{SBN=g;y=2d7sANOREDqDTIH|xBWOQlP~(cYFjbBTjb&dSEr;{
zH-4yXs@B;HjVpa<qa!heDTQ3Px4fkxN*szv2}P45Gue<CkZ}8&mc0pWBAN9zQS!5Z
z!FK&1JG4EQ<D75z;Y{E_ygc6de3SE6+vwjdxigJ~^f)2^z$8Z<i|40Uh`TF^ixgMt
za8%N6@Lj|<jw9*Sa8=rBM~PEQIWlTr#gr*zPE`Z<wSBAt-=mRk&_w<(k5n<4cVGTC
zO)S|9S^SO5wIhE?TuXQxv%*okS(^8trW-bkt9Ihi3bFj{Btz!|TKmi2jc+^Q$Y1lO
zC_YojCuV|dGZ)elA_g}4n2_i%_pcoIt*z29Pu4kTjwH{JVv<pdvmUSwF}~w+ZN4j=
z=RTTd<e4m5k~hm6+Meumy$=$GjZ`(2vF%Fjqm&dRoS*)!ZCBK%D|0T-aky6y)NP8U
zm-q0owfBQkRvM<tENjB_@qThdJ=j-FxK2e9jB>#Zjg#G^`^_lMj}@orwLdm0bLe@#
z`{xX;Yf9L+&&luyo%+Ig!x+>z*0ViNp8BjG&)cqzI{kdL{Iaaa!l68}12OY=NShx!
zjXy;3>3;K4ue~1tbukm+v0PPp6-XRrLfD%tV{ncc&5`ud{JYn3CFaM}Mm~+zu7k9D
z!|`_ZMb@GJqH1v{?R+PvAJ#HNMx=Ku3PRE4Pg0&rc03r6W3--`dCe<supF9Mz7SyP
zP%<Z9$=*3wHsOq<bUQJWI&b?pJu88gLIOZ-`@uuJMpP^^dW&f-52Yn0S{sW%4p^@I
zDi!_bB2?5pVf*F-v|)qodWjTiS7vSHIYDWV>H#}3lk&UH=B20g=NbEZQ1T%fVyICc
zKE0ChYNn}TB)emY(=el8eug?=R)Iq3qL{|c!_US=KAwe_Dnx4vxQig9XLQySlC1Q0
z_3NUfod_>d0&{C5EUs<n7_>EQHi}oie$@ExzebJzv}Qt`oTAvdq3qX#Mq^K^4>~#4
zcoD{iC25mxZ6-lwJeSx7g}8h2Tn&5P$=@p%V$g$Gi;%vRPaTolIpZ&kb=>N7$b(=x
zL#MB4rXhwu?;I6xF)!^oATAXrg|ry*`3S954YD9T`fT4bzQ#m7@Y-T|;QEGUdxv*!
z@&1>b*0EEbdV9VPLx_%=BsN)9)|H%Qq&(hG=}n(pCpdM7bF(NqoqZXp$E9VUvXy8S
z`3|z`Bf)3P<SKrrA_Mq$bKs=~X>G^lH!Mwbltbq}cL9&1V7Q3p@Amtr<L+`q{PtIU
z4H=tW>8aIA*t%lJsM#TmjWtd_Z?owFG{tF|>)Li%D|t~iT3P|#e-=k^vXC5vp|=}2
z;<CqxG20dtQJa$4A-m-Q*!?g`?!o&0%#PE-3y*r`(w#-mu)2Kts(o)ON^7^)Q6jXZ
z*2`KmYu716RwCR}EYnxIYAG!xlSRW>@%Qh?Z~VvTmUQ$*H!!+Y*Z1a=c`R1n&X6>*
z0^1nG7K6swLW7`By|#msA+5`K%0$P3vX2hK8+Pz-jz!3TeZa-u9IjL%-3i<28LDT$
zyM~$L8}kaP-Z@%SfS7wMm)+i4ZK=}!b><9%mbk1~s<&rs!;<eP`{h+#*nocNw1l~^
z$wI>Z^Z(Hr@%N|5dFMwrxR?G>sNNnu1-7AO4uB(rO3)QWC!=tsx(%9BAJ`A}QP!L3
zDu!UIeMxD@R7$#5!riL4haql53d@7%wwzqo`d6b8d>=V~&Ir=%1iDQ@hqUxhjCyNn
zE^2>>NQQ<V|J6+Vw=<2ASDVQX`zdxryp-JhzE)x>7e!f{3B3D)kNmwh#eF)_TZR93
zI<-`@(o7vPtE$QFLx*Nv4`GkUUZ$8s&Z@V)5qNmodUtM09c|Lko&hHY|9^#1uQaE&
zKgm|MMtMu+Bjc-$W?HR<NADxbZEtCB(0?|GDGI<?RCC>fd-Q2sHZj3S!g$^H%l>2m
zcwRNGEh*o|6}h$<YHh_9%yYyJ^jED!Nd*xF3sItpbG=`FFQ9Qq?^?2j9~SBJATFlp
zLBMuY7cu(CttGfuzZf`x6yh<$u4wS@7PrOD*1#(+T0M6;HZH+<W2Y*yF_N-LVm;QQ
z_l!Mdg(p36fylPIEJdHsB4!l%{)=44k8jgGwu}1^Ne~UYJk`!8FDBH#6qDz+>1gQ~
zLo|ArNZBgQ3qpkJWw_^tYz&rDYGXyQTC(5iAtaAREsyD>-NhYuq#<QZf9*9Ac?S$c
z{w<FCxuCM%E7mDj9apGImtKoDm@*`!n%9ybCT)EXsHM4=XOvL^x_y1Ndt`U5$%p+7
za;`)6Y2onn;^D?Z`R3_8J?IYl*7}{-R&^DN=W2B|ue8ks{F-C>PfNUq<``e61?c9)
z&VL6u2q7P#U=)2#O_fWOm!*3`3}sw%?Gwa<wjtidk#)@<-S+fk!9fpu$0=c4Q82-O
zJG^?|Z%8ITzF?B^E`WNxuK{k>X#xXlpiD)?U@1b`FQjxq(riddYE0UWV{P;4MvXd3
z3%;iW8Zb5W^jM{aO+WL(+aj#dZ_kZAG*(Q1)g;hK8hz1#8x&4>toL73M-Kru2(@gR
zdA9(NFFmx|>3=@lKYiApHt)oHP<F`M|Et&fZ@UV_@2cQ`>pn?i`TdJh`~W7dFsJ@K
zOZ?Zz2ml_S#^Esg-@avVAz<hAseb=2w|gj!8%)XZ+|KzOhnIR6xj+C@=p)~6{UV^-
z8U_+D;hfq32E6@wHScMJ7ip3XiSzn__Pki2!!ZIz!I~UKs|I&>rxm0?xy?a6v$6c|
z3+7MnPp<%cwBXX&Sc9q$px4O>j8aBq-vf18YtU%~tV9<L0Gxra2rU6-b^$5aHPCLf
z>hjZZ*157Sy71qWk<zKPAt_(b7SqOLXnG$=R=v6H2BuY})_b(1K!b-$u^g+uc`|6M
zNqJfV=4`_G-U?rIol*z2#j@5{-#!P#0aT3jD&E*OYa(*qH;eh-FZ&I+Wbgg7W@7-3
z`vm<0aPS$yhjM8z{@^r96#U>cmR~;RKH`!<Px#F>p2})<{i5%f;FgB+VPPStUgK1~
z2fAD`T&LSJfB?Y9j*Z3PR7q4tAxPjoA6f^5)#t1DX7OI<7I%4kZmB_jo=dE@)IVQV
zpCG7-zewC3-K?<z!$b@P)=phu#Cg;D?zImm@0LBh|Aq*<hn{E!WdP1gQqfI!5x{xr
z9|1Q}Pu8J|qvYMb4jwC$ba^=a9Te7}`KaDMU4KOlfP4&^;Whun=yy%WVlPsD5V(&#
zqowDh7P`}4cfDv<&3rE7nBE~4G|@MQiKs8vJ^0Vd#8Aa$Ps$k>8Zzss{$7<3tT^?@
zC-a-)Uk$159nnV_!~kMp1NEIMv4K(CivjH3St>i@?*M2thPvXwXy`$Ol5<qfwbxpk
zR2vvJfYxl)iLwN-T#&F6=?3kCExD%Um$OveH<!o8+?Gdi;?Vxw<hi~jFqYntYf{?o
zySvh$B<n+%vpO!n^|jNBJI@WchDyWhTZlMG*poHk5)rSOm3r2gwFS_>E)Br98ZD9O
zxZ^pc0RK%TL#Ff?gQ2g7qoAR)Is6uzP*9CXpBeTsqE}w}R(hF7<oJ+wgL{Y8GzMT9
zsi@QBGSF@F@oaU)=B`40R??Abz7fkNRqV?ej&<C_4|@;p@6oY!{F>#hR$Ve;8RebS
zK%f9UkbMepQGwa`V1Q2>m~VtJwEQ!M5?A)?KCcm^!|m@n1V)&pK~HHL=n8e|_n@{z
zfsw>7j%#3?alev&<wr0(^HebdAhf$j;=w#x1|*(8e<B_<3o5&Y*7LOa#5vI&pv?rh
z74}q^D-Z3f>5il%eoiVWRm^*RW?)Va3s8${sgFI-4>-DNmMZ20M^d|KnN9D3iQv;1
zL8n2WXVV7g@aIck+f%4qM0-~OctT|5*z+uC^O&-qdph`!-6LQSG8FLG`tpnJG+=`I
zI9Na_+-@Phs^ls*^9i|5y(PJ(Vxh3574&depf;B=ck+dOIHkcOA;QzihRKgW@9Gqw
zZWjj7!Fj942o2n{@0h+*P_;X#Fv^Kj_WS>Zo?uaYoPFSqauR6cyyzkf>C_L<Pj>7~
zR#N7w3rhpwWLhrm)RG|9gg-EOCK<`WG+bZ`Zpd7TDX4z!7J0CO@iA~^p`!q{XxO=u
z{Pg%`I_+uIC{q`e_GDC^+9DDxQ&)W}DsA!Jx`ip2uoHTC1rTH6319L?0IhW-r_m)y
zrZpmL!(Dm=R=H}%Fr+zbTkH;gbC_|?xM`w2My%l|`_;1KWK_EfkaT{loHxhb#N?)*
zP=Q^@a>e(Qm2_8h^s!6j85QwvnZGzvVQ=D73+?qb?dG@(uru~xLNfPjBKDBmzi`SJ
z>acVjyIv=uYOTE`M@K0Z=!3|Ld-pk}alWNdQ0ZQ2;NYl?VV@Tj7nqq0`SEVr09=ow
z3WoroRU`f5XD)N2)M@GEx9%B~)4a=pDCqbJ23-zRV~Js5O1=}Pm!E_#8>g$~4>$bY
zprr%r_HYpp2bw<!+QoMbSl9G4ctVK}TJmPz>k5w^w;-8}=I$%#C;b4_?mTnCzVgXh
zInB!yHo8Vz#;PRo3VkkL3BfBO5#F$LuOuQYu!1?Yxfqfp58uU!ZAU>6(nYznLGqeA
z2eUha|CJZXIuC}SPbQp{Xt4NVopL_t-uhECogzPH&aE)&*feOt=!gd0kk+)aOulVW
z7GiGicUq|=8u6h8aL4pC>C=fm=vtNbD&?_lB|h56;tlw%m5HZzN5G`rzDRp$&8=Lf
zvOB|WHQ-P=j418^%x@<9w{v&Cw`j2(o)1yizo$B!qmg6SLVxx9<65YL`Ccm`|2CLa
z3(A0L$1UulmV&(<YC6*dxO#c}=Cv@%Yb8B5Y*|T@aV#I@5Z%Ch;N+|?wcsCWbO&36
z_;lT5_Ron*LLVliFC8PqPy!C)xCE1AJsoYwe9zFHl&F5LQNixcX1jA-<K#O55xxBC
z3?LA7PwLo^?gB=?op`n91HgAC%$;>Z&WQ;=GMm|qM;=}Mr}^lmp7bw9jlq0%m+#YZ
z#4ZjHS5FFdH%ilT?h5RQm2zU=f1Y&G5pjZFFD6Roy*n7b`grz*cdLnCcbe#Yp*{`(
zG=GQ$aN7spr9I?+RZ1iJ;>WfFE!4)M7&#x06L$^13eMqmVhujht2*K;fG#hRRrXdE
z>j`Y(6iV-_-d;zA7Yl~=;AI`6|KlQOkujH65|X^FZhmA)5^LF&5|j`KOQO6h(oB${
zHL<HEIeWB}N9X7FxOEJ^xhm8$8#~`w$8PAS?J0A@?7GgH@I3uN$8Ov27Gc%og&NS7
zZab+~?4}`P3#6oORoZ<wTEIkXz}fA?-*&;Xmbs;76W42}5#OJ3l*K)WlpwrFT978<
zC;!&b0z85&ChFpnOOHBdSM$+Sfv&ltiN5hFaiM5wE*Jj;sF?8~(A@N`s6g55!Hv~L
z2F$3F6Z%7Xu$1rCcXbIC+W8Jb&Yr2meDU`f@3u#XP|1(>v<@R_kE0dpK4>9U89d*1
z76zmCls@U$4twIJv-dt`kp2W5G)-^k2oZO<fgSY`;Oo&w+p%u~Wrz@-hxx~Xx;44B
zQ)_#&#Bzd*m)Ei^XW0AQ|A?Rp1Zg|>dIm^pe=x#UlJ}&AHq@ynoRT^5FUy#t$mtoL
zOUH6IPs0|BP-WO5KIo-RuLddh*^0Cg=Q4anO}lq9=o}+sf$MZS=M#PVGXq58%D$qy
zVCA)aNi1dyyg?hTZ`-9I7qK{bN29%Ll!2uZgyMi1_C$*v0RyW<F4a`x(48I6T{MVg
zV5~S$jsTpUfPN;i&8&>m58p0Hw;eWX*Lz7cC7l4Y+49+nwdq|mjJ^J(-F&6Sm>7%K
zf`Y(_@_qjQT)BGXiY<apkf#zI2@JikXTqM-_uR11Z7PNA_4&0}hJH5O_7?`NN#6%#
z6oh2|K+gX(pbP}0m@)2`S$O|BaLOP^mF~$FK%w1U3#!uR^a4I(Ou0T&E`UEG+1y{K
z$4sTUy_N0m7Z!DswpZO(8hmZa)O}M3>mmR*3-l?N?$So5vt#KK=V<kuk-ZP6Sq)AU
zL@UVHa?tkRyv*Gx^TbIif{aT!kUwYClw|Odcom|M1>aff^gQ}cRH|PFRXFio1A!*@
znzGSONIH?L>iqGOjCKzk&Izcz;ZlL?aP{soKgFYN>2lqmH7A@(<{-`7ngXbh4#S(1
zJHT{_=pleh6R@hmqiYL^nj6(KUG$nGmmqKG#GtX%nR~$c7}j{w0R)DguTpuo{E*wq
zqV03wXtQO;-J%YOXlIlM^*zrbiWTdAQrDBS!%ZKKJK->u<a&-+DEu$C?gDV@1lAK3
zAQJ@*>~UEJl+a-yD91OH&m#GXP6Pr#?HNc4`Q|}0cXP7dzCOuQ7?uT+WZJhS>>%T+
z_s57hFrV%4fU31t<S=+y%wyp$wcXE?VWKuyu=ui;@<(&pMx3-I&TM_4CgW=7%&@(R
z&ZbuiZ0%P0E!t;C^4>1~Ym?G~2EdF?HPD@Sw6745yc(s_0wi-fwv=vGf2=td_CyHK
z$kSFh)<W!Gvs*3$YD<4s)v#lw@FRiX$`ED7KN*j$KNyc|lJ@)Wqr2XdhuaKu?}p+h
z*;BJcGx4L0Y!ZbytyqH(3C17H@*D6V^Ez{ysVZ0CAaH_yU?iyqbo)>ZdmDD0-TRwf
ztUwU4DtaRxOnRmWoL+6SGHbmwaPS{NhMzq^jKp5cw++vXdq*R)<XxG&CofJ@c2Qi)
zqyUdXR|vWcJx)8O%~_Z9StmsQ7Hu%_<)XyC%h+}%citzQ$^w!n{>_aGCvW#uXU8<R
z%a12bjsPYbo9sb{FcJwSKIgi-weqn8j9w@7oz^&<+*MH5$~Vu^GNo4C0h9Ab*ss5j
z^H>=8W^JVWE4=+P+D-1FyV$2lXEq=5wF1oAF!beoO^XAIX4<-?WTE!b2$Hd^b05)l
zWKL%eKpE%Sq;mR8eZfXM0@HjuD6g@}`*dUbB^7&~q6_ea7%xD<4`koOuG8#v71B^*
z$>*2qOj1-|wPrbz3hI%$khR*#u<n|U*Ra<LPX;*p%pIy1{uPyAj$9m<@Mi~%G{dY&
zfL_Ns47@)#ZP`SnvIB}Y-QqNs!8pc3S$9vFij%S7hoXUUW|F`uUX9pE14w6~0_hWQ
zL@gfR4~CAY&d<#SB8*FNgIflXmkbKfOVY~1nX~NrNK28mX)6Pc**atI*4lGnQ^>v-
zdPq&9T48@v6T7i#gh@E|_U3DmipDRbuG?$)9RY0hf|J^>R_MQS1%Ae-?|p&NRc9tQ
zfyy&mj$PxMb179xE07QAQn9E+Rc^VOh1#zTPDoQQ#FESI(s(YVjpI)tq2lEq6vkEE
zy%&*j01(UPV!~8I+8F*AHx7nwzYo9!tH*(bJIc0NKAm_3-_x8nn`jVcpPmR*#-7LK
z2#BmV)}r}nj}W43$&U)b$k^AAQz=2O8?zGA)uN_Gbl1*Q3e3?T{_}Pmhez-LD6!wb
zF~+R_NXtmta(PzT@&OG@cc~L3wT7CL>HW7B9?*-6c)8xksNx({4T7cXvU8X2)1KTt
z328=|rdr^r3U^W_8+ca7vU6@ngKF3=S=@NA=cdw0+H1u%KwHxqXb7diNhcHN(EN`9
z-Uj0o){se^XZFP2d+<Iu?_RV9-76~C+!{<dDd|YG?L$R{l&hrj!-kn2_n6F+1eit^
z--=`|Ca#wS-LL$i!opI)Au)}j-nWEP(G-_~b^+K2)JWTUVW1!eada`B6QF-L|De@y
zUl!T}L==`f%`0lfF@A9y!%hlPtQVMnc%L>B88sh*b0Q+p(4eb^-K5zLZ0>5=$!dTI
zg*;4}Cn~`&ftct5*P_-xbI!?;`*S7+7mQE6wu4d$H_4wU9izj0H1tQd$;W8E;rN~$
zdi9<D1mwesOL6zA=G9<@Uo2AZo^rZ}n=y6}XFctSv9GIC2|qumXJ^mT^++oAT_CS%
zQ^nn^msX*Vo`$s?SPrs1^^#ZB;D-5)6JAG@o>6>lW>u0B<;A93rPo?xVzdn9I;3%#
z5Cmy<Nz4YZCR^)4I-~mXo^VZC@(W!Vc_37AtagfnOB45?nk5(ej9~NBd_Ici=6xXL
zg-`9?{SL+wZ~2xr%RL@b5P0vv2FBll7b_v9<6%k$`Dmd7P2Zd>rXq#I{Nov05#Oy0
zk-nvLh#2pcs&Xte|1BCyhT>E{Mu|7_1k~$5eSxBf%Ik==V<5os?UnmroM#WW#g(m%
z7zpw>*jnZly9#x<J6gGhEXn01#vQ;x$4yU_xeu*rQx+wHPdwhUpL5UNLyCK7{`}F;
zgY$76G&r>w#H0+Hf_bRqhjHQgO$J6#dqelJ=JsaZx_h^0bLPEy2I#3WgGqWK`7$Gb
zeziY`j!o^+LP3MztKqV%jyu3~;B3`8(l5T6u%~VLJg)DKZ^JcXk2U1`;)OyTRZq$Q
zfzHBi$n?-cv$0#n8Nm86DXhd^pH4ZJUMzV{)i5vg3Xe4x_2QQ{g@Z$rw9p#dw@kmo
zv4TmM#2}&TGJn5ta1TIwQM)QqxltcIbM<w;KI}p|u4`W4kM!g|&AAk;^J3bHR7wnx
zeyR>1)}dpEb?bFBpvvwt(*R~7+P*e3GjoJP06qSx(5fYnF~^Y|$>&+RK;`4;)w6MI
zR<x&o?G64u^heV%yTNU;I0?Ijt9Tp#8U-aSL`j1wGA<Q6JjQ=*GV#W9Zm(Pi8QyQP
zY3<xz-Nk_E+Wn$iw^^^P6tG#}U%lln614|RLWr+c+vk<SSOHq=g`9?46__?`i<N-d
zk1~sQSoxzQ?g*tNnB)lMyDzoySmpdtSAOC>oEk<z#7{-B9+bMR6Ni^y=&zIh_jAzS
zzcT{V?yPK{Vj)O4>H+_a%KmAK#Qf46O(UNrpe#CaOJFUHe=XuRVNc_#nhRdQ#58VI
z$zF;vtSXGEt(G0m8RiMp6Y7l%nncqnubhYEo381(&t+jtcyv$UZDT#e&39VjFpmTq
z4bczsc|2LXNS9h)`<yxxHd(OBga2{oVZ}m8d6KucAx;xQZL-_m7$t?=UfnlCRb-v3
zKouEcWE0C!A-bh%Uu(*rXzf^zXEmD!=gVvU6ukTg<H8)s1|m}$?GAyWD_3Vs@zmU)
ziwoC<-JKWLf3DVF3)lGOS3q03Acf1<r(w*)S@TQiXEF-zw4CzpSyiLE15t@J@MscB
zD#ctIl2`Eq(hI%fi<5Z|F(j?eVnJ0U2%n!9@AB8h&tc{(z237H54xnJ<{yd|x4Xk{
z@=koE(J~>sK-{&3NhE&*CsQ=KtT#Hn(BJW!cYr(e)jk=geAMgQ9zXT{zIF4!JDl$a
zN9pFxRrTnL=T7m_{JQ%8*nz$@{O0iUU%+1q=Dql!@y%@tNC#S>t2aO?b#v)<#d&WY
z2zFOvO`bT|VSwk^``5Pp+eek{e<<;_`|DYn6Elx!ywlC%l*;3h^6nOVn$c2{hv1`(
z$g0<sSY6WzJ!f+Q#Q$&a|N0P_xIM41?d%ZmgL@C{KXk#oYM`+WOod+6b|(OtJCthC
zVQ@UYF9!%ySUA_emLGUIFL)lK{O*dI0_!3Z^C65!treADV}DxMSg*t-Xnr2RumTy;
zPBq)sSRpgG+8E%j*5Bu1``5j66867;w74U%XU`!wQNNVy0kI%698S4G9RkNIB((7O
zkq;rcJ9~O80ddzZ_^dQdl7hvzqyPGx6Wou-C0_X7049R<aI%Aj<Q$y1T6p8~oyjNv
zG=Gk~A4;bk+2K3Ke?5)=cstY|#r?s<$+(w(=Qj`T$FC;G9Zr6<IMtQ8{4eYJ*Jmo~
z9!h=$qcafn|Hs$=`p$|N@FoAdjK7YhiWtRO$EB<AfWJSkH{l<EYF#Ucc3TFCo6Ao1
z!EDf-jk-u4fm#FnaJ5hAV7aSJ`S!W%&!Yh@u~e0D=*RQ+6XAM1j%hg%zjTc=_ya&H
ztAqSkGt$}Xw$m{`fM2Q`@fe0#1GMiC=pN(`N8m2?fPYy0T<i%i`4TUJ+DYE}JcxrY
zMVWaRX=1SG4BeRiB?&{H*NUj+Ow5qY5;l=`@l&SRWUhuRsn6I|77m+LyFDv|*xkDQ
z5v0UM9Y)G7g(-8h`c+MDg0QBw*skaNwYEgL;Sr#i9V&kQufy2-gyPE3(VX7Hev;=o
z@mQcPc_wrVWE7=<x=KjeNjtW^aOp=tGT}T;;~h&tQVuQlcs0{fq_E6g8=eosZK$`Q
zOsHK?N|&M;O<qkBuU%3(#OIG;f33<!qd1RMl**LH>JUrEHW*9J&iM}TI))L-*Cc7Y
zI+owK&jYekD-braJ8=ALw8mZ4zl`qOiLcesGLOrS`f2h4G4`(HRp?Y}Ov1=w$^0<z
zrcn>r0p&p7SeB=;q9P)NA*LjZ560RvN3`qSh#_KZpq1b4n5CgTnjR9fBbo*#6XifD
z&w(8<HFD_4<Yh{YV9+y%{i}&%`wkqvcxP(o8|&EC^Q0TVmQg*GH?n&f^UJx?ul;TB
zCBHqiORS~%=zC_U$2_Tfz!YJ@OC?q2o>~ANXTB=eUsm#s6!_a-4C(TRicjt}3O(^-
zw{pt*8H{O};1C{9`Xs1=oK#w5Xe}s&=-Y6orygdsa-{SqkaU@*wT<<8PthrHwg-jo
zHl6y*DzpL9f{Ag<W~HI!J5hQh|H73+e_m-kbt&XIDoAHfN+Zc@NrqN6fWV3BzCGhW
zb2EAv7{3fJfUZF)LfpD;>f3QMMkzMp9!rYy{I%v|RUWrtVpXCl&UGHxUjnF+)C^j<
zl#|TEuaBj{cyk*l3v?a`<2n!Qub;(9TLP)LZ*jBUwzv{GKYFkCC9K%n5nw2v>ffcR
z8jT#)(-H9a1elUusT!D(<Sn3mInV3-myv6X0M^FT`OcaBLm`T_2vVz6lvzLG3D+g+
zXFvJ%*$;tPJJwtWP3?2_0mU_aZ8Sio=7^tqrEw3i50&=fKRo~o;v<y%+Mnlh;VH$H
z*ob=!EgUj#jk^uV-2g9uO$Z{`WduA4fiGB2rE-w6x{fChK2f&-SIAWh#C8iLw%TVi
zvWT8lvK8!0+zcTL)RLpUD_=vIv17YaQp&6R)JQ)9vK4Yx+JbsGQrVU+q#WWtj;+Tr
zvYzNO(k>6AYl&K<M?i^7a5YQ85CX?YgG?gMA=I@LPB6L*WS`#0-$K*^{i!oWzT2yZ
zQacbEMZ8PjnhlovQR1bL`TjgqufQ3q&JR&B!+Tw35~x#OUmG4H_ZvW#dZ;vdDhaeA
z5SDU`RWiI`I5_H&X8e^d-*JYdiy+Yx*~J`UoD#aq@_CH3<QcK;48#_qsliidtI@P!
z-lao9c~ec9@`UWmZ;j8!dgYz0ues`#3p24vj9-m!37ZFyb<r`_g^EW&Cc+79FJ1FE
z!jfzW(t-dolZHzQu}RK|Aw6pMyfrB1;&&?`fdIh%NVJfUDQ9h^2W`(RUrvjtH0ie&
zuBl$}idaV&tF>Q%%GeQ|*D^;df`q0sp}0Boa_cshA|SNeI9-&eK&4d<xaZ0PF&0*Q
z$2Q0$Zt2HS$+%ho`8oCV<5T6cKHlDf9s3beSC=t4vMmE!g%V<k*ekD`%sZr5UIQ7X
zIFK%gntqz-q<MAa##(5IFZYHR4j!y7QyN!L!cBzcR~tw?@UB;5DJ*m&MobsI7e)4W
z(ACP<y)Hgpry1E?GkgdxW%T}@(rw@FzxG$m;q`Km+GweqH${>NF+!<Nd=JfhTLp~m
z3qs=1K7NIpU@vHCD`h~YoGNBBjO&E3#i`R(UTcERlpD<=n|`|V7k17QKolR2Utb&E
zqPiY%3Fuf2d`gS5uEj+d@ZH$zRuiRV;&xtC0H0;WQ!LCOQ{R_e<xH59AOc|OxjPAq
zuG|HTUG*H)!b6fN6ZTPWp<u)(EAVFY@w?B;3Yzc8_l|+GWuNb!HYlPl=p%U7R#XYb
zT4F?5{R9Ags;^I6PuH%0dm#|k6NFyu`~wHvS!-&c4a9<_eTHm351XMPYA?K@x-NNU
zmHp3r56{0YN8RnC7bY{OmAP|TJtf}6(R6ul2pe5fpKl{x{iCjJ`^{D7O}62D?gmE_
zl(VnT{F<P6aEccbpH06HBy8ilmz%;i04|vtJn4%y9D1hws=X|kn1*;nz&e;BNDp&3
zm~jum>0wY!w_r}6usl*O7B(_dZ4mKgrD+Ypda~r>?f7`Fz|<Sf6NpT2Hwd*_%OEEt
zP|G^f@l+e>1BKa*swpi5fSNk5_wlBuRwHt$qtkg0(1Z&P#Bi*Qf1XyuJM?Feysw){
zoIW5Iauu+K9ZyaFb-1<e2clE$E^Kfy><!xTQ*USkF6?=5(^#~TFO(Admi-~&VWi${
zj%2tYeX;X>5R`<fy_!$C+5F8|39c<q@|FjOX{@SYpDU;iMhV~DmASq+22L>d2y}_2
z(Fgx)S>{F{xG;>M_DZDD8L``D;<;4~nZhkh=tomHp*Zk|h`zgafw(uADig!%wW?`D
z+oR$(_5x-+sWrVJXT&mI-IUvAtxh~RkqaG;f&*m{i3oIa(lgyeS0k9JGdF72Sjojd
z9G!BRJfzFMeO{B{0qiZ$WD~#m^xIL-?)4NCto;ioCxkf<Tj~{Q6ZhpF5_j6&RU#Kp
zHsba%T9|ffM`0ScffPxrGxG;XS&@>rKU!WN-8Alt!;<=&2XZahC!(iGniKChK8MK*
zZJRzwh?W$$gf*#3G0rtb@wI5zu0A^5PAvN5BDgAZ6(|blMn|M3l1S6@%{Mjw+COK2
zFWPqKV#oN-2L*h%?j3yr=t-#=wNPlnoH@!Jeek$ZHuvG2F-rOsbo?sl+Z0}M(f2;p
z0>y|51*Cl}%{UzK)+%N$g2|kJrh-*dW|mtjcE%KPb^TQu>pHCgB)oh7=EsZBZSh4x
zr=n@Y*m54Pn(a2HIRH30IB~V#BT}<}JREBp>1&~{<s6>QktZ0($5l#iw=C;8=sk@q
zGq|qaJ)#@C;>8pu5FV+l8Ih04xK5ak;ce;Ae(zNF4Be-Svcf=6rZdK<WlUIhge8N;
zsi@OZ!Cs@wb0kF$zBrAJoz$4QCQBG;t3jdUyne7tlb+~gQ@4{Esd>Vv>`W5VO-R7d
z4M|2Sb@^GC%F{o6$OpV2BoO<wE0{%ij~1QrcCR&fb4Rqj`vyKA48q5{H(n#ONsZ<9
z-RQ}0dWFT9HldWp-qq(z1;@2z9yDcrK>CBO@ax<2yP&WcqOVRAVXL8#mIHDGqp&F0
zofq>Md#SRTOGS~M^Ld`;U$!Hr)NWOi>&!ZyI`?DP_)NOjuV{L;TQ$AIs(}8swnqHh
zshWvMl;>z36vG}>l`jQ$^2>N<sEo>Bu9vtIVSDo6*m*51{*($GthzpTtt?r}(&0@l
zp*Y29jN%;+BUBr>l)5-{KRgm-nYbw{r}cbvwEJo&YT3IcdvofkulJ_Mp`bB<m7W*8
zBm$V@IpR|1v?3j=joL{z->l&VjBvIg_<7&RRbh?45<=onflFYm4NKM}$(;2b(AD4h
zbUrDkkT;Y=3}$fuY30hZ{N5rjz<V|jsRfK77qH6~Ucd)PnaCYJlM7us^lkYyJ2^TD
zDi@?MI??R)IQgw(K(ljD>s1g64&TejEZ5lJPrV@LD2tx-?~JlAn(@M81YK3<?dM_m
z$Lur3es5~@o;@+ID^Xh5<@BTx6jktERm(2vQz+=+wMLIiK)q@gjh&<X;_{8w)cUzu
zOxl<UvLcPmg)!%Zs7o)8%!5zEl6T)soA%T5-nrrbq(5xxJ((T3yP_TWDFffcufjL6
zB&*UqbQM=&0HaI<=aH*ky=~Fbs>q0QUM>opgJ(Qv6xwJz!A9^(mstWH)9Q4LZ~Xk#
zr6MHAjW8W;&g8g=R}g!k&tHe51|eSITGMoGHQ1b>G)snxu^e9~f8~Zt9YnJkBTdW9
zl?dWqxv`mYmBJmR4%b)(fHfwFTD{_9Q}Be%7PXO9aCDr^#+Yk~7oeC_sKr2I(L*tL
zp`rV1Lp~851d!S$y#1N@a5)DSFLU6a&|y+6+BiLsAY~PM)yu``43B><T*92y_}P>2
zN7N30$*>F8P4P)Smx5?R*pmsJxd$!#$^p>(YZSLhE#uu)1);i>9c{H3>y}=bp^Zgl
z4(~VgM&GiI6V&LRsyuzrw=a8us#p{el;I2AYcfXK9COkVx&_ZZWF$ZtXRghf?5aPB
zcrpKU!0gdC|Ih`amRo|Tle3H4=>>hyPyYEvmI4lM9`Okh5%(1k0*16Q@~hpI?Z~rU
z9r!E$uP!X&L_LSPt~v%|=b%Dpd|Q?|$M}*gp}8De>6KS;cGs2Y)s@pB1W7D=6f2Ri
zS>0j9y=@QAXm$^#!~ng+kVtF^g1>)SpMR!EAx)jI%(1gqmn?SOptvGneIA>?%ayr0
zYnl=RY-*CV!!9wedDFnw?d)33*tTU38A**1KAqV4sF268oTIw$uw8wJnaNTJ&Jlxo
z&>ELMj7B;($SxK=ECTPri|sk%g1poCUx$}6HG+R;%+TB@!_7$EaH2?tn;Iyq{6EsZ
zIv}dG`}&HgfB}MnfPm7CN=YNlP|^)b!w}NaVxS-)Eet6oF*8c{7&HtGBSR^j0z>!r
z%)Qt9d#}9jU*CTS$8%<$^Tgh3ueJ6~sQ1Fr05b4u2USI8EIo5hoRh#D#?z2z=LhI~
zv=!Ut3J#@&^M(i2jb+Gl8iwyfo*uK@j#ISPD%~x{)@Hi)Cbm>wq{O#4f2|j(XNVv)
z&qoMwgl^^t{`_p}<Je*}HmJSw4YtnZ?06V4wa}V^*(?f|byp~6F&%b2Y-!_Q)=#Jc
zZuWfV<PUL)D3f@ECykb!zO|#P%viXCXWn2yo}S3DkuXv>{Y6AGJbrAiw%Dy-tf_T~
zcDue3m`vv`RDIzLa@Z8iR)xopqbT!MN#OeNXkVl*ovXjErDK0!FBzL{jO&H9c3je}
z$E)AvEZQBahHgfxB&-#daX5@|Pwq5qa<x7dUMykk_ii=M_I$gsFMZv+_vT+Xvpw-a
zVv;mgYpN^tlPsx(j&2_PV>pZAfIQ#LjdoA?Ih(7;{dtnQ(^5L~z7il$b`_1d2qJjT
zRjfRf=@nH5P~p(^O}Tncw@F&YWWSA5KB;h|v~jcg?dECQzJn~Yi-j0(RCwUcib07Z
zs<t;Zr!X|%i%L3ig<Q?LdAh3Yz1pNjnN@Gf{6DNoTex&0D?cueqk^Q1XV|Q*Q#_rT
zH$PCP6n9L&xg&>MZ#;aJxvwo2!W}-bH{24yhCn!*6}qZU3_@H2%o<y#^P2x2Hx^5#
z^n5JZlyU@^wez(*KLOQz8vt^;;GGB;Lz-u!g~p2f;8U+A`)Dm=Lu6A@3bLYIJKRv`
zohWM_{rFPc&Q+mte9(2gqyTfHZ)R!V$>D*$u)ZEw8ORY-x8^)(D=$_g8$i{~|Ej6s
zqH+-TqxMICCvQqJe>6hLAx>{*L6}(-)2mOgN4;@|{mMj<@M!H#me+p3eH7Vlcm*C9
zP=+OySSqabe_?d~Axe@KIa|WDA)Q}*^c4n2j%gxQL+Fo2+_Y*JG$`BldM84PrbnwC
zQnzOg3Ixl`tQ-f+#w=3wj*3!nY3-SN0BnJk8^35{ScoNb^r&n0M2vy)F=4NRfvD$Y
zZ!aRgRth;Wh3o5Un@{W_M-FVgl-$UCFp$}$lVj?R7W~X0^(?+r*@8z>enEyuWHG{W
zY`dnra}Ddb`1CyQt?|9ViVgsoqCxM9XRt46fJ0Yh0dHi%ghc8l2)Ey*hHe&)l{|6@
z)N5!>7pl_>>KnYe7kT9lxw0|e?%y-*CP5_)ktT<d(IDUU&nDwc-c}lxD>4c3nf~%>
zuS*QndXeG`3llp#q<+4Q2FTX~=dm7&<^~5K9yB-aeJqSd)>7M=4l6gmAKE>rTB^`@
ziQL&0yKNQ~y*IM7uU_Xm*WdM2Jw*dP*Z1M!*Qe;c^n4s0o~uR$na6T$W555At*R;-
z$x#?#<-OwVJ8F8Qc$gs^yrB?fHV_0-|HJm_2&2O)-T3@Jh?T>y6u+7;cFaywNP7X>
zG}J8he9wT9aml`_)~V2xdgsqw=e{a|K`o`bNt+gnX43#UYD{*xA(Y}-BMeSRB$RA9
z5d?B<LTF(#lR=A0xF35NQVrwqHQ^AjSE=%K6Fy<HRcSBHt3crAq$Jo8CRN1;BuQ{)
zsE@G*))HP{fKX31_u=oXF7J!fWk;n(9hE7}y1lRWb#HK~v0J;J&TN?#ZZv@Mo$OYI
zmg$CzRhd?uCpzP8*4m3tN=dB?vxia}WZTLixhWc9kC5mpm5ciNF4tkMs90TSa)!ve
z2&3VfMP_waz+q%#ci=ZTOIr{PPPq>?xw5*#dxoDyFZ3{9{1fM?4h6vnL6p}V=G$G@
zWh$C&-#VPD*z4KDutY9xv)QtPTY`PQD#*rGT>nMijJOE_X!q%5P;<2P0<K6ufwhl2
zC?cLKj2k|+`x7TShzksGt{t999QNK8=WE&=%pJBV>0HN8;^cuYqeUqVvrNh9va$8J
zKKSdhh2HPt9Kl9V2+T6%DKx$k6mRY)6u-Xigqt@yw}6hY8Kw2V>03yL8r5qo2?vu_
zY?mw^8k~J|N}a=g$Y$pC@GE=ZU;Yt2gT8~vG@3MBF{~7p;;MF{Yl@(n|MNX|*8zb3
z{+o|c?r-?GVk$Gx?>6$tv>G@}A+?pOQO7$`pH()6GDs*FrsX6upzD$MUuheJ;BwVy
z0kENaS>k?^#9Mu9^*0%qb*r_H0sCGQzPBPvvOnnB^0+LzMvV%gM(SU7_T2)nQg(SZ
zI$_)7(g2&t<j;pkq}S~Cm<w0#w`keEufLy;Gh4~@n(XyJjhEVM@A5@*K?Nh}=gWxt
z_f*x25rI2j8T53K!gr?<M|7+8tQ~ILR3Fmm_j5XJ>R73i;M%btNafoUM}7=Cr}G{^
zf%~w$s59A}3!K^i;Brl-^9-#IYbVUwB+((lyY9~*v-GPdz5~cprRk%mf0rEX&H_XZ
zoq?53b)_{8IGAK09b{E`3VgGA0J<tNz36)Bhtzsf7h$^#pdZ+|x{dkFCMnS0c@`lc
zvX&jIeh2h?4l8xTn0m`k2*v25CWN?l@Xquwh-Vz`G05>h-zjD_s_}AKO;+pp>*S1B
zZ@<&pc6Sk5?lu+B;m4cqIcRwAvMiDTn#!s<L&(L=;$XTH){N(@Xs2eD<7}=tFxe+x
zK~&Fbz=YrjZ05;EiCfLW#v0%RZGU2ENw|@;aGXJH+nPMh?+N(L5yX#+bW&K%HCM`#
zM2Dq+fB<l1-?XL8_4}vb8PFy}09?a$th~!+p6%mYkNu;#T3!a(*6fs#(}uG#RQaTk
zj$i&gmlhv^5c6KPYw<b$Sfanm0!sJ5Mo(gZ?%2!M2;elRI|xV%*cws@)8d%(=!g1l
z=^HLa0lBi_XPnmkxp=n1k-kJu!`Jqf&+hN+VNmoCUPmmTE4Lj+lPe(k7X%OP?|=~Q
z8bm#HD^$au(ZB{IxYq3#&50RR3|d)p-q5=VCOGeuNbU1y(V+zuKxNLIFTQJlP3qfi
zeP6JqMqSsOQ_X$w(`>Sd8R|RczWbHo=+S-VfZag}l`z>V;U1q6(Tlo|fC2mSn0DBX
z62}G}hQa7k$~%7u*0LB2pTH`|ApBI>cf_>4*NrZ&-Q=M=#o@XUp3wydph3<O=M9}_
z$4NB46q8A|X`!_j-!VYgRNs<~+~??5FiGa#1wEY1jyn3I?#5_mzTd`H1`6=kfm0Pf
zF)bCl#F9_@Qud}xlJZtfk>eW+(?nROoG-E|hG=b@A2gclSf5Axz&KnI%Eu}#nYzB9
z_loRZdS2U}M8DU}njnk)1&+#PnCJFh>5O<%L<@35PJCZJodJ$%Xv({O$!~2&(#Y#C
z;z)5Hb6$@6-lJ`55a~qfm&aqQMS$z`6ca;!B1_MvQA(qE&;<MR`y$^1H93I<t<F8Y
zMj9rlW+sNe?52E*OX+>{;shjS*XZ!)#;)~{z@Ia8c{ZR(FgK#Z8a+?VZ$mf>k3$ys
zJ2KC0_Fd)DBYX|%v<a!DwvHY^C4Q^U?gpLHiX#$unj3OxrOtDb-EEc2i0<kKjfqOz
z_^KpVHrmPp?RuKjT^B1aKRje$>{kF&G-87<>W)2ril&ZzBZ;W`X`Q*Bgmbul6-*<F
zDr7u`@y^OKhJy#Xi>_VnZEuC!3z|+7`$T#t%vO6Dyxh}lA>EUlmV)j3{K`ydSO9w<
zYbyEvPpJ96(Sp&aepNB&O&L<g{4<;nf&mkJdXL6fqSN;prt*2c!OmkDN{T&P=F73l
zRkTZBi5mxW7a~urzOY$ic6C`4g?j)3GK}T$+@`N(vJJ0Fh)dA^a6}lnF=9aK>@=Fa
zJnN#_-N&mBbX$Gz=wDqW$9lqjM{i)632;3ejWM(c(92htMkMhLPxh17R$-wdfJu8>
z|6QxzLoKk4>0m7yWF`$(#-eBV@mdY1%B(v=-BIBAK{&P`@+;F3!ed0aT(>n6K1O6$
zlKE01*evT7CMCJT>MzMUGv;cfn;~mV{w_%gsDicmbq>e(-aK^DEGQJZjHg6S$J7(q
zC+ZrS<Pz`2djn9p%H@fsMu@m@b{JH}e22GwXQHzGVvcZ|VT8To5|vz|HA@_83rWN1
zdu9NsI0<7{YG?FaIugmE#IJ!!<Yv`GZ|@@q57mKksl<38kGbyRoS4qVGrO<){ng1Q
zymYtQ7vQJhK2i-%**|8_H?HG1FXiNbn$KPP!KD75G&s2$jdl&+*j&;hHt&9}x0OHS
z1m$Vkb=!O?yQm^5dU&IK@J)PKI-ipXGK<Y8_oH*5@48uGy?~!%?`oUn_4V5E7b9#r
z^Iaeik7M<d^@)u%w=3ZmyoS~3B#B{DF}BTWuC)E3U|qCrqCe&N-tPsp)FP?LiNpr!
z{`_y)1)sT|uVamipOC2^SU3c^h41F~Teh7kudZk<NC5m*0p*C<FMfB9ZF;!U)lx2r
zD*wY3Vu^#M0eA$(0hv}edz4if5>sP6;l=H>-qJh_67Pt5yXQ~$wty4|Umt$Xo7mU8
z(T;3zOK4QOe!e0cH>WaxydQMjr6vRn`FK%j)Ok7|lXC9IxQztl7+{JM^y%#HehZux
zMD>=_O-$|8>IUI1`P@oSH5~Nf7a6*Z7ER>wMr(3Fpz!nP^k#a$4!okH++g||?qe=r
z9qv%SP#wO_qLQF<V6M-}o%&ozRJmPedqDhJjy-1iE5LbY720R3gWAk3*uS}gUd^1B
z?01AF*E#L?mDlq%Xxg*4>zOx04p^bFOz`3RVlnsTHzl<<3rcYPBzxCE{A)X4gzp#4
zV5|z&NDp?s2VuEge6zhy&y)F~-7th-s9e>o*(M^Z&fBVLTsf24x4y5WFwNS^Dnklg
zbvFk!MpI1Ud0R!47d)*~V3KX<@Db>Iwyzxhn4X*Is&BM-+6#ENPk^4m<=54hCWW@o
zqiKh!{$>UR7yU&gQPfq2V;YKuWOkc~?X<Z$u9fxW3gy0uQ8s_F!Q~l~fiBH%8Mu_j
ziM-{zVdoC29sN4xd!CCou|{X-Zo<tz?6TNO7&@5^P!|}(_;yh0i?0)O3l?;T0Y9Z~
ziAr}c1A9}S)L&Vqt{k}9ici$~Z98XSI=N>7>yp~(QRc_O=1+q^BIDbjvWT>_G&zeM
z%hX)|tm_9tEOyx4_;M$hRO<+YkXN1+g|ff4kIpt7E*~#oD^)+(b2=(b!M$zQqlBv+
ze#jEcEaS*=XmL|AN5r<xV>qUr>>|GLEtMG<Dkvvm1Ffz5?P?s%5B2wp*-1Dq!NZSw
z(rc>6@}Yt}=HL<6OMM#ooSj)(vvA7m9LT3=a}9NvhV?@0wE~rGRtf$&zGd42tj;5;
z#H$PdbsoT0Kn0kewe8&7Wr@AwcDTPSycez4b;mWJbh>X0s>@3(Ym9<P*_)3P8+O7|
zGQHNXDYg!_9IC_U%|JV^XvF-GZu+^*)qZV_Bp?B-U5Me{9$hOt)Kw53<$EiT&SSg&
z@-9{!>?`MwKFqe;t-p+n_)xkjux|LRJkyds{i<`oRxSJDeOTqZ*mGJ3w3;x(FYLXP
zuxPj_(EfZ3%9~gz_^nRpwHQ_N&dq6~AVV4W%bxpn_PD%OS_hT3mkNi{HcrEBOr2Wn
zFXA9V8g-U~pxjZfdC3r4D&AY3Dui-ki?XCYEOP^cZod!y9j_A5A)4r9Y6Po~FRz=S
zhXw?Dt<Q^hi5fS)@9}av4q#QD@aY&Wb<0DiXEP|{vYmw0j+lL7r!=KvW=aColckjF
zjeM5sl%*_$Xkz?%^9;9RWOr$+golo@=T&$lGktq!NC$e9jNo2QsFLt3D~BomhoBdN
z?&$6ps`~r!HWUKpG0%+-J=@vPw(ZQV$3IAezS%@wHwVV5a_R1z{Y33bmV%7RgC-FZ
z@hU$4dODo$>P!9}F$-y|)KnEQ%)6U5;S6B7r1)I_sxwP75iY=Zg|xV=@u+&~lP6_H
zt&88D@^&*5RzH<CgZ2;hUG1K;GA94vE`YQYlipc&2{LBMHL-gk)Dc6Q1PI}J)A}+-
z)BYb~Qpj1_mxf9T9ri&?W(KP5w~1R8gi6=oE~%!f^U)-K=0Dh^KPZvQ;=U~)1koXC
z8Ia9N=!%EbQdE6VatNKg%0hCAsDbPOyy?l!s!1Y`MU!IyeLI6vsIFidXy?b3*D8g&
z@}QoUqvFXyA|+4D>^>;vGg@#20#+uo8^=`+rECnbKiE-_4%+pvd^!ui$PoO=;+azY
z<25EFSc{JEMmG(C-_}xV-NOKTR#1`NC4T^)X^+K*sbm<>?%7JNCnY~))F&*9LuhsE
z!I}q)J|yzCIVBzh@BILw+FI-@syEVhlM~%d@n^2kMgLig$UuWqgwy(|rL$|WwZ<IU
zN>5{kGU5xMHqS13YS3Y=nTUA(W;fj3^e_-K;-z_t@B0Y@yKPZsah>KINHP!(Gu4ZT
zLP)z<u<Z{=Iaz1kY`Ly|wR}e{Et)@vO%z|oR;L@fw15Exzye=>&xL+ap00*T^P64m
zvmqlSlD=V1P&DB0qh;($?6GcwQLq#9{)rbZ-qttu)a};lj~oF7!VbUy>;Otm)w!gu
z8LlsDOC?=G`0k3JWuwFN3c&H4tL<*`il1j|mUQT6XR09Z8u6gV67u>)O#OY#F|&2g
zg05E*1xt7W9!_iOx%7D=J4=!?%SMnV(y#EO6PebFIV?kT6`5figr|tE4RXe27!%#X
zWHC7jQ)_dkmuLzpTYyJo%F+Kl2X7PM;}`3D-%JXxO$J{fCdzoE_D6nu$rbx^Q@ji$
z)uh46ga(CzYmYuY!{6swXt7`$lT*0ukJ5$~(!VkYJBd>!7fS;#yg9+2`@331Hg^l~
z&%_+1=P&(3>pFIQY5&U$;5W8tw=cNbSn90O-|5_Me+IwxxCRnDI-v}YbAP^}Uyu2Z
z3&Cf)f`l`iX8w=g{rgAJaB0+~Mbc4wZ2}h;fU4DP#LT;^9vw9^xG1D}Cd>~<abR<0
z!@m86%>F~6`fJh^5@!r7Ec|dWogEi>-0f&u(OJ6*cS)KAjoC?x>*7Se_iLm5X3+dA
zse<hnxKg+0Ep<yiGyY;Q{Qh|}$N<BX(&Cz^`70*H$hQ@N+hQftNBijLm4L46+groa
z>J0jMi!aUm(APm{#3i0Xj@sGU^$s!)3%=@>3k&lcd26KqV<6=5q$E40-g~&~-|B`P
zeI|%Ywu6o|`i@^vwMoR*IiwVCR>sIfQ7U|bD84GXyL*!i`f(+^F;YdQ(2a^kgNaW=
zCFs={#;xb}eX6Kar$>rXPSPa*8WU^qCCV=Z%nJJNbl)rqIP@)XpVxBtXbcN`S{9U1
z*sPXhan{WB1+~}FuFW^lo#NA*o4U?~RxExGFuWx-*`)!{X^8t2o7C;*c<u+Ik3RHA
z9*wcH(Cg#Qi>+2KcD$-`M}Gj#N7L(<=w^#2{-*ByHbpYF0f4?NF-SMuI~HTT_!`J(
z?lt?)6^iZ0kDR_xy>zEf+l}<rPMo&taqwx1k8{RaDVOL(dN0(i%J7$O<N@Z9=K9OS
zeKaEGj@Qb<zziTHX3qO{ByWwz>_0uM7&kl89=pkJ^LATeCqte1zfBFW2Ih3Z{Pnx)
zZ7U>XwX^EhY~_mGo@5Y~wdoX)m`#J8jgQN1-fZ^{vv-FF1M*>Po`$eO(0Qta;XxZy
z7pD<BFNxi35d&;dp++lR@K6PkKe&(UU(BRm>n!*SMT<U{-p)_P2mC}WcQ!S~{7c$&
zrTMX8h1z<hS*#ko$4O_-PZUT0opb(s0RR$?8Hk(9^ly0wG8N0|zL}nn{`yjFoF)NL
zO^=&*;ApK_6gziGXW@PP@u-&ROBJW4vb^FI4#O658m1elx|M{ZiJdT1QF3f`RQJLA
zt8JDNk00sp&RV1UlP{b6T@d|;0S*p0KCmR0#k^j`nvnFFC-_%hQo1qpUjB8*`)lp}
zy0a;kO};QYwZ}8m{FPIRgQSIkROGG&#>nW$Azz)<n6{grw35bmTg#@mX`+o&784yY
zu$R6M_jA>3noI3Bi<6?)XJI|BsW4~^F|`4I>aDPilQdQ`*6%CNu5`+C5|Wd>X%7y_
zOEVIleWI0?21O9h#JY9rS10gqtuS;1eK{vdrdkpD3_|Od=}{)o%dj7}1CZQ)Zso_B
zrBy~Tw>DgkSe45oTfQdGzj~NWabhY2-vHnC9yhZP<onavG=Ae;)li->_W|9RQmwR8
zw~@!k3&<nRa>uzZr7;6bp^KQ8yh+;hK_jaEOCP@rlE+@&abXGcbFR*Jp2=c?&Kmv5
z0c|m|7TE2qMZ|KZxE{|{x|S6zR6>iAB5Acm!B+nt1C9TYokDu8O|B*N6ZQC>o?|N8
z$RpV$LBpD~jW6dvVkUVni2Wd3M|iu+DI4yL&G*&myo(^4OYc$p3K)%OL^~f6=;;BA
zDu=do;Od7^Fq~@7RR&IeN8tRphGkq$9a&n{%(AEvUY&}yM8Z)Uz2Bf^ytP~F>9EP#
z3zn3;8(Ob~Vn2a4-j;>OsPThbYD&M&D5|q+Z&#a@5>r3gR6Bl}=l$ns_<NdqoB<<f
zdb^`;xOGx7{Fa|(@{%_0G%bnPgnv!Q9Y1@d-;qT#SSpFn>?jFbMuUPzCju<ou8TkJ
zuFWfrPJU`a-0<Qii+ORtUc&&net~<hA%7}=!Uo}qo$>+k^Rfq9oQgyn<iY8}cFWb9
z@%oJQvSqHA)sn8CEcs$6>MG)E37-zX*6eScd0525(FhlGgR51Bn0Og}5D#4=G~i!z
ztUO7)W+wpOK>peb-o$%teIJo^gJdtLY`Y@kX!qx`qVGmK5^lu{HMoruUSM~Mq$D{<
zpx0Cbgiu*}@B%9aBbvhT#4D4Tw>G+t#p6_sE}R-vy>Kn&?;6B^yoxi7U=2$@>w&+k
zeWdDRw5Yp521C;dy``Xg0XNfU5O+3V=d!;@{tb7q9v#C52x<3($pg-kMowu@?Y`!V
z%)WU1GU(Rnlr=ZLLEPcu40Ld}<|QdiDz3<DGjILfxk1$)MWWr`Ppm%(nl?@o5E>~<
zC-`MWaZGM-?$xk7Tq>O3oZ3UPN!gD$)vE9Q^j^GbkM*pJIrhqDXuf!@J5Fd!!lPxQ
zR64&l0POWo`?4O!vc@;JD08OM8&!0^1@x*`WZVQo%OAu_HURGa{4xGZoeOGkBuq$h
zGj7Px?d7fa0^1`u60c?*VUF)e#In`2zPP&*C#X&Imgv-pMPSDmJZLAD{Ox9eGao_m
z2yKAs)I+d&KLbX&-ZNdu;m~9MpH;71Flky6r+s}F5}<k^!Vn0Ct}j=xaL=vo^dNG@
z^Yz%*C~>X%*tqEC%@vn1ZROp`)?nMTUCV5ba?csn$;hTxF`J7e?z73aqfL&lQtQ#I
z>pf35aSIi3T{hxp@7~Gmee5iG?C<_P7uL~wp6aYyDKF43sxA`s-uZpTOJ{iF>HXe!
z`yfxAK&$fFvsS9xpI6uw@Yb&3+L=PLP`hZQ6eTZ$mnz7S?YjvXvJl#g&H29OTDsLf
ztoP0;<e3y#L{^~(9m+;+*C?oY9LnN8pM8}U3a?u(FYX<P&k9loW_{TQ-d_>JF2a5g
z$k98(K~+r!0+Ew%c$4_Sno;^?kz~0~Wh1eI(|!t(0*YHJ$l+u&ONME7LdCcu@_oVi
ztH;$}uD9P>dQQ=?#i6ZgBvq!oD4cV&#aGq?`vK=M4qv?caL)WdzG_!8GG?I7eHG{k
zM|c25OU&leJ9(j@3*5J<^?Xf_rCYrhS|AP^RxRhGuBq9%sgg_wv%Dy{*qq2+q+we7
zO`%=V?6}K5FNq=&{Sl)AIr>2>tcR$;WycPxa*6Z`og7SId@OLW&Gef>UFfv&&j=vO
zZiqW7*`V21R{z-p3L@%7f_cwgUJ}$!53Cl95Y8VsKIn;JME)QZJ=}0pISqoVw=wSq
zNQ*;E*S&+Ns6lVNCQs-M6wf$nYO+lD>v<_4P|f6ifhzQfzup{wfKJ<=cs-+U*%Y_l
zy1kk?S_Avp_HKAk)g@BI>E5q1`=3c_bng%S)9{S3GfqdD>I~LZw$F+f&#{f%UC8OR
zQ9|@?X4k9;1?&k4MObI%(=81@r1EPo5RTf1&_b&|?{(AN3ao{hhy;~vK#p3ja&fLv
zKoYXcJIWc3KE(V?vs=pfphhELqZoCaBPuB};<>Dtw_aH4PK1k2=V<IHmh%h3D#b&O
z9(U3EVM{G*ijs;BU}xWi{g=&!>3&yB!Gj4#buqFxjLXu>_5&0j+~Q(9+PRQ#dvegP
z9q;b?jhEzkM!{5&0X1P5irJ2Q<0kZKP$IA3GYEnhaBR+4l9Y$W@L9Sv?<WeS6+dk8
znH|#Rf6(x%t;(->*^+_(G-R-_m`wLtiJj)|`=FN=+{rJ^T;*9bMndD@Qe0J;*C<<V
zSvN<9TwN*XG+H}bAf)JfZ^&Vo2bXNm8Z;dAU({ym%-wJEAF#z_Xw=C_LsB*`8r0AW
zKmM@m)LKzk=pixv<E3`Hc>PB5!OCf5Iq6{on-*rE^~_eB8QfdA#ZCAVjo;6zkERt6
zOb^SMiZVG#tzuQW1$OM{m#-?$Obcf?Sk5IB*&f~qsxH5HYy{t_0Z|e!%JcXz-u*b6
z9R(@#MNgr}XH`$esNMM>NcLK2p)56b5nm6dUSa>y#9ZnXkY^>oo9;fV=gX2a?8jI8
zfn3A)j>Ci;e?IdwhO_CeA?b0VfgehP`^C<Y3Qez&=2ZLfI|dx>D!I#o>OR++Z{#Tf
zAtt_>qL`p<S{FJ0Mm^p*^|MWtJdf7r{Eh6_3wb&Vk0G=|(1c6*Umq(D)s&RB4%Xz{
zUjG`xmiEfPnv_Otu&WokriY~V6wo-TI~y5tazEf<y?-SOL1~$>J9gv7i;+c^>G>y?
z5v{1%H_z)$g^#~Vy}jLR_E3NAW6s4Gnv1Z&J#F_C{hi9-({*tkJ<BkmB(SgMaK+u(
ze!jU#vHwNQ&{KDdhvgW-CYpa*L$Zo|ky@~=Rx9jcLAIPmQ@qf$+t<QEeb|xnmyhn~
zm-RUjMKK|t(-uiEB*OM|ZNj<P{z3U3*L{k*ks2b6;Q=^h_k(j%c4GmvbnCs|C}^bU
zPj+8Kb-~SctmS~M=oM@9dkUYmF2d(u{^RWY>qnr1cw@5IqmU}oZ$aJ-ozt7=ByK($
zMzK(CJZc?(A9sIR6e89`3KJJ`HI$m99Y3z8o@BlRyZtp(&<l$yrPgM9@#h(#PID&T
z|MMjd4x!Nxc{=(nT#hua2TrrRHpgrRFED6n#R<`{!VN9G(t`qXc4lINu;_S$jj1@X
zpRZUqEQ;8ntS(1&uioVTmyyROan^G+H4hDIE?t)py-K(!<zX@}xXPWmQlJ|>AuD+}
zuT&fTid%S`=W1L`F-K;0=}%$*$Jzy3ToV2}AE<vAQ0<fS*5_R89pmLdofF|@a-^H2
z_iahji&T^t+xqh9N}#9t{%)7k$2M2#C}bGiq`>b^d3NQoY^&nxiVCc(@l3?@zPW!-
zOSAoD87_?K*um>h6x1g_DH`w}%Z8rkODWxmC@VA$F4FGfbR5M6vG1;zBHs>gJPg{)
z+baEG+0_!AYdhN+(oU8Cy_3h7F`+e6e{Y{K?h_LPIlIqG+pu4EJC)YVGL{$BvK6!V
zj3PzOaM0!=g^J|UsHJZYg(Uah?*_Dz_<s8EaHe|@Zu6+sy5T))mzJM9lQz7_jhSb>
zK#*xS`LmQgsuifsehZH6^g)6)>^X%s_^H6X?dYv>v{kWeFWc<Ia$aE^wfN+6Ce;tx
zf_f&BI^6eXO=WPpB_{{npV>l8>;8yHuY-3+WXDy`rdM73;6Wsd55U`PBdx<hjMZ|p
zioDtEnQqzge^9+Dl>dke6JFiVeWdn9zU<_rlOdZkCgLD_S4bq_P?e_^xkpNu96FHO
zyEyIqMy8M*8B^m{hCAq9u24*7c=Qws-7`-N(MYFnb%%sPUro;N1#rahk&E~WkVm|}
z4ulXJcM+s<F5l#-8m(7aHuMnGJKj@q@3en^Zufbx0c{ajCGRi=t0wrF?_(Lb99EYe
z&6l|eU#OcP5yo!L9?yo*7MR>#y0Bw+xLWHoQxpR|v%fvo3*8SiKzZbgw3dW<t{*(a
z@CwuT)h|ycGJz+rNn|NB&a9N4hnw+_cB1-bC=P@FEvxz?cnm*{w+;HVLZ!13{FtbC
z?qEN0s@!#(dPTk9L?q}q)4{Flr86|Zl6byZYE@b{y*%D*@pzOaQ6~ojG4T9+CO7EQ
z<`d(YX<!iN5*Jsp`K6pKJ=}`=KK2_(mF1-?o`bJ_`={5@=$)=Pw%_A{DUA%jQmWmO
ztR@=5$fY4|Pr=aXG5pRQul+?iXxsbqhCry8kc-f>T7qXyQQKP8{Jy0d*keBjkAVC3
z+9d)HAB1|w+?vZ`fWw?tWJ%?57hqz-<STJXmoeeryv`r1Cn<uE__y&6lf3~We(1Uv
z%zTFF*5r__8iPhIvtC$V&Vy7bDWhVI^o-+fyWG8xYEpZea%;XO_U?H_dZnAjPEpr!
zU3)z-i%)95oS_w-(TczF<(Y){8Ch+k@fk)XJ{_$F!9KMEzlX2v34VWObvVE)d1+V2
z77$*qU6|v9?KnA3!7h(gA0NG%@cdR}h_(w6b!Ywn-S^_}D0J!0RyKp!%Dt|%sSXXY
z?O^mh5l722cE9y1Qj(CSBx1fn2I6QTTlJ?gR`x8xYK8jxg5xfqo&Y>FS@w@3^bZL9
zw-LC}O@}}7n(ulC@Qw4<DU45QUK6(tOOz?KBlO}mro5a#TTsa8%7|w9<9G&_<MsPm
z1-d2x9s2ptkKQIwyu^U8=n0y;@4ou|-ulvzE$r*dOS-0PnVW=rwTE7Eua1B}<Yd=~
z7tF_U7uO=lxJ&UQ=twpH@`)jO`G=%gjcrj)b>0f$l|o}kSPe#n*5@K;^iY*msEGZK
zF9@6dOQ}*H9O8GUBk62bN4;Uf5Q6lb@#7lhVtZq;4LF_E^B}Qs@;iTvlK$BWr8Pm+
z$<wBELs6Uf0*z#C!I5cm9OA1@hxELWeBL$U0CkbZw^U~DiAsW&jr#I7@>Y)})-OOX
zBuO(VpHLzu`BzVT*k^wr&Q@Ed)YK<__4DmupyVl-cx2J~a&G4wLlJ~6^BUpNaKLs_
zT^jo{NEnX4+*weTO??AFhKBudu+9nzIPpT%@bcH(o}ak-K)cTbmK%z8v;-^Ecf0*d
zOK;O;3KD)Fr}<e5iUI%LHB;{a6u<MD;>r2(>j76@;rUe;&%XQjfBf}l@LO<?MyPE#
zQ{C@K)ISd+kpou*pFOSl&w&2Ei*()qy9j?%wAufB%HJ7rF#CTz`CmUu+u{X~)RRhl
z|MMvX0NF6g_$+<pw^8`#J{kP@>>}JXo9drm{TAX&2jgc#_k$ZGzi&mqK6y7a;hv|0
z@Z1@2LGTva2bXb4_np;;{D6DF|KQV;t(E(q{ca3eAb=EE`;q<S6BsWx0t~np?d>Tb
zKqaTR3mi0e+w|)#wtkL6LoU+dweOUDNON}=KdqSnpcUk+;K&*CM(vNvLF(`dEZ{7n
zySD`j56QqlL}wypO5=NJpE^HqXY{I?z;_x1qEX>vh4&|yPXc~Mdd<z>B@|8n1URW3
zqu%3Ac(o;NZ9w1L$v3aUamYfi$lxJfNH>Tug#vHFG&!egh(5<DSqKJ*+unkzM;qSH
z@w$i`vu2%D7~AR#KL1QNO-rxU@1G$l6eq@AS`TalCFI%Sgg5RR5F9P;fx@Rtbx|P9
zjrKmDI4JQ)bK`pmGb37X)9q*=b#uWhrTFoVp`a2|AkLi4^k{QHUy;NggmAAKLVvx4
z|3i?A!K_GrL2r#ugQldvBaioxTxBV~3;Fjjgg^+4oa$-q&Xi?xWt=B&S$<yh+Ud=g
zeIOYZYCADNH{Rf>hN?T&6(6`a6vD4PWL158|0tFZtxlXeQlU<rrM~dezp5TI#g6CY
zF)DrU_A~AJ-AA7ylFg6zFwu7}ThbWToTuUq&+t3iodpj(F~x!v@Kvuklf*>h5*xLo
zCqlVB>4kS4yamARy%=EXaX~;y8SQKeYLn5G5gn;wm=>C?oyjP^<A7PQ@sEjj>6yA)
zume$w&iD?fBkX?{aQ|6`U_OQr3wZmjgfzZ;&_lcv$mRFd+s5Vg{8arWr7Tui{nO?w
zXX65JEq(SvG4i@$`P>CF=8viw(Pf2d9l%IQbL@gzyq&gylgVHOA4=s8{P%@+gZ5Ot
zq0OYCJ^npkq`-Gh&t3xMHE<Ht$$ocF-#5w51X6?B|IN?9?m4JqM%-U)yi;6@t@SC{
z9bE~u#Zws)jp-$POOt@B!UWp$61}(`UIbhkt(qd3I-Fy)tJ~$;OlNBru&G<|YN*;1
zx+^#bj1wwi5L+V;76%Y3KRZ{j9X5Ci+6vU6lgPf`_`cctXVSo1B}LA|TO(~`7STB_
z<wx)6oPq*oaIH0ghra8e9G7(O+1aG+x|IMMz$*VZ;esvAzjIFTm{lX~I`!9YPsd(}
z&y*PF%tVfu0Jo#el%jgOpBoz+l|wY5R9V0qap?UDNx?qe^p-8vBPKfDiv9;sgTK<j
z&swFCC?(dz7c)pShyjwv0sP~_dD!zf-3x0!UF|qjX=#$0TI`hv!ovAI{AVr!ni(aU
z%O}^45rHP!0NXp|LUYRSOTY|&`fa@0e5eV(Zh_p80%i+{cs9UCCVo+?d_emHUb6Kz
z2()m<)A!<`KU)F2cGPb7;02Z0Ev;UT9XC7Kn;gg~jh=vk>XhAYQmt@I;AC85U@Ck8
z#^oN}86atacr@yXNy1ag<O6`gSw!J;e0v|_2sjB;cn80;7(}JN#rS(I)-57xR7Vta
ze65f3qz>(uMqC%yx*F92^`N^v_4B!~a1=+Zxkk(mD4#M80e(1bXwBsy@oCzv;&xv5
z5<*PUosq|r5i`N(=wD5hMFY>Rg4~!MmkJ6D;^%E#Dp$||iN%YFfljg$I6*f%s9Op^
z5TVmnXM|lR8o>4xdP9B=XkwhKd=eBN@n|0I0kZuH+dz2x9X?*i@%qv5YquHo<_p}k
zM$$lwcpJb4bsflttibDY$P1|WRifFYGF`!47+DJA;3UETbT8`F#pp)OnALTM@A`v2
z@eQ1&EKz$vvHy6zB51#2v*EP~ChEM_4o0iG;7K5N_d)y_mxS*}9~iA&X;AMrt>R3)
zIws=~ZCTKXBms{FtY=UL`FTSkir?4M9|VTrv_Z=tiLG<8tAUWU&>=xffik4tO`1B#
z#1*bnNHQCTcmB{39MJLz*}DwUex{WnC^j`DY_4bM2T@L(?%F=HP!Y}iv+J_q&U_cC
zR=q=>x4B`S$lN(J{<zp{jepEPQ*z~tR@?r)_2!i*?%de3n_nyD^a6?t;yv?)9^k%;
z%%)7Tu|)1msiyeMB!#xKqUWNj+FmqiibEr^hRy6!Lwa>`QwI?)61CRtloiAOrIx4#
z3Kz*VhU+9smnXSQbGjcNZerSNk`~K;=A{Q>ffZC1<R0^xZ$Jq<ApQd)*q#$?#y?i&
zp4U@2==>Pyz}x4mosWxPr-aStOpq0Y>YHlKZx(~SmSKBb$8Y7&yS3jyRqx?9&G^S#
z@lexl1E^{`Kznc_>+z1h5fWFDZ>Q%x*}D2=@16`)W?k`6bG-Mgq3|B9vGP#C9woQI
z{lJ`K;LBU2wc{`vs={;+xmxoBfSK-DRq4=_vU+CZX6up=xp1U2{c&yV;7>!_)T{Gp
zo3z)JnNQXSeo=2beF23+F8Pgha-4E~ly+TYEwD*q8V`4_oJod`*lBkz=gRn6k6vU;
zc_brJu(;J#NAJyh=FOtE&TjS}>-$^4XMqC|HqyKP)7bfGJZGD>PoaLJsT2y!t^#Sg
z{}`CMwb!fi(4y;rkXo@AqD<>MT~EC}gw$?>9Nzd*{|;HNthr>2Qh2__2+q7?8s%u*
zc*PT)vxje2freQgX9hIV(T{suq5?Ovlm^$kq@>8Z_}|^e+#S9F-2iQ%2Nl{CBP?ku
z=u^wR0v1hbhRg}c|H~#s0uFHPPYzysL0LDPF6nTMfbBTCT`Ofr&m;Q6nb%6L6tj`v
z@dQsfqdF2gt)KFDt_oPhdun|X+)M2Dt)wU5_`V0^8QLqc=?7N2njDsVz>TxOgF_^s
zac2e%E5r#}O70JK7t35c!x3f<s7?)S`sRA$djn3gQlgcE`95g1{mB=@di1ldmh?2u
zrZrwH4#l*CKY<9H;^Pg96H(fYNN|YXy;GO+lS_3|bQ_%gP{_^c7_$<uXDh<Jb$Z$E
z9NiOfh#2j*d*f8rN>%#Vk^?dFZPnaCWee)QY8|pb3cT&}I-q5uuY4Mn;MBML5sdoy
zk`g7nZ1C0KOP5sWw0l&&;78<h&vmT3`E4lA)8)-~*P#(rR<(Nzh1##N`A~0Q=X;q_
zIbckI<nr$s^(mseAh!~|73SgFcvYX7ym^~=5mle^K8Df?_&G?A>IF>ejS^VBSI(ec
z2ZrhTZ8fv;a<fyi40~@V3WJiH{kMe0dRdJ(Wq79RdG?!VUB30<G{f|rNXmhG!T0nl
zZbu_=EB%ZizLsOJSUl@Q8$Zu#%L&!1*5gJt-Ir!rPL`FGWHR<rCz*3vV#Q7!Uo{`c
z@+0mAWo1?RFTV|(2xTkJ`bKwiJx#*TVJj=`3xaxGB$T`?xBE7g?B<@7UBeYPvx@TU
zz68Q#=}WA-L5*#Ss{&Qzb<>dFa{zI?{5aMoq#7^aQJM!D4Po-A*E`7ZD6l{CLWgst
za4k%ilVnMXl|l#<OJn^hcSj?lNk-6NcB4Y+u}Al{PWAs_Oa0)L4z{UF3@p_rFrvSK
zEQ&!X!1hs6-_1?ow^e86xw6JXdPs+nwZ^hI0Wz}<@Ls*@I@}oKB>&x#4fv1WVc7rH
zd@!Z6hPN#M76>_<eQh1f;^{eZhig$mj{fPzH`9`1u~)cWYf0$1)Z~>-VmD3CoA}P>
zEK6Sa#i#kd<cJ3kr!}5_0Fwd|_htLtO3UU*C2`&f7kZCYmc#~Yr<x5AOUqdW0s#c$
[base85-encoded binary patch data omitted: docs-src/docs/imgs/pruning_algorithm_pseudo_code.png, docs-src/docs/imgs/pruning_masking.png, docs/sitemap.xml.gz]
zd}gEpqIv6^qQ72&<7n<_bv0ovV(_XI{R<0dWi#XX`CHZ5eDbblR#q8Vxu1oFhCzsz
zUdqpe@hzCcjw{S9hRIvqZ)@OvUT`Vv?X2J&`g}#P6WTCLJ@m)m9UtoMf&4vqNm2`7
z__U&vn8!;Rwo@c1`+#WjATVvv2R+fp!oQRWQl)A3ldAe`XiQ7!c5>f(UR{+m>DhTw
z>UFMB*SH-PR&Q&9bk5h`S=Y5VFW;J?CH*XdgJ>?4aXQ;#b#QRdwpZlUPgv&aVX|62
z8p=-seTMb|TPG<<$LFG0ji_)R96M@d)!*15jfmoPBXWiCx>4M<fbWms?rxSXRF(dx
z_o(fzu&aYNHL0QxnltB~sNmyNFyR{h7N_ewpv!5ufE2rb3?GbC@yaflcM-)o)HYmY
zf-s4_GcO{PjsJA>PM}a-mBjsz&u-K5i?7r~s_+>t5ttv}L#<28ub}XSL{l$cWlJ<h
zg?F+f6lcG<Xr|>q!IS%b?O-5-T;WMi-qBA!XpumF*`FCeg;3hrZ}RR~$FEH4uU@ai
znXK;|H)6_e$V5kViof@r-!(T~_pctTejUGNOg)NR*|=bo_v&_4x75${USBu(-3h(|
zmitpW0X$pKfX+s<<(L^&<YWTU!@ZjtSDCU>lSyV>#(#s?#AjUU)T;8}^j+#nc1Taw
zDH<V&ieD1RO6e)if0b2Jr68`S5<%>>JLvI9V<+Qf++~wu7(EL%q)}t<^3ag2kDCR$
zt%LpjGvk${biUaZppW#ejJkF^ey{lb(HCtro^E{iH$w^+YFUmWU)#tdX^%-7_SW8{
zD!eV5v{25sHOW-%d>g&$LAovQkkI89TYB~<)_DZ5-Y1$qa!RXT6Ed>!3^YE>8$HkG
z&{E#?zHGg~a`0VgaoEH6rct>_m@MU+3=Ib?+GOX@Z@%)*Jb^MXJih%{_?-OlQP~F_
zCkMh=f^NFH7%i)qckMbRwA*lD%R21+`cW4u-RUqeW{r6Pryiw)zi#*1VYUySGqBZl
z(o0>p_4X@1uK@6*+!loiBJ0b032evA+f&W@2MC!FLLL`X=99UTHpVLX6O1CC-yC<#
zlI_VJe`8L#v8X=C1*Abh8=*$$NKqlJZvObrxBoox#QBovFQ0NB`}v@+4~ueC6etav
z32ScJChZdN!4W$TSN5gl86fHNUgCV`vWpG$!WcCxCIpD>$=Ily6OZtR^xlsP2)3+r
zN*Dv!T<uyFb`ZGNFOO%_NKgAsgEPCd_i&nt>!M-w9sL#*vhyuTDVc?US{<|GjS|HY
zSQ6q^UT4-$W+AfR(&mLhe=Iap+1BOG9Zg2s`qfVY871VW$+MF9V68fdOhI<2&XFUR
ztyL7|_4hrp;&)EK?{uJq572628S(ych3TKFl={M6169C<IoQxU{he<v7;R*4&V0Q$
zJ@$m=1ChULS(<p8w2x^fcG#XcqbuB?911uGRdVtiRZF|b>AB-ME6BPY6c6@Tek#8{
zt6s~kgp)osy4rpH2I3j@z+TIqE(ua94?kr~M;pVY)tCl_qsMB!3PoQt5BfK|)Q%9k
zVXGJ?>FQM*Z;eG8#!r=N2!%3Vos3T8nhI0cy0ysTyVq6xg6ee3u$9%zQn~ph-u`e@
zF33HCNPZhu(fk-Ep*x8uPuu!yb$eNm03OjjQnVkc(85_A2F!k_+S)FTzgQtv1vhuq
zs*bN~$R~IW>2;i#X!J;PD1V5~(6aF284A3G6^mc_5iM0}xPZPgkH(uA0P>~8D4qBT
zA*uY3+qg*W*ER$u>m3Kd`Q^MYFd&D@8&1A`tXqd6K+kUVIq)x?7+taL&sOn;msVDr
z^(?a~Jk9Uv>8Y@o1Zaojrajk-St`Gqe*q!82e3FyA!X#D@w~Z`-#I}m^f;=^_F^4+
zR1=ADx>fyBrL0BA9GfeogT5Oml;Jb;ya$I03KGCPcnvL0yRzC7@7#RQx+hpYuDb<W
z3%u)U>n1(rc)F%T1`cr5NhpJw9#U<uQ)1K}oIZ#ty0Ut5lX@hp2PX*jf?G0P0N6^F
z?qTQ7UwrFw6}f}GMJufT`R2r%a)TQE4QgaY$4ME*!g0eVL5HVm>C>2xK)X3LPe*$q
z?MeI3(pc79fT-IHUQ06fc>Z2oWO9)=FEx`xneV8SZ$id$vRla7w~Rwy<3(`}IW}me
z$Wd8hma1PwntKaF51#s>z@vp$G0&yGcaz_g&)yB{=Yf9m&e;gu@JFuJ<9lRw%pHgf
zqV_z**+CY{nDkOz;nSzu!<`(}qyfugrHxngbj$$akRuWGj*`sqa%}wwfrO<^DH--u
zyY)gWa%POyDd|2wOEw9*H`*&z$SxBxm(CfYz%Ztm8-~<bJn?wpG<WK>ImZVj)Oxzl
z%;}9WKJnKpQ!0aZ$!m+E!aJ9Y@X*tBDJ2F;5MLs-^0cxndz(KdRnoKm3x&B^*_56|
z<40qZhuv6-{G3`pkS))@-|CZ)9tOGfHFzhT*J?(YNdrv1;Xz>p=5Sb<^a>{F(OWsf
zcB{i)w5sn^jW6I)6(B%zn4g!o%B5kxbu%li{U_Yd>PX?GQus&<S+e!8e&z-LVCJ0&
z5n8>_5_x%De{~+;BhmX^?H2-!0Zd3g-8oVttiUXO_J(m|$%7*Ml3E-hl_0rRG2oHd
zZovAcY@`!<J$^cW3-|3Hgx8h#gUNx`e0HEZgqG;L0CXAS=TNaLH(yvxRkBD6W8HVQ
zJUegFIf13<iaR^+TqQ0zXP)S}3*3U;)2q(~B&4T2;Uc2&GoK>T3e_ys5;Dd4a?{EF
z&qos}D}a-#gMC9d;kk+!8j^kXHIFIl)XpGqXNKCCd-yiSW%zc|3*sLEY@sH)W4G~H
zbYPnd514wlBd=qn+-oK53LU^0$r1{*>{i8>HVtx$@C{GPhu**LH318APVF*0w$W`r
zBY|Z1`e$Jn>uEp@zB1t_eQ$;K)ZIPTSE^Sge+m=M;=iYoh6d1KeXt@Oq1R4;g`8s%
zO2G+ZGMBkA7o|sxW{N*lFU!7op)SZtfsM<9$s4J`K^Xr5i%2KAv2}V@I?67VuO2u+
z_e|J^+EALDjWjRDcosh_m`no?83yM$gl7vC`en9dA-efq)t?j)8Bp5gj{^#u6gyn<
zpKpgD0cS-sj%;Y6?fXawa>hu!ST@J_qIWiUaC2jKdBKBjCim9BasU)5;}FDHY@VU!
zIW*w=$k4v8L3$nYr}OJ)K(x_RsfFX`Ce#ja?91<~;@on=f*zjNA3AgZbBzhG5z>Ms
zSSArUi1q-j>yCj^vYRnw2Cy1bk=j@Sf&Ov3AxDYblgpi&;N%58o0Ft1y9^|OG@P^U
zV|pE?o-ljIFB+nH7_gjFAg6Jy(?sPqOI;YtEr%bV1bn-@yH$HWUsGubr!h0EqTkZ@
zIzZ1Q9rNhk)9Y)N-k;y!)VV^eW`RwTy}}xLRJhY@d;JEU3`;I!oK7hI$zWYlaLPM*
zOkZ+a5M$G4j4XKZSgVcTHEiOLW2e?@Xe7K+ar<~z+t*vxvb`hC`(T{Rn!Y$#<n*nW
zi{onH4im|4*0PJXV6e;~U9o(%%y=$hLPabITz#?s_Db@~q(zVO)eKJs^SLpC_tlvF
zx8b4fB7Zk>sT^iF&BL*fd=aE6(yd{hy8$n62lRItl-NQVfEXq(QCtSYB7nEn=qNU;
z77~2x;Px0e#BUc#FjY*nwA^(tksf(KrJPRmZD^0fxM&Gfb++Gh19Djg_>)=u?PW5=
zxj4`RmW=$wmobZXYr3RXlEF&t&&10Pj|YMv5wNNh5b$<3)eD@fI6iQkeu;(hCgMLK
zg|Tg|9~iy$7Y*6=yiG=DCp*Nm)O9YY1-@vg+H}n;1zC24Owub<YSuZ9ZkR<IaT93A
z6j(PuO&-!Mk+-RBXBiW@>r%DWK>$YT)p;#Vny&g<-&smyOY+9`vH&dI2qSHv7McBr
z-z4tky-Z$K&9x|8HoXuY5THf~x{yn2-SHZhP(|Y}6Fz}-9=G*H)SA~J163?IpdcIF
zuP{w?P{YfPeEuBc&cxz9?jBOuUdg=?BCir^eBU;&X3}j<>h5({<fg~e8@wd<{LMoU
zAR~;l3}!OHA!i}0Le+IpLI<?ljHVVi#P2*DeRv(CII{PmpmBxy+?J%^lC?iFuw(l_
z$c)BM7Fc)L1^S2Dm^`6kx8qnDM-7kGcKk`bwv+TkIrga+;TrgBf~OZc^c3u_#ExOH
z2Egs|SXeRHwNqkb!OVrI{hB`+uiI-5GEloW^4P?a`0tH>BtfgxwGDzE);SJvxw#BJ
zeR@I+Bw`y1_9K4wSZ}0jo5%G@a1z@Qm9QD_@6ViL<Lrl_Xu8B#VX=hy5OY9!S|g^1
z-t`fhD?$p->7<&KPXz3^b%aYcniGU>^up_jRWS`yw~?-%<K<~@oj*;6^JYZp(Jhu2
zgqbJdb0W>pdswJy4-M(5_|?zpDILH#gI8)b)rD(V`Df1$4=@^)-F>odv@P5!w3o!J
zj%<2YkaaKwU+=&NOr5k%3s`yBu{eM(&$u5un&-lvzqYpa25*gf(6KNEd+lM}L8seW
ziWQqTZx!6(CU&tN;C;2k2$fTDoQD800|t$fwb+xumRaT04=O$VR!~y1gU(Lqy>2~r
z6S-ry_dWBq&`tv{R+8@VE%KiCyA5hU4m}EL=W5)g^3<erHbC^|1XbrBG8zm18n3Go
z*Z%w_Ht%UMg%7?(yyeDClva|Jj{eC<E0+f|h(t6w$C{OO;L=x)Jxp+aRP$ffa)?Tq
zH3&FXGqegTjC_YP#q02d(#R&=4bGQfYaIIuJumOq67GA`<c8Qi{-3`Tf47O!($m{a
zpqo6(2X;U0b4Lf7uOcw9`mwKoW)5Oe0K><vphf=-$0&haUbeK3((<#g4EXkm;S<nw
zj0VK&t>f94V(1c|SOb8aW=*V}p8Tf>!0UKXGv6VnC~(|1KOE>R-ZhnAKwr(s@Kmbx
z6n`oQxiD?I_U=rlwcV*lNQY>}6gHbuVaxiPu5UyRv%VXHXSE#pMf6}HKot6#O@;_e
zGNVq!xa~*aRxSm#5wn9npuBVNLl}b?t{+0RB(>6&b-)NYlb<;6T@q-9VlLJZY~4sZ
zW-v~+jy!1Vg{Qld+9WqCm}yPOsGbb-PRO`dlJ^=fQi~Il0(B?3!-eKZj^LO$S#ZPX
z;NYcA4c;7+<BX!LQJ<-ws}}%&KN0mhc5<$5L|kZagYW7O5hkwK-E&eXthMyl@FMlT
zF~DRGM3Bw-uOzr5EFd1H;Mn4nq5jdRiNI=&3FDacryQjDnb?d|y!@7e%@^RTMQ;kL
zl4j^tyVLTV?0eML-me^xpd|k{CAkX?&-a1~oO)z1Z;D#xt%jn)%*F)qn<YU8fkd*^
zj1BdppzI!q@8mb(KzXLU{dRvzOrEYmP>}on=lTPeQ6LK9KI0>X+M5*X^Y2>xoLpa`
z){tV3sV*^@J#LNo78tWBlCr7W)1;dUz0<JqIAdzTGiqh`S>Rp<9E~7G`N!`<50tzD
z5B&d1O{WU5Dj1#o;tCv&4RAneOB1^Q)W;L%l0?!xI(VHxGUixU%CehM6`V{Ha>&P?
zKF^gszo}h(gZ8Js);XbjEk2xIFDE_SZ)VZQ*jecSZqnfWs$)*1j3F~qajmR`PC=z}
z!fuau<gk8AaX&N%>1+K&!b+plD&X54Lh@p|o~c40axZF&&>UVMyMc~?EGpr4OM9c}
zANLuYVD2+|l{Cl4XN%e`IWQ;)XIa=Sn%Rr;rf}bnX*f{c7$mha6+U$uQP`bYB*>6l
z0p1fw=QmMxu(p9fp^|qrWF<bXd+M=)+<mh8+&9#tai1J=Yo+Si=&YB3HndiypgG*2
zTT&|EmS-Yvm$b2Ur9!r!0*V<9*x1{RbiZ*ZA}#}<MSH^u&^+n{qF(_~SA85LBdQd|
zutSo0(8M9l&fz)}>m#|HKV_9Ns2AwZg2D!%CbgTebKJCv&-U+%1RhY4aNPxobbf5S
z1U=9zU{lcV7OX}f4NB+;A%oH%ugdn7qc1j<Ohcc|JSIODJjgOiyG$Cmi=k&gC3ey?
zSzs)i2`X}}zGVGbdg~aAsa=RG$*6h<okzw9sy5W}c};;NZoyQuPJfENA|@^A_z8;5
zQ8;o_0)tmfoe~p6tU$>M1uG?bVrR}E3g7D$0CIb1zxQBX=fcDXIX5V+3bP+q0T&nH
zdDv;M{}k@zfL{A=0^h$X3~@G73792X)=|Ji6UI)h7!mqt$_`H7*X_|l_Qvj0CT)sN
zLSuw{_xATKbAo~(8~!xB@w;s^8XWV1@<vs33g_eZYx%QEq&yT0N-U7IiOfs_?-quj
z{9*w<h=vG)n(MRw3K_84Q*eDXmG;HY`RZ0bKTpsaQ%SgxU#KTWZ=SEO7pIG@<`?rR
z)s@&UM4-OU=r=goo(g3H?UiF~9sxI@9ln8&0{PaV<8XHP0kOl#XKFSutyfIuyJy<^
z3B(}%L4>Yf*HFsdhLNft%B)~y=Yq4I`hT^l%FY>=%`6G`U78g$?7nUHOU2NCi;q~_
zDRNt#OE_34y?*Vn_@B4W9QK<=7T-?$pO^mmFV@c>`#V1opwIl@5B+a3)8W|v`|JM>
z-T!4~{5x&_T{8dNM*g#B{JUNJyIuUdUHnp;{O?xz?^gN$WvkS7;H!M}<+f=1|GVP;
v4~hmZt#tE`xT61u;_4~Zd(_HZgo9&yem?Vx21NNI;E&23O-1DGd!hdaZ{Q|_

literal 0
HcmV?d00001

diff --git a/docs-src/docs/imgs/pruning_masking.png b/docs-src/docs/imgs/pruning_masking.png
new file mode 100755
index 0000000000000000000000000000000000000000..0c6d580443c8b64f3bd7e6179bdbb366926fd6b5
GIT binary patch
literal 194351
[... base85-encoded binary data elided: docs-src/docs/imgs/pruning_masking.png, 194351 bytes ...]
z^XxKVe@S~GHxkQU&`T?sRR$jwm$s>Aa~7h3Ow|ZyW_m=NDF#H!+1m1E^F;JiT(t=f
z75_A^b-;jU&ZH$c;+>3@Np~G`skcP|R%Yo)-(vWX47m={eEVj>@L<nxVcnprus68d
z1xY}qSx!bBTa(7W`(M%+0Ud6#Z-uuk8IUB&Veo)85{s)FeVpZ|Vvdf*5rIR~{XIS`
zWp>Fl2C~0(RKKM~z%geCzP>E5*gS&w_~_C^-8&g`K8UmI2rOdpCwj4E9u@xas$h7y
z<0@9Jxvma5l2Ir$W#%^bHap2Wd_J$PDAKxQQK+zQPlCMx2?-Se2JJi#eQ+SuPS5ER
zx`AhjHy`?t$v@EzWry3d{s5aozGMQV2xb_;FgJT`?w5ul<iO;82M_EYm&M;U9(Ks`
zx^`8%oAx)Z<>ysrL;RWd&fsHGj-&%nbH5~J_3_YMI}}@?K~$$y9VZr~5MQS!sg$=k
zBf7ujVi!>wUrXE`zzSg{cmHvFRiYUU`Pmy0(@y!1<cvsZ?$#m(TAIN-vn(C)VPDi{
zsZiwq$&a~zipB}8!y0}!Bo_u@1oYWgx~Va-DGUbU=f?JHUPX+|_mU2TiQ64o1gq6P
zhOp>rwFG0~5o#DuRij~wc8)_@3%H$rzLp#*x=-|FmfSYHM|Pd101#>)5%RQ?cjce&
z!6+_Ed9Atabg{*RB>4(vbAfJ7Mcj^KNB(24E>W>uQQ86%RhB<9;ICBj3MtJW`eit;
zAlBF+v;dyHn<$@2{t=vsfpnLxw~ZoibGy8Cp3}9GwtInCnS)HaN^|p{Z-~Lk{&oIg
zqW+peQ?`ZR1_k_`&o^7lLf4Bh&<7+3UCt<Mmggem&OMQfiDs0j-}x`n^4MwW(r7OQ
zLmY;|aMCIMcquH}D*kku8iZx>yH6kIkU@(7Sc`gG^A8{XDQqc5j0+P}SsFgm_R)G{
zKmk{(cgY?^KI59=O)*TGi3;ilE;iH?*xLG<_<XLzccql;k$vUZ*1)|@-Lv^HL}zlG
zV6Db`7oE11;NM?1Z?MfNaT_#DAjA1)X<E8=CPYui0SYmc0UDMm+%!wWCk<xq>6L00
zUH_ll^pOsln<l@6Y*PjJVk62(o*gWjN1Xq{!at2^sdZj-|152hue*fZS)@8O5@WbD
z=^qv4f3Ft@_MeQxjHT|s6D@hVyHW4ym%D|8w=D!r<?}C*MK3h07Gj9O#$d?TYuRF*
zGF8gks_D-B8}cx64Qz)pC2VaZ^P&?tKl+;W36L!Q4M#Bt{z!?Ojw@`a9kAiJ&fIi$
zgHgl7Kcq3Wt^uO=hr_J79Q7EV0x%62zeqq|zOWj^&PL)`{;Bab!p|umjzH8xD8r`=
zm-X1T@2=9yJhdfQXFas~+xIkOzs1j_a(&cP1);Xqxr>8Fo3Dipjnl;R5<{AScBHc>
zU!<huQ}r|nzx!F5uD<P)x%@kY1pRUI&)-KRV%n9k_fC8VgQH(V=PiH=MOK&;yj}1z
z7HRUQijuKUi`dY-FnDq`0AXZ>uH6N3xUBhuN=@;R;>$N11BHoNN||0ZtUuG^BXXK?
zo&8<0ZM+Sta84QM(~Tp^emLfEt%=LhGVO<jLr;lXM@$f#rIeX8HJcXPRjfuf!@Hhu
zD7$9H_k!Es;<+y={s(4IKw*Z}$8!#6?6)ESM&e{@GChd(7EmIDc&eq-&f&=Yc)9LQ
zB8j^<Tqv}_r6NbWpjE;n9mhe4>K%+=_u;<RP#XCrhG;E{7xmuVFcS3oSjY#7Y79Y)
zI!g7gk>OKu9+`2+;-6E9Ot$UEV=HBvOY*3G*69#Eem=;;3nYkX|A+}3+lL44923^K
z!ZkH#O6TAT(f&_FUPVHU*y;$;pvL5en<k=XV1Q{g!c3>wPrAw7BK+7~CHy@QoPH;N
z=*|rwI4>-|B*4fm<-$nV0-m1_(SGiV((yiww>G8gGf|K!<{uO}kJHg55-aWtbi=Cb
z-Jylte97Oev}ftX7ZreCO8FM84y#}dUd)Hex}y^3{Hx7gQ)8aP9W{!ylH-9duq(`~
zIhQs!0)ArI15p00as9W3gbf)9^*yfxu1f9zHD(8#9t*xxwW{(qN)~5f^)x|Iz9LP+
z+l~1d!p`Oag{S=5rW6MqYazze=R*4n3Yq5AlV5JwDq!KeiOnX5PbnzpRa5gnDcrYb
z%I24_xx{0pOqb@|csBWwB1&^Kd`tvCnWewxBGa1eaD^<;{5v}gO9FBO{nIy2syI>3
zx5AK3>eA8dYuK{V|E`WEr2K{J_KH~(65yf~#e%Yn#yMN~h+Y=mkY1=E;HV#m5|d>B
zY*7XYV}Zya{PSYy*k>le==8l#nB?c5Dy?+14UVHZfG3lCU!kfJ_|ObK=GZ$W9OQJM
zV(JZ#tXv_VC6I3B8&5d?%$**R3d?@_ab!{k1htP7<@H4lPoQ_NbgjB=0y_|PAm%Yv
zY#OuT{nYaQLZLm1@$SoORkzoqpcF<^@qbdS(exi|e^8wEx{m|q5R1JQh^j<@%Ux7^
zniH|dOZfI_*oHT(FF&WInV^L5>&Nrn4gGU1>o}l73K!!2Z6<S9Pb2AcZ6F)BC>DQm
zC?cH1d{r9(KN1VwdgH`b2wPdh7sO0NRelKC5AlA4G%k=>YURW{BZ1BmLK-h8c>q0m
z)@~EsYTyt)sN>R};|z$aMiBnjuIHveeN$QgnM-5`(Zz#5k|-4x$}!10f8&D-MVtxl
zS3Iw>ZvirnuzZrv;Ux`b)P02&7m*lPjw3IxFn<0aWA$5pR5Vbyu|nIJZzMIzTnIF+
zYHcWwpkmrMD9dP?9{=OL2H_xedD)DJc0s^6!YBPp#4y|IUTKpO^Mwx><pHGWk6<xs
z%;-i^dG=AA=wQ616s4*0j`qzMC3a@mzv4)&9C|`thXta@kP9Hq8+Fr2HQt>avGb{|
zkyD*6{!lGWd^lGp)6^>f@&!8V6WsUSDI6~cL8uGe=|+h*dtt}oHo$B8k@!43Mk<~{
zDN_EEdm9$4n5;}P=NzLyRiknxZe4KGMpJlw<N&><G!TEFa+wt(xj00Eqz#w0WV88W
z(bik*sq6=7libDPfkof6^u{na;~;<QGJ#<hCSjP}QMs-#ZiOQ{s1GDF=2QIRwV0~V
zPw!E4olKfg5c8p^kw{5e@1U_z>}mBG5eo()rnfWE8@QE5i)||Ei6Y7F>{4JNZY-;!
zoL@2_SCp~%n_?w1rozr^Oa!PA1ksgbzo?BZ3&E<=<>hwaOS%Q&ueI7C3sJJ|w@E;x
zYAwTX8oWPPadszL`**5p9v8ne{`X;EUA;I8jj)*Zz;KlFX<hZRfk3NG@knX(n&iKV
z5^^V`2C8~XW&$Vx8^BZ=qr@{#NE3kX*AJ-6XQZavr5OE@=ymG*0kZrZHFV}GtvkxY
zvxlPQUND9wpl`{CVvDYEJrS)rM@goimL!zU7{n($u-lih6pZjv_W|-w#@~_OGHi<?
zyaxq&er`6jk%L-s${LABM3iO~D3uIP_e^P^2$NZf)F=}b8Jnmhb-tgM?bJt4W7P6Y
z3vH&_r63&E&#bGR1E+yXxY;>1D%v*x6eQ5h4{avZ7+cNm(HIG)tI8su&BzvPLZz$S
z_XM>L1#hxAaWUCihPV!1H)iW?3iODlC5|ia25aK}FeNIG!6n7>fDQ?Z@f0m%v`vh0
zca#RIh!3)|5*Xk^BqJGRB0q>5FzPHp2U0CWQ4gry?mzD-wD$Soozf=4G255*3{Dby
zUx)E45pip$g4#sqV#SWIpyvag#r9?q9<)6IxyJOmmJ9zU!ICc`In3+V%#jZ%0$MN;
zL+^3{6Tz(d)G=*6&PC^#UwyF><c<mq-1c<hXgigEp_Zt2FvvD1#d1r7)zdtW@+no^
za!{<QT3%z%Z2*6PJjH6mcBhx+cxiF-jWo4!G8PwT5B1FjuVV#@sLeG0UETeGxwP1C
z%iv0Hr#h1x=UHKX-;%^;*TuTfGybpKOcMDIb*F;EjDUnn36sbu7P~W+;%~3di&1`B
zDqZ+PuQ|%)zEbeVqM`b`z84ZNS+LKoo10e&tW2q+X}^PTI=3RmZM<1y!y%NjN<9&8
z3YFbFTD#SZ+{d-B`BTl-%7bfbPdyxEu|r4zo>C>ZV1qA-87blTnxT9{l7GQRF#wh{
zlob1}ju<Wh_$JlvDWc~$liWWhD<L6I;B9-Ya8y8X6*gWvLu74HMs`>?XM8fvyzH`#
z+gx!$K1*ivAqDtBJEmB%?o$w9`6p<tjvE3k)@i?(WL|7YQ)A}B5i1jDd0NA~)Mmp(
zj`*G<3qk)xgmISkKQRpb1UYgA0ed<pyO4EAwI%d7eN;x99YU9r0(zdCYmO%n_K>j*
zn0h+p??mb4qvz2A2BdV=3c+2jaMT|gmjWB5Wad2!(TF4?QqnhqUh|%MO>TU{E?i!V
zGCJ01{#A^uxb|B_uga$N{Km*8wH3UTP$@5Fzs<<Ea@q7Q>0Q9f@8e-g1FA~>?V7*(
zS2<x&mMW+CmzEax9vKYNXrY$#Tv@@!Or$y@_mMPPVFs4x{jDt0!DWZ~q{4XNHUx5Y
z5M|E!Wp!B&LlSiGVRE#l+avbB5{jIG#aR$)BLh-5GmMv&eoY>x;?Gi!YLyGZ(1nBi
z&;M8h)AbKr)sL@irydst%r>6q`xyp78?9fr1!?YpEyzj%6!=+cYlHG`9VBE3ZHUS~
zx7Aghs_k*=L#0qh<kC)k`0!O1EOvwqjgJVKWQgP2V0pnWk(CF}{r|i1RPz3isgHB7
zpyh;woLDeqFXCS!l$S)7zQ-L<*M&ZU<n%(Ik(h867yaG1ixT%z7982B=1o;)g3M69
z+4O&w#tAlDGb^R&9XgUBH%2TR&(rM1v)I{8=1ptg<RMk-#*Kyl8&|%KhpsPj(7IqE
zH`0F;u^?w8>4;0{Y=r4NTS`i+wNVa2Z~Lt+m%#D-goRpy>iu<Oy|c}`@W1~htVR7H
zk-Q!E1EN;;eht4##l9F3x_WI`w5(#sggfoH@n6T55LCEF>fzdn9n&5SrTyh?C767H
zznvQnV<P#!_`X@*$61#lpdwLEr}(l5h#^~I>syiup1Rj9#<W}p;T;}0^tn2pZ4<G+
zca!D_EHLTXB*fmMHG&2GzS-B`@oe3Rl`&5$HdvDTX9J;-k4@#z8gV^H6XB#o{PD5?
z9Hb{PqUG&qYrc43x(4>vEKJ9uO(@kRJ0S3*i*a*Xj<8taOJV^S$(uCTzx<C<T1m$j
zvFQfkY(0G!@~dPt7O2dEtf<f3!Hdm@1;ZplQfl2n(zg8d(caKMPO_fh;nVZJ<W<Ap
znnj~RM2Gnmr5LkTYRqi7VN9WOHWlSvFd08WiLV!TK4`3ispK~6hd0hs<PXFLLAhlb
z9E`#F?DrE!r~`s&pdDLd!GXVdX3kD<T)f0F<&~*%C1sTVW(#5j3HWQW3HYpT7`sSj
zYb%6_3FU7OWAj8DzqGoV=6**LK!rF@uvt^Bvo;?lD%?Vt>1)ZYG#55SYeB(HMdlc~
z;NTI9x$E>bmh2`7`Lv&Oi1T327BW2~6MPf+74d#)e8C`mCkVN8RUe^${FmXX1EGk^
z{(irlB8Nm^O*=7*i)8IN3Q}6M|7+5@h0|Hvl8&yiM99e)E8m95^8ej1`PUCYu~1l+
zoj24hLyn9%{~4$=z4BOcR=+#99!1APskHc=0X)gZLr0X6$J`EPe6N;MD9%Xk$}j(k
zu70VY(^@`9ir*>DcMm(?1u^p}@afpUjo_tD0Qz=#&*+N00|+X|(j4u0Tx&uE(?+m&
zcmBI)@-b@-?kjfI@5|fLo_Azv-wALyKT8$)T6B~Ald+&*XXFp{8#GLEce(&Aa_C*@
zA2&yWO5ZfsmcvBb8sHUwG9mzAaPfXniVEZH7wyxssP%6L6e{OP49IzyBc@YvB{x%B
zdRWlEQchiu6^-1T{=aq;tR3%9Ej|AdmZVT;sCpmb$p6}8%&8loyC?>r5^qPAA1kBp
zn)Q*t>+Cb<IJ^6(1_zS)D@hnZ$xcY<AMu@hR>9rW@??N*6T0z8{xdenigk5a``gM=
zs=o$SLN5(DMNhbmy-+i@=-|vY8Xh(VT_zl7`8E8+1Oyh{<<)?lTuMR^Xa3fD^VFvb
z%NsbkOrbYL3`jfk!k{I6a$o3;6FvsOL0n-}FxrU7vdfs~)9LPxk(>r4EGQFK{^Wn$
zO9_fdUVn;)f?QPWJ$clN`)_<&%B+61g+J}u>wLJOno3C@38KYC9|$oX7xnT9kebFb
zCOPJN&XbEA>!`S$-Rs^Riy=eWX3|iynRrjZDE&TnZ1R8rtnouxll<p{a>3~MosM|?
zIAZRr(TKG8u6e8~SV78L+W)DNSD~!`K{`qzl&(aCIZhHF(K=3Z>#;|s!a;TWWWi%v
zx1S*=%QIy=gTpBDvggAkI-*&>Pcu}pd>;edaB{fPyP8uYoYwn(H>i&1JvC5&M`gy9
zFwT4C;S*OV-6p(6#PP3LP^&qD%Ig!^zxG57@F|PQn(gx+Vy}GyPL0^9QH|P3AaC*W
zjM4hTPnjMX5KzEF*bdmD?{}e{sJ&6ND<Yl5;X&#n!~#8!-wFY4e!U}24RX{rC|%L9
z<*1)4_g!dg!#cL*5YkYYGqV-B{NL8|YcMo$I`RgZKa%$%5x<FgI3aQ5c<knrhBp5s
zpiOVd$@a~ZpL`{+6n{n!z)~0WfnTfyw8+F1+^X)Ex#fRcCYS&3AsO<=*(v4BrT()j
zD)}r~O^ysX4bv1+Dg^aN-y@bUM=Tkh>>50G(eWp6FQ-j;%jxi<|3hVpWkP-P0EgD@
zzpgPMo~3Nwpc+hvm#t=`a!Oae6gZ?n_6>j4r8#pt*1y~4&=RAc85ng(?Ijr;z)bUZ
z9rUN5#C28g1^h>g0kx|QbHVN^Ur6~{8|jEG3{Le3dY7P=vWUUTWXP{x^lE3-aGXOZ
zi(=BM3o6QW-ogGS^+0|3?OQ0!gWc?F`A^YGPKh}GYE#odDWVZ)%Hi65t>dNzEhO>P
zZ$`OoXt!+GstD*~8^~ZUWh;n{y~4r&iVU7I2cV7k&mRr>4Dg0TYz+SRPhT@P+&>6k
zvvNNdU%uPDJoRhp>DpI1!-X|rlWk}%ayA#Q{yGeOY)Jk#>G%Uajn3Z=#J2p${=AN)
z|7{Kih4@R`PJVVH+_hS*Ar^X>2?W|oylcJpBO0G3Y=@H1)E;{Cm2EIEey=vG(kR^B
z`d2aW!WaJL=_8x~ju-9w!(Bpy;^W@``RoupfTzi6oY5xB`Pa#-;9a-PL{?46hVE{b
zO3+eEYbzoDzQLA-r^80_ro53;Q`tfQ>;J!_#C$T^92M|BtQS0g;p{cKt`o2+zuIW}
zErN%VZ}!1DMw{gygOLZkrC5)`74(L@fK2X<1~BRk6i=nLeYc#Z`MuLFw+n3>p$9Md
z4kF(zd`mW;fhfC$kto_hTx!V!-&k^NXy&pIj;N=N;Ofd;)Qu6H3if7?1S4Xm=rjp_
zapnT(>j->?96YdeH(L3XKW%J#A!U0j{NtMs>1TcWx@kxEPSflS{&3PT0U>AJ)>c-1
z_^tN{aT;~x6GZmL>hxZTLPxdtU*C$-5{N$DaDV)GHaPAdUGye#C@>10%&zg$oxaRk
z3j3CbE%C-J*h1U&H&A-f7kby^?%w^Ulu8cHP`CdE3N-!;$+Iv5G9MpKjTYHMZSyKY
zNyGq>7X{pYwx-@X@|(I{aymh>(2*rfjJAGN+yhTR#kAdM%&;x~t=+Z8Ab^^EbVCH3
zamcGj>X?b$u$tDM%7+{}M!H98_G!NHIB|;crW#>-?b~-a$Ml6~SADKCNy+IUqv+@U
z2cI73m$5J_2afxOzLad7|B^>V(I=sEWPWOW_37}b&<2SzTf%S275i3@pG72-%&fEe
z6VUUqgERK#5<%i@?(X~{$eLO3hmyJDfdIpuG=Lg&3a;jv_aZbz-t6LAclBc`JNFDq
zOCTFR+}15|DdVYF9UZz=25cb;S8i(~$%~QLJb|a=%r}4C6AD37jrMR?kHqK&pP$$B
z35L+9%SrvSrm5Yqyw;xchnzkp=$a;0NGq-+B#gR+GLO1a72)!RM4w*~N*>)^L?(6}
z-}GOm-@HK=MLaCv%cUeX*jdNykm-KSBycU+{p^YD*xJW|f2<cs-EXJz6#+jjHu&x?
zV^{7}t|1QUMm&gn!rKR9q9JJ8$4PuvFanzpk_mr#ad-HVpq$$gg>&jCd{#)slI<-|
z?#nlik61E4IYWe-)_Rt|N<O_5@U}CFer1QY<htL$I5&pN9D(`jVQ(6vMA{VdPrF41
zCWsrq!$}`8FgBBAN$e1IZ^{JHfx6ueYyqvS3061+?h#r_b=HlUG>%(d%+AV7H68BW
z8=Ye{R0L<bTR7vtT++~L6t+F*!j3;U-MEu7xz4?utbrsPrw}Q#B#x&y2M`q<S?b`0
z52efDwcqR}02Mtw*hu8bN3EQ3K^d~dtsY#IEc_|{Ertt+gK|6znU4ua^lh2RTSxh~
zExDjBns2h?#O#mMPNy|=XIM!Ky8;9(zd#0EM_^({4ykSY-KvUbY%~mN^Q(dY^GCY_
zpYHC0)lq?K;AFVkroeSAc-~85T=(Py%ck##i;6YlfN(c#$kv>9BuX!?KHSw4`1i_G
zm*7|MqQl(;8@=L=Kw$HN#xw6P(rH&54=xVdVDzELL{W?1U5`412P6fz;jML?ZlbLp
z2w;RWhmnjGW9gZGE8!<<MM-Eay|tpc3Jisd5^rCvK6>un58i9pJws=t-FGglww=4r
zv)5mme~c5Uwv1q(%I0rwx}Uz}6V|vq#Kbhp+_Eh6B+ZZ`9C@r%bmFj(Gd-HBUNL(V
z;k)KAhN#Z_;2&eFEz@=F9;GLdbaoUiPo#xMb=W2PUk!w=PDD^J+VJw4GZgu}s>e8n
zapWzuwG`L(AW2EpQBs}>@d+kWGMI`8%IB%6d00<OaulC(6ARM%6|rmjsS2OKN`}5)
zM(TVXb!#`Diu)bB6ufr-=tnFJSv*>dTh!qZsi<;{XE}E1b~T1W?p|xxq~VE6=@;Mw
zD$eO1f5dV0o=<n9J-^u;ne?J5B%w;ewGcWl!AFAgW=Qx@%nYaaP<LW^5(Z77^p=_A
zqF6L`--k&#Lsdl;J32Kv!qLMjNhv8{I6}?TEI+%DO_o;i-0eBZDGAF@jDOgeO{7wh
z<1KD-UnOFNq7ySmOnut%<M`Y_NT}k<nDfPF8wHN{=P%jy2_Hf{01xDEk>SoS4Mii{
zOg$XdZbe5ILMba|yNKMG9^lLtZ(;FV=bPF9MD=8yx<nX8URER;CtqN0>S02S``=>W
zfQ9?7yQ!2(7r6~0XCkf;8ncAP>xaJi)QTe3d`eZ>hFvskLz2-_N&9NGS?%zU=9t7;
zhDn2S!OkK&)D2&Kj77Fbb!PfG<`4cR2O#b}XBVvJiAEo->V^tGGM)GSQn6XnjkNfb
z>hDt~mb?57*>rY=i**ub`C|;3h|o^Pc8JEPPX&%|qkU~Z=v}11h5~rGdeE%R1*-id
zNQ_YIs)^NOP8KBv({6oMk;%BnqIp8V)%}SmhFdFDG4_g)r&<xacIK@kyhSE8q<AX{
zzUtWW4g=X{4HWuCj7eI~yXwcyr{Vm}fvX<Y^0L@FV(ipSYJCA)U#P^JMrnpVOhSot
z{g=@9F25Pd>=-Jea{Hx*L)M<h!Q%6id>^3WwnB5m3#MCXIRb}%|Cpk<!6(YmWF5rQ
zWz$eaFuCn_E^QVM`b6?cM;0XNcZ1-UDbpZLna2qN><yW(a8z5tC7k5OrU>50Wrg-u
zz0O38XJ_F}0S}n@qfqNzB0F}(GH9_iz3q`|+?g{2YibJSZJYjEr1CZ$?n)!;_8p#A
z>AI@+REYf2;MwBA3UEK&N2@g(J(>V(1t*rBog2wwE3AI<&e6dOyp=%%-fOsO-A-7F
ze|?+ir8)1@vy8LtYeu_>G*AkDnA`u*J{MG1r*Twh-GSoe2Js^^!010pm)$iVkZbMX
z+pUpF5-vmZfGm6&+I|A&bk6RTmUL94_{4T(^$U4o7n)|~(Zz<`jo$c}7Ju&Uq2t!o
z55qBat8&XkZ!w_^2*5e>)Em!=;W^hHj(wYa>o6fB3vJ$p)e_x6_7izGPqw|bG$H1B
zL*>n?Rg)D*!kn19^6<+t7ik&e*X=(sj`cafSV3tYMN$&)i^+a5YI8GO{y0QhrXixv
z4~ovjd2dhw>r7jlJw>NF%sai|&UCxcDrysBn562sG#T1(^`x!ER}ZJXKae6*|CV%B
zn~mu|7f3Lx;W`Cr7GK=^nNw2h5;qK)>L9nQ$L))?+>p@2UO56<_mi*ZXSDfL)VIuu
ztMzI&laHVfc~4ydV&36Kd)Kw<xQt|*Nvb8+mG)mJx+hzhlN<sujK<S<VnljVi(4P!
zq}68UHrrnASR785v>MnDG+9%_ZqvBC5S5YiIoVad6V6)=hzzvfIiBVsQHRV?JyL&g
zCMcq@S^E&y?7SR;nJGsu9ocWWtoF;UooXoX1IP)mSmEH76I0%O_S!&JRe2@Euceis
z&DWa5Ku;MR*VExwxACV`wFm|KAU^KUV6cxvV!C|0;)=3xa!u%-LQ4&f{$V;VR+X0>
zD+x>@9lX+KCI<WkD=9wiHatf#;rO)Hx2E5<B<{c6a9E(VMrMPz$g1-VGY;z~jDg^L
zyR)Z%b==tH$-VCad#!XD%@Iib%t=VS?J(AnKCj-%rTLwxb<>y*Mhr1H^b9#TM6TuC
zGC%Jw-))gnckM-eY+0-Sb*lZ#_SUE5%MuWTikA3<!x<Bu2C5_w!Hu#NxA-7zePDgI
zjj#qd@gw5Z<H90qG!;o;PO*l3u+o@Ce;%H82ONQ~((&q#h+tYu7wGnhO0ri6>%t#$
z0ZX^p`72&8SQH}H23#yOP(6z|U<1utUY|3=!-~ZBE|`11q3AVde%?kiCtW8EWi%6o
zIvgFrC%S=Ted$1|7WH7!!eBq1)Qt5Xx5p#nuT$$ol+Tg&{hy$!IU{X1&i6Pa${Ol{
zUs@lCcZLljS^ZqFShRV___ZmuWU6@5*Vpy_rL*v%%z7XAvM*;LEG+pL2g6Iq1sF6G
zq!x%MD_a3+YTEX&{;nbP^Qgqt-o$YjT?PPEdPDZ8C00z|HhA*z-!eBEh6cxiJ5Pl-
zG8NT^O>9bb5sp9%5VGx?K@dImD|QAH6O}$=Z3C*Ocj|XTOFw{T<$~uOD7~9gWCCcr
zEEyEJ$VWaim4}iv>z;p*dWbr}W{s-Aoa(*zWH3uQ;dB3}`IeZmOb*E3^XHP-?1bjS
z9!sSVldO}m=^gdit*GHr$m#C~v{EhAfcq&<hnM6j8q0=m&5Qgb3RlA1B30p3xhjnH
zo0c!<S*;|aO^*%O!Bisry4@9WxcP5#*vbiOYquLth<<l#`{{zpy&-aXz)@l(1G2Rl
zaZ|lW@iJ_4X+Um-zX;-CrpL=Zp|x4?ZBRLpnU?hM@)_A1jk1+MFETG!(2EIg7#;2B
z{aEY<Nta17rfJT4T?dW~`5Ry3wcyV070n|ax$mKqM35*Yx4YugzQONDBI2oUq_5yL
z?De*-Zzd{qj?ATn_Mp%aiN9NJ&8Gj%ygeWy!#rbdI-jG-_DD^oiNA47wsad)g5b3U
z4K@J*(?J&k#nTYQ&%D~YpIX6UqDXg7&=yf?)&{WZa`=F*8$7SK5|&P6LtJ+(@ZHO|
zni%8BgUDQ7w!_1spDi%X8>y2~VvU*~<BZB@g-nD=V1#wfQAOn3DYorsi=B_P1Q0(H
z?65H6*Jbf@@vCmRVc0$xMgAUR$-4F8Uv$8Oq*(B^*2KW+Ep&G2YWomtTSRB)dp(y4
zDS8$bp05F6N-3rEGX=wq%58}BCWpr^g;k^&&yMh|FD35mv}1uk!s*)Mb=E@^W`~Ln
zMK*}H&Yp7(5lGH@_509pa+X82_vHH9^Wuc!w&FJ%ug!p0J)-M^zbjv5KAmY_eEXEo
zUtH_nE)}wYI!_n6F-Sr&C1A=Udm|G>)@pk?of?1qcy=i|i$ycN(NCKx3M~q}QAK^u
ztdI_|AriCw8Xty<edMGmdqFW-6Z&J8RM#PIImI50LhjAPeLHT4<ZsRkRy!kzNPA$*
zPkSe)%%n5Uo!3WGEWguT^BmmpRpCtsKLz{Z>@rtwtH#Lvi*Wy!u1}Rn-BdO8UjsC<
zMB&tc2YY+%;jFD8fhcd*p);quqDTbKpl?HG=jFi}J}%A;yW>e+jfE*a^S`baW{0tD
zE33Gj3>yKS?vnP*k@+H6j1BZ24LGk4B)_ij)|tdjzGZ&5u<@ASJ9S~*So>rhyyzS=
z|Kr9=#mi%jgWl8Ub7r0<vmT!qz~cyv8g2C3-Qsc8&%CR)FT_K{Dsg0)V_Tv~jz)KZ
zl4NqDDy1(}8}JZ)66U@kYTtmm%Q_tYS%u0itdDtf6?ID5z)ySJAM*Qh$Pdy7vgzKx
z<e+BmF>UBs8lX`fCs3q|)W=f(s%k&`U39pw1Dq5P4SfGDKKP;Tmx}k5>=@(*?Z@3@
z0V@~GK-qE2cDLD|N<J$?foEU%AQ0c69r_sitNNwL)8Fd|e9(uaq79=#M&u#sg6_um
z7uyXti(ed}FFb9kW<KCT$8-lkhoF3i#rv7`TA{seX{3zgZsydmr%con$GL!RqxoL7
zXUU2gB8<c*M6Gv<H0twt{R3$+vFCEVO5aDJ9+$L4ASU+=!dV7UM!T?~fdB7?2faL6
zIyT?WAZxxHMHL30SdAMQPjbi{n7mzNKt_z{yZ(Hupih;!7(cakgw~kQ6VJ~nMcO-h
zVKf;u(4d?Y+qg5MxOvEDLKVPHv3iH%_kwqV=n&D?XX@2?^>-QX14Cq4u{LN|o{W#4
ztclSu&Y<&xMTZZ|dsb^4IjWUY=Al=md_7S#?8-XJPnrSH3(~GaIMcEe-tNBtA{8md
zRoOm$`qT;Xy`qv!=GaDr2Ig7{_m_nGri~z?Gw6#~lr%jA`NU*oe%*BwFH$`<F;xR@
za?lGekl)XDR!=8A!hE~9$xoCmZ|y>#3t+F0*^c`0(>*hg^l#r0knTBotq(Gb*YT3<
z=ACqviRy;d*T-!l)EkE7blM;*ectOlU6Db$c-n(vQdA64^gnz6Z0Foq`a<gm-$zm`
z(QK<D-1{*}z&A-TGGa5|qzc1S1O^jx9BHv~Z30YV-$*~7Yq#NF=j_C#Kl>&LXhO!4
zDLPeC^yXSdUg0LlsCm~=s<r=Du~=GH!uNxDK1xa|uqv9G%?9pDB=0;e!|%o8@?{IG
zXuR6&Aq}kR+D}PMJwK`*Grlypu#7|IG$_ZXp`n?tGD9)&*v9)au3D?nX}cociyqzd
zbl}U}&mO3sg{(BWr*>(fOE?Z^cn~VW-B+4U-M~WQi>wREL>cdYrEK`2f1AG*azb56
zDKA0F^5JTjhOku@dRe^kUzf#4*FsyY>le#K3DM{tEwl2?Of#u<tQmC%p=iiq26yRp
z&vFf#t_XjEWBdNFsl)(jAjjB_Bk&|1jc<2{&a{pstHF{(C>d<Jmph7>2eX#@3?q`3
zC{l<J`9~~P%u{kaC}xX?HYju&eA@HpDnKsS|C0RZ{Z~X8!jIkyRc2cCcIj9NaUpmn
z20qv7dQJm0<_+mA^^PG|m(cO;A(vyKWNVMBx6!GoCE=6`Y)XO;RL0Y@vxLw=M87<>
zjwd)6mu3##Oh!li{L5?McNG`|1a*xCJz^ybDshaGjC=aSm+QM(Y88+57;oG{{yk#h
zxv1%Fz<W+;QDhwP3c5(EKZ~4fZ&|d0!sHGL@?G##&YW{$*ZS6o9x))Q1X9gf-q&LY
z#4eEkDCW6}3oYbLYhM-#(JIpw@>$gNRh9%I%b@CrpQzAj5X3xmV8L6cwL*je78$N@
z>{{>5P3D6|pFZd1=cl|?5IB-+$?&}ZXC}G+(gPy{827few>K_^Sb2vsfDk0Pbb%RW
z41A~rw*ntpI_GH2LkvU@+*)jxGKIKITIOy(bXRm8bM3^_>PBVbire%_7A%N7Cy1IT
zI3b$!Da4V<e#$#y!7|$Fi`DYM#A}*{&cZk*p>r#F9u)VuDmak~?72gMGF}E9U3R38
z)Dn?t$6hYAr&P2ef$n3AKH_(#H}uf%J~1Hu?EcB}^bAeGZFn0Ia1i?wqollaIZW2>
zH-bWD%J;lwhL0baFy*CN(z$x7QkKn9=l{Li+R*78xe-ZF<4W_w*rIS1HN>WuResM4
zJ(}PtoCA~i?%X~$)hmmC2rL5mJ#7M^GuD-rl#DL;CqRsz!#K7k=#;F#^v7(ECA`PB
z-7+&yf3=|FtJ%{EyD2_;+L67iAR98i-#=$Af7!yySgREIf{HovxFY}P(2M`_)nkFh
zbXV~!uSxS!j;(9yix#$m<e1cMFix%Ru_pSUL?b=fHkXcS!}bw4Vz%0-@W?#Vw6Nd0
z)n+aJ{lBf?>biznUdcbuFE6X+DM!sl4%mTVeA}t$cg<yY|8SZ$wZnl%3WL+HNUAbt
zy9&BI1!9NO(vq`VaA2!k=1pOROT3BdQxMdFF=^sZdNU}wu*X`EdoTV1oQB>Sl_rA0
z)8OFXh*;2YP(ID9tK;#$ZCc~*=)-3<uxZYAp<-&aETxmz)N>5<sE0({Pj2>{RNEf%
zlp3`B5(JT^y86vHK@+Xu&e%!Gsb>U<pxZhWp0F8f-l7YO%==6SEmT{injFx&d4NW|
zo>P*NETggk%d)I3-))!bl8<Xpp9c!&hMV4`cqIVepDyZB9A6r;Gv(qWd(fQagW@|E
z$%LCSKY|j~QA-c2RF5<s<GbWGvi0IXz5aw{w`C`sVj>la*sukj-;<!JWLyJo>DHlj
zVyYFCbZ6=a?ZP}A3C^uw^6D{Gs!d<>KiwiU;jPJJ`N0(vE-7oQ96S{=G+mS)khQJW
z)kQu9TnOmht|=N@d(x>mf08abGeshKlNA@pq0BRA$tBvaE8tBRFrm;cGL=^#>(3`c
zD7Q?Dmq;s@<j*<E2V0_1c98Krg5Ebp;jSorjq75F7am0?vfx^>suOtXy(`$aq5&%Z
zIuCuuEm&1ic!pRv(Ta3?dpn88mXP%MIM<v=f0eUHewKwPe_=DY{ID_e+3wb3_t@QG
zJ6_XxfAR-#%xm=<0jK$T>*EP$gpIFTCfH-H%2H5Ie=Z`m^)YU^csXdf8A<qi0eFD_
z_02wZRxcvJHHr7Xvj8x*bG+lg7gerAuo^c@a5pj!kr9Gd0b(?d)IM3$ZSwQ2WWk!>
z>rbgzL>PYMNwQ*N3tbdF)(3B5ZZl%@vBQ-21yU#1Ez)LaCv*?AEJ?Sn;J4SG`j8$D
z5wY?32{l^<UStUn1~C(ZQbkIKSO+@aC({#|<&{c5etSAEbR<EFpap1nBf}BfFW*GD
zEjtapYq*}(ZGw#sMqv?nT!~{)9WHekl$k5C%=ObCuAm&%wQmXhQr<EPRKdf;+weK{
z%EWT`e*oS<A-}%zm9Ic&*VAP83IF`r&wkbn=hJdDH8q*-?!EV3^Zt!HzhU0P_cIIR
zMayD`<>{<^nmitThJz8%!tF2%d%V~@um8mLMtG{Ks=~f~`*6n{ci^U*Zi0G3X|ne9
z*u76*^><7yM^J{6-Sev#@xq2fm{Bwi9vy1OA2}U{!a;c`7v-azl$UZF&4}11>C+{~
zjru_#!OcENKX9FJQ?~CXL}c!Bk=njTg!p70rp#D#`ls~abmAs`AfOEs<L>kUR6jTQ
z!{6V;1ApCyr4#cJpRVyTH1WE%43vqoQAWy&i3CVsbQ7>bOHc9X_19iEL-9^O{dBan
zv=~Ts+xq%?XkhY(9(qWAm`mvPdQ!nh{~U3^5ean1{QP{38#fN0``qWuEOLhq9Wp{r
zWo4xiuzJ>j?yt{X8i(q!#~#DmZ@&!{yL7modQJWP?|(;OVPW@id+uLTQ-fW*cA>Pi
z6gzkBG^@+X%8b{P#;;N#$%<?}_toz<;P`KT^Ba8O3tup^_^Ah&e%AE(<BuEh%nDp)
zVZUFT`_kW@dk5cpa6QhQmM8bDVlyP#w%7g6GUj#=&oDU`l#B9FPRh$kqxZhfK1s}-
z=p$~BUQO(i3}!X3eLpeH$Zg-9sYXt00k<Fl5*Vcf>~On{+sg2XyI;o{bFvVZULqX`
zxBNP)LrCaX<}e5uCLv*S#++8e>eP89m@~adI&seMk5PIG(QFbJ^#nA&ktQp=+;-b-
zxclzA%`!U_&#qJPM~B><efHUAy^1g@NU2EV^Z77m&KxsbPsJ+jC>IwO<F32zGDGxi
zd?+2hcH@mVn*DV<6`aD86?SyUn|ern<Rc$3t3UIZ&)}kqE;65c?xPF!cwheVmyIxH
zHj)>erSH1yuCrgw`}+0kF?a4<v%Njm5gW*tg$oy&MJiNj(D5;2#^9=}uENTdD~<T1
zJbEj8&wP4*x9#mtiDPutk6*!QbJO6`dzRZPVV>lk)q_RP{YTGt@o<v*R4#hoI&E&N
z;k9GdcuXnD=Y4DRoCvg(1fqrj+xNa!Rphqs5#jly4<>Ok&YYdtzMfFr?nDIi*;0Se
zV;UF%^&r!sW9=OQ{P>|)ke<?sgp4A15>qKW9fekhDg+H1Wu&Z>nX+?+-yO9B7mxr6
zj1~f2w<8sy)~s0r9cp&YIp-Mbt;nO^Q98rj_19mIZ-4vSMle#bN0SOXd-m+XrI%ic
z>gsB<OxNl2)84&%ar^DJ<8Oca8@}<4Z<rwF>Vc$!kj_M>`CG3lvt!qb-CyG_*>v^Q
zSDRsgDu~%~*>(D^$NS<Jzi0x4uUN4HFTC)ADf5qi{A1|wzvau9<F(gb!^DXb&2GKr
zVe{+g=rA9xI3<y!j2Bd0U7Zox>~{Tm*Wbgnz4gn)BR_j&IS}YTa+cn+(w%!2p9_w}
zY0eG3Z>1Yf%1gN^KXczYa_dp?jeU|)QO(0@q&ENTlMEa=+xLDljL`NyBE+YT4j=py
zABC>a{XD6-`7sIj{2{dY)GNnHj)SKC#`nGwupO2cU*Cw|JhdAOi}K)3k(urE*JFA-
z^@34<X$Trd%1W6jJ582lFFSD&36MaH6R;wT##Xxj{`>K(U;WAqsZ+D<Iu)NZ!1$*=
z^(oAsKi?41j<5<r>KUXSPnu@Unq}VYekx?CVDpP#{KCXg%FoNk4}S0i;}NC9<BpZA
z==1#Z&v$R~```Z_DhgSzqwW=9A7;*+iSy4tA7`F<CYCK*X4af?%KvBYI{@P<uC<SP
z@5PcOS#s|cjOiF~Lhld~OcD~pBOw?<C@F+I!sAbYmk=Nj2qf@GNTVc#*aSjsOf|*b
zxEIN`dhgPz{ogltN4i?Cq+MyHT}d<cuI}DDcglC?o;GLB94AYcE|t?yKV5lj(OAZ$
z+o*oHeCaq*nl=Up1*_Oosi~>maj;x&8SrP93gIBK{)1ie$X|BL>AB!pu^FJh<2^QE
zf6&o5*{N_MUc`;~F=;&eyL+r%Gdjswd!qjDPiwo;Nqq4XG(9r(-5WFmkG}ie_~dA7
zmsR)tQ6Bm2YU#pEOMiljH&C0r;~Rmrgh<Iri|)3MI47OQHz;2Ao(chHKC5e+<mZ3b
zAhV`LOHgbU&M}a?G3WISCvgi=Sk0Omt*|48miLpvt#91S6@$RoMIgv&tpDp@|0;~d
zMB|N4+W4}4`*wNs(MOeM5wB^lC%qhx%h9+jjd7ruwDE{W9~~cxv6uew>Oa&qdQWlr
z9+RtVrOT%G(%pC8Ei^!_UcFkLeDX<k%zJdZcx+ERua^ooHDOm-BrXSJ;u1QR-PgS;
zlk=;ps!AQ{e$>^~$@9-YuMBUx&#vvZ-|u?NI!R~i8(QR%=Rc6ixsehaN1nw96NdJ>
z;??^%u)|V%;zisZd47Y!alG*@qm%R=73P<35J)#V$&i6G^nEB01{QtyoAIez48XyW
z@<u7GZ8oC8P{x(HS0SK?Q%JDfdi{m+=>Mz~Zkub;&=6Sr-cEUILy2UhO^{HvbivFZ
z_m8M^mH}90zypEU5kr#<0tSKci$GYYwHqAcJTb0PbaZsL0f&5R=FFLD=ey5;{_`?p
z#tdb=(MB1fMuQFwQ(PiPgADJnC;fZbvSo75J@*`Q-8C=EuC1+A#;mKZx=PMI`)paU
zVuf6K>7{D7ypoa<Wx&$q+V5xmx^4QeHa_t;Kb{zkSm%Zf*2WApS~1W#(>2#zqdc#)
zfy`caf5#e<^&57}tLq9SH-kJY5m*?W)gNxwHH;~YG2%tsUR`%oh$B<)k!Ix{Yv0{{
zq)t+1bds_5eEroQ($8R{llTOFL*Iu2V_?yDzZswGo+we_ArcYhx26o`X_|Wj5`iEI
z3lGKYu(btrKsGVJ8%2gm<)OB|MSl0nW|=-MR)V4@VD6r8Znu~g;Jxr?^MD`!0W)Gp
z42h*XrO}_QG<6#U3<4vAfECwLRGcT*SWh&v?AfzNHH3yBd(y@q#(83Fr<|M|by=U+
zd9|^JMy%tHKfYh%kIwthLl3DP>h8VwUS;eF4-Xd}GyUjCKT^gqomZW-ed+UzVYP1E
zy6$Cy`n)acF^770KQ0YC>7<hsCR3+QRRPlPyYD`=bdNWAQkE{9iV3}R8k$<=*;h76
z?vyBtXMv9yo~2P5=&!~1$+w6faU`CMRihQZfl3&y*L&$C5k@B&t<Nzq+ii4`p*|Nw
z--n7I?++aM9uRIJ+<tPbGd5og8&ig-2w*%R?gIsS&N!IlJpz0mDYI|hen{4=FOuaa
zW2c~S1{pUq$ey7|VI8{&0kbUZ;Iw9azQS_($+H!$Q~Ho+1({q10fWFOMnD@-va+(2
zM-l^uGaiyQ@Uv?P2?_G!AOBdzOv=p6>~&4&+rNLmLSf33DeA8-SN*VkF+V@rBguBD
z*C9QXxEtNNx7KyXkYWbLnp(SdtqO?V($XR^F)_W`)2k5t;PSZT%a^Oux~;Bx^l#l}
z#;khjrI*x_K;psWe#B(aqD6AT2`30+K@C)vE>r*2`M2ySkeA*(B4?bKD<KhaD97+D
zjnTk=oqP+p<xNNjjs@~9;Cbo^mM5Nmegl^?x^ozvWOP5jz-ztHNrw7N41FIeg1kR)
z=zBmIpL_wSDZ?OO5bzxWoIz&X)-~&~Tr49@f}&EfBV{m*dfXVmK1b-gI>%l~m}Ow>
zCtxQTg$m2lPnxY}=f_@OO^*x$1_5^nXoY>!q)9@r9-Z`g{!UI#mhXM<d-{@FkDsEV
zB6U4IJ-s`hy91VMFWX%no}-tOeV03XK6^YWo*$?@PV{(EFY{1|m^^v1`mk&UQP+Xg
z`I|{A8rCn*zEx}@WPOc1D;2Se6weAVJj*lqY`Ms{Ec_(3P~mwBj8DYZB!hrKz#!lc
z1Pp!m73zUO-vh!egf6|t6Tnvk%alXo2$whvH1lhaa*qH7f2PBQ74pQ}`LZ-4QG&xU
zgKT!Va}QXbXQ5~TGh#;!i6t>*(ir!7tGQqhFbIrE1p0gT-CZLe<M}t<ctaj|-~r`n
zR9;p-ph3tJ@K~<9xJo?dt}T6)>1m87-meW%x?cU)-WKNZCh4{wDXNlZ)*qDPCxU0u
z>k0j!JYbSH2#{~VYaI9;2OOUTp2U?&<LpgoqcN}1Nk-#S3*<H%on$CaN7Efc--m*t
zzBe%FyWfmYmU?S}(c<V32ezR!$sphx1iG;EPMfr}TD$M~rp)nEiqrNapYAy-^?U0i
zE<Odz7a1(q{??8kJcia&2Ad317!pfj%A_$KTG{4qgMdL`v?0)+VWq!o+zIaEk3Sa1
zX?pMd_f)sFP(D};Sf9H|8gGnAgQ?qIudd&I&f79IJ>9wIh!pRtmG}hmEXI5^a;3Le
zTKO5WARNm<Q=|xZ5?3aTvz2FTecPoQqmzuSXY2po7@cG&&p^{1L*IvjqWxa~(|5la
zpStIwa4<0?T2kX8?Z}v88v<TV0^05ze}@!cJ3Bh%k*8P7svkTr+&0%F?+{S#t&qgt
z+mtVogDt(o<IUu;cM$f@O=3q3fh92|BQ;AHdP8J>`4<7Ms0j<f?5uyAVQP1S01Zs!
zYm5oS;M_}>E>%0yF)PoTWYJR^58m5UAd&^6lj2zhn<0%2jxFK{Jc%pu9m{;Hd$i!d
z=p^n(4u?cHPVTdqvn~i2ox}wYm!I0O<Z`;#Prc;M(Dz=j4DE-X>AT-!K81&c%A@zM
zly#fSr7OsCFCQAB*4>Ig+Nh)_1Kbz&`nn9p-RbD)l2B*|zLqm}d7~AZ{;)2uYXNX=
z-Of4`oNU9Pzgx%rHLc9J>!`j~Ti+xf?kJMkNwE?f7VUJmHwe1Jtp9Uxam5Qp42dN%
zCANw27N$md>>oYXtE5$$Zn<9b$8YXu5P}A=O^Q}?qXQC~QIy#BrnWArXla+s=n%zg
zeA$J_P`(2qt>9xOd5?gm5ezhb+ikabU%)88W1x3l^Q^l1X4$agsLV>i&a8@OnI(6F
zfAoB35yuNS&Pt92u7yH;6XK#2KhtA!@a6mG&Vj}fTQwXyiISXb;uG638dF<44dLK<
z!w~<A<uZ*{Y^>Jz{at2s61U*+xs!%dL(^lxp3$H5okNms8f`M29Y6QS{-^JL^L(-c
z5)~OP5fNP0<D@?Ji(Ve9!Ge3*F<M&NI;F+Y3GWvw1QyZsvN*qH=YWrn2$r1GNC|Ta
z@PlcPyBC_}5F27N7E^dwutbH2ba!p&BdfcpQG3wtT-r*(kp$fxlh@}83M*yp!8%Ef
zgCLLKqi!>gS|moe;iNslkXRB^V#}nl9^uxE>gq_#`Wm|<)iH`mG&+IANM%!}Y^ZR^
z>Jo?Ss&12J@S1RV5RH~ZJX_G_851Jp`qR=SIW|<<z<9jvf%2SkqmXcn|FhU3Z&$Q=
zUFK-PaHQI4jeGYonntP({-{UGFYLd&q)(HszImWdQsUx3Zy`_W3+`?*=LSXq_z}mH
z_&DHNJ0|#6H~$!*_rB6eNJse@3;GBl%SevEcnBZTtEJy(mS<06Ss!6PBSMW%Vt?K|
zZi7xz*XV$O$2o!L#>%6~xIgH7J1TE*xYw0j`!ec&`<cE6gz-rQw^W19y>Q{_LnV>#
z0HAG{VNHw)lW7xUWX_~mnUo$Wkzpaq2<2&6{?11N%@;EphNt6ZCP8rxQw*#x!rfPU
zX>@a7w!)tHi{&hs5-&5dxs9^@>ex7DpV)VkdT`jVtxRrtXtNwCY3PU8TbNZ<)r#zI
zl&I)93>K0JgXQ4I60ie?3d;kHqOetk8BD#ww6(26>KkFi(1MD~`#ln9vJM)Z8d^JL
zXI+P!JS|p!erdM6_{BWA?d%CMAt6jYu7bzqC`l18l)Y2kE*C9JlRK`RElZ}xNoiAu
z@_zEP3*;pYn8+xoYww;RBrhgQ4@LR^x^b%f=5skx4?YwS0*9v^3$T3FxOX3;DZrZQ
z)8^DRI1m(_GD}~RCf1u@Rq5neQ7}Fkp5=pQzzJLME#OI6K;f%O9SKV7gXX&w(Tdoz
z^-WIxfouKAhrZBBNCU@;K0;PYI=^IkqRh#S>lZs0*n9mPulfi<;9Jw*nCk`982_O~
zw>rpk-+FYr{N?pSaE8F<JkANM-V+V}q3@JmV#9-FW_mc}n6an<`Io-?ec7F_-Z4e!
zF<PP`LX=S@J*jsLDGv7ih4r%MNTVX6zN$Bn%ORnQjSQ8zs8A`dqX1{kX!CD4aGI7C
z3xiyz<d-xDayLxdhKK-nZed!sAYFO7-SYTOx%jvwx$-l45*JO6LyIX|JG@wV3ks{T
zT{S3f2;L*!A0R6j*pzT5gK}UTYbM4%=xbzEG(cM{i&y-d*bugvSf+$v1{v5QQJFGn
zU<4?L5UYynR=MNJopR0T>2ltwIm)oXhs-Cf6!U?L8gdRyUP>(tPm@z4<nb>|lLgb0
zdzJZ3jL9|c9F?EFTp%%Fh(Bi=g!*yY$84{g@q;5m-_lopd!GL8w_oe4ES*O?Es+Lv
z*Kcc!&ZqzCHt;?!*;|kuEqNJH^6t)BX@(&x1f+uoy6oggIp>rdscvYKU%yeT@St75
zj=?}}({**%?L48|sn46gV+#R}5sr;FHWbOaEmd;I=cmb(Y{Vc!QnCyNABTdbqXk8v
ztH~I6p@>t+8v|s3u#MUq(v@eiFHB+R3mqnT-A*&CtA6|@j>HqVDtyrtjkh0L<yF<1
zf0R|XDE{%K<r$z!*@}Nybjac#UKKh(W<K){Jjk)6kFZZV*^l+Pf&Nk-iGw~8YxI$U
zpc?bn6?q1IPDzN6pWQfDo`3C-eC21G<Xe~IDm{fXMjN0sy%|aR&Plno<qmo3svP;;
zIaB2Ok8hQ=yXqt@CIs)b`^UpR@x35T>5KJfIm;c5=e-ISXBqBm@$p$!=qLIf5EGGV
zXBG8S2>;*`A7&_)rY)PDBs4sgR<+2-yQ}1#Z58s#o@%M6ZIS8OabO0)N^oH<YOpw~
z<Y3VczrPmn9wwC^$IhSDNiV(!Q@0nzD_xiCvnOXp%ki_4V1(jJgI1%#v@UbG-R6BY
z>SHnBp3=Q-o6fJx(HQlVIPynqgEqR1eC>f*V;D5&vzJYj|NG8jCE$Mfr<-N%M@4EZ
zE2R?;km5Xt$^Ih{<WnM~OD|s->_GP+6nv&!*0dT+c=%x~G@jG%BF8t+uc>Uayaat%
z_mGMrcEDC)2~3HtdKClVMHy1ZM&@Qq5JBAA^l16T*XPLggY|O51Dj;aZY*B}w!BWk
zRtqb`)ha$N(o`DsANcw_Wq8`Mr&1nxZl~P#%PsQI^Sflvp&B{=v>cg*Wo-2@zL2dk
zUSA|NOAJ(wQB0<FE2H+dGA}=@hr*A}&toq0(}{U_*){P#p0{7}v~IpbO!_YiVOgxp
zHQDQDJg0Zz>2=Lnnev@4&5`Muk$6WtB@~T`fs5FYDu+Dw*FEw$KI(n78J;|C8|&kT
zhCi;PZpVAi-&&z}ZJ+r)HW1)=X1e^WT>1Q&6XmX_cFPkl?va{$hmv7<Ss9^_k@R%m
zkt$4>kY^#_xE4lK3I6+H8@Y^C@htKs@+@0(_`Y#0=hwH(Kg-*cVVl1LCMmuJJb^3m
z9gTcT^N*YfG4c@j$L4*ta^r)WWGiSl$5;>l=s|s`Itg(aZhgdRd(TkU?@LcFJU`N9
z@eYnj4#A<6$8zk6W174(Z;<*3%hIp(K<(0f@if1^ox^!dOpQ*`gD{^HpHUk0^>1A>
zTkiehbos-Zh4MWZdh+2Y!uOINCVIZ^A4&R-YMW7KB!o3?#KJHxAl<!ax9*80mjo%=
z-hwgC-@HdBzE70jDEIMtGd}d5x6^qo%QQg0VP82J!TW|i`H{W@0{^GhR)@4XxGUM<
z;O6*jgE^AR?C8l+P|_&xeOe`jm91*V^+<U$epkx}pH@mc9=tWEEg_ah7`^};D)g3c
zU7|*Gc%2_6=2r|D$IMG*LJu?MapzWPo$o1w?WGXnN!_Ya`<JStr>tI=oY#1c%~jqX
zJV0W*SSLR^9V%&(;;PnOOvanG5eFug$+%j<7)?Cq4nr&F$bCDoWU;BW12|ZX9B##Q
z<a#!=nkcNtV@c}umrjQwKU@C&NrkNX#b())Ut<w^j?MNCDX(gT?Fo*NjQ2Fi_i|`0
z?R_^1HI?~AlcHtgUGrr9H)mo7EkuoR(s_=-NNA=!|KV+OuzO~x3UKV?UA+tQ%P)*W
z#(e^&<<*T~Fnvs)_@!-uU#WhMnQ77T{jbcG+pd@_zj`HK{^z$lq_C_}33Xaf8Lsh4
zkGr)s?Q-)4S;`Q$_M>81{6E{}x9^tA2m9;g7wgJo;g7e;eNSzZS}a|o073&xX(JTN
z(l)6;?0~keAX!t@A)g{X6zn!CkwUS56g)~>@eBXgKoKp$aVIXkUfC{nE$}9C(pR3Z
zz;AnEQKa`rnh~RbJPyApzzkHky$pTKd&w^fTe_qcZFWsOcNEung|SSev_lC@%vX!^
z@1uVQ8#>f&6fkanwOAf}uTr9&aSM4Lg)L9o#=ONS6R*BhLK<9C(Jny%g8keI3`rME
z8rujkxbf-7Wyt@1cZu?v_|gMg<h6H?D%ndf)9C2vkP7fD(pQUT`5zO$(+sko785EP
zZksLdu9_vuI94&$oa2^CND<=3-F5y%+4}wY@|n2_Qm&T0`oI0%Edbw2BHseOqm^&*
zS&)`YOplQtTt824xhzi}{#(BMzu)bU;_@aZ{{Y={3bn1RZG+ZHSl4jsBhDuFD|fr<
z9No_n2c+$~44!9uDL;9mj}S|pSQhJZO}Y&{&&z|+M|dyW#rCnCtjp5Ddh{;lVbXmH
zBz>d@mo69{I;9ahiG>C+a|4>{$@uxQo~bo)I5RI%9=UC?eCGHx`PP%W<f)hU371?`
zuH)thrM+~Lk-<od>3mddt3C1Q(a=;5`b~L_llzI#NjB6%{;6$O*LWZ0Jj!oxLM}?d
zhAx?itGTwiLxn1!{6hWZ6HMUMK>nMW6sC+`8*TETz0P4d_7i=lX6e`DXls{M_xw?g
z0_>ZvJ0H7ignIo_(g4KS=)8;=6(?!Wkp>l@n}WZ3_<XuZWi<{6@Qj&Y8A#F-!et5!
zRtZqXDa153x66^TCMhUuR{1#CXjI9a5GA=8(elx*YMGTCE0b{#Ux;cfW!rtEUTUFS
z(6flKnJQ{qWoJRXx}SBD&@=G$@iP<g^0Xp9<LzJ~2|@e%t(!NyCX~e)nG;Y}E!N6@
znqRM!8@>d@U|wFFGAwLBJELG`J#luj#K6UsiEFi)uMTb9g5Z%v<kWe|(hOp_6+w`R
z3){uniL?Q5VR=j^!pn@V%rsyvo|Yi-v3PM%-cjIppx9Cft(+tK&Az3^MPg|T*XmOJ
zA>rUzleDl-B=RQ4$xm)tARlcnlY5`rC(97O=8G3hQLeD{jm;8{h0a02A!^(JLBE4X
z<u$*eRXBsm*^>IEcDeAk`{l~42zjxnRbII+S1BpG4%Wyn%ra+3S+f;>Z#b~yyGJZz
zfGKwkQbIh&CIt{PHa&krq^BH-3uYwC6L%!Z8}A>LuRpRwu0APME`^5#UHVDLkWJAu
z&r9w<C&;7JV>#JXFn+Fp_h18f#UuYHkTX)TtQp?46s##&EI`G7-%=yVk-;inz)l#e
zRxVDE-<_E)SsAfPKq;+glJy%)<)JrAB{wEm(Ow2Czw@&ha@Gl%vg+46<<DQ3C@W6P
zQpS}7N9yF6H3hN`aaWdRN6Bxln<bwfsFAz>k}uh~pF%g6zU9F?f!DU}sgm!$P#|c6
zT(Y45x?OusY}Je(1gSrNd$t@ZY>+$uct~<%EjoI*u~QygIYn|aV`SB%pUOnkx$8Ic
zWnx;CD(c?rX3Bq~J%=#?^u1^I3O%DX-?I<~*A}_`x&1N`?<9H6K)B?gK7@c!Wasy{
z&XsL@tL3$g74qB{XGjiktg363cYxu8YYJroItutlB-tl3%zHZ`kgIo@bds{nwHHj4
zGmp=Zr(ZoNFTGnTt6)S}I5S1@`I@?Bc-v{7#R1`eo<#$2B@~aTIq{O37%6)S>g2Xp
z3gtA&CR}32iMpKlP`Ui<TqV1`w60KaDnJtX7U->$Z`C)njB38c(V+PU$IO#=Cd(V|
z9hDm%-Y(aioF<o?HA!NiN3=mFx%r;wr4Tgv8_-EYywpjCOCQmLqCdI&U~!Y05SX14
zD;X(~YGp$m<Rt1sOypBDb7BxUJW8zyVUTYcYdn?tbsH#7bC^=K;%bwA^^u*>N5CV!
z(MQ-`@-@~o7cr(LK;Dajj>Rw`RnWWk9c@sC9L5x+fq?n?!(}A(5w^vmJ(TUNwo&fR
z0_M5N;Sy|g5<QI6U%NLL@jB~-iE<)BguMKp0=ePmTjZwmb7a~4RB38x!i3()(s%1w
z_G)hT4|*~k#U$JLbK~W?^YbJlHCoAclyzQt>##iaZiP$*ZRcdd{E1QW<W*B8dqOPa
zACY@uXL#WGy;1-N8BR(*bVZJw1-UXB;c)nlk}lo<k3uDPVaN|tI+nh7@uh=z`A8(c
z8J|eF)H{lUBjt@!)<`cE?-K_{2b`volf%@5FREyU$4e*N<SgQ)N^};?9pjkD@EL@B
z$@C;ecuT6N40fsIdJCr|Dj{bF3_B6Z)vgmUlR^;iJ6sVr8dypyThw}S68AXFWWBwu
z5<3QUDi=K(ln%o1LZcN2Su6fdn;0pHvEfn(4=h^kXJp05A{c{6^!uw@HP-gUZjgth
zVx6p7Cgr?l^-3fOZRCdxfdrWWdLuEInb>Fck;I#d8QYech@~N5OiaYl6I_(R3l-3S
zmka|MUzTZD&dA{C#lU_7VtmZWi&skZ-oi$OA?q8dq?CDd?zS5)lvC%YC}UbXW<_~E
z6bh9#MzzCJj=R56fpERnfon9vX~IJW7ec_0cumj3uwN`Mgn+AS^ekhUoYm;-8uMao
z@II$NKw*XDa>*Noy<@LgZDE<L%YL7psbl>-uZ>GQ&vLEW++X8k;q?>dr^_=C#{Rap
zKyG<tn_RPeg7OfIQ0+s%nbFS)UBRXS!LkMGuz&T!KDqb5=F4YKnkct#u8?Oo*T}aP
zCddV+Wh<k=fB)ZJiDt*}A!Y%Q&<+M~rSX`;t!B7Jf%Wd`Xxw-g-%BoWJ9SA00#@eB
zjsLq@Zbr<2#h3&L4aPgnL8FrX>-;&0KYX_eK`SpgAzq@PpnSZeTwZ~}^P)5u+@0DY
zmz6P1j12(|4wCIqUcR$pg4}fVENN=#kR5v~)H^wAYO?&-6*H8<<{Q61C`;4A<q#Cq
zJWO!R%#D{{-#iyWN(}N>Vi#CUYRpNMY)q^i{Mi;cSm{u7_xw|{<=Hojq^iD6;$eJw
z12b={;E^>wCr&;=+xElL>ett1%h#`**?-+of?ZP+`VkHOIcG|u!nhK@Cp$A?!cNsO
zeNw!{Mp(<Y8?akVTO!K=7A@^+d0is{`%-91z%!aVB>~=PVbXx#Ttc`FxZHu@?BDDM
zmsNi_B-7(U6#pP5<h2)`kt5B`ZL)b+h0KTX@tX6eD0+G8OGo9zlrZIG;@THq{pOP;
zNzt^|ytlqWcHnZzPwjYT;obP}ug!-+vPypS_d_x}yG*V)GYd1!c<13+$ju!BfoGBT
zMBzQ+Zjb9fDUmBy<jGRJ%kirj`;1F3f@D8t$X6Yoq6|+oGQR-B^`w5{iVTdFk8_|8
z<Xa)&TMh?34fz)IejYxqaafDf?Pn$QOugWA@{bAfXD9zybxEEq0{<u~ub1Kmi=KPa
zAi^^o`iQ4;H`}@$x(nq{PO9f%Qh<}o>?_$5b)cyeV-+8w%+7qATwmOeE~M)|1_zWE
zjB*FmNANm&qmK|nB2OKIhOLzN2=F-vJk**c8V2Y|6QZH3MacVD-9m=}%DjQ3k5E78
zgwCN90Vine|9@S3uAIIgRq@C6c1!O%22*oF$?W0K)i}{`{G1ed^yP!HdPAwywMR)}
zL?gyVu;Ru3go?ELNJG0^JU>Bx^!0^OTi2o{LaBG1wKP+1TR8`^({6cqZ7C)qL*)Bk
zo~g$1Q?Kk-hTu~drb-qj?w;M<AP--VA=jKYMb;q%%A1=j<@9-pk_a82DrdI{J6xT`
zi)N?q-IHsm&!Y6hRWaoE;VZiTlI<O%XHbM2_x}D%=($Qp9)kzW)B?{NYj%T*eD6eI
zKnVmeS<aiBpbSB8Z>_)*3Apm3K~drLavBU448&ao0~5W3FvgUzfh9$tos1z>QQM}<
zUOFQI9#JupiS0*sBak^gex|`QY66Tp2eG7$44Vp41`IM(OpcVdNIYi8=S+_GsBR~p
z@6`$RiT%@t1^Y$Tq0Zw*&S<=~wNjSNN`!}1tZX}2tJ)n7=A#8@5=j+@1AQcMq_@;S
zxD*2yDm)SB8;x6>jXYS?fb#2MvF*e*$Ki73;#7F2M9U!rGAG`;A3Pq3#J(h8rep_Z
z%O8bnbuJXF17)pp`&E-=*}@FfgpE7O<bPi{B-18DLQz8?<E!$zuYH1R55ABu(-F62
zAFkc$dhNx0nU;=gDD$o>S>^;)ChvOy*YtgNUCA<S*IvRkecu(fYw$dJ;H3iVK9+eo
zg6`@2aP2_^FV^?nX}`vHc3*SIotIBSus*B4kJ-P!9hPZnh%r=-{#|CjRxUsPdx5&P
zugoFex;R@-S)3tXIClzm>k5~%zp+zRoH0pKV-Bb|<9r8>d6`DyLAuo8z<6B*CG)-y
zs^qNWO5_YI#rW!ldGhlQw#bc_OjhHX2DDcWH_2zQLj~7Y2Ufzn2Q2ZLC?!1taa`@j
z!@z25V7EpONz#;P(EhJoHeHs@tCWXcIwX@mC=o|+9K6h`E#spiKtme~d~OG@{A&v6
z1yfx^E3EbI%cQ{23c-Fp-n}ngG!=ZLO}_NA&GO2gMp!Rk>;+x<+xHgAg;0F{v#CNh
z=QqO3Fi3fgvTRNu-2TWGdF&JH-i0N0>%KQzmP4Sq_=FVs#%~YHds|9z?<6^IQIg!f
zrdUpb@o6$9NRFGGqIQgV?yXWeXJ)iqd%kCNyKX_p_nmzpZ{TEwbIwMQ016B|N5zFp
z=ZfG>y!1xIj{Er>IdfT--1mol^8BVc(A-d&9Bp}JahEAhx`d-##)Fy%q3cTn;IbA%
z*xKE7GBr6^?NF7DS>rc8D3o)a+>iB%9rESH@$&R{k5itGxvv$&OA4{H_>hpe)X6$h
zKZpJWEEFaHipI4F7~RZ!J9JnFTY{D;=>7QfZ^mJk`G|b;(H&9+`6?anK^F{ATILvR
z9lpAcv)IexLu4%ireC~#lFZCYkn^TQ%cn)nk_uVy5OmO!=B23WHX(-HF3gfoh0!&z
zco&p=yij9_Z;?KbdUGt%TlR*_XUH-b`^i71Y$%q{s6>ftuXE;7kH_P$;m}8V%iW!Q
z%iZZwvcCwrNJWdx!~_8+gToIusJ>F(p9o!r%YzDz)~iYSIE=x$1L#7!ZpOWFO~kKH
zeFRKoQ2Gd{H~GtK>{L#}(_XAD*?x#IfGoZ=BP&`KOihFbS1i1@Y83n8#4UM@KlKq#
zVu!=?a{qy9d2>@Gc3cnb<`W!iVX!8=^GUhlt6Ir-#nb$z(F4yPW0hlP8e&K$!fWow
zAJ@nk$+?o02;_jXPx-r-pj)(~OX?xVf90ad(u$Bl7yM|Gd<fl*gK;J%7e4sOQn~uG
zx$@Zha+#V4SqQ64{<;3J+;Mll%sV$)z6-DSv!Tn-&6TU7C~uwht9=w4<<}ooNj7A<
z%n@6;L4N6!X*f`kMgm{I8K0~!THhpIBw~X})<Cdk!FfMp{q&Vcym&nuaYJZX-3=FL
zx}<W+923{$9t7d0_frNe>y>ctR1d-VBtFfrQwAKynxS#01iLTM_{96koQo0UoW>^_
zoAOJWmHR7qnxisNT-mIQT(h!cysO(yXV@IK!WAy)^K1`;rD;PGXFeh^3rAxZg$O2H
z4qtZ8fOvw-1mIE%g(Cv*4ZVKoZo3VPfio%*AiVXMiKvEgDghp1tcx*07-x$FgG{fl
z#CCHGa2T*%LrI(k;mrGK3)19wm}z8U+4lOE%uSbPXQZGmddPLD8FpRg;yG#Z%=ePJ
zUr2f%=(K2dnmm115}vp9vPQxSLZ^kZQst>T64Xo#31@VqwUZpLFPH^4@7sE=>E)2T
zHXmL`kKY=vo)z)ZJJ9jkJhbuARdLoe5af7x(CIXHTCzNR6R%kY(gch%o#w#1;ukl<
z#goqf_i+sBG#iZj!5gAgJKMmpGgGZ4GoONK{^gxg`SC3i<&!O2<#0=@B<Pz0<S&)X
zGZI1&uc1H|LiwO~#wS0VFS8~mA|PR%-1pBSIXg8hz=U@TJTjP=kWQ)WNR%Huu@ii1
zaNeE)@+bnL8GL^oBxt~PI$WDm!LaXpyGl+^&6C7<dV3=1w-5HoRdf|&Pfv1iVuFGO
z>6L5Zl7Su7?Q+j4>FQm6>CJq31xx0xoMsuWUOdtwYu+nT2D7slCCgLWY9$HpDd`r|
z@35rkvCXw|^|UD2UE#olN2M}8O~wqhL<Y&a%~i_yL|62nf0ZE4Qm3rQi&jf}HtnpC
ze|_30zxnDUiH!~wPwGzB)+MWYt4|=p?6ckDgqPW0o+tg|G5Ly23sXFe*E8T|ep*(T
zBtfaEW1uD^mZA6&@*b@3kULHu7?-nPSbONhY8aowuw)}t@{1efoZsx1b79nqo)|9w
zE@+YiN9!Qe#Yzs8^?G=g6`|~EcpA@#ut&QyTIQ3)e#QiMNR@hTcbWIrC#Vj-5I$Fq
zEbgk9j@W+m;O+n)t3>P#ID05r!&lFK@U67&1j&$O(4<%QHONi*wK8{FlB`&gB2Vur
zkaHlPT{<yB=HMK8G!@p9FqX;V0x6Mi#g0+F<&J;c`&N~l1VuM4<}e47SC4|@j(1S@
zW%89iVwbx~bNiFKxniRc^pGJjCPKDLLX4S8$mDF;M0k)g6(Ljx+sTRYfpj5z-RfqS
zFZPr?!Clhlz0pS~k5jkEf}TeFXiQ_DD9h65&#)*{;UK_v@jNHJ$vOwZMd>4WxAdw8
z=J(|3vJZR*`siYJx5?*dG@FbiwqFHbvhz;1n{8bVj~A}Mpgv@hdjtp#bv5F&8ymRL
z^XZP`6J^hyowBb6u_F_+-QPGuXE`3~G4Z$z6Wr7Atl!&QA|GN!#+7+a`L3WvcI>N?
z(~irOlMxc+U-*3h#@-7tq5u2G+T^+QWzt^Prrw$9&}BAYS?bA4GG+ID3*`xT@~+v_
zAPG<_Io>!*1{!xm@3y6%t5q}3%0{Bz9?f~^J@88j1c`)=f(R2|@V*jpB@v-Ph)J!5
zCPCw9$A1!|1`ur~ZjnmjVYjmKm;{W3gaeQQ0~61YFf#U2HQYnfKon_^s=@C}3_hkJ
z%tWvnZ|hb!58{h0b9PJJt$y>IyRy(1PjTTgJNBK4e>gi%;|Gs**<=PZdQq8b!*j__
zja1Kt203jE<NZ3ZJTe3(dH}F3LrD}U*mn{zclEJsUb7pc?DcW>f**H@-f#T4OI+fp
zmpQs#rvk-~yToNY{J2XrQrQhv_B^o&uFsFV6bC7sA9pD}CLC`5J;ta?#3_61g}t)r
zP@~)qPp$<slH@DfK9o9mEO^GA-PJNY=QuWEA+&!|+9H4Y*CF}FRWrLiK7Rf30ZG7^
z8{+U8o`z$LV`K}QCS^`^lidG}Id~^9Gsb~x(x4IOe$PRKw(qlMu77xXvTWYERrb}@
zNNmbvp9~0kQzpXWr4T|pQ&Jq>Eu16Wb4_3cQUt@xG#JB}=oLF3LPjG57A$&}r7+O!
zDQr^LQsJTLP<*DREDGEUp@cSqc7jYvU9-iLl;I2BMDHAEkb@9rW=~C$b0$T}?{_uG
zstYZF?42#;Aj<I2ni%bI-7zjh&|RNYVQ#+E@a5!!^R1&D5Y8JCYq8^=PT^kKxDLRR
zYg%7zgCU_C7sSUQxGbOip%qxnM&HTnTTvh4jl&D6L(%Ct(1SNi9P;C{(<A}U<v|$a
zvXR%xp53(CyHluA8aqQ|N=S{|k4+jPF!N$OpS;7+m$~(rhy*{a$K==F5nm$|JUIhl
z0ICp|sQblq8P~pSgEwW-P|M8(K7iLejZaJ9p?G{;q3o|~ms`%ZmfUgTXZ3+5SpYf#
zJ6hl*41V&hnlZ|^6#szEz~%7UoSskjE>D(^Hg1uchI)(mc=0qi<b0x!pd7nCVwbzw
zoPOnQ5-cR@95g=B;KT`ImXVH$@frkirmn(vsu+pL+e;V1@BZp$+1@7NLag;GsebdE
zn}V!o5~SK&(|}!#A@u3VUh)F+;s!Vk#L%k?&(3b(8weMcH5L4ayrUS-1N|AGSQqni
zg<J$C?%6I+67|8*5X$q`4SIr`M!qP>^X!XB{Ugu>592?-en|fDNo9A;ryIVzQEK2B
zl<juZ^k1GYuk!J6f*=*EM3@)~fX|w8iF)F=-lWc91r8?tG1}z*=k~}QS5H+-2`@uz
z(to~vM1K0}QJDcN(=+c^BF^r9wJiIgZ!MM^F-h^r3;X30Ot7V3;sWB}@Fldh3old$
zKH=eEp!L?+w7!S0T9)iy;~m~Qk13rm<51p5-p`%Qn4+M>(}L%YPH0QCHb_vBbxqnL
z-52=wTXbHQ!Tj`Ep^GZLOt>bw8iX@5B}%!2me)EIq3e#xV=u_r2wh%Zbvv_JwYj;!
z=el*|L6%;(c!CRWu(wY=H&moTE#R7}(^)a|(Wt~33huCW0KpJRnC(QI_cK0LG8B*b
zhz-KnC>!_H^l3C={R1UdAA~WWpSP+H_1N3ZGMvp+2d?AKwT<>V1Lc|>Yt=qmAN#sy
zjWO$eV{mx7f<6mN{MXLPkX4_brHq7(g%uqWjTxmX5GDvIA>qJEp=yW0Z^xv?@fb&?
z)edE-q!2|zFk`*Z_T2qk{nkR)is>dyqjfu!kPa}U2pAL*c#;h_X~+n0S=(P;KOk>?
zTGJi#>DpW0l}3zXzSr6?GL(vF$jF9~qX0%vrg&^<5Czw0*ThFk13D9TUu0q|Jsx$p
z17-DRydXbobuuT?8qF8fxW>4z0FC?zah;|h*529h5c>VrdRaO*RW0v(eOnEzWTEOk
z9!%Z5hxM=rnkL#KEcA3)wT9VyC9CIbwDYw7i=XNmen-NMp2-y#X4SY@<v2^j1@(^O
zFW-G)*~rY!N;vt^h#di!&P|rI)EK$$#X@1ga)#aTNkTLDIIxIN$rqvYq%+Cy2+&yY
ztH0;VEG)x${FeFhvtJ*UC*C_O>0O*O;ppHXvBr=;8_U>p3$XO=HFzw3?wmH|p?K!>
zSb5-m%y`3lk!b@AQw^Y3!9hJrt3TUp)lI&IQ4hXV55AQ+M)?+Z)MCu1cXri^j`{S3
zPu^8ae+Dyte4>vi`i3!V7dCAJLeXaAaVK~478+H`F{#KU&l%7`f)V#ACEoH-JpiK=
zjVdg|t;|0_-L_^p4^fcwo@1}|obez_cV^Oi%14}>QxNFan1$e@%hIRyjs4-mJnT1P
zMloplVc<Ys&GJd#nOH9q|4^o+GsBW;@hZ0O`#Y-CIn5VUMmJxoz1=NjTQTWmophFr
zbEt2ia@-U>CQlI?;%zy5h^+g#NFMp;QMmxJ%~Q88!YZ0@@N#&y#`;eu;d{ajkGQm-
zLxmfu(16JlMn?raOzO5O;ic3L3LIyd@B%MHVDXC{*e(~(ijx~I%9X3nnXJ6zZh85r
zEKd)U|6Nlgzkj<-zIb}NGEm)n{am^BC!3U4KkM!uK2{Ar`quQl1F~|5#KlG`z1c^F
z4_M5x(({0~_$Gt%T~~#*fI!-<*WT2?he8h<2F~Wvu{5}mGC(+u9wa3C!+cy5%#p+T
zn4%#BbKsL%C_;zJ^mt;TF^h!XIh@qFUSIYO-~xY=;B)C66Adm5_{UEPmfSHQIosJh
z#JbrJS}7Um&w61kB1WP*P$IE&+aKqxp0=&Os$9!wot%x}(mPeVt(m+=1!PKQlv?V@
zHPMXUGW3L4B?yk$(&A7>_0?~-Rekv7lGHso&%drIP_F5=celsZ(7vvbz%&1c+sfqn
z2R5lVWY6BQP%gu6J7lyR^W2RiJvl}yG0PwjJo^|mw%SJSkZq?>5QlN{Uzbf)?>Ajq
zIfM7j%cmf4GZcMxKHw4>1uUz8X<AYYT4yzq@3TpsBVY-m7HAv?Yu-5`E5W3>o6eJW
zES8h7OA2XudnY^_N&HpPe?70qH7Dz3?16nnO)!wRD-VV%^P;2x-V5}+p)eAK4`;+S
zBKS8M0*#|gxrqIA1cuQlykGoO!i#58n&qNhfu#}Q0}cXP`#~?K!%)8lGlINr$*g3#
zbWW^H%8XTkxIe%$y@}WXuf*x0HJG}?@scra5|@^Tp}nY130Came#Bd{nrmwl`lx^L
z$wbks)D0OY*Trj!whfF+c@6j|E7Aqy-hN?1eGc$e#(X3JU^{%7*bJ7b`D|ZGp)cz+
zWmyRDxoe{&&m1jnlpj5{T^@NkAI9c9`S!}$Y8RpS*eGeoW)6YIvnVf-M!bBqMYbbI
zI>*wf^RZ+cF+b+Od$AGp<aNY^S_qHGo=p<0z6ZoN_!d15QpW(_vhxqdm01lBgq1(t
ztX3pEdHZ7Qs+FeXqXx(k`d#<t8E~u)pFYxeOKfCPWg-ux&QSt=ggQ+U!eDU6IHp49
zhDmIzr!s$Ev=>GATp!Ug9%Bo#?bOeBjvu}}{7Ww`>U->?I?sl<Nxa3Ln3t2Q)JgO;
z_Kopw`Qfq3gO`?=EyIrCYy<1#bM2!Vdlh{iJKB2dBz<NaM;C~<^?ZY5PkxQu^3Z1a
zH{xRb^2XVS!!}(Ro|GPrO%EtHkF-vr$32&=zF*{!4Up|#g_m_P+`IQ<^OqW|bef)*
z2<1OW_Cc=MgL4dSIU8Y5sP}I#wVY1g!LG+CkbQo=wN~ccy;V6`EWoB?)$oQd!Vba}
zaKLDX{Bqro_RF7NJE%MqXJ$l7Ej-1C-ADuaUhULN=m9!XY2<2h>5+c%NdYFNL`y1W
zk_Ve;8Dy-X0u|&tNaiD;w+i42;hD?vn($#B2C!qmZ+amyWjnWrCIRM$j42mZai&tZ
zWOChcpTwrS11h_sN>)2M5ErUd?MO5O@tv3oVPU5p#Oqa9>&Y~jx@kBdL!?p=k9Z!T
zw7%lF5sFO?mbP)%2}M+GZR-S)W#aBj&B#aBK-a`GwAy;wHdj~GV@vF*f!9dP`B6##
z<qupWE?iRa0d}r9THd7c^;e!%NZ;SYs1r=-;ip&2Dr}eQpfGAuHv;)Z_42*Px5^*h
zER-K!J6#FxX%I>*q0Gw0XEH4(NkHi(s1xIr4{s<BrMvc_A8@y<e^<82_b$!BZeh{#
z(e`q=<`>(AvzPO*q~S;BX2^%|f{4Jo5irU55-2QzDX~?84Cqbwdl4{=4cvwhMlGmt
zr>n33?<U!}w^p9GWuBTX(n|;^L8>9^!!b(*G6BY?=MOc>TiCgZhT1#fMxBJ9qJKNm
zs&=$`^(fr<VRd=v3%Rm#Mzp-Y6JAJ2r!7uLoUb7H5Stfpx5k5T+d1dN40Ubo)+#v;
zN@tmqPV*!Mp5CL^*%4mA|2VYcANkEH9>TOq30QKMW0kcL-cY!f16S=&_A*$m_jU6g
zIB7_EGq<#Em7Mfw$wVL5K;Zh@ffo7SbJArt;;eDAi5}ygb7BYga!1N{fy2%!Y|6oA
zJ3GQduvnRooxJ$Wn0n!YjSmjla9Ovd+FF7{ixm>@KPEX(>T9I^q+RYaA9MGhPFPbu
zB#FJB(4Y!4H+h5rp{6p;ovr?``heW@t54+w?1J^=T}xyE;w5TB;LO})kqYuG$aewu
zeE53Lf>QF<W-N<Env1xi-@q(472BQrtK|j6fun(nbS9t@a8!5#*XfgzM>XFfw3>fx
z-BT$yKDbG??5~xlZk?}o&a{U&$i}wmDe&|fOd2kI#9O(WeWmWfG!?sWb2SEa6NVPy
z1l>?%ewN8sj7DLv^bzL(?@u2gZRMmicesneN|to&q}qU`=bXT1+taYzi7P3}prg@1
z#eUk;K(7$9>sv@yxH-ry1TUY7c)dKfm$w}oHElmsXL~OPD&5`^>)7Zd$6~@WO3uNv
z-~`++|F}!;{rx_<c14!_<fes^mldbzx~A=vmq~v{ioWX+#c(=Imz**|e*cv`dHAYH
z^1{kIx#g@0@(y<GUH#5crH5?!&OEtvak5--T(a!={sNf*Z^IY<T_BrbkUDL4g1meC
zT)AXPGUUQo`NNlUm6OFj#GK})HTPdTRep2rWaYto>&gjo#_<!B7yF(PY)}TCL2vJ2
zOL!4_(RXxLV?JzkIt!+cuU}I*;+{Nu|4IZ=Er$!~-~zF65YuaBUtyy%z@$T&K4Vdu
zlw(lS+lhg<x%`bWen^aId@8DFR_mcp$L@AVv4tl|3JpRu;N%xKBG`DFy05qGLJvvj
zi_?vq{X8t0D4XRl7<oMcUvqajG9(hb3b^4FqP{HD<xSlbdMQ|Qwx$%b84S3$6kEa;
zlww^dVjbi_5ofSF=Fy1+o;&$5%>Z$wcL0?pwF@22u|52d3Xl<b+BOCk=RNv<Ta~QJ
zDpMUD`^9I)4-@||I4=XY(}?D7Hw~3>5MLG8`ZB1)@<8D2tjo2XgD7@X-eS!h1jz?m
z%jBLv9h4tjkt=6l_blttTMt7sTnZCpmbqCpK0)*AIMxHfUm$R-9A=!1g$ZhlWG);0
z?Q4hSqv95|<I$HcoGMqY$d%VNRY@hbLFIPw0U^v=KwDS>Q-v)+)X&9Vc#fueNi#@q
zieaFB=AVaT9~kqkmtk9M1oCIn4AU|S49}ZRuY@zPr0ucQM`SL7_RpW5EHD0GiBcZx
z8y#v{%EH;HDn{GuA6LjPHrB}1SRQx6yfoSKqlL0=V;O{X!B)KK*!3z=-rP_m&u*)e
zGbbQ+Kow(fSnZ~ur3DU)Zt3&*>m+1h0Ob8`<;oCr)(M$XSk@@7eOjjiuJb+lzb&<L
z8FtECHb32~y4eJJIPI%%mu-8h<h)a}<?)+l%fH?)k%iL})GnOd@lEjn;Lf>>DKK{J
zKHMm0ohWkM`I8XiFG9t{dgedHiq=!WXWv@U<|(lu@*LWB=>To}!}eNP1gvQk*Y~Rj
z?fg<Opj;*e8&8-tCK2F-+k2Z!<&l3JmKE6A`nkIn;a#)hZE$>Wknz3GnFyX$1)kN8
zxNJNhK#3t4axnH^!?^xxMUza<ikGWDWA))(?6^Dy24DTo2T-#u9Em4zokV<*MkC+i
za3Gy10-gNbtB2$eb^s>-SP0`K6UPx{Y%gC9Q2+oy07*naROT6~PQto|Lm%-*?q(a5
zaS6Hyb&zaKA~5a2&T)Kx?Cnrwe)1uxYf8WJN+01y9Z3D^Be>4zu)nZDZQgYJtR%%x
z7?+AMbEm^dM6ahkM;cT<RiN`8-iQCa63?lm|2lu73(9at!xj-CX7xBGTVZ^r8Z76h
z5iA=qvwCjrIdT@`@gJj;^gu<d`CJ%p_Rc3im2+XZdFINQO6O)SJB{I{F{JN`{)`ZP
z=VZZD#Q0>~G_J~8v23Dhyne5C9jcY%->;B|Un^2}1@2sY&-F=a3^m|=d;G6^<jJ?o
z<t)f{+~uCT^?v8ebCr(H_wMbDCGy1UMKUu98?(U3dI9cVaZ;AD6EG&*!++W>WsM!m
ziybD6;Y;k0pXj^ajZZ{1Dl%LJ@uUTPu!#hN#+MIwRVl)jg+O~W{^(SRS=_xKRt*rG
zX@J>spjHu1uK!Jep@@n1a|gVA*j<5yUMn=9b1Est+&WY*j-IUMdQ?&;<|orB#*&9A
z6S0dP2qg_o48UzIX|lYZw!)gu0qsrQ6u>x6jJ;J*9oyPA8VCdn!QCaeyGyX(1lIt;
z-QC^Yo#5{7?(XjHPJjUaoFsd#^<A8*b3+wf-DA9NJ?7JFPnGwEKFRbtbo`i&3umPe
z#>5EbSyVBn@=lWhgbfS&#LUGwqlpP7G8isF{^T^0@|=^E(qt)$-y@dv_0HYek<gH{
z<Ao;QhnQJ@o?U|MKQSeJp@b`gcl<!tUXBQ>@~y68qwMrTfjc`JyZBztW^2c<IiVm?
z*R)>tq=~)yEGUkZ9O`LBGAt@S&p9Y~6J|pbOoem393B#!Rg+!_!Ki8ERbQ!9Jy?U}
zW&Mbd9pREue}`lM2Qu0HcH2ewn{0vVhg_Gv_I5)zv=%zRPlqI>=3F2j(d1q?H#J68
z;mN5?qB+x7$mctBHp*y6zjZMlqLPInO0`hVKCR>^+!X8zBi3`*37EGzMyI~p9y{ze
zDC%-_)8sGxeLLD+wF`LlLP9NZ2Ee{paq@d#t&(IdV4js_rCxX6)qcHe@XAIguC4X0
zS&cukzQuxCdV;Pcfj9s)O+V3ET}gbX1S-(UQd%`E*8wd(zEppaXF-)FrGofOeP$3U
znBA)L%I9@CqG`FMETwDg3s{#c`j9(=<A;PS36W(i1i|aVgu9`W7G-vn=4vJaqPQcY
zfu}BiXcd3z?TGHPET)oE5Inbc=eqh#+~&&)fbZmx?>2G{`_xf?pkcbR1A)M7MsN1@
zr+My+w6{u1dylIOsq`-4!!6{<Jo*>DK{&?^+H&ehJLUMh4%lk=eX8%bGJ`*)vPsrC
z^LCIYW{v0}->K0~Qf#&2QqS3WxK{@;9lc=T`ldK23_auv>0pTFB;&iiw_3WY=zO=H
z+aVS^*ir9EYNhb2=);FkWdP{HhkgXAdE(b!Q&&(|tH;*JuNAsdlsEpZB|nddyR#!a
z=RqF7TQ^rvVzaCJn*#=F{;2R8NBGs&-Ld=l2|98jm5--kJX?9@@<UawWf7THOO|(?
z`Ed-aw74Hg9lO|`RAF)aaYfZnyIfNzE|JlSAxSO3#}gCK@^>}_O*^`;q?2?m58h9J
z7=`X!S%u?i5~EnI91uf`mD*4EoNlJ<JDhJxHjNFEm>_=3HlIw1$~lkyNkX*NA&#dw
z^pxSA^D){P{i4;lhoRAN!lr=e>J;tM@cD}dadl<qN4v`~k(AtzobcnGp~BH^%{eM#
z%@#4K?4L1Nyy)0L_hV}>K4pY8#5yHL{<_nI-nlmk(M5f~+5O?lia&daoDFN<5V6v^
zyz*jK^L6STzeTytsoLKYs&v7GH4bn4>cA|@^{eXwao3IgR)9|;eR_>{NKbH3YaV~V
z5SRuh`G~cP>?aOP$CDtRu<vqA=#5#udLLQ>ynWh2oD5rjXf%CyaS|v^xvy@ew_Mb3
z{_4BPQcHvJyjv;7L2K}FA^pRSLLCEBd{1OV#!y7-Y5LaIox8lmZ$sT5oakVWzYZZ6
z0yGxay5cv*4T!t6P|MQA-9?G)y}b!w!5R^QXS2vJ>A?Jlmk0WvAYy*CEN_(EZTG*B
zAYELw{aSzx+`@xrnd`LeH@Mdtu;tac7bY#H2#5HgEH_WARN|}{ylE60R}U{6ywts9
zaC%vX|Kafo*}TlbjYPowaYcgNGDoT{bk}}iMl7s8`(mcyUG>&Iz6{>-Yg&KZ;D;0F
zc(C%1;STL2H4rXd;@U5YgFT4F3An9Nh}+q-&HZ)BI~btqN4xJ>=bG7?!gaX&aKh~y
zooA=3YSG29^J%?gprESMY;Q}-+N>)9!xn0Ip0#41N{Hpty&PXkX9aH@zwfm6Ur`4}
zi6?OPKGQBOd#)Zw)9a*oB{HFhat5pFdbkCQY(BCO?-8vBdxh9x+M7^z&B^YNV`H}X
z|D;_M-IRsRRti&P;IBj%gESZGjBWtyVWaYNsdpoea;*yOoP$jR4HidhS*hgIRLof_
zbGhCmd4R8kq3Qfo#&m|JeJngTXkS%EVa}V1lpF7R-e>N<QW|0F`omifp3;w%-os^v
zCD!PYCaR<-{poPFSI|AhpSlMl-{*VyYma@O#VU{ZRE4j{nVZ_~Z(Gb;`8fi21d|?z
zg?dp`ec=I3+~?#?QUSr4<>kg44w_^vCQFHnuuSVIx!^sO+S^g<-6De>vcAtwitsE$
zGCl>Q#hMYnaOe4PRhh_D-JPdFYi$izMTD&9wq16=j6`0$!4!Z+HgjwgdKeaG@zU~G
z<FE_|V%EBbd@!Y%J__+zsJ!=+>LL(pIx}(8#cx#Wt*dqT&_Cn1@Cmc*<or|hWgKSo
zNW@cs03Lz+4yEgPAyEa~N@So_dSbyinRTZ7?a%Rz<T;T*m|;%X%ekV?kh=Z(!OIDe
z#OO+A-;sFnXTx`kDl+BiI98ig?b{XaZ#YhfhexB-FXg~*uJvrA-8-zlTZ_!!YP23a
zQD>e<MgW=sn~yBpX`3!3h*Fj#{J+0-_B(|)L6L495xmN9X*+&Mmx~E*I73#+!Tdlz
zWJ(W@k5pj;6QM#gY~0mQl-=Un8Ru*A(P*PXXpYx(aQwzi!e|#8kND%Jvbn$)aaKu^
zw%<dUJJ@V<>|D0!fdz?Et<gcKO07>`9Gkzte4{{5{En*KJ_qJ6d>MOt)T4vwgsHi*
z+I=igm~tPm6=LVmY1nN)T^(%qgYCX3#)&=C$d-`D6mFcpP|NLCvBx!b1h-kr^3yl}
zBhICtDBx`+P3B|KC$v8#6?rhn2O7R!l&Yh4*Ud1*jk-R(1L3hAEm#c`)$n8SC+-pI
zj?vEgt$QmqIO3hJPG;acaFgaEvQGEKgPcji|I6#$opDL$tKb#F0xdGm4;Kt<cBD$(
zx%0Aw-x;QY5~=Xy{-B63+P&xgq@lm@f8{&WFb|diz5{o&!w$bejpi733(3!RR2<a7
zM9ixyV};XCM9-iC(T}VXoEC#g0-M>sf|ziQ%8e!EhsC^(0F5ev9EsNTKEmo@ZFM~p
zEQ$?1$uV4KX-E%>40b?na>iRjfz2q(T*(6KVVvfqs^8H7S<DfRcgF~Yt_7JtGW;i)
zj(CYXnccPfodKfb;Tl6jI+|PobaeIPKH|*a1MCN7gCFbUrh3BQgLPjVAGh`#TU?q-
z11`$47K@%u>qX3Tgo)Sooq5M&>SH@LM(iy)f5b6)Rl6sh(|f;j;QTQq7HCq>gcBB^
zz0>osX#GmLtVigmXlY1%_VeR`Ym&MJ_U1bFy*jk}JMp%I$^cgMO%e)SkUrXeE3MC0
z)FUfC7N=##k(AOt4xkr$Q;radE)eFh4w+jbmvHdPN-y-5P;0yRP((QQwLA=wE2m|e
zD|~|r(vz*)SOLV>+b@8#D}i{+LRC`bTC)&L@kr>d*&2h-d<`;iQ$Ybc+bMUmVz4(o
z)u_ajZ8vjw6wFBit-g(g2?x%<5}JZru{F{kI32+PO+1){H05sGfaiY0`KL_hd#(W(
z*aLzpR47+)NTQ7_T!rf=c%Pyh73e-WX0(l6=RC*OX2qtY+41U;6*I-tVGq9EaN~zc
zhgRSUl240Rs`)Gu2D+LWXCAzNu_LK}NL>6}7{3Ua@La|DW&C)=JnE~<S^D<Rxl#Qp
z^SSKDfby3k`LfW4{xxoJC+V1`2re@<&HLuM?ng5eX=yrfCta7)yO^X2$Gk-7v2T^z
z%wrrMSt`fc(nqz1u}|+2kffvS-MIBc(+7}JD*5odE(uF92&K>P!nv@a<n=>-TH^Xb
zAp2BXr&<P9sL4+h&fxVjLx7G&YlDSGcJy(25`fAf$?-shU0P}}#}C&S#ss`~LigHe
z$exT5M#J=DgIr`Wk&TC25#ti!_t9aF{_@ei8Jd)X7M8+_Y&h<uvL5@KsyJV_m?s{~
zYe=KKas<Gc{Y7`vY+|pKbVjL3)9FaRy|oA*?gi_y!B+Etz;nuei_x2ZcE$S9|Mb>b
zy)6~rbFn4rsO-ATkArt;oxHh;{oF_xVIgR1aYaQb9M$~$g-rxpkBFuF*Jh&;@T7VG
z;EaQ2;B>rKP|VK(j_x_M+i1}KFA%>J%beSp$GR2w>CjBWKFb!p#L(Iya=RO4YRlHh
z;1w{=B`mv+oJ4cQHJ{eQ62rjNnzPuj-5}J306ux{(d-58<hZ+#V;F$hI>%g^K+nl-
zu*_K*gR?e@Pq(Kmp)TCTH+Ua)KiQoS!*-O2w96*F#**0c_`&&;54l6l`nD_e<Eeih
z=S${!I+sQG^b2+=yKX^t15N{5=@I%)!x37;3r*GJjQDRs07k>FnWdvh7OJ?4o_QX!
zuO9e!R-@AfhemBYMHo|uKj%(IpVxNu@>@pFp~24gr*}U3pq5e1I~`-h9893tGyJfT
zOhPX<C$-`+u}ZcST^s9$9U{xch%|Y4e$I1YHW0U+GMj84U2j8}7|}W#F%3#WpuM$=
zQzrC$x7+!0E3(a)lO-JiLvc|i;nl+p9c+#)oOWE>=EAw6G=WOhNaWuWoycGm8qj=K
zspy`$ImbQ5#`Ywd4v9vUMXNfm5h0G_mR4_YExKCwQW;j4v8r&H$_Y~3&f+{(*LF64
zDq{wsu~Am2uBtQ5*tD$wNthNiz2%x?JT-|uI%w7J$e8jo|0NX_ngge*2reA+;Ro0U
zc~k%H8Y72XSM_BYG<~p*Xil(i$34-UTVDY{1u=ap&**-d>2)4#wulZ)(>T8Ado~W|
zGUKqd<`!r7B(zVvW-o0O8%5~35qz&3no{_u{rU<L>JMMeNz$deWLx~?kY;i|IE{+h
z#3+oYJBvGUgrAsgNI46WRuXMJRQ7R39yaf{Liwfwf2GgL!w#QeFw5;Bj|;=9;m`)A
zeb0G{fdrW8*JxirMrw&@>3cQ9M>`s#scI0GPq*IE#oN(+<$Mnj-D?RzobaAc>|Yj~
zR#g3xvqQ{(u>(iEk9QpKaE1oL-OWW#sbJ2*Esg8y7Ll4HOZ#(jVXjFIE9i;(#{!Kt
z8yR?g1m>fx{L=u>NIty_Dt0jJdy}2*Xw1gz)TA#GNlbm+EAQl5__aoeI+Ot?8+`}M
zU!4R;Wix_uBkG(2=+`}_J8_+p8R=Eptox83E_r!s9Tg>t)}2r@;B*S&SnY<;_WA}v
zPohO1_QIqob_9Fewy%~u#CFs!UidOErGt%kp3ug7rMw4=g0{UZFw%%CgtN<4yX!;O
z0#feh$y&XOsmW+%*9&aUy83mFu+{y(g;-MIos`JV9`D-!E`}vos|k4&X_^#m@hVaJ
zF6jUo*LH7g+VKkc@$u|qJl&O{U9}oEHn~OY)RKONN}Wg>YhGU)JpN>EW-cgB(_l`y
znim*`M1?pw$_ZtA3ZZja)+PgF^jhh>CRxPGxLCvJ<7(Sdv0L_TfVE9;H~VWSJK<-u
zsabUpg7i6l9{LzAfp^{?>4@xmB9*`7eJfi|U$wBt0KojD<9B?fR#+fOvC6!;LX|#a
zbn5#Ax}&lqwI5}mP$QndrE=qb$2*?&Ww-&?P7>$J;biT8Aj5uej%XAyLazwKzy<WX
zMj8$@Z{dNABO?D6XS|%lc|lApMw)I3kI510jOdBHmoTaONznOc?zmq@x!=7TRA!`G
zB!x*YQR(j=-Y2y+TUceL!vf^Mx*hY(yf5{k=1U3_3&Ms3zehEAOI{%Fr?0P0E1EA}
z-@a&lKe0qZ&<3Z#VQU}U4EF5vxcogxmn_j^-?}@sY1m=2>2k#V-k#dy<kjD@n{T@#
z6o3HrV=$)@=LtO#7ZXhQ1BfhfpRW!!nLwKGhikrKq499q<OE8kl7Xn-Qw)cOrErkx
zLd~!VzUSS2APL+qU(=oQL9u>-O<h?&GXa5g5ZWmfbzv@br+yu!>YUOFj-Mg>E$|AH
z9{TG@;K;mO34Yji)Ix_p&bEu~{Gx3e-AR~#K7B(9f7ooUv+)WGL?c4QYhtk09{|A9
zSipBUde_Vwm2O{4Ms1fMBRY>Q0E(?4C0Z4#rwTbGB{_0r&b9)3db3oJZ7YXDux-3Q
zDlAf=lP1QCIHt#d1;FeO@(t<k1YUBYepO`W#KpoPnAfNhg#Of-lFlU#3;lXc#p09l
zNj(vRCI;!tdor4zZX?RB84W1driyHc<DE3DRS8_l;bBq?$?0>3hQ&ZEpjgnfHn>_3
zA{&p5iF;2vp0JMxd8j~)7&^#QyCH0qHYx$JA@SVSTpE19tEcIRsOJ7;4SdBOptym6
zTOh!Df5A`KKTk`ienz7nkMGXiQDFw@t~T`i5n9y|a@}J9+UAF&9jL@FTBvTTUzZAk
zo%GwHNp6%*69}%vCX7FE=%U4&+>vz-qV1ZcmW?a#^?V_5EDQmPF0Jg{d1@_*$pw1j
zz9nHT`=w^(r58!>{)Obwn5?I<BQTr(S32x{T2HBfw$Ml*4!yj|9PCdl^9#v`x3z`%
zu1kI`d~MX*4YH<d7veY`tp45nt?$4QdqWK;Fb=%s6G{!)?=8=5#~n2QrW6j5{Y}l(
z0dbq7qJJA4crSn?_vx`i!g4tn&?fc@*j&*MAnW)OpFfEJz<1bO<yP2e{Cd)jdJC_2
znnLmh_OkV+)|B(?yXh-4R$xDl(fE>u2BOfbSuJtJebYHSqp=wS;4INWqJ#N|zavdg
zjL%?FGjDW`Z_#*{J}ZMrw<axczG)e|aWe-l)f3Wyz)KRgZ#jecRSz?bI5NFY*(3yj
zICm?^{DwFu2t=G4{WWvi7vd8{J{cYCNJfGI<{#`?73yF+)Gb{euW<i{q?6N0#J3DM
zN=roaW%sU}ra!)he@fLwen}PGl?x1fS*Wgao{{B@*sFhTvBMu7f$|4x(8m4T$>qX9
z?x7z5R!IvUQXY&<EqcxW$NaK_CWinq)0gC;!RiKGY_4Ij56903o&mk}N63Fj?yYy;
z$E|J;m15AxE7jx#;K8hSSsXMcIDQ}lV!erWqv4`bM69*Tnm4|HLIDnudJ7yC@0Igw
zSwhOs)DBDr=b0}D(no##B(m1K3i0rLEG3D0-1SL*u_9Zw>F6phgr|%!fPcR0UGuW`
zB4{Ona8Ga|0_VJ$E5s|)NZOk@cHmFofhHSB4yWMLuC=N2tbM0V%N)iz{p5HBfM4^$
z`6%7t(*TdRCD;<U?6?ci`ymF--hcxT^2b3L^YM&4-b;M_+1!=T_gmB^A_9Vc_E{r@
zbQJT$M^JMFNI01I0!!yT<-t&@u%3w*_m^m--M(S{p+^D89aV>(%w^bo<}MCTC?IJ=
ze}Jg~*jf;vsDOk1E89Cy^R$um_wk}48eU6{qXWs0PWYc7NdUd;C4p4Wu-LYeF#|^s
zF5X@_V7>{*%n?*xQ8Pq;<EF*+J^_gBQdnIr*R(-m#zpo4SBxaU0Fp1=8agR`m$mUx
zgs<_F%>zz+<m&aPjV`~iH*y><f`}Wv`d$B%1VaKf7$sF8kSgxs)4p=}_16LhC16PL
z>9_zShV7d*D6jo9Gfg#ujI<1LmvB@uHEI&ckv=%ogAOmplMgie@BT{6f8c<)`&ue|
zU$$^_YQiMi0-o+>^*#3cL?C^G?;$0D;iF?5a$+`)9>0nqdZ*EaHs0-)e@Z5awiFhU
z4;MLjE=;6PjDTBSo9_=vJwsIW*VmcMvp_hyZB!6$K!uA;8L{pbRpT!rDzvC=HG@JQ
zeb>+{WS5FW(!sw8qK^C2DfA^(J~jW+)CqGvyX$5RpUxOSNo$4($_AhtX|SN1OI1TS
z2@Bxk6&O^%0tFe8TA`UIUMI}_D3jQ#!e$U%@^sh>ICoiGBae%n|Dgc48;Kqtwa0hC
z(tj8|-IeoB!sw4Vse#?(49fS=t3Fi^>l$c8&cyCEqi6OF_XJgENcH-%os_`JHb{)7
zX^}{z`V1csueIrsyz)WHV9|37@{u`ttWG#^>SqVMsjqgH!0gkR7|VCCcTZaM{oP)k
z*`;#HI5?|9DN(bqy^(MOe24MvzjIWo_V&?+M4pm!8h^Cc%q3{CsPr#l4hNcm;$=OK
zv?0C!!7m}7si^Y{`-+Vb933FG4<MTOeBgezPLnI@ix(5SRi#C8MPhKlKH?YhCEtR(
zbdn4Zgzh4qKgvGqzXo;tYkn`Y{N@V7mPGu+7LgI!HH_~K&@!x$W_-edkiO~dl|y))
zy^7s9-tOjlS$tWz6qPpph;+Nm-uVXn2BesHU~FoQp?F!31!K{N0KIhwmB4g|Jy569
z1hnD)0JYpQQ(pa;2vx)4&#*e*5=AoP*YP{D%TE}eq&^1v#f=yqyR0NqzKK|C2O-l`
z^!;#EuiS37NO>%dWT&jW?bjvaE35P>QW7akHwluPkY^p<Ma{dEXVOnSJ`L!L%g$GT
zkR4;;smUo6M7Ihs%EylYU^|VQRg1oSbJU2pUz92?bM5V?`QMH*`cZ<#!lNKk)qnT+
z$@R!f>9@R>yyF69NL06TL&NUy;OewrNvSK2v$mTJfxm(qI~Hy)o`@%BQTw^0+Tw)h
zX^5x#yvLIfuBT^V+>LilBIpn01(1f|dWT))Roj*0;*PeVEpB9*OT~9$E^3U}4*fq)
zzFbhhQNGTTSpGQrf&`)CSS74Lo0c;I_>7L*{K)f|l~8vX0R2su4gHbF`3>&Pe%-U+
z0zL@xT1`%PCzmcRlo*sB(O+Yckg!XnYRXC}LC>>FKSo-EL%^QLDoi(zWax4AVd`6>
z+LG##cHm@RyZNL!@|JFG8sKXn3#@lhZh9Gu4$v1G8vK11Owbu+Mpb~YPUjAv>D`sN
zWMiN^cpRZlG*2}cb*mOG#(B_`U5Qu?ap5V;2$`K^aOsrwd;;;<?Oy3|vd`SVTi{;(
zO48ss-f*59BZ!`s1A$?~WD5p}_qHFoDLW_bCxPgDTiqKiH|TV4Sao#i`0Ccp>M{Ok
zAfJ~x{2mskNr#5p8-&f<wdzV<3zU<G?Sr%1CfKWolx188MW#c{Yv)zVCUPIUcq;!0
z-UQ_PA!RpAJ^AYY(r2LOC)t#=Br55Zdv+(oS<$PpKCwcIrO&6z@pFr7{<E{$c3VAL
zm8QH;oi7!dCwNbTFIDS1YCWAfItX+egO!Pii~{@s$TtE0@nsiSH#pbFvc}^2@wpvV
zKyv;Fx(aFdgO`B$ySw$gUX`tTxa-TlMNsT{6eG#(R~hwqU*SHYJ5=smTl`;qD6VN;
zoI1K2nrh;uww-}>FP7w{20yR%A;x2d$2q@ueenr<x6TbZ;`NqE(YYZF(@_9*+j`Rr
zW0jU-{L0;h$4+VK)6Ps1Gon~JN*d@s0cE^DvlU_iS4cyQ#pKf*##8oBiKWi^wN*J9
z7!uM!k8~c*F%G?OXTH9R9IZb^6uMBy#691h0wJJ++B#F|Gtml_eb;Uavkk@Q42XI;
zQ38AjE)8^3WHz@#qyt&(>{r7E^KalY?2JX9xtZm>e~^4NbPP2{_Q^2za@tW{)Rg4z
z;<Tyd#LO+Olpmr9*4tkbet3*y+*O-TBVf$eFZ#m9ImDBq`&Q0;1kHv6nX<sS4)jm*
z=#05_@NkoMzqmma6)%rsM`9#qV-fg5NN0suAzXmlCX~LkCQFnC`?;#RKyYZ7WA~*T
zhg9=<g*-Yby#N>f$>f*ftIq5J@$<G$nRAFIppOz7L3@C8y9W>Mmh%UoA)+?daLoP}
z4Y5LSXp)4Q`cqIigH?vBPoY7sjA3>BggrUYXg6<*7!8+A!0(-;``uF5C*92v22rU&
z_#m-3c=Vs%6*lS#s<@)T9ZXYwsp-x>-t0On;MsCMUv2#472*fxxNEJ4qM@$F`+0~m
z*v)W37u1{<r0!<_m)6l+A?bu6ky^bBGaH}wV03%Qt9L5**Av#udB~h%hCe%WfkB0f
zR)@+ckL8g_A!k>MR}lqyRe!J1!06>z*0wE*AM3LwC!>!0Eiii33zIj%s+3-0Up@Vt
z&5-xR8C{A4Yqyx8TMdW~-9%VIW=tGG?YQxI4*qRV$50LB^T0S)3k!iHmzB$yGwKF2
zs^a5`1{pbFTjwy4Jzf%@5Yx3Y&-;+IcsU01+cj!7dbc#QjA>5Ya_$Q|!ZLfgO0xp)
zuWJ&O`!(Do=F+bWveU%q!UY?F%8o^Tg+<gBWVlO0{F*gG--V?YkynzMIF@s=3l%xE
zZ7B-L5>}s%_10F9ozB|E2ObPP#AHi`4B{!t&_@#fSh^hKg27;dmnL;0x2WRx8a4Op
zuKug5l!s`pLghXE95M7z8U71j=ucrXoh>d{S&*Xjw#Ndl1J#K#b<-PQdEhGf3LI+3
zIk3X{IG<5u-5L~VG^2>5T@4CT+f3sHg^b&bThge0Kfau~8?y~~OHBdh_-&mGN?gJK
zx5gD<F2R6x!&7u@rCG7)BVC=ksE~P>eWuh~-LpQ-LG#^Z`AM77{=ti%K<3*#AwLeC
zA%|Wau1$AlFe580C`ib{Ho`_)-sN71QYv8vw@zD$n??TbGXNmi=>4Z4UEDY%hG+oL
zrchHC5-}Uq67-E24C>jjS!fk&mYRG`Mh%XMwnj$P)%0|5S2G1xbuZ3^HMI4Nn43nf
ztGq0=?25n2`NxON+x&uG=%L!Zg4sxZkavUs7?lf9az+j85WkONw+#m7Sx;lyOBN~G
zpV1r@m7}NzM8Tv5xC#OVp`b&eHsBqVyKNLNQJ`^FtxH^Zc5oYcuBH}<rP{!0H^=M)
zZ0oc|9ZkaI6_tkSh?oh@I<21@V|I^RP=`dt^TbU*e$@HuTqSrv?AE$hf&-ErvoJ~5
z-}VI{!nQ8})JH>(uT6#rPa9S@y=j1vP$g0npdiLYAff(LeKUpzHycZEVGj)`hlh*(
z9MNOEJcPkdxrl!)6-eleq0z8^8{@X|GF?ljEuKG()hr3%Yp)bUw9eKuwoWDPBNdfl
z;|lwdT_J7@c9y>pW-!>57^^idvoV$fmzq$@E2XWm-%mvktCDs)3b{8!r9sCrJEJiC
zQsi7nX^1!&7H5&cEkMI&6Hb+$K`MGVS!B6tu46l;1<>kSOKM7_mBIFtAV9qAP}FvM
zDElho-hMb^?kvX6;v7j7E{@&EDmpolY;!>-8;OfD(KUo~mwazOmfy6GthbD5oG^4J
z3R>!dD$kq;!YL7nE>ZCK-8gh#|D-G_3Nv$6bqQ{YQ_b*JFZor+E$P6bG6|c5aGjv9
z8ewY^Rb?jjN&Teghmh39a$2x11Q<XtbNk6ywGXgJm6p|6RQSGpsgXU~PDCiFkO(;+
z+j&($W4J*88heWlzm(I#$2ZPqsPid|^W%kcpJ?@Nie^zHbq4A{3f@TqYdy+v-s=gf
z=*Rd_DEeqrq|ZV0^zbLyvkU=a_>ojJlRosp^vPre*LX1$SmQ0#NX>^<BHUEDFlWX}
z;z-!klCskaWV>Obva(BLveV?#sKi4<G1hv$_4$Kcjg8R>VQlhJ<0Pw6t7jd2lQms`
z^Q|x_h#qGbniGf+6sCRr$go@nt)^3zYtw!~%k?F{N^>ROoCQ1K)J9~5=wvh+)@Slc
zdPEeIuu<l$97-~YCd1G!5$T+T2WUObvv<WUuA5<4J<>{8Kj2uzZERxXS(wD!1*&Us
z`iMnM8&c_cMcr#$qf+Z^gGT_+HHYN!4m9+Yp;84yjt8HN0@2fSzg{Gztm5;T^&AkJ
zdtOeH3#>nbwNx(wh8GWLpmNQ?+C~=A@TVHg4bz=OQfwYig6ka{IaO)=@9x|x9g9lW
znrxXn+`@Q=JG2nw4jppDQi<*>{{uQvA*#ZGNG<db@8<U&F_jJ-k*iaa<S1rzJj~jQ
zdW`}(;P<=2gQ77%bO(P!S$CmT>8^!J45l#gSCJ5le*GHJsRnc2lt$ziQrQ1jZlG`=
zSD+LW5<75AClebn?pJ`fWf>_ZSi4&8N!T3fB<B{~gf7>Vy0E|Ek&%OEydd;E4SM}V
zT?GqzI-UqVx{9CX`HLbwo=02MdKobPm`-b&vyXCKkppc*i4a9~e4mklM2bc=8XbeW
z=P8{x9NS_mDpg@0TB|xcO#^U-z{J1KpalR%#zW8E96$h=o)%dozj0ok2o!esSTaLD
z94IpCsJX7!Q;Q3NE9AQCwGmh{6TOO?6t7j?Z#3Ua3dlRZz+QRHb+TL(J6Rd^5YG)*
zS+#YlFv(9wCITxYkY^GE@JHS70yHEX>WEue@Dp5$){IEL;?G-KKZs!}bIED~zJOGa
z>Opoxg`*L*EJxAzzqx1!ekQ`iZ+_|Y3sxtwtFA$7E}jHUOK&Efgw^ccSy>G^TIO0<
zYjmz+_~9K2ljNMAl?oc?VB=X1uzKrS7o}vr=^IyVu|0-^Vi7T-8)`vNmC>>2q>D+S
zM@@G_vu2gju?O~tCVS@-@PLYM;)2luFXWw`dUgzUh&X=Qr0vYa{iX*6pk6XI^FuuB
z2$e?rF3sJ`^xKV<IaC`A+bpf?)Y>j<m@=qc)GNR<jbJ&5Crt2=-0Dcj!VV&VydW1k
zm9;Nsw_A_!VS{%99<y4*&yF(Y)(mY$!Tm|dhRYsahL)#&rgPyi;cyTVe}kB6jF(8}
zXW@@ne60+wNv@DAN+&5(Ti=o7WUurKozE4NdZtxZOMP*2WR*kPI}oi|HyW+4OUK&o
zrDV5bdhQRPfbjeaR7lPr$4gNn4>PSN{OblT)+5jF@xJpuQV1?I#no#OcGXpq_1{O}
zS_K5gVcgA9`o}a@S~m$JZfBDiv<;SyIi!N3_K5!VsAnqzKgK?wgW6|}=NWmW^u0e$
zo-UY4@I9NtJ9eK!Fgf(i!x6T005L9k0i)kwtXCF&b=4{PqV>q^2u9_s-8Z^{fc@m4
z>ROnefm+&G6Da5E$31ve<QEWQ5M%{Yni^lQ+}&KVo*wtZ8uk-_a_o?l_>}P0A88o0
zO@1b?+`WG1r@p>l4~RpRjN^VZ1XCt^JuCxM;zZ-l$fI(E&*A6O1^of^tOGHbrMCjZ
z5#a=t-melH?lqMQTVy2M;}mji0RneK=Zq6I)ygLXQ@PXnT4_9?Iw41Up0geHQU}kQ
zeVZAS+t<UgWU0x+^4@t>*+!rPpU;A$tnA|te9an)z~oGf53T8N1OzO&OI9wcoHXw6
z*kbtT0@&jAaHCd}9@B&T1X`@<uto1e*)dBbFjc^QHBq8^z0ZzOUO<A&w$WDqD1@f)
zK7?Zs9MFj_Dmg0~Gv2JSR($Z(U(5Ybd(z?j3nIf6r4>3_5j_C_dA&P#OdELtd4l{U
zL+`s+?8vSY9B;GmkV%TRGUQph#+IjLN2!(TdOc=C4R#1jd}eOn`|)y+aZ3ls@U$nw
ziT1m2nxLNbKuEAh01CJkJUSYrEhQX$LJI%&o%TIBdxKC1szen5-}gmOOwqbzT#TG8
ztYw-M0W>f}$1UaLIbru7gM{2Vqo1oEjDXPQ)?k3MP757^gA;2<#Hx3=_4i>ot3N85
[GIT binary patch data omitted]
zplIvOsL5fRb7X^yuWii^>%2HRbXICyINmRp508qx)`&XjE*`+l`(Bh^pWR$u|8jYK
zPzs&yPt~!S7v@R-iwMrM->v5zq1E~W;uei+^0V23fhoIT7>p@pp-n$gMe2^WBwdgT
zl3FOP9Q;qC@C8Td(PNK*Su5s&oUem%iJ8U;TwbQ8#noXsHvRVcebll1+W)hb|F!Ua
z@R`%`2^m?L5@Wms?$5TQxSVj69k0Yz9%q!uR*S#L8R|WL2z>e3jksweJgl&BH*eXp
zv)CnM9n3klyX>nk=t+H4BK}7<IdJ90qHJC6S5qB2V-V+k9R_*|rgc=66a~la&JnuH
z^U`_Gdcw)?d{S5ekI~Ju6yz)yP1gcwX}n<Q_w~di-cn@303nR(>4fcMnRWL}pTdLg
zb8c#!2_8etHPsG;$o39gyo!a0gBwXn>F<0l4xk%hzJeN8qugPn^5EG<{5u!d;`cju
z^2d*3&4q))o2mCDM+|ZlA#!ptk+6%8WJ5*00EqSS0Z+O+2Z>boAxg*|T^)Xc#n3<*
zGddUpb?kc=R%s30`$1QM5igrLQ-7ZD_Rr3Wo>gA#aQ%XUI_*^}E5r7M0zlJic+}_~
zSYdo7raIFC&JZm3`!i-#ITjVxZ9IBUVB*qfXz`K@X=DeFI>;m0Vj11|@a_4WwZhSU
zZ<JoKW_@#bSp0fX4Pg=Eje(Kz5(rZ_0C^l<$v8bD%7+C8Ns#ikg>7el>f~w<n(~E4
z`+e!rOx|_uO&|cH8~H#LQgN0DQqE2({$~sS=WJYHfS_c01azFkov?2}<?(B*Bg_y&
zd;w8oNoQh&vL_)<C0XpkN_@#s86nNp40`*DblB^sEG?w)t6kBFNDcf8`MXIVH!t=e
zqul7@5CrK@ft#aBr_FG9*3C^6ci*yCG3iJlHxDK~eW5HTk&sN4U!zxth_=E)U%!ZO
z6-yRSA&9MHM*LY=ZElJb^&&Kw&FyHwETTs1Duxm6gBT<u<ChMAl=UO9*==)3dwmX0
z;`T(2LCV2`-{lFh{H%P%U0Oxjh&dqro#C+nH$G?}!HD2mmVD}yFb2Yn3PRf3>#>-|
z>z_{=E=Ab;w3xbAZr6QZgz!9XqRE{A6{SFw%*@QlL~SpO53wbL{HEyJ!recBaznUk
ztr3P?;IV1~iU!y6h-HUZ1C0Z`$8mY1&2^^LC*wB#n$xv*_r^zy^3)SYC)uqp3^?|q
z^hTx-_+C}dOIK0>N?H~j6;+1x2{`37gNdjri5hmnww7&xB3QTt(ekEhp%PqW=|2yY
zGx$l+@Hs^R$`uXBGoLwLg*r}ec^v;r_N!Pl|H_3IUC=cRKDFhyuaKy|rf+2;_xPq&
zvuI?D;3J`hyGV_4L|J`ENbAclM*bZ)7`_Sjv>}|v8BDN+8Eg>qMh2Uu;TSIavFR@E
zulQw;J71#^!|;a?#6n<1EewzaddP%=^VOpqT=iB>si;l62ub7KlSbR+r0oPf)7vUz
zL;3YQnP+mcE50bk?~+ka3R|f{!0iM*#Aya*%j?;Uk7ldfTr)bHUH@Me0DTgVE1L+c
zO#JUn0=2~&1Jh$mCAYSj;ggX3{MmvC4IG*?CsL{K0a0zE?VpT0Zsu*M(?s7Md99Ls
z%iTlVPM>l^F{sVRxKxix)Mv}pS1$v`0g<q2RCA2D^mN#knn~f&UAyEHC7A4dWMf0l
z$DLS9dLGRmeEKQe#to`VZZ)ewfvC*3JeLwTHr>){AOdI4UN2|aZp_2PBl{RvpH>e0
zNX+Nxi@G|k`xsq6Y9DQgM;Y1#g1$hy`&n}V2}`E&>P`FK_jwlhOR`Uh%$tXkTej7m
z?`(){AnexN@HeX-t+raT&ueRcct6>;`-p|Y*npP9p&M46n?4e8#6}aXfW|$?ev#?u
zq~ki*Mg}9l3&Y}Gcu9BO!#2Pj6%{dJnX}~fnZ>%-H#fppvRZ{dLAfu1EcT935fEmJ
z-hmNOsp?UwX2X=RXh#+U$*QTqZDuCm8wv~hdQ22F_iIx(iJJ?NR7$e{<hvlf<I2~(
z!u)TRHb23)oS>!i1TSOy7TxU9jcY)S_`k=i;-H<PT0Q8lg(0t8MV3F@4c$UIPF{%*
z$KoXICy`r8a8J~<-LB2W+XkYG2Cc)J*Vh^hA+r5K(kE~wY00OjR;l{ElTRFnV)Zfg
z4qPPtK{DlD^IXtJe)kQYb4S6B`8p!HmVknk3lL$2D<-0NlU2!GWIr(K^+L)Ewua}-
z0BHuk%W}pc+*U>lORWU+36RIPQ)_R*y){WHtv2+u^hm4d2A=d{zdgDC22vkBs`IPt
z<&gHc5Z-(b&%qUbA{YT<5Eii?+au@s=%N>I(6PRTThm_zI+dO*!Si0ZcYmn~Gpq76
z-;zw5O}v2o%GPG}-|x*}{lb(pjgA1Qg&;mO>YNG9%*^3;R>3^Op=6ci!35fcM%z}Y
zB;=FP6!WPH*Mlb{G)WP#&_jd{ZaD;eV_5M|I<SEe4~bd)s&9bdH+UchgM}RVt4XX`
zIixR6l;K|XQ4%2()W|(}+^epRBS{^9zy@C^!gSgu*_EDoJQ#NQnoeRxV38(a$UN>@
zHF|XKe)z|5!JyrW?Z76WoyUxhM^5mQ0_WneFmnqS?dk#^E@7=Hg&Wn@>pZc>D^0dW
zrpDd+9)$hMHPyi$<UEV#<*<9{v9bC`;+w&i&4&f47r;tlrIk-G#>dBR0$5jgug4K>
z0vei%V$YTR>_FUZL*5RUp`pOmQ@6AAn~qeA=FVNCZPVA2;mpl2#&J4=Z;h(9Rfj51
zBL4`SUVQ+wf88LuH9~nl{#L=7Y_jP4{dW*Jay$`6_;11ASq6NU!C;;RBnbRU3+Ow@
zoF_hW=@QznzCMLy`V;gMc)~df2oVqa$>{T?zTS3LkCE?ETaUZET4*)Am4OX0h5oTa
z>q`9z$F5Z5ED{>`Q;2CIeAi{yrnRw`XGa!<JnTu)i&)2oJT8y^r^cxhyzd@_JgY!j
z>H!GJLmiLjHUUYy{!rR>lV9Y{+$$C=77IS-9{ZdHBo6Jl%wNm>85kIBRvqV)Z*xV$
z86>p5v`?z1r>8FghVN$E^?1+od8c)6y(pgN%&5tIFOm#8B^hOzqN3s^phd{yxL#uz
z8tmK$Z-EC<oT%jn1$;bExw@yvQ*%B1vYen7Q{I{$Zhe2Yy5~`6hpw6bpJ4OjcJ=iV
z{3eF$ahmJgSzM=7cMs(P7qrBY()iid5=V5N()SFpN{QZyZeJBnm{|Xopev8PyYm_^
z)wX0*!D*#VK_PTmF(?SF{1ASx6(7Pi1UF;%$o(B(=UiVQ$pyB~38zk3{r=6WZ!n0@
z<B-2*P7=?<=`Wf>Fw#+M1FAH#zp4)qZEv3+xwW?|C`s{&eQ9j0E{i`0p4cciL~Fnq
z;&fjbj_{X~^n0bS+ZWho>VvSrL!f>7Q&Cj()pjCy>WX1T{Dg=^jTcal#%Ewa1f(`k
z8O-IyXxppa08lFi&-;tLyl4wVO+*=OBYiZ;^0?|tPEJl33VL~#g<flt^U+Lk7IVCz
zUSnfJo$HeRadT!lbRu%z!CGQUikZ<G=iP$-?CLmDLWlC-=llr=2t=M%$*-mSAL&lK
zMD8Q`)2zu3<etGm+{;;1X}{;27RP8S1cZ6Il2MWMS9m@y)OUVuhA<?fE)}X*;fnhe
zAz<POmE-s2D+Wq%-h)^eCT5^q58_ueJii;=V6mWZi-r(0s>B?AW#RC*-*U$b?%-q|
zKl-=o^^q#owL~Ln7XyGuJgwEnCH6RdY+7By{Ul+xvA)Bw<Lzm&)-&Ur(=3wps(~tZ
z*LZF-gtFrI&QPrF81uY?_{mYxT8rjCPE7|<mX&iy-g;!j7=N^T)(*Yek-+Qfvola#
zz!s^6(*eb9qhB<SXAC)AN(~QJzt??CSyids{mQc@2Wp`3m*37D8eYhbZ1r50f9y$O
zih|FV<XNi9B1mRf;lbh(h%ZERH~6Xrf!h!ClWcA;-$r@eh)BY*NNQ8_lh_2g3ihiX
zRbvrNBKgcw9a-Lgsunlh_ocJBd#SZnc(Zh}CcKPaVD8~jTbXRm%yFkKbBI%~4sr~y
zqb*yex==OL8a{Jj)G22sJl7{CE_01ohL)GhSXos-k>TRtgma!XfdLMt>8PZXgX5Rg
z|L$KLU+1^11ew<@CR1YIC2SJ<7c=R7)*`jl-uI}(iO!YadESMfK;8t^W(M(1Oa$Lt
zA{7S@2N;1K0^;BDWSH2}m+8>a4?I_BJACOlHld+Pg6)vfS7-{fG{TOGMI}^%{NL#t
zzAw(Y8$8wCkZHJjV2-n!BJPc%hYOLX4&gu#eyO*dRRNAx1C*>l`&&ol#dd9g{#4R0
zo?0)nR*{zcO2`$j=hOb#h#Vx%vkb2%(``_bzT%FOKwQy(Y+XA)Uti%`R*4}62=O0M
zaI!m<`<Ap9#I{-wFkG!G6rMLOXgO2eq%_3H!6YFjw%iRHk)S~$#&~v2W6E4J;p|}M
z-OxZ?T>Xor9hQ3}#bRmj3XYliIKrX=WWzq!g(yv*sZnukUQ!fgFLw^qvCOcYUnaGw
z#_cN*hoy)INj~9Fw|4{2Rm`=>oeyshB$Vk9_jA~<$$XZU)K*QKpH=`OtP7wlA?0IR
zI9<Ho$(yNA-uic#VuKExA-JAwJRq(-pam~u{>#s=v)%&7%H0e_$vfTeyS{!@S`w(J
z3jsF3V}u9|O%IY7Jy%K&TbWsqgzq~H3TPY^4J8?(eaAX7lhf7=c?#j$*$FmZztbDd
zNC@NA;y^2;O`_<^^K<;;0xqLDi2$ON)7E<xT4Hkwc6|A}ULFDMG{$_Wy(9Pa`Ps=!
zbaj1tyfLrT$|jw*`P_bv>63NgL9fBNt*15;ch28mI}5x`hOdwGoN*Qru$%*7&<wDe
zQ}-I1iCDa?&HYBbs|PY7;sOFP0u=mg2S0smtp37kf*<Yz^2U@a8mOALDo&P)7?{Jg
zQK^aku<GKrv-ka*EhDIUCqWAGMaG_-?#N@THc$)4yKBa*`cs4GX>KcjKsMcsm$_{w
zJnTn?ehvk#j+Ne~|KkfiF@JcY+r#N$N?%{$-z~(|v(|0*DC5nqny$*>k3GT;A-Dli
zRVv$b^HdzJf;MKM^q+(30_@OTOB-q+VTln}JggJSoHuW=Wcn5hC0RM$=UU6B9NH1T
z0p6@^Jt5M&tsTu-bGsv;jlYnwtGnGc?QYm)?8nd-ndkfS0U(_y^~aL(cbzh7r|)!u
zw6LV4WT}!s`>NJNgUhx`!`LcHI@j?uF6(N+|K2$u+F$=Ho_X+VMizs|@g+hnu!+Yr
zK(FRbcmy54J&_y9ga{dQvw@4|i=PyJ#qbfgAdzPfE?h&-aQW)|$VCz>|EtnXnF#e)
ze>Rmj?RfK^<H~a?wubGbz^|b)w}v;8+72>b7DzVC?Jy~HoXDCI?uBI4uu0SMP)bYW
z$vrdgWdHFHsfL`p>ws`9n<vx4<=J#KtF^Xfsmo&8CG+Dow1Fd-uj-lOJF{H>cz_;A
z0I9N>#JXD*;ExWWJ}6cp5|p;buM)QNj97HmA`IB{gc~7ZbUCrh?G=p>g?A;^)Y(7f
zG&3TfwzyRu4dJ{zCk<9^LTkSwp%j2BxIZ|kBY~K|*6qB)ASWnn*H}kb&ejx~21>UT
zmzMT~pis(cYMvrF(}XbYqM-EuAm`?8pS@EUyt}%xLx|~1shYek%Pp8ZA}87zT%UBS
zZEv8KFO=*GLLmIDvq`C3abw<qV>@&D&mUk3O2Ko>qDu`VoP2Q+sycTP;}kdHo!dh`
zYp;hq#`ccUI5DLmE*EEaC=K7|AI#La!a^camSOh)q*w~x#j3TrW#6_LsGJaXFfb|g
zJ4+xw*!PwC{Ry2nPCa6srvb|QdDlTGV7f-8;)X`+=<Zo?(sVqH{Q;;!d49P+ZCvjM
z!fK(Qa#n)a^04y4*Ok?L>Ox?_mjxzInzkdZ)f-j~i=WB=&V`BpP6b!+xFV6FmH_--
zC1MrWP|A`URmVN^%G(LYXeI-AMAIasZ9wn^5skGbPPlu0fF&{s1%DJFdOdS|5iK%^
z*(=D3%cjDL$13XvjIFw(18ao@mgvkZi1<bWDXY$A7!fwFzN|b~(HToTDo1<t19$!U
zY(#~*8ti3dy@IN$DODy1MUVaZ!NGwNb!Jjw<C1yg`C`HvM?&Mm636k6f4Jig0-L<@
zmP)gn0mY9<q3)YbI9G<76@=CM^L!GodljUNzK{zdG=Vn?(BzEJtHS%mp#I1DW9+fi
zYao0?j|{EahO(0yb=0vaFTRfExpZo5$UHBPK`$v5d`YXK#Ta*atD&nsuQ`Np<8MIr
zpj<-Oda(v#yFZ+>J0`^c#?bxwN<|YiFZrLjUDA)`i$%(w1+u*xHlQry(^oP8OK+LC
zyDd3*%ZzpS-Jf(hj0A)Nnfb@LD`Tk?XMZ^Eug8Ii`3ISVSQ>R=opNuINU7iJTI->n
zs)pzZ6jlXwCD+;Ynf1N8`Q;GuN=m39AtAq2q_eZL3%HTvRAeSv8Wf#VjR97q%ZKVV
z?{az-I+sfodj@rnhsnObqs~>Cgz^0?)ttTe#4Weygx#a@;*=*HAt6}Z1tyb86#3eo
zZoB2GdnpgMW#n1qKYWG+-|86^lqPW7ZLor$@KI1Uq8n?xGMP~fN3$7lX~+$k6d}#y
z<H2>b!hHV}!{3EpL^H;}=1Xl8Ai6R3nO4Ls#@UGO*v``-&`a>Q7yMhj1F67IoQJnV
zLU<%8kJYHrQOOrTw6_bEbw#IyK^W9#-Em$siPajnyQimBfqQw5=5(fSv>eCpIH@dX
zx{tt}+FeSDndj+1{{@gouRe;E8MU=)zg5z3+4M&Ts;HGX%Fs%G2aR!j)(9rt7XsEe
zQX~IkAPzr$92?#Dc4aPi8@Pyw;r$sIhHy06c%fu0N@o)JCjlIk->GivD9LpRt$3Cv
zDCxLlpS$*cZb`qT)Qs2JxOM)805owb*ZQ)f(@@);^+X1D({6$~oyUWX2auq10W#DV
zfN1pwaQFru_cJ2_G15;TSmf$}tv98OYQy@5ya)Z`9yf=(N=C!wZB<{>_qW%A4xr*B
z;!rct@Yq&wfMxO8RCmrj(s+Cl(>1aVX#G&BRDA{Diik9A2Lu3k@zY2m?Le2`XL_Ij
z`m)E`*#*>{eq+2YQOsYXKWY8raJ=#HQD?qvQkmcY_q_>$j{d5}n%7@Qw2!2F5kVp<
zS3=;7<MLpihm}&ymUe?!M<*Ey=m!?8umTBdQc-5=u)~Gz<!F;1_tS-x$v+>NmB=R7
zLY3ad=}hx_1wrWt-mnGLIUg0~KXCf;zF6XP)apPQNlRW0GYjhWwm8*Wv<?zoQJcRo
z)GZ7yUVXq&Gyw3Bhyf<>2GeCXGc&7ly*8p!Z-A)_Ycaf}hKxK&L7CBoaN7if!2BZF
z>U`c0poF1AjJvwJT!5v$yy$`7Cgpt$4u&_pzP*W3606ZmG<L<o(ZAg4BY93X&Ikqy
z%6Pi`3F+zK_2%<i{%Bgii4!$`K3?y;qgI~(^Mp2joX}<YToidwu+-nw_wBx^na_yO
zh0+3IH(wiP<*~r@WPV^XD9<y0Yt6w$z?D1I(|`k4cO0_+>^d>aTq7eF4>M>OrKYs(
zV`BVus~dX_B?}wBh??C8e%`Wo>)LojW<FyzhvZ2P3S0VjNcG4(c5fjT^u@9SNeJXJ
z<Agk=4>B5BD!N2nNp|WA@T8AGl4k3IWGsWYo<tn@G+m(6r@ESyxD_r1JT*mDY&p`6
zOy?Ql6jW4_RE_V=rJ|}I-SthEhnv0w;^@BtVSC-5DYa6_gQzqbCg^{Li@5nKY8`6_
z^xr-nz^nIbeS1==wkOpCm>tN+C${JlLmkFmU!0M_xiwKuS9?8Z5|8Qj3=IwUUVu#3
z;*|$oB+36Ri~v6MffB^A^?IH$_x1-USkzyimn2*7_BQrdS+<ilp=^GUIt^_Vi;Twq
z&`)c!kp`7c`MP|AMk%h4ZjsBjn=E855FaFETjM#o!<NQIgNZiSF%S_VWXjw}HnGC(
zD$__nF0<6*RPAX&8f)n+BR=m57VA^bm%@$@g>@HCNZR?M7yFCgs(B@fDA1J?{_x;(
zvf7Ha{=9Zi=Xj)Gd#mO%fa`uz_47mcS?}&-@k7z#q6hEotE;Q4$U&BDcb(_66CoY7
zYE5sDB(g-($NT?7mXhtf?V8SRM{GM~e!e@lSZq7g@>tb4eU_*|&LnsD590*aG!bYt
zJOuvRWxKJNHP*Fi19HIIzW3Yb4-^i)ZMV?NfhaEnsIr<`O7*8&RAU1f5r96&vWLz1
znHCV5jlJmIW?Z$O;AVR|$mK`lC&Bg8n<k9p(i<NFly`zYidm13k5bEHSf2k(Wo05<
zR6$?VrkFYcCl=iXylnh>XbI={8-q{;CWvT|L|Fz~R=xUizuxCvnrgMDy%*giHV)}e
z(FAP4KX3lnt6%>997xA9Zd#URK~A&SP*4YF{cgZyBqPm|Al(cR|L{!7DXXO(vhe5I
zd9WaIW*&ERHMt&YHG;){(dJOPNpg8P&1X!IduHkcmuTrUI-XCNWh4Eps4!>)O%0mc
zOZo;7-&*=T0d$#^l{)a~XZ|???{xu27$8fgtMx@XY`s_YK&e8*)Tl1^_YRD0N{=j^
zZb?}glf)NFWU&JVr^Eb2%=s#1EmsIY3%z1-ZJxHFot~b4Bhxsi{?EhZb~qN+vVkyu
ztAQ3J*<wERgng=UvIih9=NA+Nxs8d2f?~*GPs01j*xPkz<e{&=ldfza48sp-Y(jOY
zCftXnY5KiWKPQWPhrS+MJiY?9dYe5vCyWSv^pQX#<5>k%AH*c{#Lk^)nK%zZ{}Ii<
zg!!oPJT5{_9+CO_iu<s9!d_5RoZsrCY}8!{68pv^Eux-IO2C0ckOo(_sO<Z!y=3}H
zAHRz~>K#1;I|N4&cCVd!k{F=>%xv^Sd>3)qa#Q2rt})<zyU>LT5E-Fzq?$<$?)%=l
zz|#<WYS`NDfowgeGIE6k3T>RfkzViG|1<I|oBKO5r$yU0^KvGq6BYTLiq+Q97y?Mm
z-$tWJx;&)B#H@Hf;5zjO;9da5b8D^nhw(2=FapxiJ6P)v0gD2*y=X(%O@VMY-8H)-
z7XV&3yR8j-XlRIxjEwI7%Urmlv1C5$x(5;fgrmzG@;xU1<k@s0_#r~rk2vIE&Anu>
z%njsW8Nt5!le$}c(M~J28XA#ESvhuT!xs#&@dePoK31WU4cL??QfX{*;AZ!a4ZD+|
z;lP8WA;KuX<9}<Z;=kdciXL_XNrg-Ez&{cB_Va7>cW7|6;$sPJ>uuTujBJ$K^Y>FT
zwp8^6{S{BtEobP6(_ny{DivIf2rmp%bP|=W@7(Vy9*#`;lTjq6Dz9JVyJ4vM^e{Cs
z8}+Q=%B6*uy4OTPc!l#6O+)0VfxFZddhgu{dW&-wLX}?c!VR0O<Z4UnAExTovlOM@
zRcU1p$XHm%G>zk@SAFnYP67e~`bv#12FHTV^}H{e@JTF|DYg*$B<d{w#m{H0@SV@6
z=Ws|in9xVH5XU`F3^cQX@Z7(E`&R!nrQWEa6#&gIso1Mh(kg{m1r}zwl~jwgoV6Uo
zA%z^6xlDD(N@`Gjk&|p_-xJ&p-RG#;HC9uVGpRYVW#QG=qhGYwIhOE3&OB?omS(I?
zH<YH7E&%)nDo|o-wjJ6wmNBRHn#=n~{N+suq>A30MCy3Nb$k%B?gaQv*7*_iS>nGP
zl{q|OQ-L*(H?OE#z)*D5x53Tli6;9#9Z9*OXW|kvS_MBKFJbd>DSW!L#xCnm<;C$s
zw>4F}6lRgm<OFwzPtbGKrA8$d3nN1)Y|CUrhZ9k#Q*kU`34EPe%N2o*acWsUf890>
z_T}>tKq^m{=Hx)YD;q*goO&R^ucth%-TFjrh;H%%0E3u8%8kcT{PXgN<dGX)3yO+N
zuA{31lE@rv_SpKF^(+cM5~yYSp(x+Uwmv}4-8<y5V=UT1gbn4$PFBsks6dt*X91vE
zm-(X!7ut`r5A?R>%uc?&K8G_Smxthbzu0Qn_MtpXOL;R`>ZsQ7wVc$9{NC7|E|g(C
ztvO}J+v&rNHKd1|d%8V_RaQ%$ooU%NSZvc^&|yOYYhCtyd;}0>4Ap5re)I|}$8bjo
z8ja;$^fg#4*7R9yKVCs-+uQup4fByg746EC@|&^%*PvS>-!}ZyLWgK?p+WL@N}&Sd
zLd@AdA+**UGaU=HIu-pH{leY|y^vrBUArc*`YjPOg3lu;zDG4saw-llbZw89UW;dQ
zTa57WIU=Cp&f9;|E6JgapJRSjho8n9EU!NbFzxdZ=&V-kNq+A#<`kDxQL_Yx3Q0(p
zQd-gRC`h$xfg&A(v4(N+d_HYV&D!voFz|=jGY$!N91yRWW7oL%e!ur7Ik*K#<f%WS
z?74*zLkPTIkG1(n2nrJP5>yuhX@R>g0>OOE^InsXOBDfd1$umrBwhveNXx&I6eV3B
zt?pSWD5<IYVr6(th45PlYc!RR^|VHuPtnY~R8MPaYAyzN-zIHzaL!uqdBSkHmv;2M
zNX#FFE{E)LnuKkGZEM8E#U**%X&!u0HN2muGwG~W*yIzqwMlaS<ric={6ZnANNGO!
zCu|Um6<d~;a`&qZNn~Z@Kotr{Lj&sHWQvNR^-~Mzwzl8Hdpl0-ELOC2W%~!3=$N`e
z#sf&TTp#N4a66yYOpuY4H>;iDb!xy#ji20<$qPh&<T2p6@B5#|+*PMr&@NNwF08R9
zBw&68cX5^V5etLug9%1!F!FOR5c-6BZua!N{=R0w-h*+*hJ@{31R$9A3yKECf*bdu
zEwZdAmt?WCi4lxzN-I$F8-;UPS3eldgeKMF9bZMuzV<$?dcVJt$UXrAVv<tjItG4u
z>XalQytn;hX#L!g$<Ajhi!&VkReCw<xUtlM=ckuIJ>K@FEI0hXVcD`#_u*o*e>K8)
zC7Dco5@LcRL`AYJlsi#|_m%1GDzP)^+ZIbhEJ}fisYr)I^YlNwf$y6TkbQh}XsD|Y
z|MnZo#wT7~HpD;~^5;(sk3_KJ@lXcrti)L+9k(2$9wCq6aq58<!T6$zVnK~M=<f*F
zHxRnH;253HaR!oz^Z5hBO%Dfovb{phvRbO4me0dqw{zUJ=0D%2b8=fmHm!YsUXX5>
zhoj<b?A*gF0zW}<9iAk2aj&mWI$pW=n@8_MY_XUm2u}E3msUNG>rYJC|A%9MQCPHf
z^~_@J!vg!&Laf~C7A_@^=gJvT<vt4B`$s(9GW0>FK+BH{P%FR4mGGKjT24plIp7#%
zGWqkZb|<u+E*Aw^6X7t$gt`L|6VdwU8Y;jZnLab|Cu<p@w=Li$=?}ir7*A!L2$D%4
zrp`PV7cDJ`+W3mJlcr>v!MJYgJ#I75dwmD=C!-^be9<!U`<D?;d>G+AsaR<t_$NdV
zxPsx4(oMt>pKpP^?nipt7)XU#CAjh;LKs74?ndMdiXmBwD#Y?VoKfHAWDK`{B%&ky
z#Pif=VmYViJ%9Ivgf8q)B|PO;2#|7R4su|iYl3;`MFVh!kegSFsaCcYXv+gmHSJDW
zD}8%rcOW9a$8A`3CAq{Yj;qWwIg`AMEAS{IfG1tQEa&Dgq|vD}iAz%;xxjW@R@*x^
zj^nbjRj&uTjf8@Xie8FqKBRxWupsP!=Drn78kLgb_DelwY&lT^Ys38xyU{!yC7kRF
zy~%yIKXj@dUv%`YQ1yP2egr+^^5l<TC7G~oA!WAzL)SaU=h-z~!;R6{w$rGwZ6}Rw
zr$J-ew%ORWjfRaIqp_{`Xs_pfpX>hc@A=D_b07PdJ+o%bn%NW?G3RvsnVEQNp4XFN
z9QlK&35~!R9x&E<J>fH!c2?a44s`ivO$nyuS5gv^iN{AEH@7gIWX8P}{a0f78v3W|
z_uJS)B}pI`#T+GVSJul*XQxN*6IVLhx2nB-1_L7veA8&B;gUzo;sNQn&j1B10}_BL
ze5qlIj^RV8^C?Tne(IXrcR;`JyIAUJh{M>0lYM2Vkc0JOcX<iq*OT(T<2w1$P*q*j
zT$%PYFrB8wruvA<Q#>?aloYsm+yR{P<~%lxdKX*yoK1nckl#)}B0tNsy8Qr#fLa@H
zlF&9@&s?IC;0KXuYnI4^V%2oL2SeqZeQ0IKDJXF41Gh(Ax~}n+D}GH8^kYhEg(O!6
zXf@jo&VHAcjie0c&pzGrXtL_{hZs7gcl=6$!JupXsV#L8R=tT=Rbr*yEJmZzd?ueF
zIC%<S3@4hHjD}Lb>r5e{y~QUaM70bOodOr~){gdLS`iQO07Kz1&7Fppp|lqAETFs!
z{uSN%$pPJ2PBvC$G76RixX6J$rP~f5)0m7d8DX)h!Cuiz|0Uulg^Lyxe}!{0QI*I!
zfvZ5$o0f(`R`S4^kZ-pFgJSHtr_0k-idOtRDE&cUt&6T6PT2gG?3~sT*KU$J;Nr*B
z@V3q)x90W<T7*iH8aUM)eb|K|Wa~EU($WulwiJ~GmLBLOE90{pv`-uAfpy`Uq$p`t
z-C!7KpRm9)K$=b`oSiXor_2E%yJ-^omxo?hj!5>K=I&Q`{->E38G<RyAys$a!s^Su
zj|^Dj)Or!5O+SF!E0}&3s-Pr{?|trr*IDfXa;BX>0>5KkJ?qs|8Aty1D>I58bAU(y
zr@OwApKJL+?eOrhh{I}fphXs~+Eqp~uu<llRUy{xxzGEmx?B_%Q~o6ZRWk!JoIK|q
zT7_;W4x#6TV0p3{-T2(pzsS}LanLg&&^LAF$^-!4gGGOUibO`Hhr!W}gwzz^%v_)<
z5yq%1hZeg{B_#Am7A}aZ?M6F>9W0Y*z=uRPlZtfOE{Y%<X;Bw?y&UZ1AeO7h-qyj&
zL?Z^9g{;`n@i&NK-e#h5i&htW)wiVC&d+q+smDVN%p5-a_I*Mz#*;}@;<?O3fAns}
z?<$VdDGG%n)3;OAbuwBGzJF2cn$g9*OgrAN)L3Pr-^JodWSl&l2>|5g4hP$9`S~O}
zW1=aofF3XX1x>i(XH?_tBLMYdxX(N0CYFY5vfcjNl&K)Xc|E~A`PP&z3w6H3e)oXp
z52ziPQ1M=Yi`G)kwE{i8=LZb)J%|RW!e!a`%PlsvFgb^UuNuTG%c<EE+MSZ>*MLqA
z-uejiQ%!;xdgsirudh$HOSQrkOytWv-`;oLzCC&XTmcf0ZT*O#TQy$5{1+K;0|r|8
z?zxCaQ>F3+REp>V+D;$|jWFosNRwrrN#W~362ba}R5>oAfdZBxN!AF@mvFD2PN_78
zvt8QF58t*c1XJ1QVdOiTP;W`TBuUs^!Ss{L@=y&0t;DH>QPp~G?>gogZhspO$J^ai
zBPy;z$uUbLW=jOS%62H$Nm5sne0C_zvaUuT^BDW4fBH3Giy=M1-$m6dV`$Y?!6q&@
zwfFc$YvTz6>po0XMuwc6oMH&ctfL)hSDED6zm;KIVqMfo;hN&<4@T(sd4JYp)*=|h
z*VE#4TgW;GE*GJE0IqG7$-qX+5}E}>aps^=T5%zd59@EhyvUy(K%>WM<9?mtPepP3
zpg2<T^twX|@%0AveDnJ;+G_pz5=Gg3lyS4|uE<)^t*~vvfKHEv(YoTF=sU{GUK9V7
z=nL?Z{HcZh<3R;bWBv4Mp!s&h$nx3bA%||aX~dZe`iFaMNH)8$(VZSFj5>-LoY9Pe
zq2=Llyosu%OgqZOrGz+C$I))PABmtgu($gnJjxj*{S%l^(Q7S5z3DJ0z=iAbzXX<^
zM?nw&Zou;FBuC6a`?*zNOj$M%RXrRAz@(qMD(QE1XP>vfH8&3jS%LTgR29B*jMM<>
z*OS4wlh1InVLUPo<`Ez|EmTF?PCr$KvLXbBDCM)Q4RL|~i$=|0c#1b5K48-sVbsz;
z-<>n}B{(f#voj?*@c;$<$s9^d`MBZh&OK@NRzOFwaWZ^r(-`sCn$?F}nfOP1l8y~Q
zO{4gGC(?~F+clrFfo^pnz{FvQn?l}TYTu|;CKAF#2XeJ6WVD3g-$z^-sHZ$0hRs+~
z`}v9aE{M(R4;4{{b!W7U{~SX*ivp|uQKMgTS)r-#+2*17K!?&zy|Ra-)(VN7L$ig#
z-*#2TwbpEmC>QFCg?w02q7Nr$A0f}_wnv5e6KHz^k7UngaFfS02zMIepz<3R{g!XK
zn=}A0RI@*7NeesUt)DxUx-d|t!CkSH5}^^{hx({5A0Ec7J<l8BPMelt$<XNO0Rm_z
z_zw>WWR3l85w$Y3?lp|G4l=2XAaK)@9WvREEw+HJnIahN*CbDK+JyJjwndc5vH7Yc
z|H1x#dfLHnJ_*`FK+Q=y>r@1j%utzOzL%JmX5k-=h{IA6W@NP&dwK4C&rM1XIL1EX
z+O<bNe>AYvdToj+8$jOouUtGg2M|Y<=44V;7NJ0Y165tZ<`Nl54V$5&qZavw769Q=
z@t{CNQ&z~YjWkQXj%vhHxuh006KD8HRyObFZ|T`^N5pnUGbH{z#zfZ}mKD?}5fk^I
z5I_pQg8w{jAd|hDAVU!>``K!JM!eN*QmeBPYp(DUtOl=qLwpJIsS$SwymN$n(<?87
zo-`2P_NQceZREf9v&&1dgIammG=suiPk*0iOS-H_*MI0Tfki}w(iuzD+;`;4lC{s*
zw6)TRX5jY-XH)%FcUzl-{W07hPn42A2Pn>?;+EH+)_Jxz)Pr1M;RCK9fAz_gp7SHY
zJIlXHS-IV)`l)ELf30T&)%=T|7@TxpP=azzcMbF{E4pD=@9LM;d_)%`yrR$={SJ?x
z>TuFgBAzl-XD<tkq4)_czd({cJO1kMXx{eO81F0OC`K+Tf=B4r^Z87`q)0z^$Gnfw
zJ4gbVq@p|kN-|hxTr!carll4$nYI^d&{5n}Zg6+_5R^==6x|R3?8e;pPKwf}c_zNe
z8Ic}XibLW2I4pesDDJCCHlW@_Z4|-2HWf~WCi*aJJZ=@&c~~&Hs@Zkd#EQOo@D-~d
ztbUdk7-qq-pK5gRay^x?xo)jxWn){;$yROB=jp7Y_3XNDlUCemLw9{-_Aj6Pzzo!?
zUmj1)e<k*OAQ5<`Kxh!sA?9#gR|Kv*hAW{tnZ5{nPLjO1P?XGr{s_|-K6SrSzzP+M
zaxA2s3dXukFFs!4NH#S0dkN_>+e*L>T1I)cPf<}ZYA@$>`19lVD8ujo@fKA{nRrsD
z3ME;nN+R5e3nA6u(+iG>)N<6$O?DQ#s`|>{KY7{Y&-6qAHE8v`!<%Jl*Jh>*S8?X`
z<tysYCI0rRP7@Ee)zt;vw6}&RzfSDAa8>yf9cOIw8I!J*UuZ$?eTAJJr#9D=-kSdo
zO5a3)A{7@&;p!iC05fPqZ!WasS_M-Na9^EjVpK^e8aM=ZsZ66Lq9&=Rq;+xiUGi35
zw{20HAGaQ(ol^KXIMYEDItdlirIVcqW44xZ=tg%ESZIlHeCU@s6k$dd8gPwm2X>cl
ziFn-LA#5oIA)eWInEoipmF{FHGe@OOD-9ooNq?PMS18AfIW-IDL1BT=vP@>C26lIo
zf?5ia$M5t{s116<Lt@1fu%`H<qKZ;d!1AX96z_lfK>&q=@E?VP-MkX;=>;eVx6TW|
z$f|qZfv>%+rKn@2gv9<P{!El;zHKAHk4GyO{=rGkT|MDWL^1r#nGQ81K|70vDYFZC
zC);Ffzm|FVG+E=iir^GMyUVGXd+FgU9|sO)Cj8m?tAx~jmj1lT?pA&0qq^rCTuqit
z)2}9iwlAPr!~!;fLXt9tsKYT4y=KRC_IEE^BsH_~RUT><X3@C6-M)N-y<x66RsO%_
zHBUic<NqTl$N;aPopG7Uq;|_Jq2UW{JBl{qSG3M|Qg(}au4dgDB}nXdfZ3Z*Ts-S{
zf@90pTlTH(Cy}``<EQPJDjwbphbZMx&_zsRsm2ZKOXzR-Q-)hR-TCAq4r~02!}sj7
z0L)Uk*;WNwM<<(nl~r3>*#(OL1C&kEzc%qDS)e&s(R&rAG8OprHQN`PBZ7$8-~?&S
z>rkd*B|hY-E?_9RfC0y>fyjQ4_EW)k^owR%Kn!wDX!5BH0B#$<40!L2xt9o<B2Ei@
z5@8{{>{Y?;vF@I*xk(TZ4riKA<2Wqhzs$p}IabmA?>;07_>kppEBo+RX_}EEZ34oY
zVbKjP4CZJl5!)Z$^ND*TBgEN#oz~PMi|r67)-R<lh?@aZh!P4{nmm_BaxZt20a)bq
z;Xhj_`;T9+W1s5QEBIG??`m{pnzFJOWE^SyNMRJr4HaJJOv$$bSeGe;T|{R|r59QM
z6&3+g$~^$;T1TA1bb>&x#(=6nk>p<cSxPp%mv=EM5`MK@K`+xUeM|}2fh^vY0aoGD
z3U#u4D|sWw<JM`a+p&8`XDQY&Wvjq=F+Djc4@8E0{hKw^>Yw5aj#iO03#%h8xV2|7
zr*iz|;T;NEl4^!kIc7J7&Af;j?f;4g{%9K)fE0V43sa{Y$aG?0ILyb03l4zt_HY4v
zwr`T&ZeE@_rlnQclZdZF*as))a8xry#H7t#5~48CFDVt0I6;&PDYvGt%Lq-wEvvO&
zz&XK$ZUKt{le9NT%_azI#C4P{h2yD`{8vH>?85`tA20WxVrP;sK9`>RTUg8G@LY{&
zKk+=~84^fYojf=dB;5faSey$rD~x<nQC9`~@su;;va?gE%wz@l3bB4rk+Ny=W+f{p
zMR63=&mXpJ|Bs)!f%zl+0k0<z0Uq=WDOU@pINl|g<6@O|s^{lH^zc;_l<4$wbo!A4
zID|ADRd<5JfRNPD;8SgO1;g>=Q$4<QOxvo{f#tN<3MueoVFf#~H7~!O!)2Xyzy7yJ
zNc64#Lsc9z=MDoZx?CMlwm0Q(##FZ5Z@3S)&6I|Z=>a9<8{tQFp2!YV0!)wYv8Q}|
zKxw(wyMpp~9C&#+2cup^H;pcUuf3bc4pV;%;=XV_$CH%)TM7SPkf1{f*zTgMg)I;>
zJOz9&bEwt^M1r;!j;97Y7pDft8r`U-*8S7By$of)FpwmIldyw+(Th;vA*f9Am*rI_
zT!^?e;?>Yw$hc*<ukh@VN93%5&Sd0;7-z%C@2Fc)UoO3z>*0D|(foHzBK0GB{N7M4
zZuvGMcnnZn0{ldq*-Gp9CC^d!U45L*-L#EWLoZvU(EsSDP|-3C6SYE*CUe(2oW^*J
z1h=Iq&n;;qrWNWC$L~!?DKT}sFTZ4ZjeT=NbX{1jn8qZZ9tX$ZuLX%PrQB(;D0P3m
z{cJ_|@9MZzt`PtvohBq4fvwmS2QNukO_f}|H{Qyo<R9pt`Z~CKsVE_ZhD`HERu-pZ
zG!#)sf`+>ZPdsLMYdaGLs`mdYDifZp@A?D7{B+6mgMP>_n(UlT)!AR~C3sw#Yc~AL
zUje@UHy&V5T*H!%0BYGZ1FtAi)=>^=POj{?k+1AKs&-_wu!f6ARo;Y_59~+ZLy)3R
zk6Rr{*p5Sw1!m!cmnJ__aY%YUZYZclJRK>`tL+j~GT`DiV;bEx7_WUh)Xr?ur_q*c
z+hmO}_70XSN9_SOAD<Xcq<2w!Jy&blxeCLI$IDwZ`jF#jlm4%TJ^eXMPyHIRTz_Pa
zG$2zPQk;+Eu4a#rJRQ5Vo-b|~`LlhDoa6*?Okw7BV!d0Dv4X7wK@N;;iDWzWYaD`J
zAG1d3Bo)max`74}<7#2AsEHai<?y;~9_tZ$0Si;|=$wRb+HPL_h<$l62cZV)m3rO3
zTgC@$*_2O57Z`G0FA%okj@%bRUTzis%h8FWC|n*&P_zL;bU$wqUmO&uRkl<YT?0UO
z>_Dmeu1`wv#(_FQ6=Wp>9vvPb@>fMVK+eE{29Hn-tw;>Loikul7gpsCf7^&sBI(b%
zxcK3}C8T41|1Sk;SUzfHL4~Qw`22SiK$52Fxl-Q^?!r?KkN9Q@sZBh&=&H9M8uKrO
z_aB!i0mSr91mG=0LxbYkF)zC+G&bHYHL7WEH%TE~u(sEljuU%!6o?Fz!0IbNBLSM;
zX{5j)a8h;CxgGu$!JlEiUo^BrGHH-=Vat4l>tqmzFOBl;Zyp3weQqRVaM<Arf{Td?
z?FBZjmOwzh;36Y|dp}PvwruVir1J48R_1_iM_Q8p3ua97|MATHQyB}Iz}B%rD?sNX
zn9${=Mqx-kikgfprG)hP8=kDB#OOSKjMGvGo~@JmqP7^`sNAV8$hqkdL`3r$PVzDi
zi<JeTAefwpo)7Y{A0-Y6*G;6g!J*Dy4OeCMr&R6~%u>$|p||%3t0s(Xc1(GejgO6E
z$JfevPyl7{xBc~RXBiUc)phq@p@G0A(#1EAoe!Cc8B`+VgX7k&SHs`xUcv~-^D+kp
zqivnFM=@a~wL@{UN!8yT@<cyxCs#O*P^~L(TE@xp(}3il<dPH4O@v9hUi|_ap{o#4
z(oTg{;NzurxBHjNfzt-X0XN*t_ONM1fp!H&3$0K=5hc|WxQrpSgO&c4;m9Ry?9BCu
z8bL|_n06d>v0EEcY=uAmvk>U*`M{v4TVJb4Kfz0blT$g)^wS2e_IyfZvF%^Eh5!sW
z5pZfiFA6OafIa7k{~$`GNrP?m{YSVs!LvH@@kl&C2wlj74&}?2HQA}stm|f2RlI)T
zxSewR-l+7yTO*K*s&Gj?Jyp{r|4)d6Bm?jyKI`X&2A|bzq54AZvVZF{0Pi)O@+a_1
z11i1$Jg=8T4<rG<jVcMBc7jQCg<OG`E9lLCpQ2!Z!*wVAPZ)q!0A;&)U{Y49vDAVI
zAGegjV`24%okp#i{MX8SbAXdKDlZ?Y`bQf0=eGpp|GaF;OM_H49#AWG%gxK|&p%)P
zrM1ZgZ<7OR7FEo1y#F!88(dJfzPZtQ<%>JZ<_F^)!P^WmbJ73$&cEcf{{t<|b9|#N
zSuDPRI!Ig9n)!q+=XI@*DV?5IYyH<3|11bFab^Fu?*F(mehMU!4Q@%G`@u|N^AcST
zfD+tzs>xS7V^%-^v!uTd1L`jj+TD=fR)k@GEUxax=gg^9U3ONi9o-VV+Nv8pQcer}
zIkW`mS(jf{IP52$UU(FPwLQ3@3Yg25+niPVGk+<O;E-IoiX3Z&E8h;Jwl%6&Q;`a*
z<FtbD!9#>U7zF|ZpW(ObR_MpttV<!6>J%L8so;@Rl^mYv+)CiT6C;r!7TQSic?FkL
zRG=)<y&-<CA4l4Ug)9y>HIxYtKjJDR7vJ>LVh}Pf`FMMKduD?GRaRDJKPfv!#9WNG
z)utEI`bD+P?QHxMN8V>WhW&kr@D-l$Pi>Rv0_Gy`@`#@g%#i`rig2|U+<Sc0miRpA
zrp^(9oru90E#H3(N!#rMyrA6j_VuuD6YYA4*4h-_Ef&QT9^$%l9IS%Zh%VxHaQ6h?
zXyz9qyuGO%h^b2xmtzf0&FIxLFZkwlyX;7f<z`F3Vsgp-7ECUZTiq|2p<ov2yb&0x
z#s@jlqM%e7Wn+5L;LEH;tYJ%gL-h9h4q*j_jUvN|>@iD9t3*u&;T^_~Hp^^d^_+j9
z|9ov5{@&IG`P|k!w{iC|{&??|!2t;!IowSd1sSiRoBw88O+7*t)LYnB$M>;G+Du^U
za57RUGRh%u;WZ;@V}4EzJ?~jZ#U?70wsoZ1k*GTthq%{C)~mSsXpdnSK2899Rl3|B
zk|BQxD!j;QOpm7Y_79Mj24CBWeBXq?Hj7-e50)QRd}Gr!xAI*6>Fyw-NxQR5`Sz%n
zsP={Z^>^i~cuH4Ux|=W3McQ7%tB`7h?@d$=UwB^`<l05OBNEDXq8VQ_S~>(+*GE`q
zti=y!!^YY0qD7V^<zG?C*R<00oVrahvd=-YBP?>k=&>a;KhtVJs^P3mT)!N9kR$yD
zU#ix=i0C$Y&6A46W7Vnf@c(59dJdl+2~qpZR(dby$e9X-V$Ww?(toC-XE2t_`11Tl
z*iX*yYVqea_@G?inPqwm8kuHZX3+jfwhw-ZwHeq0Sc^48r%d~SFjabSA|_gdgIS_%
zE`<4cK)~XWqA!#Gk%8v{^^{*zyx1z~gXf*`PtWlX`^JJn)F8^@htZ~e81qH9vtbT&
zL7M^px&zhwdNKq(NG{AD+h`;h@(A`*^8lpl=^+(R<JRQC-jXDM+bf3k+guKS<Dodi
zkd7?YN8TZ8eFiZhDNA@!!Z)r$(q%p>I_y%qQGo$R@Pqs^o|4g*FgHrrdMdv>N!M`9
za~{g|qvmyBtFzeQa+ic<H-8J9fGO{)VrH!Ir)Sb3H{gssDfry-4+03<L4d4ETDn_{
zn>)s=Yhm5xUb^bJQLO&abmeKGY4HhHURP0NTTGqWzL%s=+eQswrg=ehwa&WMcm+;Q
zI(ykBI=i)8H^GF|?wj|1*X5uvzD67xJXwIb=dGMEd@<@3N9c3k3iZ}4d?p|*+Vs|y
zuheycxtLF<oE%a<dD9tFHvL^<%$LEoZ@}N@qqE#uu<g{o2*Z*^;Ks~B4C)k(!#uRf
z>=(_PZvVV`?cq3y;B#<(yUSa|^m$?neNi4nRNy&2J4db?DJMN+>x|}`-I9#RhEvvu
z>YS$)E}n;ZrB7il+gscW3q7?u!@ui!*9_yH)yP!+59VHDH*TmbOn)1`uqUL<I_>!S
z2h*Ji`gM#|EA(C+tarQVtuks3*J^iesFQ%Su@t|JTodM2{xo%p_)v|w4%gOLf3_O}
zZq09aerdYAjpvvvf!vqXT#LGuf5}zj1w~ha9ofYye3U$VDU1fzC6(EF1(GJP+0WoO
zfU;ndmjC+Z+x-a*0JUg9f3fSSG*?jNFBxxQzFOsjQNSWPB#DY!P)f=zP$+n^sXN`U
z=*B;So4<}|`Fbri-v{6zBVT_UuX$Ghk&+&>V=Tt<Kt$hBu!RYiqLI}Xm$TBMtldcU
zCpQJtaUt!~35`PN&n+}BM2wCzfg>p(WwKV_LD?qRf`ehBm^GzzW6Fy7<i;@jKh^Ws
z7+-v{>DR$uDJ`oClr+k2qUfIyF>-5a(1Jw2k8r4mf-RGe(3S~UXrObnqd&(%YOY6&
z4Rn#oR1_7E2*+XJMiWz?NN*!y;s&*}P!+;Qr+8B|TtY>gsy-EnF@N-N$D}`qua8kt
z)w*W&Da%?QSAf2NQOY5GeqWyNDs-93n;M{$C(f;F`|)6SZ>!vR<0sZbu3Qn)zE&%{
z<eY^5atwRX(YpGhP1W%8|3~NVIUU%Y>W4T74D!?h(!MYeGe#j3U!=8%auzLd<Z?Mk
zM3v8>Bju&SUag@qtv>~TdE2x7OdhUeL*EAt?h5l#xon|l=ah_*;)v{tIP(0s4>$9W
z1L^-pKX1977J-q7;?04B@s1azlZlbOsz~dKCoWX(&sP_!rZBO@uM%lD9|H#P`NzVR
zrjbO(UUN_rQVE5=Q|5XQJjZJuijNh_)5ND1QYCpINa~?b&lOASIk;;AlMW~i&l*@F
zrQ~~m-+UpX5W&AI2ySPXAyj+aLivng%i_drI@{EIAChi?4;m(rw8lVVEMt{$puSi&
z_uU6&x~uT~N5s@XagkPc2X;J`-;@5~5-T+b1IlJpAYyF#OT-6&NP8at7WHkR-8lB*
z=MdaK&DOC&ug4L7iTZGo(E>`w9Q#qY;nNn#C{&ziw}K9<4uK<YB%io;W0Di1X5+Yv
zpyYXqV^W}epENmeEKO7=XuhZ<7nD?>5n7M+b|;&CSa!nVB8M!N#SRo5j_SRU>^DJ*
zqfu2nBf8DLy<zaz#QlSVdU_}}-F%wpzd+T&)*47tBv-G8ndqJMmpMWB4tPf<65<Do
z1#d0QgNsCliqtU6JRu{;6%yLtlRHeMybUA^*A;ml3QwGj&&ISo$Kx;+6%E>8JMX0!
znC#o~Je)v=>hbT(MGgC7_cQw3nadF51QVCq%L*l$=|J6S%q`IR^-#p3;0Ck6+#C2z
zTH_o;I@dot0ZwzO2@%*u7x~hG#hcgP9ToX)qnN9j0ynnxjW+@mJsD6swP&usO$x;z
zb_PS}7pZ|(-oRC@sLs}d-6Z|n#rBJ*kc^9w@J<0$PNi8=CLTVVTBb{1W)$eW&E0rG
z5Gbe@tCO)wn{f%*vt*5^TKjqr;_B?j$w0_$RYl5f-Nj@uRud{3dI2#(7>CaTbu_G?
zYS|okW(=8AOcN0QnFSEKJSRrX$+VLAt8U%fSE+m&#P8@xGt*}h_2Ee$eJC=Ey4z_E
zbXJ!P|D+-ViV8$($!V$f^`i_fUibmQbZOn|rO@VU>W1-i#mK?%ZN~j}R+835wF!5P
z7tkRnEX<^#U4?42vk4a=CT!O-5&6mIrPtpJ^ckh7Xn13&Yv3BA*P0^Xxv`Z2xtVUk
zukVX2g#QtZg0kd2FLKuaKHhUVqq7u`ca1(A#w2bGHEJ-a(ppa%&#&=GSg%87P!@Oh
z_Xz79XG+~PfWKLUxp>C^<Wd~O9528wFZ(7d@QkvNMVO<(=f<YtRm*Gf+v6+pCUcRO
zz^!VUrraV-1KQU+f{}jiarVvH#Ra3=743n-kgXO280Z{EP4HqY4i1S;hFKoJC}#b1
zCJ?$b9w?~0V)7{(R$CVa4vC93s=d+NfW3b5K2>3q?fhU83XL^JF^qzIGQ#&K7BEEq
zX7qY9@~vXPaf)738Ue#elqJTUU6U*RM-QKS4}GEfLy{uWBdddfil#q&s%d-qyWY;@
zJ+mJclqp<PzvRbZu)JD_oR~yHdUHP_`k!K-@_Wd>E7+k}E%htYT7uW7$wV>vuMrx{
z8tK;dI0X)k46C=0C^1qsK~e|oiANTMD=4If00MhhcK$brE*A$JdLAnrIUZBIZKLlS
zamH(xSSe$kvngFf3gpa$iz6{8XV=BM&;E}+++s+XC`Y6$UxFxo`0hKBtn{{5nRhX_
z6P^s8??|BM3)B`%&Ks+z=m_gL)%90$RD9Jo-cZ=bgx&H6)>A)IKz@|Ri^#1J;ShoQ
z?4Ph(kP%>?#*Fx!jTn}FE*pp6(U83nD&c%V43@B|P-_?8Jreq5BAP*1*qy-Py59aV
zYaHpmy0PW;<&fj=)ESxu^?07uc|nM8_5b7+H?be+U}bnH4yAz*b#XQZ?|SjrsQz|H
z3`dCWhZ-Y?O1ex29)`7xB(3pg*W0rd^6MBe!CmtlSF_qw%gy4|*k?1A2Z7SRAJvyJ
zZJxvuT26`{18h~>vyB&O>V2Dag5Mdb>T&&3=I5_<GQq_Bz7ln(mai>s3tHf1gD7w5
z8QFtf{y5sV1<Zr6^aEZtuE!rVqfr>ace9D6I`HPW$bvH|-bwY(TQlLc{+c0xC_EQE
zc^!yQB~&-8a<X)-$VIkFMNCoLcRCp^pNsi~#l(oC32<Y68&MKb@hBR86-nh{gOS@u
zCA?d8-@I2%NFQex!i@NigpNTQ5TZ^7AfdiOo<uZ)zNvGKu<lP9G29HikY+ib!Dc=`
z$JyjK5#Oj*HsJk}mD-`^#-W_XQ1LK5+}Fvo?VR~k^=Z-G8}%v9sN20@fs2xnE}@e*
zf3<hDxDAQe_vpNDbx)r3D_+eH$54wNG3F47#KGqUFMvECqGkWM(dtbtmvfI^^}eP3
ziNn?(#^LNtdH{B;p6Xofn?Ul&_^^Hwdm5+72sNtOD2SvGZY4M*o1gEIw8N8+-Zo%F
z!lvc<u*=V2Km1<zaH%Q}`tf|6TJDC;a5t<{UdFzco<qB8LNXQ2WyLV`j}saj4h3RG
z^0;SCd`Th6NA-CG(g$Z%Bt7r<jGQMMFPYa&i%=N(KK~_av^~Ospl%=B4R;q*Z=Xk@
zFc{X^54YA<8^{Z!f9V3y4BywF^r;~3JL9e1Vzte#ck7|lI0KTkNTJvl0VF({{Dc?g
z`~+lg&*hj4PnJ9KE4W$x3abDlk_B~mLk(_;r_T8S;U)V`B)Qi^>6SZxn3kt)R3=lA
zGS0ZkQ}~nXm6S@Wvt<KQK4+EkoB5O%sP)=iTQVb!XbaXyM8ACxs}Hy2lCSLyml2As
zz)_k?VX?E!?^^8QNKuW<Yn+>7@cN`GoMY%(26i2T4HD8q5;&s2B{4IK$!c?)dN!=G
zBS);kL91kE&9yziYUe!xQn750<VgR@PSAFyuI;&gIRrb_)LMz+^gjbqzGeG%7SFC6
zCN(c^x4*Y*n^u<hh1Em$BlDsWZ`n`gqhck5-P_2Q=3NgTb!Ij(Y$1L^%OksJz47Va
zg^D7EdA8qZU$x7bXnX3b#@Ng#o<yN7A#yrA>r61v#{0E^*y)Ny=kAMYF3>npr9pi;
zNW!w&qNn|FCFNnzf+HV=4>M;T^EuQppSD9tz^l5`zcwL=C9Pqsnva{g&VGmY>vk*R
z{pN#Y7<8CqLfA1GK}btuD_z8keeQH!p7Umpp44%a`NAenfMT9KR(5Py&PFE+ZP&=a
zX*Um8!JKlT&O~t)DjoGdHCLdtxzsD9YRh!yz0Lb_`@Udwmxv`Y5F;${rA(bwx`SZI
z@k(o^)Wph0ZyO^o6Zx_Pli0FrAee?>WWdFz5sm%DoWc7xK{+-ZtpN0kwC9rs44}Dz
z>s8Rx3W3Ve3@5*o<MBKgRH**Ak<D=`oo;mYbSH!7_AMt&{wy#3i^O^f!(AMu*dmqR
zn~-l`1oxdHlB#zXdhuEZngMH4D!*gPi&PeUpKKr;2HAqYeXS#Z=zU64>#<}r!hKIK
zI$Z!^3LKuP>E<N-^X1jlThKj5PLP}9P+G<0rzf?KjZJ?EJUx_)7Z9VJCa;#41@?>4
zAKDW+%vgd<hjzhX&p1Y0l6u=Y$#VEJR%m&JZ!g3{=*(z>C(?b)7b%Nu275!+Jl=E5
zso&-qUzrL>6Mrq)f-c+-569RxsJL9>`<-k{<yH+v+Gs=+)mELCv1~?Hc!j;+lk+*s
z+@65-nOI7t%$ccR+v!dC&FJ5Do<&@rP&aivY_M`%7Bd(cs`Hvg;swM$)I!?_73BSp
zvpRjOJdap$yT*zFgR=OzI?z-bWlg&}(ZsA6=%^*=cD<yT{KuOU*}=3tBj3L|E>mhT
zJ{G@=sp=x{%VYU%4qQg{0R`Klx3w6S#T<Tf1qGu&!q}8k)92+F#t*G`<okgx&p}{5
z88qd`3Odr5va(V@R1FN~knv>l2z>Kyt~>qBAbl$RBo4TpaKr$=)}*mF2RAF;*BpWR
z^TzkZMxTXguLKvDN_pP*xE92Yu$>`M5(=kJnY9*CLXmJ-4`}#QqoL9-dzyyflDNyN
zO=&iiU=--NpnJwv+yI9EZ3CM9I#Pb_85X2Q*E<0YH5(ii%xs&uI$5=$IoTvWWuqv~
zF7&taWZBz6f3~ZT*Bk$<mHSmJyD$oAEE@j8rCho&%#H5{2zkIo-133|i<ST5Bb7Jw
z^4$jjpa|9E4)ewR9aY+Xk}wBkC)@&<K{b`}g8HuEH*Wj%;o5YlH^~)EXpmKQA`1s`
z>E_|!7wkhr!w}p5U2mQKtYCn=d@O<XAJSgH*NA1EHZ8fM5o4pnn>rdFH@YN|X^3u;
zqcXHSYxlDo=(*^M$;zVs%D8j=>qa7n!fx;GnKWqip4T$T(7M}ZQ^Z9CVUCHZ(EfGA
zA2Jj)#P^Q;wVdJd<E5lmE!Y9%raav!^2V`O>)!rj((UXp{CO5Wx4tXT`DynkJI;9i
z+m&MgeG)y5XlfSi8Pus}b5y~*HMyy7(wZAP79x#NLoD0lx5qz$s|I|n^J*4xVXNbK
z(81X-G4#<Ws^mPne%RA*9mpq#@7jDOkgQfD6@#H#TN3y>S^R(Bk!YI|euQ@#d-L<=
zb;6B1`waow(Z*5qbAA!LfuZ)k>1{gYLB{jyMCvDIdv$jq*p6!?>5!`-n}v(M<c+n3
z538r+vir;C(7k=%jJ@5!D8o-=`>4w-NE8d?Trvc-RXIpwK?>u3V~yL$mb1z~l2YhT
zpqXKzps!_(tM+$aQ?^qj<LoFR#mv9Ves}`uzs>{}ssB8>l`4tM@f`d;7?CqBrRWT`
z<Nz)#IM3mPPxMzkDX0pHT7}Sd#B!lp2KkH!V6>WbHs2ZI_>#RUyKl((P;=6Qi?77I
zYPEDZo|bXJ&4Ww37T1ZEwSzlHt5-9_n7_KC)>Ddw{{AdAkmA~W!Gh#-8OPQRK~?*P
zf?+JgX1vS5{<P0ra{f2->>}^+9@v8Ji{{nf*ehaZbSgp3yCfBLl_JRPOW-ARo{s&*
zBW7^KSiB`HxBDG5aj#ICd@~1J$*?~*8P6a}Vg5%|ihRNyxebWzOo{~Ic-Mm76AIu5
zF;bokwNL;>!<($HDv#%fAEOMtKi|Jni;1!mkuc%s#f6=L^b4S6pHLTkAg6czvyZ}J
z#-gT{ASQx`Ky$gDpG?mksVI3;%U0DpNk<Wi{iubV<V#1eyCxjVD+{LpQ!PU&GtUUQ
z%tI`yn}y!&t90L%9g)P7VP{v{l)}ArhHq;%1h?I3%+wHAvo0BPOGrbOMzd=WV50a2
zHyd382ivGKr3}vEn6b9!2q2ckM;^cM9V*X-X$jJi_-|qmYaOH!W+8#*WXl?8T0cCb
zCMS%_>hB|8zNW^)W5=PV#by010Q3k@>YoSs_}D_|>L!Iz_e1t$^Hwlj+zAx#%y{3C
z_nDMk5K~?m*1hmJ9IeVBm6Vl<$+GS!mXUs;w6jRIMUK^o)geaFn)q4S=~P^WTyrl3
zP&S+UAzop6q4Gx&zj}iTGTKFR+r+%8+aaowvMO5NJ!skWFL(5I1`6Vl@v<lo`z9(7
z9VbVyhQWg;c?=pJ!m0&LWs(gXb-jxJ*d!JQ_zCaD4(qD@>ub(VAWWFuKp}1f!Kt{W
zE|jgb(G{>f5_XF5+C74&RwvNR%ZERh_SHv^hW_f7X0tI`a`7%zAV8gtZR9?If5h@g
z7W9sw#7okQ>RIove=?i4ARwPs0UZT)P{p`%zqzRL8*%$#9C|95vLJVU2<Z}bnX0@v
zlA{)XoUGl1oeUnTZR*~BO4;}Ta>chG!103x`JRfyM$p<F7mPDfOq!g<5m(t^*OcC#
z#4Up8P#g*Jok)v)WN;wmck1SjSGBTA_r^&0i!15NrgO$L!M+~@wQG`GFG)4|<o8>0
zfb9a_tQrH^<OTtVqmk0a@g0(r)vENnPQVn~E)l02qY#$9wjrLyOW=aSm6%>c?1gI~
zH0@`P%kFZm@si`eCj%_J$LieEubH3I*Pt|DGg7}pAndx(cC=CttS`uOgj&`$4)@XG
z2oFTw%;ksY1a>SisC1na$#^^5UZ%YC9>!d67GlVI0l+`(B3`xab|4lDawrNL$8!Fb
z?6^TZPb%l56N5?eK7PwP+sJ5~>Oy^NuSH8|Z(weuf6WUYJDxo44~;JX1BV9MM7Zoy
z@}c|nz;XhQFe0~vIO<H^o!|PM%kI8+SJZY_JbERiLB4WN^@nGL&R1!gr{#8V9AJt@
zfK(VmQK<_wuo~2=x4D!DZBHe=lWc8?wy&$;^Y?8wY&siPEA4t9ttO%2sO6N!Et2N3
zmk2~k#waN;2BN0MW?0TODYk#R`N3)YzMuO<31f?fMiG*o8OJ%q%rBpE@hzT6AHn^3
zI}U#sXWw_ETr6#d%Yic=L+Yx>9p=p?RpoPJfCAyRel2FWOnaEW(Msu1)H^xT7HNw?
z`K<$*-Oc#2mvo!TN`oM{CFw9-^!4kxDiM)XiXKiN_Hta_n;||Dr3!8yQ0k(DS@Pah
zkV8JxszU&V+x=7)sXM8Bm7qjsGfn6cX_3YB-j&F<e4Tgm&wNyWvr+=>lp<<xL3wr<
z&#?XKo^nlKdn(;AID79E29SYMd;(szolb%loRfd{B29E^-RtF%?PNnWG_?f{<}*Q&
zs(jYaVUsrra~-L0-QTA}!7>L|9hI@kVBaOBCsuiigFY2+`v$VqMFx7=_w_oMC$_lg
z4wxkNLi-1al-I{L%F8t0B?_2_g5k}r>~bzN9)DYA23<+H3C!{N>&|%qH`GK4?{$t3
z3Y;L+=*XxZJ(Y3V^4i^3SqVb)pxm0kY&6c?dvfm2WYV9V-VJ$k<IHz}iW{^YDBjDp
zu9}8QM9Ri$U|s=91L5Hor<vYoW8=at$*LlTLxVKGHjlX;y>&RrNCBm9>bYzxt9z8-
zu2VaystGQDjc)5?f0|7B>ulD=%g7OoK|QgQH;%J)Kv=$-taLpiWxwAL&Bmi_GBR-i
zL$s>moZiO-oQ)-tjmM0!T`GAd|AxKagy1+wzg|b8c|$I$<?oCF??`z$>?gfOM|Q#c
z6SUJl9T1=qOt-gX61)BnYJdot2(0k>Dv)Dqqx}J;<{p629rg}h8z~P#4~1Pz>n*7b
zk;&i=z*`}Ym-A_=MebTjg-KK;X89~w6#JdJ_AP(TB2*T8uv>E}j#vYysBZGJ?O4&*
zL|;T`77eI}%_aAd9t!<)dp(=Ak7!>=K83$ZgtBNp$DL)l-H{Uqle*zlDaG!sb$*5#
z(%mwk4-Dp#w9;o$*E&S~2O{k1fMS&1k*{wbt#%EVZ6_QQgghB~92?F$@W8ZW>Rfl;
zbx`n*V~3tw@TzKQh3u8Pkh0Rj!vCO3KY|lMTeqmM0StyrxsH722if2{wv<;+h*?Z=
zO+s<)IqgyyDOcf}A@nq@4H027;cv<A9Ge`2xtI|+h4F<Gu`9c@UvkfJO$JWR(nfmm
zWIQP!7d5xay(EV(j*gFU=>~2%i;OhW3);cU$g0$X1~BVO3(94m5v9vszn@CGPR(@{
zdZ}iuuLN`z&JCNB7W^S^&6M2caJ!bA1$r3Rx?f)1Vg=r})~|@qDjil^)}OwyPI;NF
z4tvTj8s{v`z9X2*&LZxk?bM%gVxi!|h=)SroWG@6ve=^s8%q`$j~i<=t2DLJ-}YI5
zjFDfcXMp(#BUJOr>UekkQ<Z4mRqr7BE0rP^O_i;Hm<Df|WxLP2RrybUNE0P*BFb0q
zCNJmHPCDr`r<|{oz0*rnhvI#E*~L|>Hp<855vS69Ofs(C6xyRHqgS@&;GL?gBy;t*
zexDa=rpC4J#I|qQk(8vYv1a2*ji(Q7ZM0rVn~z-T9Mu9ln=f^TJ#_VM$vX@mceGo0
z&X>>K4lLIQ9h|xuF2#<4!qoDZ$0}xq-naFgGRq@H$^cL(pPxxiKd<a`bmqEky_=fq
zhX;+WLa~@fYS%hySpulct}Ecysp`9v48B^jB<!&&D{8E6q;K9nt(XPALS5Hdcu*S_
z7~<%hb$FM6s1WI#kHpK|6OOUlG*@W(Uv$vZmK~8vwCbkNVc*`GJrfDl9!6DITBFVc
zUkcOU`q!{DLuSBQ0ykF2C<oT*3^umd*`nw^{`!VT*L(4?{Ae;jRjsA&YtpD!B+@}E
zVf8T*r<i;GM+YtVmcosxX!+94=2Py}klR&hBusR`2M#ZBB?WlFxW&^xe%4s0`#5d&
zQ+9wjz>ml%d^sC$I$n1*2h60R*Ipp%MTwD&3GKRwjD+p;Kx9^juw<3AAP3SA&P(>C
z+vW;kP85`FCp6w_Q_>ChGDh0M;;M(1-W_>|t*E1roExw1hTh(r-w!Rl@P&-Sghri1
z=?)FSoCtaA$Y#%4H=X&h9wK$F)Uf^nyX|?A%+Jgalv6T8mkB5$Q3%Hk<dN*eF72OM
zpO)2%20}=fhJIF3!MuPf2427X^uq@UO1AIMw>z4l#!5#sEy~?{5|FZvDL!Y_SQ^N{
z=Kf$+^)|S#?fKrsy42A=ZG5?Fa*u63zQ$vqr~q$fUdY3Hp}HkG-OTb}C88Bfp*?l_
zmK2!9$nq)3!nW#}%SvN7d2#RHxZeKF#;VpvK5n#Z(L~zN>pD%X%k?t9Mkx`kwIx}b
z2lV{ao-6rNcGTkT*H^-#?Y1E5k@!;^ofI$SxgYpJPHT#aF*Cv>cJmkJaPfRaC7J^b
z^xr^F=*@pMR5lQ}sZ1s-H^6%ES3@rPSaD>xx*^bW5MyUVSUzW&q>>-HEzX@6f1iCc
z0j{8Izbe~=LtV7-IT2quq<wf9><V0LdoLX73gihY-&&R0yV2yulTuM(wJduijXq$1
zy5Pxr9XKXd7{b}-qc;lteSw6`q62h5BwzZo)I?zNx>(p$DDr+7!kJu@Marx~3ZvV%
z)ZM$3>rNnGY`bh?ct+tnH%VIDTC=ECzZ}2BN<s^VWzuJQa$-L^%W3~s#c&wa)=W2f
zH#D<Cb#142Lce~AGtR=U@MtO?3}JNl;5s0}F*sXF5cK`bVsa-xQzAM-e109pI==gT
zYTf0-f`+z+o<`@rsaCQ_^PWF^Q*H0JNc$2$YtUtKgi)pXcBJ?mBsO9gKlmDOXln4}
zKr{&<$~?{+)3tWsVv$dn8u33pUZ>K|qi$X*uD)y}d>dYPUYTd!`_Tyu%BIr9fCA+L
zW*&GQcv?KOdT-_ka$S9Oh|c-3eetrOsBJ^dt;OBa*-tX3!4)}G)Z5_@<nh+v%Bic$
z(2l5*Zl(nZoBJT1xnjiCF`D7wUiqfO(VEhWdH*TpdEF|4kQGxb6`4%n>WX$REaiT@
zwhr=fhBd+XX}~q%=iSAHD7I%@Q2>1En6~*rBtqOiGV$!_i>E;S2$!|MD?Zc?UT_mW
zDpwz44;!j+*?3;BV!&IxZ*9*8G(;^~5u}#!is9r-d-M1jDf(4;%AGW4=*H`!Y;x(&
zwhVHJV>`=-hu<M9yh-M2%1Zsa!+JUN1<It~E6M~Ba<kPbcf>@;E8kHV(kxmX4$tq$
zMX8BRzek?NY>f6eXz%Z4g5tV}^J_D19Wf72>#to)?L+QsYvK<(z!Lu2SdQ7x5MR?N
ziW1=vo1L)*hUs3LUkj)2WUF|xt;l==dlgJmM<E5>J7AxT)!{U`3OGO<Tly`4N`U}c
z5`X2+8QmQp4!OnLKg~p7Nd`sYm7K6=(I@`=%Qc_S^BNNKxVyV*f+6cp9t!HK&*@UK
z&2Nbky&2?Wztc+S@|Ba-LelKh7@LGu#c|@qy?Z1U2{qI^41e$2gXwq$rUw%82zJz;
zP3|N&xA0L8*E&9l!qak{TY72|9~zV@?ncA-+P$}3doIiP-QZU-mLa2r`yIb}>~{`x
zr^tu!TO4RvX<wM{m|+cBjT>kG;=jV>bMMG+MqOu)&c4Da3T?^^E$(dKnX{sk=H4sR
z+u)pLnijumnD=*ZS)u$j^LxD$e-$!58Qh>_BQU73opNYw_VMs)sEuUVu0<?gNV`uX
zh6D@VE|AF!#u4Yo#uCFC{wt&I)7E^1cZE^L{#wj6V^@{gL(|lPR<|~H0r_h@5y|bx
zEoD9Q=!7MW9mB{?i}x9t<v1+^YloUQ4o0~EwlBxIK`tG`SOyfYcvue;!N}0v*-7iZ
zlF)o<Upo%KGi4x}F)n!L^o9F;5|Yi*u=uYr10^{+Txe~Mb1W{h1FK;^a>%T9@=P#a
zV}xdXGo3vfb5yb}8D)TwrK2OAXjZuK?-VaHp30-?hi0VLGW}}L6p3_lG_wFRu7fD-
z5gA~QEWW>(LZ9YQy<^`7BN$L0zz<ayegyfry4rfR-DcRj)u-riS6bj3u_Ny@iqs^)
zs7BGN4wHb~GtGuT=-+Q?Oeh@+9j{CoEN!7uV9oo5bcUEQ`iC<VBDy}BX;gG<U<uK(
zd(Bw4DorSH4O!7ZGFE*~Tl2UBG|*VT58`HH6t?zl#$2PzZWqRDE#w06xUiMeP1-5q
zWxEVU?BZsjSbW#RXN4i7JiU-$149gAj8`qn247hHCmv?`3_;8S%h=fp1~xt(5%v}-
z86VH|G+uNJo|Oe=Dbgy<@GxZB>DF-sZXs5~cH)SDWVl^mIU4xsAY1EA4KfP6Sn5ut
z(+MW$cX*T_h$N8#`7I0^1*c?ZD|N`=0!XR?GP;6`rVlvU*b*DjJ2yjJF1h)N@o^#Q
z(9Vv;HN|v+*gHFg$PFgPGmtQ%pOA@3cN+qUS`WVWJKQ?DeI_))g77?wV~Vbo36Sxz
zl}$j)%Vb8<OQ)rSO-@T@Z}DA&3!Qsgc1-0Q0@7b8yN35^<m92W;?PV^@|6j#rd^dB
zB5lLxf=y~(Hg!2!n1sv%RtLPBp}UZ5(phn0odz=K78Jej<fl89L+ajrvbqLIXoem8
z?dxrC+pHaF%JoOMj1Tr_`e_dLSJMN~!|L9}Mp}2lQ}&+i!y*(dId1S|;%$9*;|@$y
z$<<r+PV1Y7uTQH<Zj{v{ZL^O$Q&wiV7GW0Nsns>rGpq)0+eb>2X#VF9DKWJ{eKc}C
zMPgC0Of!O}Gua(5&lnK$!<%MC)TySf8Q7TS`@AwxbF6cSe1Su*@cUx^oHKQ<=F?jN
zTI?LezxX`##liHpC8eJ}y&;fX0q5iEy`iJso-a47N3!yEe_;vSaN0T8PGp)bBc~Dv
zLYLN^TI#4Uv=+$4zA*<L>fRS5yRys$L(oZ;mAa#$1aJvb)c(LGweXtwaXQ#FNHv{R
zG6;zXA$$b-uuw|8NI`GOUoah9P&wv|6SZ-@4saVwVX1mw=bD^`9h5ExI);d<`ewOE
zt+zdz2$PeveoOWCsOG|5>i6ty#zd<}O2V-3F_c=zS|$cv+WD}Li+HF<hM2o0uqRW~
zOu+cGlq?`c%9w-2x}vGg?^Nd{J*2_GZN@yWAMZIP_(s%^4fYaFk8@kaj2QXwlhN)E
zjL&Ukf8^#t5KA7#>Zy3LQPOUurml_?DM4z+kD6TA;ug0Fv$Q@>3-k8AKT1Q|jgHOX
zX_d{^(IZLN_BYnEDA0~ar{sSa`a~_3(RKC~H4Dn>p>^Ih&Z$h{{M(3wf*ZNKYM=>*
zc}ZaDO#h7$ZIqv%_ft&1j;@1LK9AS1c9`?6PLuRX*#+HGm*~{${Hf~70&6kfhp3O3
z9z<)c$C1Wqtki<e23BWoF=pe-Fn$j7fuE3Cg{S98)c9)EiiAQPY=vsl8qSAQ>orF6
z9C|yz(_#sTgQod(x|@a8*VL$$jTo5(*Qs37nP_B+3=MYg3DJcTs3Ztl$lx1@6&B8@
z6WCo;Bpj$%UWjU*szpcRy;?N+HuPZptTH}?<4RWXlS16&>aPFNv1NI4ChlhDmTc$c
zDGmDqubG^LOswomv$KFg?KC4aI|e=-u2M`fo*j@>$JVU*BLk0aj@cfCV#Sujy!T$M
zFxAr*GV7ODg<P_6@VKStyRlgs9&Jc2Y#)2_JyxI=rd(7q?vS`Nc$4c%VC+NAg>qzr
zQm*7+m%#1a4?8y_v!FuZlAn4%Zd5Vgenb;mHTby0H=l+CM<@D4>rY(ca`}%{_P)@<
zd;_|cGYnSFes!;qRDY#S_;RBMi?LJ}J_AYkeuS2WuyqvRvqT-!t@eQn)f#+Q7i@-s
zK=#=BJNfq>H48~7fqFzo!w9ByBTFhZSobX~6`xC-^@-9`|ND|cXy##teiE9ER_=7n
z(!rN5tZUC7!|mx)Ian)UVdo>vx0uy}D`t&4t+eC>J`+})<{uhOT9*tVI@dS*YTmXO
z<%M-e=4P>V!EXz$+p`ycNH!P~xuI}-5<L_Anm`MkjK(>3TMDxuJmGjeFReuhZe1So
zLceOh9TYO+)c&-Kjq8@Q)Ca|XZ$gfx`x-z;es$NR6xP#G0_T|TI`Qi5=GG<?NFCVG
zr|EXN13Eg~5q2%V_@(fG$QdjCbtsAfgpPQULT`5C2ScE|jYlnv$P`puBw8lp2N$Al
z9QeWIBLM?2d|Wx`K;~eKt+d%YCaglUs2EY<+%eW52Dz2x5E^4O-S61*hdDbV9i#RR
z6)hOf1$`!K5=%Yytuz(t)vG!*5J=#Y65+}AhjDdG?IH+x<rcqIhB-`0vs|Hz7p$pb
zH7kN+dM!28ELK@gk{%0kBivFMVH2KChvRHJsWH<6$Qa8VDF;Wp`t=d+630B(V*K@r
zay}R=_x%PB=->DHZraDsN-9HPCNh4|9=^wD4HXDV=v-K3N32-VKM?6pAB>2nXj!#J
zYx_%g*Du{H*>~w4%!$05U}6;VPM7;9j(+NpUbp9mqgm43-mfwVwWI~|BM51cTW3qE
zaJns}rB(fwj5t2N^+`jEOR?0OY&jdYv?&qnyW_19Vxx_;Lu2Dw<C&wdEc{(fmYHb-
z9XCG&H9kT<B#KyflkeSwnK^)bDs@uVfWW1q?|t}@iSkIkVnRZpAM=Z4&U~T8Ol|c$
zH%`{ZDpO{HVimT%wo7KGGeK8pb*9)p3&hCsN2SSwMLsT?1Yu&kUb2GZaZS5II_c>J
z4sQpW0Mw|jbn;2ZU5tt(Or<sHFcUw@&+!xA-;M=GdybYeAmAN_O-3DNu+xHdwViB>
z$b^*t4*=yr8o!n9stZJ3`yfxg@pO)yl@KD=Up85e6;;b+KiSCng3+wXf%efgrzOgN
zetDX#fi~7p*Oka=2zzT6X^+8#$J0fj4DTLql<$2pRnA^C8jCnv<>gH!G6sUOqbSd^
z>5=lStEb4CPY%iBukVyczA;4>%}z0bwuxh6<a^i6Fca@5U&xl<f0`?IUOq-vEzgvi
z|Jxyd{Ip2YppqL8=Kg=bIaTIOOENT=wM1>G)yJyj{y*%I&vGgy9V>=!71YVMFHHe1
zW7@@K`TKeD9cZ3qK%=TZG@v+S1k>mP9Q)_5&_5Hje|k2V4xep5BrU0_0Uco_`w1Q4
zoRAE`-;dN`%UYStpPnRKq`LRB5?P71N5ORon29sTN6Iv8cdmry;^*1rk`4hd`>{Vb
zlMCP(hQX#nX-S7wV%wn+AcLtQW7QX~uXzn`=hWLE*e7$FHIePILEhi&7@KE5qmU^*
zAynp1j+gyME3q){NZPuZ&wC25h>sH=d5x$(=QWOi*9ZVZ%?0r}6JjL|3;UK#GhOaV
z*qB{r7SYLg`#Z03VtayOL4AYN*49fzIC+l%(2r8g@Am8b?nI!xezJX-ZKJxn)@Z2u
z11({GNDrEE^URIY<kVToMj?dn&p$mwE?-_IcRsPp2!V5<&GZw@hb}v3tmL9^!i$_T
zD^=>A-XYIpmqXUbaCz{?SrP?vK?n1zWY*MV89zE&Rz18!=EXv5ydTMqr6r;0kJaG$
z+UCq&S5c2V`ds+b7kjZeAK>Jl+C_9!ZRA|XbDqEfjtzrb(~@F>AyA4pr%0{aU1C-d
zC_I@pK1L>?(y8S{RckVc9GqzzV`Nge)YUi1dibYwejHY_>6eDPi!jh1E~+uYr;&-_
zl3P$~@<Oczt{zbMQ&3WC2Go%WVP2<gBI))|iy7E=!M4>Q<muFD2}T=i`+;&WMk9m*
zGnUJ}k+Cz7aY~Ht8kha~H3ml;o`jHP-?iPbr8N&iuZa+j<wICU4Jcm-pg4gf_MB``
z3+6=#BVNE3yZ>C3A<MDFj&dgEpDQd}HRJ%tXKK>~vGKss4hmH*t)n_{pz9mMFf7{>
z+ZDrbyCxmOFl<NPn#=@<ez`@`*#>FCA-`>Tfhs1`a|y&a$T40Ur{_18)jEc;0t>!p
zSSK|{t@)&Hu1NXbeq$JJqhz@pY;5ely;5j=DhA{Gp3jw;&_bcDo$p>eTCTi5Tb{jr
zl6l^mT`ae~c2pKaE5%;~PX;wJ{&0&RRLcWe<ZwZiy!pv7dG50!x#Q9?a{9b9Q{Oys
zkyT>r%(sXSzSOzuSXb95Edfv^4upy`y9NO@H{B}_gVmxRoQ1ZOj^mTr*SAAcCkX;<
z_qK&*X`Z=f|4?P4Ts$*YZu!zwDa4q6|L=Foz9ZF=4voDVFP|W*&KzSVDG$DWR1zQr
zVjpEU{|1Ch>$Vrl?G$ui9RAKVQ{|$i8S<4a#d6<=#qwYKiY04GqAZ^jEpJ0pXbiUL
z?ZS4xGv+3nLcP7d&=9?|r+Jrl3}`@mj(MTuh0xI%2%TH!HziPh{c^6XSyyO;PMlyr
z^tT)%9NvEnv@f0u5l(1gNIA}42(X@CJyqsTON3DEko@JnBN!(|RzjQV>I=r9yi?@r
zdp9}^KEiP9?g*Ea%QEEMO~;|>RbjR!vR+sRy>^JB9+8D1!$%6MwSV>kP^TX@fEjBt
z^A3NaBP>=t+UNTl3ytt;_T&UP8+O{LamSVDwG~Zr#~f#|ZtI@o^5+A!vN9!DYTf1o
zy@00oLD5c$XM@s`F0pK-2iRKTX@{rFz#bkr#`FiT(VaQ<F&tPzx{?Y$A`ZJ7jzc@b
zSy<<ZbzvsQN3a`#dF{@8-lM2qK03U{&TjA;jF|Jd`I8}dLK(PFoePBlYB-I?V)?Yp
z7_->_-j<>^`ojSpu1j76b7q%U<9{H3*dLog&>nvBHF-HXM85x@SI8Tm<m>#d4=TaZ
zlE!A(-V1VsdcLY>TO8T|e51Hgp8x82BY67#t9#{v*Yiy~z6poVTz|zB`PSv*WXVH2
z<dTu0a>-el^4X?*S#tMwnV%Ud%O=2XC~Ug!#5TN#u1%M8$QQ1@`!jiVe~nz38D^9~
zrqNtM-^j~twoksgXbE}0rY6^X8NS%#TvNf5!}P%$p9!B@3x=Cs@5M$OWX5SkoD4Hp
z0Jx&su4BS*lOQmnqt;R(gcF=>R^j?No_RVRHJA=U_02YLPJ+4j^W*Ks!fk7m$nV7V
z0}7hrAy|sQN*V<%6nrJcg-Rp@S<DLu*)*(1&~bYj<C{3j(VX%s?GB1zBJALYMPz=f
zG3l#^5QL6-FTyH0i4?KwC|YlDIfQc5LKN$QQVzhr);g?I5JO^+4a4Rn65N_d%`9V2
z3fUkN(v_nhi+NXmJNwQ!xV@6vYTtTnyZrT&Qn}&mc=>QsA%uZ}vM?*nOy1UQ%?Hzz
zD-%*fWiK>kZo6`voSNlmIDEe4m^=s}$HWvE^~6g2?N_vYTf3z|9x&fRWN$$|zKv}S
zw-(_Zcmd(C?IpVqEBKeYecOs}t~i}=nR4oECp{<JK@)B-!rgxPiQgW4@t{mVoaWnQ
zh!a`|bj%CCO~mARZ(+UMb}7R--#)`>X%D%=?L(aWw$Sa{|Gt!mI74I~!tr#fg|nP+
z8+IO-7c0id^5kMLgG~}>wk~#t_`rPQgen5<Y%2_h{^^YaM&slB#iQi=S5?Z&WuuJ%
z^|2TB%akzcz4)JF{vr34lG+h+%cDEwh4mG(dP%Ho-BThjzkdW<e_Jq)IsMZg^Y4F%
zo}441*91N1>d48GmRf<y00qHqTA3$@==cqXEE50pNZ4wE=4v$rgB;^2P;bw%)g8q?
zxU0_1FrR<$)J}P9BWMcx<{No6l2^P_{&3d<xp2uS`NIbVvZJWMFbVYh&gVzu+y{5d
zStEnxsm&Ew)Ho<#hd^opCh79`LiuQOkr6(fJugxI2U|_kBU|Jg#5)^8*hA19dJ9|h
zMuYBLdG=`U(#8sNd!?}BFWaB}&Z}*YX3+tCkmFD>1Z{L&(D==|GFg&}E#aVrBtSME
zj_DiAo8->Z6OG_${f;78`SWa9hH_F%>`TAQkrdF#vtZxsD@&5);kWW79(0O*g{~u9
z7DC1Sf%T<w5hiG~Q^m=kH#EkHwf$dG%UGn%{&_R{=Zo4u+Xu>vw`9gRnLUg-Kosoq
zu|9iSKVBs3l^5C;n-mi$|JYn2Us(Z09`m?!AuxXG<07-QE(?b9=RnX)_W}CdM3}6?
zwA`;8@q|e#r6pa|3h4m_8?~SZ$>!V+_tC72z@DFhd4KR4-p;9$FsG*9BgY-#Q#yn<
z+|JzsAtdMDsW3Z2%^{A<q)}W1rt6H?uxWc1)thGy(IkaD*r&k@UW0YPMO$iUr6q*R
zQ7l?-+E?zH^90D&0~Ka_<XG@uqaaAj#SRjV8BB-HD_#ShyED!hzX)T~a8l~&Z;Mny
zJ7?>m7P;xs?Mgp-3Zq9j+lQ;dCrSFqD$}34Q^y|B_4<)%(?r2i*kwBF)O53}=GtfS
z<N_QfL}it4{N;!&%1V~gpbd3VD&~#w%Xw1*WCMali~|rlwd|>oGzgD{w9z%$nF*5z
z&t_N47Vw)#z^{|n>6>=h>mv7g1U?f!arn26d$B`$saM8{E)_~R5W2^0)38_8F$FL@
zQj~)<?K^S%1Z$YWD{4HIz-CcmOsLr&!$FmCQz}S1O4dJKnL)G){!?J@f&{4=6X2sD
z3R7@&b!t2q`lHoG@InGXO&DqdQ7F}&v}uQ<yN)54KXw9YI?-_=XbgcPpY1tzjL=~3
z@<yQ!%W7kyjVlV{7(X#2CUo{hLHeVn6}4%IWqXdrWc^uf8`fiQ9LsBn9QMZl;?`Mm
z?yQ6IGWOtJcE(6KWp<j0nq{6pGew@7nq<nt2}%fTQ&`7uznz>2`w!0KB@}gM-Mkse
z@&r!CGpija7j2$cHy3LBzlNO%N0N+{)Nse96u)O7t;hc(&S<z$%PA6Sme$S2WcRUK
zVx4e^Guq?Z&u@w`;aIL1Yyq)u7FKN^x*^I;Hux<LOtN(|k;k8XqwO07rPj@u5HI(w
zjx_bgaMaGTZgOU<oEKFDfoYkfMUV8W0k95vodrYkKYXV^mS!c%f|<$koolC=Nz3zV
za^*L$2eSf9x!n?F%jEMb+BAg|B^c}GVorEJP6=Cym9f7-^XXrph57GSPc!vP?Mn6s
zc7YDMi_eSV&lzx5$?E0W+n$jeXyJ?-m(?pAIf7Elsny6UDyO}AVV%*I%zR8psF6p3
z(8v3$<f8OYvk0*mLf(xKaPQ44mnrC9nLG^PaGj}h;`sqIoU&j?DjLST7h;0KywfF-
z!^9T7x3`qawJRHC&eTL%mXIrZVN+`L>B+`6*qYCd$a>g$T7|9D-ATK%SvuRFbtjb<
zV<k02VzCXDj@p)SVCK0k*3+-q)&WhDc@txd-_N!kmkDt&S`Oprq{nm8gXQC`#YXr<
z+gCea>^u$h!qR|_a7iI95Z02Ujl$NzQf!r?_Acw8r;c@9hB}-X)+`U)I9)FO)X_}T
z{@Hy*N&l#U_-}V?mi1VuOJU1k-FP;T?nmA05S)jEW3KY*v3l9K>$seUE!?NiPLjLd
zFOpQ)@?1VK%4~a}hRK`Q)}09^feVR#<A7s(w1ir;+E!1jB})&kxcO-u{y_2aMEv!<
zA+qiCHLuZ~IW_0mwD-!nGBt;2BE;CC!cp)cXwwI<ZHM2y<uz>DC!*%vlSR>MUIPKx
zkEq#8N8W)#GP^s1CJBzAY!p7x9xdnOOoM-PUGW;sM<Ax+oX>XUNGDjAG-X2iWp^2L
z6|VE|;B&w+jKO-*gEzV82ezDRpX?m9uD)7Ya@}|TXf)-<kDuL}F5FdeU4G|0p_=oA
z^e~ul36dknD#7Fi7}<ge41p3&B+y3ac$|>@$mVi$z}1(a;a&33X|OwdRDSnPK5RPG
z%h}1n^5k1bW%iUL`QGYT@`Yu^@*)<ZpW9FZ8Z`jLWIsTkqyQ|Cr<J)no9%O5g3&_{
zZ5(qCBj}4@9AG#I^we<>;b2pZi4Fxj6m(GIhtg7a$0w2=4&EH3xq?jr1Xm5{_{)6I
z_%<DdRM-O9RvGZXU00lg*nx|c<^*)&<%$G_Pw7}m+l#Fh+;T@@z#*vc^Yo!uYz^#I
z+9oI7{dks_q1yD5c<+>tdG~ZIxA(X>HnMCSRttbw(pf1k2?5_xFt=nzqHKl{c&^eI
z18O#~>q<LVJi2ylS|qk7))@hoVOl)=dH;xQV-3#U>tw73U4QvxIem^}yW^v+$K|?<
z#~a%^ArQFnuRYh<w@})X!e4tXEHp^?*Pe^OK*hiITqH&s{<Y^i`^J63{A<reN5Yss
z{@Qaf=qvnlcdSCTj|AAA85N6@8FFhRoth)?<kYUaZ{OY1Smk7BSp47Xd4?IM0Gmg<
z|MHC^vK-7m1yug#42CTbECJ8JpkNu55-HbRG7)xjl3-J2zY#uMck#HBY(Dv0IR-<1
zMg+jXaZ3Z}0yJ~Xc-OP`!2Up6c(ma}Ey@YlW*tQvPT1<;intXH3QMRZH41i(cp$_<
zXoQwxf|7<vNOwr*_Jg)mQR4`Hjd0nmol<+2q=!O!PSCkMoX$*&vCZ#utcGpgeO#8z
zO_k+SW8?>~7sxW$s-gSG=S4Cd>BeCU?Md1u64#HV51xp5j>jZ@%qtynD#bmYDUf$}
z<`HmN4d8re!bD@DMnMrnlYSg`X;6h>Y>WeKc4zcE!o_Tl24Jz!WWE2!+4AULv*p#z
zrC3dLj7GcX!RvcuJp>u%;UgnqL(=J=iN@v=g<#r0y++b_WZR97gaPEnax@Z*eG=?9
zbcAhe4vf~nv!TccpJrg8<_n|4<hgy-a@%E&*3%{krq)2Sd1ab^EoQKXwE#~_K@2Uy
zVJ}KcI&PaIumyV*4}yTkbZ;ETvi{*U?3~&eJ3E?FGkuQ3H6S#`1}(-Y3Ua8u#9bJ4
z<^UigOu<;@6`ylr+TP&PTVBK3ho#_$WinI*KfI@j6S|Nf9L9ndq?^`tgbV$d1yo=$
zEi+m^+E#3y86O?5Zqb-`hQ8wc1PHpQxl7)J@wcUBymk8rQJBT~QOW3&u=%)XmeP-I
zgV;_0(h~BBRrO%XeMCzrbh7gswkP&4I*0dl=eFDUU@8PIhN+OBy?RJ?!VdBk%SXwj
zXJ^XN`Kj{#-)xtyIBE4bb_}h4aI>sFCmknTr^{V9P-!0QectiM{<CXoGL9d15z-RM
zOd=veTgzv^_t(`yuD@a*pm;g3a3VpiCAtI%dpHoelzz-RI|#9nz^hBjhanVU3BGlF
zrVxgXs|Plm{odKg#LiIx<CMqC>!98X%_%g@RGf@QcNkmvI4Dst<472LkalY(oEvv$
zfcdnHfV(%RByMj`dg1!k?Yy-`#4gu5V7J~dUgtf18rxX}fUYg5c_eWemlh$5A)uN$
zF4izl?64<cZ+v(CW3-VF!NX6yCO6;p2iWOyCLGhD>GR;NbL7<N@p2<nVt@DQK4VPO
z#%P6R_L)}>F!s;=y+3^;<Tj7y>Xy;2zoCvy8C`Yds+(pAWMg8Yq#S$LTbjAGh94N4
ze>hGOwqO<W;zcQFThO3DzoJ&xwP&QtTI`|b5vBg+h#e+2z_JJ@@Fm7awldh$c$yO0
z<)Rr*yL|Gcy)Yu%Hc4jL{0DjeLe(=jRshbi&}_bL9YlK89*8^bIIV~9sTAWn-GWIm
zvbU<)XihMC{!yS#VoD7uZk?o-R49Z=(RgoD=G|xrvO-)!YiiyawH*_y(}&$B+OF45
z-D5l)h1IloH^8nF+|t<zFj^BVv!R)peXvYk+gBxNV5GUg(UY`oTD)31LON)MY!o0I
z_Y}x<=7InO$6n%bk^?wAVU;cl6G*^g7z@%c))e2-)q)8q#Ye!@>Nq}GT-^JCy+_ud
zc^YWyfU-QdpsjIq_l9vcxXL?ie7rnx>s(j=+$P#TyGE0BM#nxF7ZWbW8(@RY&*%u}
z7^%?Idv!;->^oF$775RofqhwM^wch-duJoIx}smX3q#EZpZ*YBhn5r?S`uOAyUdb9
zOKe-+u5A^Vuq_5bknzZmz9P4-UTN$;Svo<?y5l&u^&hXnk;~~g91~q#$i6xuo6N@%
zY+0k@>D$y^q7A5G&?uf5M_vRZup7KaTiWgk**x?quYm-}pV3)il6hbj;4J?|I>vF|
z_1b*v3>SW5J~<zBcT9<zVeXFcCc^%~lIaOD8%EE$JH?o5aZk)WKsWEHq)<<ThH#`G
z54O?|?UUVv()3@3VM~aQi^M6UzTrtYj&MrA`4Pu}5KIzi)SoRYE*64cOy@XnF>@Ep
z!;-MAl<hnZ2KGs7ur`R(?aCzUM_c84k8g%~o-n!Q+|lx3iPKJMp>g}2=W=E8ZJ)`e
zT}8&;=faF|DT5}snTK^3IIsLO_b$E!G&&yTpr+_RxPeKfi;{4_;9%NYM<aW5JdKVg
zDNuun4TgWbXQSas5gcTyP|;i==Adm=_1RDujz@p;gcF{!U<O^*kIT8;wh;%{H<p2E
z*nEt}X>chJLe*j$O%b#*m{xDnW`e9Rt`^Wy>S`DTB+PeFSOKPEc`cm0Z*{mgc(lG~
zyCKs+9K6G&lg?lhZj0L70Tw55KDp(IM1Wc=JUMSqUWKu#(id%k$rM%8VnqliAG**m
zLO=B4^B0en-`z3C97p@rpKg%%)*YpHv)#?yxcx9M_RsCLxwY@x%kMybBjoPsxmQ8g
z-<-Z@m`1k(asn_WEk^1pabij{M%DHT>E?eRZ^`KAmN&|O%#V``mW+n+%^G>?m3<})
zmn<J6myC<RX+^M8<rkxy8#2TYST+FDjMNxRvYZUEU2>e^>3zTd&Rq1-7WwLZ8|9za
zO2z(4m1Flhb&FANJrH0!rfWsaZLIO|>h&W>+wo9V{BW})__^rxk&=wDHXEzx95X3!
z+Jh}`2XUgwghbd~!*;j=Xh~(JMM)-%f*+}Bk}$N{W4MlmRt`@wIf`*<e=+)%+49{{
zCf(knjqn)wzhm-JTvjL3p`~%(naRQx<WFIkwFDFONYH}==oefO?@8Jyh9p5F>3D@>
z$0-@x+BPB2w59ZUVS|iIbyn8dCT)W>_bwzkA!A#nV9DJX!BKbsjIKM(Qr3hR*;WJw
z2~~yUWc{&5oXqNM$=ikPS<@i2gKV!8=ip-d1fRGK#$*dGov)n)_u`Wimy?2_8@P4)
z=RBjLuKlxf5G}bN9m!0OlLnYF@HaZbIwmdIhBp5BwjxuYQ>MkswNs-oKEviGP9=H+
zTe#Wp$UOL!bJ3C}(2~rwI1gH4+bU=Y`r~kKtF|qMhlOB4fEwa<<mh>g+BtsT@fzOG
zso8GXuZp03OS2%!U}8Cc<~zsGxU=w1&7R)O=ZRaZ7E&Mb8l3i!zoLtWrV<zVts@^}
z%|Dc40gm&c4#)G8s?k}<aHmNOOCdoP4w521HZIN+GE*arnUo^X)4t@lnJs5N?PSoI
z-)Wz;b==*n?Z1YWq{aG-mT-Pli9cAJ5y*oK?RX#CwzuJcBc7Og=NSp|LSDVRaIjwf
z`|M<MqAzXkz5>$<Ge?EXDdTWLJ?4l!y6(BH<)&{@NDKp|vUo}icD)43UQ7v(@2-<P
zY@MZJ3$kt2yy%Y~0t~*S5j2{##N??z((#DXXY83fpcKxy4?rGy?DCO=4F?#io)(tY
zVy|$pEP{4R0wmjPu-wMTiS$xzZ<#eV1_F*IvuZ$)`Ak0);_!L)_-G?J-+!daNG)x{
zXBzx-h7%`Co(+TFDZJvoWD2q-z}Q~`Y)8=!6bDttPosQ?u?4RuX(M{Wh?aB39BxcX
zxY4d6ftU;<<};u*<nEY1mWwM0JP9xaGkxZfY11)2>-dU{RCn^`9^S^|es#yoNzwUA
zOzj_u3+w)q9VK!EOcNPxld-NMcTcE;dkXs~*s{*VjRz8gSZp`rR=ofD`ZRg%(*pU{
z|86uI1lI3xz7ik0$(hiuI8-kqnw=wi{Q(oUBjzsv{p{;s7;m!l@_UD5^>1=z6SgFA
zOVmx5j+M{By;2jy-<-h`m>O(dWpQtp6o`NAl1cK=H>b%OYzMjd!Hve4p}&=5IOT`q
z;uHvxe)>+KY~NQRW74DK`P*j61=C~9maiBZ`Ob=y*Y2Dv|MkW3vMIY(KEojZTpak`
z)e|HJS~+`Q^C%dCrhBiQXlw+$@$nHk2BVYihP5elY;Cc;agBjyl^*xp2L3SwRvU2`
zKpJe>U2{RE(f0lO`r|U+91$<F3mRIVY&zyu+Hr`_)zyu_vk~c!#r8fPq(OJ?!ZhiA
zpiKY(KmbWZK~z})+l3#&DEhCj9xY$Jc!GJ)ah;S9#lxyxlVK!AVIqGjXlw}<Y@#4M
zBB62ovA@E~N?efqeZz4n#RTVU2wCsBFj=-jNVlo9QSLr3Nmk+nK%NHs`;Q7`0_-<c
z0COAO9T$U-3l}5QGC4BYv3GrbJ*j_Q>h#aIwSRVswWTAI3>^W3=SOtJW}JfKC9qZY
zI{FajQB%go$v0MvF`s$p#s&y;Ik$8*IUCF`t`04!6N4{uM7%p|t2YgETEf#ajhT#I
z)F#KI{^T{fYG-ClgeT6M>o9N52XDbS&(w@)@Eou?OB+gPuN)`4%WJr&&2-2o&@>5M
zU-23o%PH_Bk3`#+<Dn>!0l^vhCDK_g*iXjd3=j68Ak8_IyOk80ytM!0<i3@Dx;)uF
z!Tgi8uwxSMx8#(|4%o|TohBM^+mF3FZ&@!C3R<Uq(jw9GtF5;IrzLDNu8wi{*Jj9u
zpDmIXf3Q&A_~~i#Ki5waoNFY%d~v@VhXC`Qf6tQLKb|Lh?wu!hT|Y|>9I23>zi>b%
zg$5Ym@Q-iElD$8eD{H?uLq5KDA!SbT%12OqKx4e(tPFYf?uD}M_UW?i$y4OwGc&QM
zSRfxAtbr*7$2Mlq(d#;7d$gqMP`&ffzVVsf#pzXO)S2UOGcqwkngg9d&c5#MmF<Jl
z>&-YhVcw(wNrcUsrL#hv!3Jq?ux10!IZ|n~pZ38X2ZdB4akOU|PF0{JJ`(nsIN;_&
z>Uf}_x^>{=NKRGGAWYU>?a*Y8_Z;Z0<1?lDRnWSiG@Zr(=?+6Zmi{<dra**-$Z4CZ
z+iAN7ZthLaFRn4CMVxm^iV-f+;5GA2n@QBDV29zLN1+#mUt_==oP{H~i32r^wqoz8
zRe7b9p8iQxy~&%gg$3!Y6=GR!1D2KJw|%CqCt}j-Ogga5g|NmSK7_iAy)!NnnpWp_
z+xo7XH%!BwL1A7PEpg#Dk~C@|+<#M+d<=%|Tff>RKLK;d<IEY3SPf^Fl!z?|U_Kk_
zsw6l99Hc?dw}{aG&Z|f2<d0t-YwB*-{!;m$H}i39X^8ylr2|lJj+2EjKz`2|$#Va@
z1#((ah@aShLR*5ZCt^uV4YnRoy0^>Bk<s$wf1fRDw-v~@pWGuq{PF~|6-3*m2WSKI
z$nkL^3}pZ057~0}SEtD2u{de%mN{nZ<#@;Oon!559~_dYnIXpZ*jOw+EW~LiZ{C+E
z`Je$Qpa;}cqOr-x-p-fF5XkJps+hGK5QuS`6B2eaJASix#wHzJ@Y_5X9)ELPp`3l%
zNJ&qQkPSPF<gb{ZoSztM+WBdS^D)>xn*e5{yJ^=T0Sbv;hTWu9V7%u|OO$`!dn!)v
zX%yPcdlwp_XD>`QTigVz>O6_%1878TL>`vTOOt>6;1mx2^5Q$W@|U&yVfW4v{Igw0
zqTl91Q|z}d?v)$9I7PnqwOR7@OL-hPPCkOIl458E|L>o6OC^L^#=tuA(k?E*gYnTB
zn|tur-=nmvN=~2~?4Lgw;QmR*%?RRvv8AyF2tWTxO2ndAVyHCKR{9Mcv5>K!#X>mz
z!v1RcR!)gI;f|*iaY5`Y2p3O-V2Vt!e>oQ|0iMLw;Omh=Pg*k2ZPg<?L+X*Op|5$3
zp3JE^e<yFjlesB?q9BUA2N#*yuD#_o+Dn^dq>zXwf%i49!Lf_9a!XE`%$*c3r@`C?
zg-uM8#+#{(ura&TY;7Xn#rIwE8ipp&XPaXk1qOg`Lpj)oNng!*LKsJh59tO?qqNbF
zw!5}ix_8GIQewj-gNKQDM5leSdsw#Tdmw2E+stB&|L<)smSmXp;I1jwDQPhK*51R_
zG8S5~>vF4Q_5W;=>&{7&%#l&j3?cBJUfXBpcmcSdfH~Dax0g!li`lYdb|U&}i0sB9
z=AYj=A|K^c;Y9Hu*sI(pUpgxtb|xcnLTt1A?v=gr2u}SSg)QIxOMX}&@FktllJ@GM
zFW&h~_+&HB)5RY7(U-BezED~)P_SZq>Uc67CC(phFOiWr$}=ezYPK*Y-Gmj7(#i&7
zh>wQ)DCFR3?AqNWl93!HNj%vO4XYTdvd18iKU(aZ-o|_&N;6RHJ6wqY(>aC8<PX{8
zc*nuN5}HWdazMXy9Q5|WKQ)f3Ak-?Ya`s@`qVd|Bw7IUaA-hb*rq;v06;4t@W6j-*
z?HDjxMpuB-B1kwmvFD(@{Xn@{A&-NKEAvF*RXA|rH6*y&oZ<)XPRH^B7uFEtFGBcG
zxA1(=veWj~L7enNEU5`*%T3{56a;)U@JGjb*$1QS#E#DTGY|dn<EkDLvr|_TLb2Wg
zWbvF-dCqmTGCd4u!IN#rqtX*(`lKi+t1O2w7b`jFpT2Y$@8u{oJMK6&L3nBe-EUtx
zAmNyvB*5VMFLzYPc^@5;OV7%bZ(cr0*6yt^hWsd0bu}?xW*=jGlfjah5?eZpwau7k
zyIhD97oVM!*g7r_=XPl^^2L=VBO+mQ=|EAPT>Z1Ha_yo-Ib}*53^WEyBgWKySV4XE
z<3ib4SSJ&pB~^pT^}qalr`)_eMdnYAl}H%<&pliwA8sv?pMQK@W*eqfWHyd;-?*nt
zVx#kAKNcktvC73&;3!OrNryh(m@n&g9G5AWz|fXdA{gaQ_f^Q>-r5g?^%3&Qheu@=
z!q5Oe$4`#GBM`nPX(vEqiJG#pU|w!}Vz+$ryfkR&#hC5Mk373uUfEtQkBw-N&!MF{
z4F)uMvfp$F!|r<yD}T^hnvg-Q;zn7!vs5z2gvf^7rRF3)mMa`ImnY%<@`FOzaj04@
zUy>rDu(gTW2p_G_muKEDkbS^50~%a4z?1lNh0FRKm=xl00eJ6M*BRXyi2iBkM5I&h
zLMKbR+zZcl*1vr}P&OQFdiH9^w$bT{IQ1tQbFd27x$r$5vH5eq#-Wlw+*D+QPsDg1
z?5MrIw@Rj<f1)e<og*zFp2U^-(pkK1W}WA@InF?|RhzayUfB-<jF7^;NB1nJea&ld
z2JF?G+QpmLxjlJ{ow?;^UY&ca+Km53TH`7fX+l@LMn`EgivHj=SOr{|J`7tp6*Wah
z^C=pN035dtq6~S3HQ*r}K8>1c-r&MRin!>`vasD#cswCJ!t6e20zIUVY2nm(BZwkz
zmVLNN!tl;>taQQFqYLArM*gi|TPe$N5QzK9*)~G$lNO2YUG3O5khFw6Lloo!n_v+9
z7mw_Lp)$5Emjg`*!rW&$4l#(pqGTB~1Xukw2f_k0J_dxg)45m>9D(*g`azqS_hOO!
z(2GYX5jAn0mJ}o@5H3*`u`|C$&S4tt97rb$W(pP~ZuVdP^{4ZOmh?yZ0q8LOT(qvU
z&AM8%p#T!^PWC-_9gXpUsxt!+_d8oH0bc1St>om~y~G@(m_F;Ajet^etLkY2xqc|A
zC*ya5b`HK#I!d7m2RC<Tw!7gQ<7K+-x!$JDJh4+U|6B!O-f0xplmX99sU1A1Qck}t
zI|p+P{#MnMD<7Urtv7kIW%LZsG^l#YbS!3PW+ln`J*7rCIC*rGSrnkqi8elr;0^%M
z|1$X70bv=pP_i8Eb=4k*?Y-mVu>r=@?)}9JIQb|@o_lmPjQn;kXSHpmz4(UfyG9sH
zy#CHV$=^30l^K)gNMJ}5>VxXUXh=SA>_&$meERZ&1Uc`tRN1hrL>_wU7>>bq26)yX
z`{AvtGGNawM1J??A=wN@HyQ+leZ+T;0!|3Q>nmmZo=vhMD_ws5<1ZN_s9ohr+htb>
zwg0^yhIp^K`3c#4v`!{Yf}I|r7EbRP0YhV`G%g!k#&T-$hA$u#!r^+d3<9DE2yUox
zk2!}K`?r@gN*<5*X5+;eJRL%nvDj8-=!Pr40({?y?PsSaVIqU@AR8tu?E}6C&7x^w
zfXCnuL#pX{%)&d^e%DynA`23NB!L1egt5JnG>3n^Nt@&D+sCn(fi#(+EaW>Kt9-BL
z*UL=gWfW{z(Vi;lDe1sJi($iq6E6f@9FG(Cp!JjwTRNM7Q){^p7d02TRqrGEXC@gx
z;#i)D6GDMey{n@V)~;}wi998t>>T5N@$Gr?@Sk_f^EjS(WRFj~BdgUUd$Tu4R(hyB
z`-p$hjq2)Z$;ru);NV~xKYqNq8it329nSsUyzrABE{=Wkdw-IbHyoB(Q%FameM(0>
zVIg{K@6BkBe}K)sw4`u(8Y|B?yl@Ds($Jh`=kgn8X-S1_*}X}=Fn^T%@*dC<4me%7
z;Z93xYU_;wd7ioGIp6I9x^_RCc7rc{$!l~qr{+AL^JMoHY3J&E&+qQvY{so0Gvs2-
zZI93CC~d~kAG`*KEv892$9%C5(zVB@b(A-TvAhQF)1h=QYkaJ{yQvVg35R=trf^O;
ze{!6xgR%kB;sQLc>@vJA#c6a@5?BlVB%+sdMUpRS$ecYd#gxe@QOCc-y-#+GO#5F9
zEx8M{Wc|R=61GjwTiD;-9p_`*woOMrT;!(XFWUjXIbY3d^WC;{UOCqRX*AA;XB)PB
zx@UODXJ7uFNJ}mOEqUBaTGAJ}=$s?(ixj+w<HWt#P6c`qtX(g*vZ;F7HXV23WX1B&
z-Em;zSH8AaPe1f;RPN}{BRmB!{Kv-T?%aulXBg|Rw`tov+4z_a2Tx|jJx!L&-7mlK
z&*srH?~Kozymb_woyX?Gb}pttg3psU*6u1r9+}GklP~6vCm}h)rWQl!jG;uR68FYY
z;O4&f+DWnf7@0R>!n~_v`)D!3kuw&Ilc!$I#wmnQ@ehs02j8KC>NLmLKfA72zWY|8
z5jsr+!w;FY>APHw<iP?L{Cb~RJy?R32j2^xTzDaG0S%RM3>q?LtR82u^@ds0cImtJ
z=9w~w2F^8jodUJ`8MtxAHAjX*ffCy&#}K~f-z=<N&2jVRK4D=znET?ytr2oDG+4>x
zG7R5yEFx{WID?w4n4r7bI6j#!cz#NVSwO49H!GClH@dz^n|WB78e-z(57Xcx!NsGn
zeF|4wPUe%^wZ<>*nR!l{h(%f?$_1_B)^fSNaW!!z;&G7xlZFX{c$qGpH@J|tl+pSp
z0&xL|c^POo5dE{Ep+R1H=_PsMi6<l>Awjlm*@BZuW8|r)o{}k3rkJss<>sQx(W6IY
z!-fse$cvD=x;lx9ijpNumY8n?jSbT?bOc=9nV=)TeQ}Qjj3*s|ad{uqRY&Eg0JQ`H
z;M2xON+J%3AtU<whGIDdT6k>7e&<}Yr08IkocRsV65y+}r1M&;$71Bocl}WgVe9V=
z|GGi~u;YT5^~TxIeaUNhGpF{<cI#Xn8M1luELW=^eBc;Mf1dGlls4mX=O6gYuy&5`
z9uMPo_sehmYcFoT=lB@`jbhxkIvb|-1#a34&+nwQ%m<wn65GB&fg5+aL_(vNi|yRf
z&1c5Na1^LjfqpoGt!9{B!70fQR`FZU9q15f5qVl_y>#UUf_`+Bhr#l_m6n_WTJplc
z(GpZAG}+x-=*fuDPc0`cojA^MI&$8G?<j1q-lMUTFCi^CqZ_oOS82Ke_l~>oiyZV*
z92~&Bab}=%dF(7mNAbCbcTbOTbri<>a}V3ov~67W%0}FtYcDUI<<XnGbr#+;A6$Xp
zpw7WI46D@^9QH>_edpo?B-;204-Jxuu$7XGEt5IeuECXwo)81lk7SR%*Im(+o}|+r
z)<fYP>z{bcnUMkAQ!tQK#S#{gf}(YWV*iz%DI2B~8W^jfJz^Pon<KVIG7>A(LQF2n
zn0vOv{>xowF=KLtZ4$(gSQ<>>-D2EXNPg3H*%d+q`<?B~2%5Y|wzCNbgPS((66|<J
z$M#_-J+_ToKX#&R<FN1Duf0ke5w$m&;#NC4_xv!Qo_^V8t-#6zaQoqy-_*rK%^O@u
zH|URlw0X$I3-)(D52Wkw{`u&mkII@gYvk#tpO)0rR4FSflhvzN%avDNDQnlR1w9SJ
zSmM-6Vq&6f-@aY$xZ@53!OopKF@A7ig>-bFk!c&_=FAuaTX$hnTUjjOKBgm{Z6g>B
zX>ECv+`TXbn(f$TvFo_}1$NXfaM@Aw^zYke(vn7lBk?r2B15cxZ>rZDKfWzr-=@x9
zVOl+I@(Qb@5ZahdPwBgKTwSlr2<gwJZLgkz_`aj~I-1M4$LkrNeRlWLleF8*Q)l_H
z>Dp^g%4Ngy+EsYluE}4p57SX8AP5T249jhKYzL$f{K~vfgt2p)0paQ95zy2<eMY<-
zf!(YUm}s#!rr@>Toy~i{Cp5?tU>hV>{!B{-7sS1zCA}@H(@T3BOE1Is<zDPwW<`Vd
zF#;ScxGKF5hTf~7-9+I4pE>yR&r)Nq-jQ(Zhd<hkvW5nGk{vVAMtt0R{m4CEZg=l>
zPvRX;VJxFEX<V9o@w9ZwD=L=e`YH_m95PsOzHyvDP~)K*D-rE6*CbG6gvlhE_T`&w
zx}{2{58M8L9Wf-9#FWlr?B-c-6#+%SO9a^0v0fonSk3<Dr-$WGAx^r*D!PC4doK&&
zg}2KtyG;J_m%kXnQ&Uru#Ky+TEw|hvTeogCf+j{mn#XS;AtAE-%;hH5S6_WqrcRw|
zR^fd$);KnH!03%llds^E!vlUoM=b0VU{<32T#^wci(zP-?xQWRLxXnZJ1S>$=Xq_T
zCHo7D4W7i6&f@GDVz@tZzF-|sKDLJ#*mrsz8VHaUaMx2MY!2q&q;hU&qqK?j&ux+G
zu%(U*Mto+!=AYS?2Lf^kHl}z$6t%;wqraXxh`cbU_{z>+&+<GtLfJ9SUuntUK+)#X
z_q4<>G@t6~>ZJiTI-H@R=iT1Is#pI(fVD|xVB3KTBT%A%$=w;@6uz&60d-EG`JIG;
zjs^ieygMJf`o#(eCL5ulZCgjY3Z|Y60>qv55fK(5mz_Obo_pgTGJX^^cewRVUAGZv
zsV$b=oJzUxo68KQMnKy0#@+4oHGm>8)DR$@;1-HXoSu5|lR2^!TW}+>1!JfQpE5Hu
zTdOWG&^bYn+v(^kDk@sPS%xQPe~x)aEh%3+QhSbdVbEy4^qlGP%&R##N}Y5BTkrh9
zaR@Gl@M+-$9$SIKS#S!`>zhhsg1>ILBc7loNA_3ArMFHuxGF92t|3t%f&c1;C;}8F
zRbb0(@xcmb`<z=jJvEsqbl!ak_GTfFvOEX-5d~26+w*3EMbY{OsfB&9h;ZdS+^V40
zxBgB`dR;&tMBqzW;&b6sf9%EfK^<z6T_C`H;GGeE?#uRcy<;M}xA)pJZGCP-V9CPC
zGHUY2QeIjhu@R{f7#xbag!++LM{4t@cb*71s|!sHwFbLUFnUQW)hPmsK;IxhCV`9u
zx7tz@vUfD2Z}QZau~@-EeSN*`+O<pa^Ydl)?Ad0k9n+z05js#i;J}k}n&s`c-!|{Y
zjT>jKzS4R!iM#zQK4p?jo0%Y`#m6KbTkiscA@K1r9ibL&0#1<Jc%V|g`jho?thz~H
zf(;sVy<tGz3f*V7mX=gY3GkaXGf{}AIz>PcXhVR4q%hd~?Tpv3zA0!53${<rwKw#G
z<7_id;dtb!*W`KFgnazoYjoGeiSYK>lRwguJ}X?mN8}&0<YbP&-wW;}&?;HR4fkRf
zl-2Y?+B`s+)?|8&Kv%?ogTCiqj}y~?eS!dqDVd+qY4P&yOJ>RX92knn(YPvf>JGfX
z5SS4=Vn{44L)o2J)UP6-2y~19x7zlGap*W^-+i&;1Z_AiUAk26x#u1ue0u1ihva8J
z``L*Zb4{pfYHH-Y_ui9BFTGUa;^K@SUkjfs9Z5@$m0K^HEgSbj%elG`dE;8Pe>k?C
z3K%4g1LNv>nwWp&khuUY0dAXsA8{m}wvG5t9{o2b6ind1x*?80SG<NTKK=Cv&rwuf
zi_^NRxgubk>td7DYcCKO478*d#T&q&zNaNV6F#}q0Nsl{fNgHrB&B<=`?9K6ojXS)
zDN-s+k4j@hjj?0YN?rRhp&=8Ov${a6h#9dn7!H~8X+abLMWDYBpav5s+O+F5bLLFB
z>gucH*=L`XYp=alzW@F2%Yg$2OwvI?j`BK%5Og^?Ig*{7Ef-vH0Zw=dG{On<!B2iJ
zIy+0!GGnEzBwreh4MA=r^gVmV`6!RdCXC)PuLB1uX-S~Li?|U#;;61`1l(IzZG96C
zuyHg6yAG*76oH;0z{8cu(^}`w>)LmE?L7kiN=tfQvi^+0e`tx%gir49>RxPjH1xi2
z5NO+b-8b<K&sgkA!`O@@dHkAr^7-x(X{kC2c0oyMS1p1V5i4Rw?1-VRRCk4%epdvB
zHv$|ZXuqkvyj-rk?mAhrWQp8(!;R)0zlDc~%dA<m3`pte=?++~;kc-XnyDhwrcE<f
zE0i)1KK!wCgqolCUAsUw?1J`F<q?-~%s*%ewIJMu6+VQ^BWVi7u(1+&5jWze8nhk|
zb@15QvT!ppF+!4}!#eP(uN8s5L4ae3F?ilL(P=Cm2n+^V;*n)Pdh{P!;updvKdKqc
z$`C++gHtn%IXwKtYjX2le-PRgQ`dV0E?;qqoH{dE3JanAR9%8oJ-F)bpVRf;m$o-C
zK<or;8f!|V09X;TD^@J%O@ta&5l{q%2?ADNv}w~OS+{PT+;PVpI5;EJoMvYy-UkmJ
zG#XJE85stZz!A<$`^JqM&2vgh$_Z#OPHP|PgbzM=Sase!Ie*beIdm*v8mf<DtVG_C
zKXvUO09=R<amoW;#Etl=>mC9j!9nuKkG?FoeC;9`fs>GJAM0K$>RS;|1pEmBr6t{{
z4L_qLej$9)z1ZDoK<d?N1d1wj@3q%pb>h(o^RO(OoDePd;Iy{w*)`H!aZs8X>U8_v
zNw|SofZ<<a(^P&?cI>S&m?b4df%)UN*0tD^Lh1`eKoL*`?5TAhe)ypQW5R?9CxB91
zTr4lV@Pe#ZvBC(Y_>tpEV`HPd@x~i6fBt;2C*(0MGTQuZ<2@m=0erA@BrYag?z;XA
z*_&5y#>mEcY~w~gbs@vEGAMW&f$erR2B&?%>#lE{Da21{i6>TFKZoPw-q285m{uCx
zHCpPUrwFihk*8JHe+Vco=`ObZM@xJrd~#ps(!JQ-ZAj|h8w7OkwKphv=ZDAG(w(z(
zrriDYEO~!Vp)^(GxfZOI(6*uI*bN~z9{{7fS7#Z__}yaI=2!0&0YyL&I2nP4h6dTN
zVS`ZcWVM^Duid+Mn_CK>I0<iRYBJ9S1qJfvn{UeLr=Kn%A<i-1)^-#%rPi!jW25ld
z)sBHn7EG20ZeA!K?mRBd<+(<fqCx@BYJmnf7A%MlaeCmEg|ftT!kxv@GlV{Oi~##W
z3-?y4wsyxL>6cz3PzSqWHLx#cojb2<-|4mY2-q=DX^A&j`X4RvneeHVUB!*=#rCH0
zsGsg4Kuvz#d)-~sI`(f?(DC`kuP&FRQ<Ej{=wWH7JPHO#SFIh?s9`X|U_xw&5wYS}
zr)vHkw=4ak2q*$4B0!-M-N=z6jg2RcA69ee`RAWETZiV(oqIx{yu3W~eCC;F8o$(p
zvhh6dzyq>l#}4z|*JF`Q|7%w-l}i_A$kszgrJ>>wWKTYuj-G&{-#n0(0GBPmhd6!h
zs-=EWe}03HW<(M2GXywlr^JRyMsh@}EZrH&{VbzHBs)q=ysXHRXvs1Y(BkK1EPd+D
z=hR;JV)to%YGjH)zad~%UAg`4(eGR&ITZm?eLPp{s%W<Z_Ma&28?KI9TYv$vAST3y
z7!fPoA~9SWg_c_p=m7#4MtO?eqD713gAYEC`uch!MB@1S>8GE{Lk~S9AAazm#Kgp4
zY-w&4N*y``0S?@hDO1dqaWD*RNB#WgKQ|gt{NT$8C)2ldBsMx+?!Rq?)C7k~X<?q!
zR~*9_O50V#n2r!2l%F^d58_e_e25eAQd(jF>d_B{w*f|jTg3rA0#E~WiU7ye0N4||
z^=m8Tk$b)@<ZadUF9J$Sy27_3S`y}STH<rz6QR?+*j+Us{oWG<xOl94uX}=A$6=`I
zN+LUZT88}nhZo4_+pDFyG)L-bfIM)R3ZTrSBgkMtEI!{+Eq}k~LW2>pvg*MdqpDvN
z0YyL&a1g+`X6ELqtFAKFd+)thwrtrV&ph*t5lH>@Z+|sce&pnvf7`ZglPj*c!uaRb
zyH{R$MP7XIMS1w)hsA10jUPYWe5Ze3JC2DIq)Z%}DsSJjQnnm!kh+rnQde1Mg!sc(
z0OeZ501kB}IR+QvL!5}0(vqIH+`77YX{hJnGs9iD*>emvWLF3TNN`9HY>PSDp1bm`
z-+PaM(vpra^lw_?7s4mqi``M<(f7SUpuR!(UiSvSj>1~z<-+B&<(Z$HEg!Bcg#c<V
zgfFFz0BTrG?zq}T!IOglv3TakXUm0WI!B>ftUAI{-zx%&fFj_I0LKgptw)a@El2Z@
znp5rGdh0D?$0<8ITQ0xsa^u~sw8JmwPc%aQ`s=UDnzz@;I0%_&Gm7uWjT>i-kkfcM
zHKpkMWlSQ|z%neJKS^G_?;`nZdzCa4?}GrUXqXD1EVEE#aQF-hJH&<f=pu$`x;_5k
ziN`nJ^+);nZ(oxZoP_KzeW1q$(tv)70G1ADrwwDTx`9JLX^AJ+{!dF(FVM5j^m#}i
zz_s0E?!Au3=_am)Uf-|x4hd*YAuQwb<x3YyO<jY0{qA>V>HKV|Z<!z=kqIEcTwxsQ
zh&uy(eN{1pZ~Nqf^(FF~d(ISMLT52D5Bj4BC<2Ormk3w^R9b49SPdu})Q&5bf#!Ci
zWy>5J4%W}$xjHoBqO)ep-yUd?%WwaOoHi{RgJ+Y3Mx|g3B@;(Ym7z>aobm+(1WFyY
z+SM25$frB1fX9VGTpTx)%kL^;KU`W{4sP-atE8~1X;>|d^#i0Dr6S;`2q-PFv~n=e
z62A~W>0WF<?aPBSTV`_Z+IzixZj$c3?uUA`%n`4vuU;mBV1Tas{yVZLYYzlZ<0J%(
z18$IV>-4W5a^b5eU1f4H3k}#V^2z23dGemK<lnw{y2+Gf5Pg+HO;-_61O_((c8uj%
z!Z{LG)A^2np3a+<cE;gvjzzI!<z*Mlmlu)8OYZoG%p12?0-75nIC>;iwOq91<Z}C4
zdAgq;&=MM2$ECjfuv8Z1%7*=Q^5TOR$rsLZ49Z)4x*xZOaBnf;A%PN7Pui_c5$G=j
zFg`=0(;vs${wlG*#H+MqaM2RK5I(i-#U`n6Cl~(GtTi8l4*~AI4%EHZ{aA69IpUQ<
zm*}W4x$^(LDRahUOKek}1c7lF0mG>+%{X?Jlwl}1FoHA&+l(?bmgh-%(Lvd?zh3_G
z;DvI<ic?Gpm01`H6|H4Z1bmNxyRfM<hNiP`zAr`RIdG>V6u7(t!`jQg`-+UO-6t98
zwGtRJMgjwaA*AB*<wJyy7)U@%uz=Cj)F4ge2hHiRIn^WNonKrgXD*uR_#nL*Dzvln
zX3+0ndGay-t5XE}5rGyQ5YZsDwe=Dau6CdL5$JZKR$4N6Xo=64!gMcoyM0Js_8I}*
zd);dYy@<dL_Vh~O%lp5+LblhAkd6EIN@Gd3G}hO+>_E}SV4nFod8d6RmWgF!8Ch1A
zSp`q5Uv-LrBA^I#9|4DsI9gB?ylngRmn1bYR^HorK$?#4l7`xHBdBp=9?W!vC2-La
z2%cCjmXGCRd0B3jp9>fil&IjT`)x@>C<4AffPJ+Y_QW23;x*V8`-70TRX6YmC@tAP
zC}@e#gio#02i$Nkc0pOqpk_@6zKdy!ihzG0;6B(dT{u~G{_-nw;mlF;-iCuxU$ny<
z8O@w>mDf(Z{Uhhz=E7N_rf(j9%`&lUEF;UxGApy-Etcv}5l{pafs+xSf{$U~V6b<_
z<k9lNL;oV*UNv3bTbnOc$3RE0EsZvysCnwR)>-7qk@@8tC%?=;%fa%nT<@(tYRbv-
zvfMn?jyR~oQ{Pr1Pd-*m`xaA;?PLUs%4_9NQFZH7cd(68CzojOy<tBezO-b9p`HFt
zOMJHN&Ruz0_hJve&h7s>*1gvQSx3&K&N)7NLS~Bm=EqmdvcG*KH~#OZl994g#;2#t
zh^Ubg6dHwWaaGrz2I4*v3}lYGO3TG4&>n}3kjA=7X|6mXIY*C5UO}@w_Fqfon#-3+
zbY!Su7W4@0uJF|Fihv@Z2y~5rJvuuzDMo&L`=zpA_87VH!FNTD?UrR@OJqd!C<zLW
z!v?$&E;_=o+F$7i%j@Kii!Y#j(okI@%@v2`{R0IeA)(l^eUV&p{(K1vbm*tj5@Oaj
z&b?*S);CFgqn&s3P5c^*BH)QYNYDr&Pivh&k4!d?L-ne(#D80#&xB9zja~O*yQ87^
zeS?7Rz3v+vyBCWc>?xGF;o7rgLDo3=>2KbV=l^*?=1wb?*aQfnB2pwUI2;lCqY%nn
z=oBDL4K>nST_9z}M`h#Aa=GlR4EfuA%T2k63|Fa@S?FFHQQwMyBG54cjuxde4t4CS
zcVGP7MwFo=*mp*O47W6APtTA?fB(Kb{O6rA5dx^uI0Z0$y=7cg-SY=3NT;;Yp>#<~
zgQRqKN;iVijevr5H%P~!yBq25mhSGl``}~z?*D!+FU|}0S!+#vXJ*aJI(rc_NkNvx
zE;qf+!SIz1`h+ziAI0Dwyw<yLS7EyolhrLXNh|0iFJ)*;SWuqfus6f}k!Hs?A?z#K
z2>#U4mT(G|7q75qL$aG%k_R!Pg-Lfs>Z>>(|L9BK!*0I#ob~rP#JXd2cnPav+1e2s
zD?V#SE=9u_2QbC998(cq-_sAb9`^C5RO%b#8gIsoOFzt8aTv;b33Yin0T+@!qi(%4
z=aNqd0V#-_fF3+{g}lswqhv}7f)(Xs_%Q(^u4*w_q;YW{j=Nk2W1=?tSZAawU3+Lx
zJ@!lx0-9|EJXZqB*3447_#XsgXEX4bM&3iQ@ZEUC^!BWAt+gz=(LP)fi)KktM~<y3
z!haEqZhi)rCIZC~FUvcg_h}g2qz_k$f>2UUg`RHQ*S6p=cDJuE6fq_^ysN41d1X?T
zK()voou3z+)WzpB4KgHnlyFh^Q$r_X{j~B_E84sLDk#?Xz;5~w4=OVHQc9`Zs2zB9
zNG(oX1@CDVDjH>8PSlgcm-^)kXhqB&J8e)bc_?VaPlZMMiOmGuwh*(sOGmSQNLX7$
za;40_%&fEjQSh}SJTjCNLhmaR>B|vDR3v6b@0<u|X}bQPk@oSenH70OO=69U=y#eT
z-?-0-B-Ri&2BYM1kNT3$xL#nMiUKBojSfNM*wf!1)vr;x1hlEXvAWeseKGy~`SXub
z!(JP2&Dn|2Mp{vw&hgqLysNC#^~6zQT><_2`m+``r_-9V<9$&V!@f&0Cbm~<-lX9#
ziCIKMpngVDArvCn<8RD%UN`n7V8po0&26JfhE1+#Ih*<o<(AZnp<rV6v?Q8wtdz4{
zEkEsDoy%l?g-vli{KAp~i<3k4{KVQ(6;7HDC3@z%aZlkc(K5(Al%zSX2qFBgXata+
zO46m|rBJx#<oQR2N)Oh6v~aw6m%W?#kCWct5B)Tbl#?S~SvOVzz04kEr;R&{uLFG%
zaa8c=gh|BcNb3+9$JGH+Fp&74XQ?xjHfL?8(b{=kv}Z4@q4dXWAR~04q|cN{k=NuE
z<Y;FR0y0Ufl(7@TX`8M*jJutBjef+2eI<Wir=B+>o&08}4Dk4;*bp?XGYu|Pu6pBn
z3nG(xmjwm1YIXJ?G~Q_zuG3u#qP`y1;}P!;Bm}EM)e`;zZ6>XS2aBrPql*6Hxp?ld
z*fu5y#l-e5y?T@&Xo41R5naa3FXjbZjv>!AUzMt8G!K}(6Lrs}LQL?Cmexzl&qXQE
zD3lZz59#e}%OqXvKkg`FUfo#i2{+7^4!fKZi%5M<je@LaC{K5P=aESqv)5(%G-=%7
zVMAEqc`e*nOQ9<AZlGEhb~dR0`|?fx2>RkAgxwPJvVysBJ2;;T>0}(UZK=uHZ#Z`Y
zsnb!?)Tc2TpT$JKu>3yAz6QMfe>b#??+s<RhpIdinW3a>d3z^+FI?Z#=&}~B&(-8t
zx}>|*C4pfkhKRdC=DHu~={r524K<+|uu9nJH|V~@h0La=qG1S|3rC>pgB}gVCV6ON
zQBZbwsHKf%)(*$`j_k;`Sc%^#s%V!A?C=DUA<X@^672@U{QT}Kq%bg7CGTKHV>@5-
zG*@IMwDCZ5=)AE`PJAl${b9;UcHvexhWlh+d-`&pUXohS&`7@gWoPF*8`bi1sP_Us
zfw-7LVSd$>DtNWJxad+qRg|;U#d!tGuRq8<lV>Oqg2&X<G;;kK*F<GfOwiI7pMfUI
zQW%R=blYI0&+)27$!@hiQ^9be{5ARu6ieDS1ESBf>2;sK0qp6yJY2CgIJtxAb5s2q
z!R&{W`yS=GwOATJY$fi~X4RSqjg-}_ysXAg43&jYR~ZO?ir*7u*;!b6WC&dZ<kKP7
zibC87G>~1%jS1wZ&V}`ML-k*zz!YGv;d7HGRC;+`Iu_NJ(CMI9)4^h)Muv5xg=4;;
zFA0-F#}yI4l#W5<*Lus`!-zh&?s>h=$JzGQM$88k6pdZLt4A{r#|Td+^+{eugU(PX
zN?uWkOca5{mD(g+uI&Blt(vLr!uPLC#_P{t@R2Kde^CGWwU?D;#x*|Xnwj!hPLXwF
zzLoo1HX=~O*NF8rt<<E%W;K+6_a@}9m=M9FQl;!JJ_ecoJ!K&U%C)akFC7?O%W$?1
zi3STiDW{QHxN%LXuR98Lt<Mz7XcQpr66(qM!T9b=Ks)dY-YHz9r@$FlF=P!viE3Fi
zIQfxI>iA0m!9F&dAvFJJ{<_>W<vjDejOtQx6qRxDaRFM*&rrQTr;lub-(uKydc!l>
znfZN_3^J#ab#b!1%VB@&{Uj<Gcsml~>^^auVm90<2P7u_iDWPE&a%(5+~de7P&0Tb
zQPHQ?^H<QKg)SvGRpI{TyNRNCjH3@`i_(W2OA3vGdV`TiTk8k57TG_z)M;y+pi2!e
z(5_r>^rn+ksEKxYX>E&@Kc9C;q{3sN(Y|Z(LWkiZ9(!JVCiznacxP^w5tZ4K6<K6t
zZ&DLZfr*#;tp*UWp=DVVz1}a<4bCIO!sEh7Uu?mnucE&}qbqv_Y1y?NaWDpyxq%LV
zES!zSM<S2i)hX?({<|CgwceDGk~Ios68bUn@6G-m8_BOFf^k%O8t3%GBQ%JKtOp_*
zYP#0W>faBc4TXyj`LZzj%eA26;l2!e{$Q#Q@bU9Q^%nv45jakH-;S_SPPmlk<V59A
zdIpl!nxSZP8~su-9cnL_c$tOCmon1Yp^_UX*YVZAe;W7ua<Ko{2nj?_-7Vx>ToYCx
z%zMmei7f(imrNPQi$}t}is&9Qpk1$`%jMAbZa#p^*l@OgPD$Z!&jsnrzrqyWw-^_g
zE%8*Ej+P!2E_n4yUP<oNmGD})OF90eI8{*&nwgY*F9==|Qj7Tc7u`gQHcOP(#mM2&
zn6KitURQUD8GOw6CLI{_xmpaaJ_mku{YSm0|LZ<~tFI<)!r%gx?h;yYgkIKMSo7C|
z`8#A+`a3dVdL}jezBj>MiTaM{Py)AHpZ1}ye&&s)f)xFIgV3qyK#{;@(;SZFk|;Pf
zb($55%H!`WEgRB+%fARcp^-<z8BG|3mYct|PuT!J)g;p&(SJqHCjRs1_hJ(?RJuG_
z_*2>R4?6X->OYArGYIWICmdX-<5Qc4;@y9OuGBp?5R!NO6u_@T>gn`Sareh)aPwuF
zl?_R1xxvzQFA(&fs%T1?=8)-9tZL4R13O-ytXj}II`i%TzpPj!LJQi%oE8Qg_t*L(
ziIT7+(Qve7zCv9eszU>$xfL5@^lu&)A<jjQnJ*EsE;P;f_;`3k#M5Qx;%`9VLX)oH
z#K+=V7`yL{^iNddEw~p5BVHyJbqTz$=dU)e*OY0?F1tHmX}%HDedt(jhizLDsXPCi
z3PRoA8frpFR8Kp)k}_BNNnTVej*y><ymz8$3Ihnac@a%keWZzX6{rcL+cO1-ti>@y
zqfT;C9Xbwn4#$n!?4>hDe)up0rFdAfZL_l-Rq85@-uL=anAy6C>2&WMTs7+TtqH|N
zBqPV7=G#{xSQ`<fwhcC|T%)omFYM=J$<xT%{ia+L&Tc=<G(V$B!p@d?raS9tr<Dl%
zaG7~@6r=6@F4^%X{5vtRP&$oDF-gg9+-GA7Ooly}%)B$N`}u5liE7@&1s6NZwG?u8
zmX8j9JX06+!bL7D4GWGHM)6Pn_!{dBmETW~evad_jfFo#uJPK99tDx#CuYS!3r95+
z`fWcXpfGjaNPXu`Q<QJcs9}s-EERy1rsetm!#i$BMh@b%s+T?oZ@!weMC0ECyU!>2
z*@>p_xxZO@+k&COLiKT^VbbywpX$L3jC<w6J{N`SV5>bJEAp}foF}sZp3P=J<<q8o
zBEIftvtO834kM6iN{{~k8{i!i#f(_|PI86?+5==)O~%3MQap&cX|5qps?H-|-rUvS
z-F`d|BJn$J1w|L#u?-Vwz9}DD@qB8v9AuMZGyS39+OU{3wLIg3bpxofUA$kk*<dc+
z=l2!zg~25Yp~a@DBJ?AS_};cUQKa>fB(23LA?OMQ0t*?I_twKBrnM9o?uAJ8b426~
zEc$#e3Tflz%1n$SS7gVD<Zl$8fgeET_zPgtBdw()o6fsf=BavMdg=K;VKwD|fjVrb
zjwBSV`)NGYGGygNAdo1?Fnh*0r;RHQHw-m{F#GL>O;<<o!?a;cH+$2S6g(=bztnf~
zuR%dk8df#J(R3Q*vsKnHS0@`)OdZi2S!^|RWCY~{CkbvvD#E&yWX-%%hFc=E>LGCL
zaRx@uKEgqId=kJr<;S^}*@m>h(^~myvQ}n!w|!C6i+DtG9GK!N?4XtSIjd0?3Lnaq
z-nBGOVoS~oeVN>qG%W{Y9Uk_f<xM5i^mF%e3`*(b9J(`CZfOG30-U^2TO}XqHYcy%
z#Ph=fhvjJ-^kmi?|KVomD`KQB3=6Q!Za(C1hKrMw)5w5%d$KE@C>>Syt~fXi1<pn;
z8;w{Prc&(LelK2$FO~|;H{5+<xx;og<`S=LEDpPXO13q;kKo2G1`5=uBn}pqVIdY4
z*1EXJOe}*gbk&OD(ijTmwb>7{p$P|#jI>-~ULJMB&rhp#1-Q!Rp{G!=%l9BH9B^7$
zHE$3gwSF?a!D;QbEDlQYRfwXKvo!Y+Y^*bdz=9}3JFGdRdihe!{chCUf{%w|C00T$
zfoDG(_Om}UUUw;$b`qv+TN2b)r(qKM$T1!LjadqRg4Jbd`p`x>pe&H)_r#B?%L|?;
z36lipCl!8tiT#IGcftXcR1zh+Vfp!I#1Vot8M&EQ70XD0R9rYk<P-j}mHIC~Iqpb&
zq*VN=oC;K*rz^~3RyQ_!cc;txmAQ<j=zqvjDk&<y+?gz5kC#KlrfoL03{K6<$`Y9X
zX&C=Z1Ldfd8O6CM=-$ae`7uI*+;H3|$vEB)nAu><54OKGdn3(MewUpOZAsr8yxsD)
z+Rt1^Ak|J2=HTs{t*(GC&qpV2z1)$O3qitot8ls$-i1ym$DCukkLA)-I7i}Uy7_@T
z5gNpw%lg$kw#e8sEkiJO%hnD*YEB}63Z!B#$ow?l(x|ZrYYB?HmEQS^nos$#9rS5Z
z%OUC83uY1e#>@9*<H6op6e+Iq;BqU+MxxzIpx|oNcxB|Y62vZP&v$z$2z2t`<2?{7
zU$h9dw4wF_4QYDd7(@m4Ngp%Fn@T52YE%gd2G>j3dFb7u{q4t4=FqU<PAKHbM#8qk
z_71*HQCN_w4h^Kc7yk{JpN@ePbK0Zh2PYj!Uct|OlRMZxz9pc_iLPJyRI01Ygn&h?
zSDc+Q0CY=5U@Bf$TFslWBoR6uLR?LN7MTu!>)Q>+^OVTjT3V#%-40t|kC%K<<3|ri
zMn~6xQ#WB9fjD@}7<Y*-`!$^pXOlYCP%>=p57ZUwB50d@Q1xlB3zw!0{<wuaik#RJ
z6$f>Dd$L_+pbY11I3yXUG2#KhNa!j6;()SjGoyg#b$@^1Q}TuD(=Um9>~gIpG-wC-
zZaI38?ZlMi6;YIq)C>&lxH4n9wMTw<{ao(p%O9+g((yQKGivT<o9+@MQ+Tz2(6;tH
zkRBZ&J?F^4l4oso^|=R&wDjb7u6Ad{J-Gb)YdN6O(FK=tm>U8yzWLY%018?V9)h}1
z!OnRFR*_Yekv(zQ8S%S%umLibxL0^0_n|}xv72m`GX@r{^FSvO9T(S3x;B02%OWZv
zHzl+2K;*Y?NGmuSr*RkMC+(JwL>{*#nAq6y*2u`n$D=9tiuXX51ksXLZs(n^3=A#+
z)8ET_L5T4C$~;A?CI`yIzX(Kn38~y<I)DB)E((p_l(AF#g+BrbOeuD~M6VgmuocGq
zXLt;0IMx4c84lxP9H6Tk8XCTMz?2=foU4|bDAY2)JyF|9q|8zHvmAsDacV1tPHLek
z^>1lB{rp5`InCuNLf>}z`$s!#X!&&kk6Q!?GaAdO;ug5%0{e*mrTzK3emY+n%$H7P
zOdVZa2?w7(eOh;ZxVz}xDQco09v-H+s`#+21g;E%yMW*Vp%*`xF|X3BKPbF>;$;*g
z(zkz?DL*+x%S_JM3QrSyED+7oP5-QT=3V)g5T48g3Z@Uw!|lm>duxr|M!af)jtc?1
zRicxQye8>E{YkKph)566Z?j%d15ux(^8M9lDjX6cr1Q-Dlc$B<kDFsKuz)L+(obF<
zcV=Ie9*<$df8L`&$WXjwk!2m~|5!}GL`f-zO{e~{YTiC!Y;saUNa*v|)lnI~D1Sod
zA5$Mo&l3vxyAe_-Exr#QW#1oAakyR<qba{GPPUoscb5HC8hq5*&<0%2x<ZOz9@-zN
zHi|aSeB#trr%BlZMGC7?kt?vsf(H1|if2iRYPYE)TK+$qP37Jc7vZutpB9`px_cR#
zXdM=uvcV1nT4w*rR>N=brovq!L16J-AvsH_{+2jkaRtOHZ9&kb7V6*Hh5cjcH0d#9
z?_!EO=k%d+TG}n`(?vs~)2~GPbM37g&Z2>`jhMZCrKbDz8o8B5m1{A@s$AXI;Oqr0
z*Q1V7vvKMZi;C}kKL2AhOJq=xjHVw3&d%guSk!*08LVyaJvX=sYn`y{-2^)Elo(9!
zHCrjhUAKwKZ|Ie!`)4q4Kb<aE<<HhTmG#9jcUjb=_fy_99@u8R{-Y>x@sSxy@g&!{
z+LZG5%+1zl#eSVtyHwGifAEtzzBii1FuAlO@%zwO024WG^s|BoI7Vg{fzSEzhegY5
zokI~CH%O~aS@DqN8(s*u)^E`qDnKb-u4ZGUPs{&Cr*nWOE9L!6*Ol$jPeqI%EU_~d
zX=L_dIN2wLdz?qx^l!?=cVN$nLV?nZ(`uNg!AQotRyt=O@W=*bdx5B>q<+sW3suk0
zFH?5{`SJ|h8h(T$5;jzUUtgNOQn-RFO_huco0^K?iccm?-9R>I<@<M1DtdbC<j+;+
zlk{;+h5^H&rEl{K_2r5F2g*p0W+$e)HWP|}%n3rE8G#&l6*ezCJT8ff4rAdwoyODf
z7g8BIXeMT6;hmi#@W{xlTT`_MlR~VvzeNZ(MQ2Y>tGXQs6I`L+R3C~$Ievz#GFAfK
z8n2NcvHDFRk<Q^hR;3@fk!q>ip4xGnCm7e~FWgS~AgNuqJ-)F*|DIQuA2{;5@nrVX
zEP`QYj?Fun0MCgCTwJ1Ohi;K@6-k?OmT#{pY2pk1&Or%DK$_Z`jgmHu`kT|{rMJzv
zxE^bdo`)sEi6#_R^VRU2bD$?eQoXSF-9PAHK$BgLU>Ke(2hJG&axc7Zo6VYcd1a|t
z&-$74<-Na7BV=4tz6cWe&E$WsXp;?0JDUOS<1p~(LmjAvTNWMV<ak;EGRTbSidVfJ
zcKICtI7#43&ryH?VftpQG+OT0gt}s6x_g`i4EYf<r8I`a_+3Meo=`|zw&u&fRrHH8
z7+7FET>P_)8pYpe;}imX!L`ZCYc)1jk)P>;x+e9|A~{@0---TrD5e*LAlPTZRFG%<
z2Gc1jWckk6&=hSEh%Vet#ttdyLPi`n|6jR+ZO09O_?TNC%je$@me}D`>!dX<FeNk%
zQYC+UdF?c+Fm#(0z(V$0`rufI<N}Oi*-h|-d<?@tMIjcgID5L`{9U7F{Zz@B0kphN
zjb^;VZ?*pteVUIaLW-ZXmf5@i==Xa85GmxasT#G*qfB#>^(w)5+?k1SGvqflzh+MX
z#~!|1xPHIi{~uC-TDaoYp^r@<M$_{OPCH5#$@qWC6BD?S6A;SXlRS~!KSE)LRCNCC
zr=TR2USCrqG{7y49CX3Hj@$Hm-pBR$2cO9<D-7C<%Kl+l2F(P`xOcvG&zH%~pgMy>
zlz*oiP3($AKfd~Zh5}u3FMvnxC&Qw@S=VJG)<q6#I_`e0Rx&m0Tnd@n66(>P{y)Q-
z4+Q{H+p-HQGX7gq5DLQF#8t*BPH~d8_uE(aECXd?R-u60j=zBt44dbqV6=Tv%)l1>
z4{etuhwIs*hHJUq=$Oxm`wJru{D=_L!99V0MtX<>%iU`W#`FFDe=|`2oXo8*kmg-b
zAPFp$zxQx)fHXALT6em2H~?wfgy&^cDxPD7heA&aO7cGw?l3GyE%cGB;!Gc1@pM*q
zaUMt<9oU?DfAMA|r|;X08O=(0WhyZOq#W?lM`EbbXQfJc)VUe*H})}3$rlp|k*06O
zs@Zd0c5dEUI$Sk9T$~JYM4j}Wcq}YUe<5Ab?x=zz_Iv{Q>q8PBqBZ+#8Fn8O1k^MB
zCteB&pWTuOr^nJ*H<a=#Yuj;yviAlYvR!Ey_@6v}1N^a+2=|OP3Bz7G;BlJfICw{$
z>|q1(3*PBmbB2BvT)wg}N1Xpw0Iy{%>Up@e`^F9%yaoi+LNWi;q-pOtSss0$X-h%8
zZXcYXjP+DB#N*S)4_gZU%SbK_5s}ALiSr?cOB$swEG#CGRajv3{k;^GW!iP&h-HQ6
z51u@s3VkOep!kO;13Aw_s=!zz-$&~B3!&<ZB8+7~us0RyZelp}aoC9gA}clM#+tvV
z0kdrZ|BTmABK*TI0^T4(qVh}^?kG>>)|Y<~Sut)&Q1~md`VWA}*sXrR{gU9SA~B_N
zX`}R=5B%NvG(D&;R%C?(+Sumr<OK-8<WUhVzb60D$gf_xHV~X$ML6O#+RM_=IG2}f
zfkPP7Sr*d&<*#bgaEM)dIH{hJKJpc+<+Z8$<Fr>4b^E%PSxH8ZIBK5xHSRw$o(6xv
zk&5iU@3VTP4p#?N>dRDJ`$ec%7SliHLdqz$Cxv<YSC7P^ke_8ZybV=<(OOD%INLRI
zIv}yFgc(S==Cc3h7sZd7|A7H;&8<e6^%X2t#w)1SMrqMn{|Y^qltR_%XT(C7ajwrb
zU;iP9e*xfnZExiD2k_)xZ*kymIC-uu{0lWTGiSEZ%~_HwMOJ*>*EEU#4j}=wfHh)f
zOZO-R=vPZ_N$B0>?q@>Tg$=Z$CiSWGp@9N_$dAPYsE>FbS0nbgf-44ig{HeNc|Gsd
z-{g~5e+EExRT<fb^BcFZr{DqiAy1w>&W|BSyu0)9pp$JLxKDwxgE2Jz%ZZ`%Uj@J=
zN&=`gVB!w`g>nvbU;%<RQ$?1Rg3hBOrY(}K{M-0afup}9{-`D!5nu`9_RfU=bluP_
zC{wXht5iYFK=*yvaleM+>0fq(q2N+%p?ZsdEt&Y$bG~n2-w0>8S@vzy^jKy-{rBO^
zxHrdyf06W+jQ^75!-RL-FEpl>-)(e_GJG`qV3(W4uBbY$KpyH1`<q=r>aLgoCc2dL
z2Yv~Z<{3d<!aM`J{pK}Iyw48O(_HTc6MCdS15#6=;ZhKsooe3vk|_G2$cp>!)GTPB
zC4*j773UToM!p5r^<M-H0z4dBw8YYb+2UVDoO@8MTlmn;g4IDP9t17#>oKlL@|WW@
z025f-J+@kp^f<kGcK&I5`^5y=tn<f?o=)9+G~wfI&RE(%1D-%q03(tpTL*v1XjOG-
zJRlB09$qp}&Uj$HqKd-%pw3bIjXHc8zh$()F6u|glVycquNQ=m?rILs(I!ItN|YMz
zgR}#7+(owc>sD>aY_863?FTcj@QY94FGp)Pjs%@|GUG&6C|vkL$o@DJ;!qh#;VTxk
z_5XZbHxxJValm&I76od|5=IJ)V|)ezarNg>=cpc_yNw4}|AzLeD(Hifi>_U=llzqe
z<iQ=U=#ZVkl<wE@Vx}f9bMuZbOR8LSC{R->ci(*P8HvRnD1=Yu8nwg6i?#m8;9{Qu
zP{Pnfi-iP>1MFi>R?uXhWBNMDwn8S8&EL%|E@U<?xPQXm9!{>bsvz1|a5zZ4h81Dq
zjF)AKLy*P~6vhg{$^Blh%Bo~&7O$Q=J!(Go2BaRPgcU!mXg<3{t*D5}gKc$%$4n9p
zt-bu!XIT3#qG@8I#@wUG{aQBvzO7%v`&qBfUP}zog_Xr*U7-nFBkI%KC0JgPf8^5z
z(CJj~C`kawI`$jn3Qc0`5QXXNxH?yDxr?=brG@n<hK72Dn@$Yd{#ML6ig+fuk`Ap}
z#c;W!WF|IS$9L?PLB1k{r!OsC7D9dHlG5Lq>MC-wkRngt7)5OzPh+XGlqtv1nh%!>
z_s28m%h3f@1WeCwwq9o|s0bNqt0^cIR1OHtzK@ZgYb{_j;IzrnVIX)u)>N0vt2(hl
zCex|39{(2_2=6=(b3>IQ%)yQXLA6<sf{#f_y0n-Q#B;-Swu|U;+T5Asyi~)s=eVDv
zhzA@_@ZF~ADL2w93t*sB$@mfPAiArfJyuyO{60WPpLeJvAiWQp`zEn;iOP~FS59~o
zJ`=qS_XY0I@V)!p`W4FE-GTi(Eh;)qUg1ev>j~BvhM}I!GFF$)a^3voLF-%YyNyG*
z!1QfeSL#yyyg|sqneFht3BH@FWeTp-iKvP^N*aVtxT-ndWyLorI)C|i_JbX8$gUc%
z*8>6fB+2B3(Uw@43}NZ)UbXLJlHb^Pk7X*8VrCR$?yeP8=-%rTfT<!bY#9AIO1078
zVjPaQbNh?;6PhpakR2u5z1k51XOh&HdbCslGl=3e3TBD%@`ARi5*x_WJR{lZJX+UB
zojlhQNetp;G@o{6YMi^2B+9qFIj||CLW115X?wpVU*VUTW&a$hMzKeRUXHlP!F?{i
z$(|6r*>pR7rOyzAkC@uJU2vp-$n%eNU%dpPyi9(FCcu~1yO&EXp|CP~Pmh8J2zjdY
z@*7<)jChLEw9JBc(04i%S*<HaI#`v*46vl-ZYtJW!WtNoD(mKIVpNz6ZBt)2lw_FJ
zuT7oAF-*X2w@+$#Vg;#)5n4n^DQJx&Ntr&EHPEhdc0t+MX__r8df0^4Dyw~0HCOyr
zrAQn7H1Jr#pmIg+lGR?_+DKc6U@|Pd4O`QY{HSk2n$Ch=DNxDcs-|tHGRf0<y+csF
zF-N+iQY-Z7AJ6%}u9n{m(NT9LEy9|IYi>N-g=nszXyNKxWxb8+23^ibEzLT$8HsFp
z#t39~pVo?ZaW^CdYI1NC6SQ;@I$J~5Vt&&?7av=tuUtvESvPMUP5j4CS}F2&Ton&P
zByNX@7pBuA6lYfn^kZi#MMGoihy}Zc=En#$MF#P#Ak}%E1zBq5)%g(n&8OC;@b2de
z3JXsfvON@au5Lio3|~#huSUH$E<ZoUw8syAUT~yd$$M=Kc>Pp<Xz#fn;vU?NxFwy7
z7Y}-YjkgV=70YobAXUxud}Rg{>X7#xSOSbum8s-=QBo~vej>}0n#2S|H&ad^f5n3O
ziPmB&vqB4HJN@ine5*;(hm||^yTFZ8-w>@d^GRAn>KO039kqyKzZvv~5v!n~y{Vci
zjd`DUbXq`w*=-q>D8e5HJ(wq0)zr$dYco?g&RMo@=Pp6~kI$h1`<zl)Nen;(lpnoT
zf-1;n&TSL!&u@0ll04?JUIkK@@0a=pD6-`JcuT<-ti%D`A0G_+)_8LB(#c0;jV*?B
zG1{2uli``V%50iXlBM%T_WlTkwX0l8jS~oa&>AI6RY~bP@Fsho1CI-#x`p$sT<S_~
zS@*fKA#JylD%jIAj|#C~Yy(It?`%&$nP1@v9F?7@0Z3;fc9nx6{T3arYC4Eq{`#sY
zWkjlq&%%%w9@JpqN2)|eO%19v=}NE(DYE4xyiarq?9*($auSQCPtNx%&CN?0$+PAw
zl~}xQ)4sc$sJL!Mjxlo<%oOoM-5eSw0eO^&KHS)yu(E_BRZb%#WuAI8Ii{KI6*?0B
z@O8N0x|sZd^iL>x!|e5imGx}>IsPv?6EVCP9gqEfdf0W{)!xBseB3(5jgpSb{oW%o
z>y5)j@{yBbl)FE|$h|_uLPDE5O7VM>prF>x>E-DB^-qeJ9i4@0ktS6S+5U-}?y6~!
z+IUWghkhV$ZI(8X<4ygNW?HXla%{{$F_du`*d~zAg)@=>5Yx4VgF5O{H5?4&LfMpg
zxVGZCw#}Xxu$Ikm4*sdU7n%F=SVcO6I(t+trEiYsGcLilT%K{2dsDigHAj<xLISo4
zi!vz6uIKdqPAc!ieN7i`;7t>vQwIj@Kc2;xnEz4{F8>GXqfW?|SgXRs$YY&WyZ6*j
za2(97-n}i0H>OnP>CN-{so$cI2E|B7-VrGoY<9Y-y#qa-=h&B*Y__p|XO6wKa*&jL
zJtJSY&REb>25ngTSJ!a$e*9T=Tjuqi+5heLviQjzkI~3#50*D7))n|hd(2GNzg-2$
z#ATZl<TQI{_f<G!#pr0fOM;dPI#DE)F+HJ~EjP1bf=hNyNJ@}BT-E+hMPRalck^E=
zisg4#Jn}%YL=t$q<DWb_%V$?bpRDwK+Yg$$Fj+5T#%?RpjV>~PqG{x?*c_pd4pd6T
ztf<MQV;zq>$;EpkF@LgTH!<5&#G#MT=u`9$`Iz2MX7>^FuxelcwGhid-R@h;(d;=n
zu1M5wEAUOeI_Xv9{M0B;jaM*3rKckIC2cgHF4(dy5Zko+A{B<yVLMk#(0McAV6?V(
z5OLpTL*yUg;V=V$Em&H?%SkVAe6sO6+_y}q+8^)naLcAo#8xo8gG5ZmrBFM>q>sGt
zDt(eyB<SXEza2M_k$g8HrCwqXJx?<Dwl)==+Li8q!k!Kg_HMPfjKI6{)gb=oS>=@u
zTi1LKu*fJ#IZm}o+)6SG&G*vTWwdc3t8F~6WA;miQBg5F3)Ze;bW#;ive|7v%>Tww
z^71D<DdeZej2vJYJV4)~SZQ}KvxE=nc$~017dB{&;(<2gl**uGd}kK7&I&bNGTI=r
zf52n6rD5-#yW2kt?%4Uq)M8`7yEZ$mSS$dHvNn(o3um#7Co;3;_Ezcd$C<kdLttk0
zB0l_@00mg`cRGV0>}l!XTN<tjMlmA<bnBkIM&;flH2Z^COh3Jf=^c891U31UeH{i4
zP1&8*$7&rx|H|zWo~O?DH$)Sq9n4ei4y0Ioe~oT6=}DA~Q<Z__bHGRbK!upKjG4A#
zPaN7?bW<J2GL0JRldUfgU)oBcOS3MKfMK&*RkmU?u#ekwc-Tee{=2{P<X-@?{k4Ai
z*K%M9H9@ty3!`$E3NJpa(SzcLQ@;nmWGcL?3l($M>QjpYLCYGgh*+%|S#5}#I4Nhf
zy!*@Ivmd1ZB%%dgpn&5hF-EjwhqTItr`5x<KBu)n4QUjeat8)8;Wc;C?cf)7;}B?N
zEBl)eLPR7}UU%Eq$^T@2mtY}#1jT|=i_j`aRI8~xe<EuY1-JA7@qCcCap~gxzM@22
z%}tU<P26k^$-kcAiWYFtgZW)^U<VCPxunrf8uq2u#s74^j?Hx0TU@vY&oVv9x*~mh
zYfQq({XQj*E>Y*j^V-aW(!2dJ=Xyvm@sC>5{R!l&QuVBQB7jZtYa=?Qf8b-|c?j6o
z^5CaCI}5;aiTIG0%*2#^otW!x%UoqASanKP&NB|j9r8E&HVdH>yT*&<w`c;hdO<!r
zyceQEOx@SAp6!VL84xL@@N&D!D#zLRuGf5A#%SH%YBSnuyYgQ{TTK9@+6a1nA`DPP
zUd&%jXJKugeZO(wu=6vEDgayMgMZzqc3EH2!Q23HlRhw9)Wmg0@EC0LztJ}k_GEdK
z!+WC~%n1p?=h;^6sY;!@Sr<hm9;IQ`Ir^x)>`@)-x|5{o8ody;+FYWvqpgrGuD>%p
zd&3SWWur5x44e)_Q!UN5a^p`Q_F0)4eO1&1r4Blx6jlBhctJC^Xw=v{fZRVKH6IOf
z;J9sTANuQ(IM67^nhH0d0tFr|RGR*o73|JF!O)5Z%I58ixbVuF{%pJLWd0_<&Rc(2
zd3*Nih(S8nta8fl;Zr+~dY687*Q|C9T<jku*gt9Tz1^|n<dOcBzCTL5c?U>{os(qi
zA9LQN3c$*AUDQ!`TC{lb^7G2GriD4?DWc28Q}Gz`Mm3@p&27fwKQ@UJuAbZ|&3<uT
z<cQySUoh#%nPhA^@=Fo!pC_`JF^j;W|4$Z6ngA-2Xq*PX`*O@g&rB|Re2x*ihPk5B
zW9nJkjV2?e5a8t}TM_<}@O5_@UzM2dZcvl6H5OP^EXhneEN(GQC?2K{FjWk&WG>v^
zH|}3X#BEEd@@3NYzp84RYVT((W*Kc}d!u(QW#>Zh?JswXanQ#r2MZBSzSZ_%`;vho
z)i)G%QLoF$#eUi$PWPr{lLD6G-E+pP!--=mxJ2)++nbUULAD1c$}o9-Da}bxa_&xH
zV(xpy@7$rs8?9uHi|_}?DR>X8uJe~&w&xAASMc*Al22!s(mr4ePItcmJC^?t<h2Vo
z<qr3sKU=8w^p_#r+@0H4R`@zHTus`@M;pWO75N~$_(92nD$?ULC<C*~rW-FTWU7%n
zMZt+X&rMY1v_HjabhuIO)!va9_b5J(RnZBXtdBBsRC!B1h2o_)5j*#7iOEY4$?U>L
z$W3fITXjfES=*k4wAEIY)y_tsGjRtv(tu+n7{tkNvR9E5h#f3oGk1L^_DX?sY`mxz
z{iX`};36`xH2dh8{)=yGZ)Z3|#~Ky}yRF=+tQkQ!doFyfiJczHZr3rE*E4=?)Kz=V
zanD3&<`agBW<<wQB7X|Y?Uk;tg%A=VzXcKBlw3@=aX$Z+0w)e4#g?77vr*(}%S}D*
z8H}iF8ztYlG03t?5YM!Ap#HCt`G^x@SV%?QwI@*|VA{H=L#4CFM!8IurTf&eIy%2+
z<125e(yuI!9GZv|CF-Oxxa&5qmn-JyyNJnH?u||E&t%KXWZin)Or5Iq(lx)Bu;iP{
zeX%C@2{rpHEzta|kacu?L@^UR-)MWEU^qg9^K5Z{Iy+X0081lwd?ICNHhN+c);b|`
z-7g^Z*yO-V+ifSse(P$2L<!TlwP3fXPRV#W)k;kdp#1!>d%gW%sW!qLR!zF9QLTfk
zfRefpsA9K=Md|7A`IrIfQ~FKsTPeJa%O1L`T3;2UjDGX8b=#SU$^+34XW^jaWxUC{
z&}7xnxjuBQQ11v|lcugT%g#K>GFk4UR$I@2nB}diE)lA7H^`08Qrhew@IRZ%Vskp1
za4AgA9Bgu^tUSxK*wGfJQSUj6siGb4>qNM_?|wRCQe+;hUf3HmVP>A9Q<Op?>fb+3
zr*E`fNn(1*Dr)?@xcP|GQgZ$^g$c?RZ~$^+>mj?o<46d}SEkU7y53HubNG~Si@@(J
z0!5|ou!LNx=1mQ4J*o1w)wx0rZXbLnw8jd|C#p=_dfm)&gUuaZzEFI3_1TqXzS&0J
z<6;HpY&5&>w7{|{0$N_ee8u@v0%xmq4Ov~ePUfNlb%(+lq+qBP#(TrZwsWpSp!i9m
z#3*WSygn~hS?VinL#C~LSW9zQ^O$AZDb+=7O#`Wj=pUC7%MHLqD!F1^9E_Jlc>Mg<
z5}}u7O8lk=kc3$;Ik@r-g|WG#Fhwh}K{AsIC4!SubR#K!4C4|0erG?jd|Kj)RGEYm
zoFfFLR?}i}sHSJVvsRPb-rcPpwFywZ+PNCjL>omcLNcGc=gt34t*jMy7hgU}wAFnY
zQoDRCLZ>Ec#GV|OGwke%s&03piOoZhU6!>1NLcUbh5mY%PP@Zzs{?~V41zj^e6^I3
z3haZLplH$IQ66wZaH#a$zSciP+@`#-jSYu-5tI2^5F0YdS|Cj2?OCwunY^K7-3+V`
zhg4NvyOZ$fSCX(nR>P;4T009KACea8-{<x82KxA|G2`{_1v};v7prS2WTfy;<kF~2
z?DSikjU3>NncXQ^UM~w0%*?WWcitf|WayvQ!#LxWd3R$odo)qQMMd^^b3`1<1CUv2
z+${!>-kgDRxF9sHWQZ&Fl8&+aRK3UankBm3u^6)~FH7=yRCj$nbx#OmDsGCGuR!EH
zo{WR*wd?$O*sHvvqM;uq@@@@+0z_1^8`yJbgEGqx@+ZcNb9o2%@BLw5Fq{`P);Awq
z-J=RQn#7ar9k>@;V-ngb8E2ZUrmeHA@QywQ{aZ!gMqjcnb}Cnu!~!F96H=}YQNzvB
zHyBCdAtTZaKYz4)xQ|ZmxNgd+xxAxtB1%=^sMJx&r2ySbTF}&HWTLHY!jc;)jp>B4
z%F>R({?%#8ry%u4le~R*$D;Lx>&U{!J7}Pm(?3(Ylf!yhX|#^5>y+?{PgL-oB5+#B
zQvFs%W+ynsQYFc<_vXi}aejYZ9#HNKnZsu*1%m#p2z-idTVKv!b#q_<1?CrYPr;In
zkRaZCXTSm6j8qX0%^MbZ_6{LDZH}XzPUx8YC!(>^&bj%{r^!MROWrr6*25pCp+~(j
z_!`nRdO<M}^Oy{8>LGiMsrCrPO<PV&mr7vS2uff_<8|<ddn9^T%W_*GK_!F|=K=CA
zZBPA0mCTx02dkKJWNr<bfPzzQ*2wi+wZYsH(tO>50{7$Ycs*THASx(n#3ie%@nHD;
zTd!n>aOPe67VY)ud|xejePKUMp8vTV@##Ps#jq!cmbQCs8B07pn%3<)>!iYL7}Iik
zJUxG5EET%U?6f^v@OlfeLEX$D^W<K7p_t3euDnuKT^(&aE2+Wxq+`U`DlxnCr^?v-
zyR?M{dp{#qB^do^jm^^>B{9p}(O1{%N+tWi-G&5P5}PzQ(n^Asx%;!lIt(Z;S?lKZ
zGhzavdiLrIdg=OEijfHs4k&?xex1L1V80><7qH2i{$PL!j=WwaLB<sotRuGw$jGO8
z1+Be7r7677&;o47o|aB02khLVh4Xt)uYc67o6o-*^{HPU?uR{_<Xgz?h>GiZdEmkO
zCK8T1(|ONLq9E)tgm7=n`^C-2c`WABOc~mk!Skbjf|>fgFNF>D!h&DREvo`IuaZR1
zT6Q+JCQupRal>4e0Ox{ox5sd9W+^2}*2vC4;UB}1$zl?1rLJOJ*%Ib9nkbp_Ya{n-
zpTOw#<xb*L;e97?DhO<MFVW#>Qi9KsHg#@i5As12Y0(@s1sIOp3JnoY5g(A*+{EYh
z?PJCs%n2rLjH68p&zxuMOvGdykn%guyPb$A2mNl-JG6I#esEdnXoo?I-6Ps$p9Rzw
z)CiUBNPJrQQS4srB5R$1tD^rY?3}1#Qf=U*w85y>a=y;Qz9s-vce(E)=$!gV3`bR1
zDa`V&ciLKhu`(84X=lIRtn%u1w`s9}YJ9DwlB<Ym<<C|b@M=WhriWfsEE&K>s3;_1
z`|aAKdm6)HnUV^FMyBd?<dT(TO^&B+v_QUefCfVr!p#_SqWpo0c+|OD)S2~hlFdqB
zczt*`6GfDxuED|CW#^5^)y?&un;|x>R7tbhl=eo^+4|Z&r}>V->~==_IVb9LkL4|{
zIX|zN&jzhyDYNMJ*RNnhwp-{DPJJC{hdu6hUA)=U*<SsdQ67tU`mH#XDN163eaR&x
z6{3DUvGJL&U51-|OidVJzB2$d8d_om+z^eCRXw?MB}%6v^4{=!O2=ch%N1>SEa~p1
z;HjJ2BN!K*Olal%GFU~d^9G{(>8?_|tK3Fz-1>pRt)gD7L(D`@{S7}O@II{be=?H@
zAT!|wVcR`sCe{4a7#3SUU*xGOf)<k7#HTAuG?jQb{Ec7h!L2S*=)C%fFx}b2=1PmN
ze|y8d!0~$J$nMg-v&riA`Sim=C1QLM9*~4xqv0AqxPJTc&sNV0P>1bG57;d=XJEqJ
zJnJ`$%knr`=Ws5)%aJE*XgM!QjN!+MbTsNsnypFG0B0{B1hnXJeEv)#I|SGjbVI!+
zKA=*Yb7)tMSZj?s_dWDnH^r?M>76TsSArbXJM+^`wPl|~W^qk`{MHZHcv{qOY1}9N
zZMhLAfbDb4fcDWo^=Dg^wHPPgMr@h!d=%>r<8_(ySiAIN3!cP2Y*ZU=?%SIiqPx@W
zoj312`rGtsZU>T-_}HD+0<ewOV;x%Rxr<iE=gH3>?<>uel*w#)p#&yzJC+_31S`Nf
z3Vys9?6a?*!+A_m*Y+`i>q-9b#Yh5l^nOR<R7R*m?FOM8dQ176G?Z6<&N=R!Yo$hc
z$#XsZKx)YG%Sa!A{=*pDT+rrO2d5X3vJgc>NB2S%tZTJ<WHDQrNl(QuDrWTGCA=P&
zq0nJ-kbnU)^;v%gM5OufyzGelBP6hSKN8X0jtwv~wbQoyiy64KaPvaVtb|2YnS^s9
z#qSTq4vkz}0qV_|LC}B71|wC+P=9840*RsOd1wmBfTAD>_`qYe<^+A04r$YOVODj9
zNzT_~xLXTF!z9+cgk=)xSC>a;XFXY}y37`jv&G~(bKZ+Lf(QP`9q}UAcr80(8Gae>
zuu0a{B~U&&ZO|wh>!i~Xzt~k~iYv}P)DeJ^eetxeE-9|MU*8BQj;u>{&i`-8A7HI#
zV_bh9V3wZ{e(rR?_w?%p{lv8jXXNfQJDZ(%R55IHLUj;c4c5OE2w&1?B_2v^DM^Ti
ztTkYZ)%8ZWoOoTdWNLXZa^`W5ii`hT_T==6Iyc#BoF}3N-pQzL-ewS@g1}MDaBZ#j
z0*^>uLcy|lJ~%HM{u(yk^lx-aj0TE6*&Q8$KyZVA8W3t0#6(TyR=jHQs1@LH&TMR}
zW1TZb{y`?pwrX(e@C-FM0P<;-4Mr7-BWDDkUj1Jn>yjb@wT;zrJu5JC5b#hCkyx{Z
zs!S57Zfh_+Cq2b}C<pNw5MxJ6rkjVoUcL#{QUSa&(5KnS5`XJ7HV~~8_xO)UNM!ZO
znTH&7a4D+0<(_u|T5avY5XkoXV>Z^n?`My>magP}D9P$i8cl9b?oQsf2e&l0N_oSq
zxF_)c>(da#Jd<Uh%VRYWz}iEFTrg$qQ`vX9bZWReX&UK?uFl&r&acjUHR{2YR4PGL
z{;ZQ>?!LdP@!M%--o_9i&n#!2j>lP&^K31mcP5^71gd#A{-daVp~H~}FyA84ual27
z8z>QDlsaO`3WadjXqn&twCbmOXQF(9)RAXQg&l~aN_H%%e`H`gwtrfs;xc=_6SYiw
z*HSs7<#a@9CpLQ~66|ZM)?vi>8+l-blmG?Sfg?y8XRy|p5FFPcI$pQ1F{A2Ky5IPi
zC9?_d=*)mtmt$Bs$G9fG%zfg{=$RyAcCe=1pIn_>pWJ+u&c4Rp9BBbe?<9@k!f!hR
zj14%h(32g}efS1!I?ZZmhjmL%%*Q}4?XI8TV&|O5VmPzcTv44%K^QCOT+^Ze0r)n9
z#!>4Km65}h1oiM@T3Vr-^KFgQ{Q0oq^;}nx(n|UE5{jq$-oHK_6I{b1DWaeB1D0I<
z%Kx1I-~I-N!dr7=R_ga|sX!sFfsGRNZ4S@}n`8!@O=RW0^v;F7Duhbde!XSV!o_(Y
zCPM%^_gZ&~RMz96l1Gnm*a>*7g=!MAUn&qFHk=*2wQ_W~aH)~<xVSsJKIgNV3>RKA
zi=8O=q9lNYr2;jW?c=vfcq$p0m)56Ax*$6j+%DEMqP+L9xJ*i;3UxDrogUbhY3FyG
zB=~G}>6jlrN%_asQR)J&4pLi_9vScvKvUIPi14FN=`%9}mfV|%Vqu;8h4h(2Nq=S9
zLb>@d@?4Af_t<ZRcUiY07qJ)=EI*$6<g9E238f1hWM%kD<L-`ywe8K+gEaCPEXHtn
z?R^Wi$|O5JEb>10Q@pBV`g<4!aP7>%i5*Tmu7yYDL&>~s@RCtIs`F(oc&pWP8ctqV
zIxQCuXvk(}nVtox!(q|oD!#-;H44l1JGN$qkU7+y;3FX7DzFvL7Ky^$SLk`0eLf)Q
zY-(IYuga;EQn&S1%i|0|3IswjdVL>X%1Z|$^w$gOLW0eEsYA~Uj1>dyLsF6=Oawez
z7r|p#c8hU`gM|WELerS6u21h2KzVuD)LNS9CiQdPIixh6r69-iMn07EPKji*{A5_k
zm5{z2pJFO}t|(cvkNja_R%hq<6Bv71^0PUGYV#QyR;p?FD!_&@;D(Zr(<v&tDeB*h
zr<)Qu((*zj$sD}PJr*M_Q|f2Vlyn{)XHJzGJec4?wH%+EY$Dy;Qt02c?Jvq7iQHBf
z4gihh<Vc38er@P-W`riv@0Kav>(}rKAy{VDmoaXf+F+zYKrdE&W{?@tHqmD)yA{Su
zu(vfbK-RE5%?upe8v~hdY!j%^4d)@Q=Tz2mKfi5v^3PtS0b9;v*aET{DIn;Sg5FGm
z%Xq7~&0qDTN2mkNcMrzO7<PxB;yG-4ZOoLf)TEC@=EURDDC)>%QDI?G)6=Px=LqF~
zQ4+qwZV>A`&QX2Fctvn}3-^<<C|dm09_&pq=;Ls?68kGZRwkCz^t(IGF}|F0l%1`Q
zESb=~*_$(K{k7}I(CQ^0E7$vqc@>9CO1wRL{ReLov*#r7{)Dk7UIjpz(J(07>k-NW
zexCla$p@cjk8TsY4b(vdS~Cq*f*O@oTVLrwGmS+Oco*ub<;Jqq6*%KK5^Eo%$~qS_
zOLBaLhxHvMPwEC=ft;>nnKGr#KNfn)Z4G^4F*r!j%IY;%G8yhu*D5U#?!{&`A1Dy1
zV40xC<|1S?teiySymGerBQBt4p$!PE9%k_|$zF}}^cNv~NK`e96wCOSQKti%2+Xaj
z(pg|IlhLSRg*y0qR;75T=OCH_G3<LlSD>Ptc0qg|levj@?sPw^z+zTj9zunxb#84u
z0v6UZTjPhWuj8sK$v-vz@u|<A13Bo=kjGOp`~q<G>cV4Ran?1?M(&r#6&9{cH3umc
zy>VDzs`l)bv1>6F5BL<8PtB;g!T#$%@|(K>pCfJ6#{BpXC)#Iza><O)X(OiRx;^!a
zqq%)a+tLMvJ^Hv=f2*y4hs6TSH-9ei@A;`e_7$rM=`mOT-d2lC1q`2mr1S`{P<_DV
z;6G*N{=biaA}+9DJK46|`#+;R`GbLHwn%-4|MwAK!oW7GFMqJ&pBl$+D06`KDX$sZ
z{y!c?r~yi2D&n2|hq2!TuL9dHs;*|Ff0`A)A4wVk40P9DYjXWx4np<49#G%nj?qs9
TeG+~G{Jj&F7AkzJ>;3-#UMHto

literal 0
HcmV?d00001

diff --git a/docs-src/docs/schedule.md b/docs-src/docs/schedule.md
index 305e5e6..adbb56a 100755
--- a/docs-src/docs/schedule.md
+++ b/docs-src/docs/schedule.md
@@ -413,6 +413,128 @@ if args.qe_calibration:
 
 The generated YAML stats file can then be provided using the `--qe-stats-file` argument. An example of a generated stats file can be found [here](https://github.com/NervanaSystems/distiller/blob/master/examples/quantization/post_training_quant/stats/resnet18_quant_stats.yaml).
 
+## Pruning Fine-Control
+
+Sometimes the default pruning process doesn't satisfy our needs, and we require finer control over it (e.g. over masking, gradient handling, and weight updates).  Below we explain the math and nuances of fine-control configuration.
+
+### Setting up the problem
+
+We represent the weights of a DNN as the set 
+$$
+\theta=\left\{\theta_{l} : 0 \leq l \leq L\right\}
+$$
+where \(\theta_{l}\) represents the parameters tensor (weights and biases) of layer \( l \) in a network having \( L \) layers. Usually we do not prune biases, because of their small size and their relative importance to the network's accuracy.  Therefore, we will consider only the network weights (also known as network connections):
+$$
+W=\left\{W_{l} : 0 \leq l \leq L\right\}
+$$
+We wish to optimize some objective (e.g. minimize the energy required to execute a network in inference mode) under some performance constraint (e.g. accuracy), and we do this by maximizing the sparsity of the network weights (sometimes under some chosen sparsity-pattern constraint).  
+
+We formalize pruning as a 3-step action:
+
+1. Generating a mask - in which we define a sparsity-inducing function per layer, \( P_l \), such that
+   $$
+   M_{l}=P_{l}\left(W_{l}\right)
+   $$
+   \( M_{l} \) is a binary matrix which is used to mask \( W_{l} \).  \( P_l\) is implemented by subclasses of ```distiller.pruner```.
+
+2. Masking the weights using the Hadamard product:
+   $$
+   \widehat{W}_{l}=M_{l} \circ W_{l}
+   $$
+
+3. Updating the weights (performed by the optimizer).  By default, we compute the data-loss using the masked weights, and calculate the gradient of this loss with respect to the masked weights.  We update the weights by making a small adjustment to the *masked weights*:
+   $$
+   W_{l} \leftarrow \widehat{W}_{l}-\alpha \frac{\partial Loss(\widehat{W}_{l})}{\partial \widehat{W}_{l}}
+   $$
+   We show below how to change this default behavior.  We also provide a more exact description of the weights update when using PyTorch's SGD optimizer.
+
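+The toy snippet below walks through these three steps on a single weights tensor, using a magnitude threshold as a stand-in for \( P_l \).  It is a minimal sketch for illustration only: it does not use Distiller's pruner API, and the loss is a placeholder.
+
+```python
+import torch
+
+torch.manual_seed(0)
+W = torch.randn(8, 4)   # the weights W_l of some layer l
+alpha = 0.1             # learning rate
+
+# Step 1: generate a binary mask M_l = P_l(W_l).
+# Here P_l keeps the 50% largest-magnitude weights (a stand-in for a real pruner).
+M = (W.abs() > W.abs().median()).float()
+
+# Step 2: mask the weights with the Hadamard (element-wise) product.
+W_hat = (M * W).requires_grad_()
+
+# Step 3: compute the data-loss on the masked weights and adjust them.
+loss = (W_hat ** 2).sum()    # placeholder for the network's data-loss
+loss.backward()              # populates W_hat.grad with dLoss/dW_hat
+with torch.no_grad():
+    W_new = W_hat - alpha * W_hat.grad   # the update shown in step 3 above
+```
+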
+The pruning regimen follows a pruning-rate schedule which, analogously to learning-rate annealing, changes the pruning rate according to a configurable strategy over time.  The schedule allows us to configure new masks either once at the beginning of epochs (most common), or at the beginning of mini-batches (for finer control).  In the former, the masks are calculated and assigned to \(\{M_{l}\}\) once, at the beginning of epochs (the specific epochs are determined by the schedule).  The pseudo-code below shows the typical training-loop with ```CompressionScheduler``` callbacks in bold font, and the three pruning actions described above in burgundy.
+
+<center>![Masking](imgs/pruning_algorithm_pseudo_code.png)</center><br>
+<center>**Figure 1: Pruning algorithm pseudo-code**</center>
+
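+For readers who prefer code, the hypothetical snippet below renders the same loop for a toy model.  The callback names belong to Distiller's ```CompressionScheduler```; the model, the random data, and the ```schedule.yaml``` file name are placeholders, and the exact signatures should be checked against your Distiller version.
+
+```python
+import torch
+import torch.nn as nn
+import distiller
+
+class Net(nn.Module):
+    def __init__(self):
+        super(Net, self).__init__()
+        self.conv1 = nn.Conv2d(3, 8, 3, padding=1)
+        self.fc = nn.Linear(8 * 32 * 32, 10)
+
+    def forward(self, x):
+        return self.fc(torch.relu(self.conv1(x)).view(x.size(0), -1))
+
+model = Net()
+optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
+criterion = nn.CrossEntropyLoss()
+scheduler = distiller.file_config(model, optimizer, 'schedule.yaml')
+
+steps_per_epoch = 100
+for epoch in range(200):
+    scheduler.on_epoch_begin(epoch)
+    for step in range(steps_per_epoch):
+        inputs = torch.randn(32, 3, 32, 32)       # stand-in mini-batch
+        labels = torch.randint(0, 10, (32,))
+        scheduler.on_minibatch_begin(epoch, step, steps_per_epoch, optimizer)
+        loss = criterion(model(inputs), labels)
+        loss = scheduler.before_backward_pass(epoch, step, steps_per_epoch,
+                                              loss, optimizer)
+        optimizer.zero_grad()
+        loss.backward()
+        # invoked after the gradients are computed, before the weight update
+        scheduler.before_parameter_optimization(epoch, step, steps_per_epoch,
+                                                optimizer)
+        optimizer.step()
+        scheduler.on_minibatch_end(epoch, step, steps_per_epoch, optimizer)
+    scheduler.on_epoch_end(epoch, optimizer)
+```
+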
+We can perform masking by adding the masking operation to the network graph.  We call this *in-graph masking*, as depicted in the bottom of Figure 2.  In the forward-pass we apply element-wise multiplication of the weights \( W_{l} \) and the mask \( M_{l} \) to obtain the masked weights \(\widehat{W}_{l}\), which we feed to the convolution operation.  In the backward-pass we mask \(\frac{\partial L}{\partial \widehat{W}}\) to obtain \(\frac{\partial L}{\partial W}\), with which we update \( W_{l} \).
+
+<center>![Masking](imgs/pruning_masking.png)</center><br>
+<center>**Figure 2: Forward and backward weight masking**</center>
+
+In Distiller we perform *out-of-graph masking*, in which we directly set the value of \(\widehat{W}_{l}\) by applying a mask on \( W_{l} \).  In the backward-pass we make sure that the weights are updated by the *proper* gradients.  In the common pruning use-case we want the optimizer to update only the unmasked weights, but we can configure this behavior using the fine-control arguments, as explained below.
+
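+To make the distinction concrete, here is a hypothetical sketch of the two masking styles on a single convolution.  It illustrates the mechanics only, and is not Distiller's implementation:
+
+```python
+import torch
+import torch.nn.functional as F
+
+W = torch.randn(16, 3, 3, 3, requires_grad=True)   # convolution weights W_l
+M = (W.detach().abs() > 0.5).float()               # a binary mask M_l
+x = torch.randn(1, 3, 8, 8)
+
+# In-graph masking: the multiplication is part of the graph, so in the
+# backward-pass autograd masks dL/dW_hat automatically to produce dL/dW.
+y = F.conv2d(x, M * W, padding=1)
+
+# Out-of-graph masking (sketched): set W_hat directly by mutating the weights
+# outside the graph; the backward-pass must then be handled explicitly
+# (e.g. mask the gradients, or re-mask the weights after the optimizer step).
+with torch.no_grad():
+    W.mul_(M)
+y = F.conv2d(x, W, padding=1)
+```
+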
+### Fine-Control
+
+For finer control over the behavior of the pruning process, Distiller provides a set of ```PruningPolicy``` arguments in the ```args``` field, as in the sample below.
+
+```YAML
+pruners:
+  random_filter_pruner:
+    class: BernoulliFilterPruner
+    desired_sparsity: 0.1
+    group_type: Filters
+    weights: [module.conv1.weight]
+
+policies:
+  - pruner:
+      instance_name: random_filter_pruner
+      args:
+        mini_batch_pruning_frequency: 16
+        discard_masks_at_minibatch_end: True
+        use_double_copies: True
+        mask_on_forward_only: True
+        mask_gradients: True
+    starting_epoch: 15
+    ending_epoch: 180
+    frequency: 1
+```
+
+#### Controls
+
+- ```mini_batch_pruning_frequency``` (default: 0): controls pruning scheduling at the mini-batch granularity.  Every ```mini_batch_pruning_frequency``` training steps (i.e. mini-batches) we configure a new mask.  In between mask updates, we mask mini-batches with the current mask.
+
+- ```discard_masks_at_minibatch_end``` (default: False): discards the pruning mask at the end of the mini-batch.  In the example YAML above, a new mask is computed once every 16 mini-batches, applied in one forward-pass, and then discarded.  In the next 15 mini-batches the mask is `Null`, so we do not mask.
+
+- ```mask_gradients``` (default: False): mask the weights' gradients after performing the backward-pass, and before invoking the optimizer.
+  <br>
+  One way to mask the gradients in PyTorch is to register to the backward callback of the weight tensors we want to mask, and alter the gradients there.  We do this by setting ```mask_gradients: True```, as in the sample YAML above.
+  <br>
+  This is sufficient if our weights optimization uses plain-vanilla SGD, because the update maintains the sparsity of the weights: \(\widehat{W}_{l}\) is sparse by definition, and the gradients are sparse because we mask them.
+  $$
+  W_{l} \leftarrow \widehat{W}_{l}-\alpha \frac{\partial Loss(\widehat{W}_{l})}{\partial \widehat{W}_{l}}
+  $$
+  <br>
+  But this is not always the case.  For example, [PyTorch’s SGD optimizer](<https://github.com/pytorch/pytorch/blob/master/torch/optim/sgd.py>) with weight-decay (\(\lambda\)), momentum (\(\rho\)) and learning-rate (\(\alpha\)) has the optimization logic listed below:
+  <br>1. \( \Delta p=\frac{\partial Loss\left(\widehat{W}_{l}^{i}\right)}{\partial \widehat{W}_{l}^{i}}+\lambda \widehat{W}_{l}^{i} \)
+  <br>2. \( v_{i}=\begin{cases}
+    \Delta p & \text{if } i=0 \cr
+    \rho v_{i-1}+(1-dampening)\,\Delta p & \text{if } i>0
+  \end{cases} \)<br>
+  <br>3. \( W_{l}^{i+1} = \widehat{W}_{l}^{i}-\alpha v_{i} \)
+  <br><br>
+  Let’s look at the weight optimization update at some arbitrary step (i.e. mini-batch) *k*. 
+  <br>
+  We want to show that masking the weights and gradients (\(W_{l}^{i=k}\) and \(
+  \frac{\partial Loss\left(\widehat{W}_{l}^{i=k}\right)}{\partial \widehat{W}_{l}^{i=k}}
+  \)) is not sufficient to guarantee that \(W_{l}^{i=k+1}\) is sparse.  This is easy to see: the momentum buffer \(v_{k}\) accumulates gradients from earlier steps, so in the general case \(v_{k}\) is not necessarily sparse, and therefore \(W_{l}^{i=k+1}\) is not necessarily sparse (the sketch after this list demonstrates this numerically).
+  <hr>
+  ***Masking the weights in the forward-pass, and gradients in the backward-pass, is not sufficient to maintain the sparsity of the weights!***
+  <hr>
+  This is an important insight, and it means that naïve in-graph masking is also not sufficient to guarantee sparsity of the updated weights. 
+  
+- ```use_double_copies``` (default: False): 
+  If you want to compute the gradients using the masked weights, and also to update the unmasked weights (instead of updating the masked weights, as is usual), set ```use_double_copies = True```.  This changes step (3) to: 
+  <br>3. \( W_{l}^{i+1} = W_{l}^{i}-\alpha \Delta p \)
+  <br>
+
+- ```mask_on_forward_only``` (default: False): when set to ```False```, the weights will *also* be masked after the optimizer is done updating the weights, to remove any updates that the masked gradients contributed.
+  <br>
+  If we want to guarantee the sparsity of the updated weights, we must explicitly mask the weights after step (3) above:
+  <br>4. \( {W}_{l}^{i+1} \leftarrow M_{l}^{i} \circ {W}_{l}^{i+1} \)
+  <br>
+  This argument defaults to ```False```, but you can skip step (4) by setting ```mask_on_forward_only = True```.
+  <br>
+  Finally, note that ```mask_gradients``` and ```not mask_on_forward_only``` are mutually exclusive.  Simply put: if you are masking in the backward-pass, do it either via ```mask_gradients``` or via ```mask_on_forward_only=False```, but not both.
+
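+The self-contained sketch below (an illustration, not Distiller code) reproduces this discussion numerically: it masks the weights in the forward-pass and the gradients before the optimizer step, shows SGD momentum re-introducing non-zeros after the mask changes, and then applies step (4) to restore sparsity.
+
+```python
+import torch
+
+torch.manual_seed(0)
+W = torch.randn(4, 4, requires_grad=True)
+opt = torch.optim.SGD([W], lr=0.1, momentum=0.9)
+
+def train_step(mask):
+    if mask is not None:
+        with torch.no_grad():
+            W.mul_(mask)                 # forward-pass masking: W_hat = M o W
+    loss = (W * torch.randn(4, 4)).sum() # stand-in for the data-loss
+    opt.zero_grad()
+    loss.backward()
+    if mask is not None:
+        # Distiller does this via a registered backward hook; here we simply
+        # mask .grad after the backward-pass, before the optimizer step.
+        W.grad.mul_(mask)
+    opt.step()                           # the momentum buffer v_i may be dense
+
+train_step(mask=None)                    # builds a dense momentum buffer
+M = torch.zeros(4, 4); M[0, 0] = 1.0     # now prune (almost) all the weights
+train_step(mask=M)
+print((W.detach() * (1 - M)).abs().max())  # > 0: masked positions are non-zero
+
+with torch.no_grad():
+    W.mul_(M)                            # step (4): re-mask after the update
+print((W.detach() * (1 - M)).abs().max())  # exactly 0: sparsity restored
+```
+
+Running the sketch prints a positive value after the momentum step and exactly zero after the explicit re-masking, matching the highlighted statement above.
+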
 ## Knowledge Distillation
 
 Knowledge distillation (see [here](knowledge_distillation.md)) is also implemented as a `Policy`, which should be added to the scheduler. However, with the current implementation, it cannot be defined within the YAML file like the rest of the policies described above.
diff --git a/docs-src/mkdocs.yml b/docs-src/mkdocs.yml
index dda7e2b..5b05dd6 100755
--- a/docs-src/mkdocs.yml
+++ b/docs-src/mkdocs.yml
@@ -12,7 +12,7 @@ extra_javascript: ['https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX
 extra_css: [extra.css]
 
 
-pages:
+nav:
   - Home: index.md
   - Installation: install.md
   - Usage: usage.md
diff --git a/docs/earlyexit.html b/docs/earlyexit.html
deleted file mode 100644
index 2480045..0000000
--- a/docs/earlyexit.html
+++ /dev/null
@@ -1,253 +0,0 @@
-<!DOCTYPE html>
-<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
-<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
-<head>
-  <meta charset="utf-8">
-  <meta http-equiv="X-UA-Compatible" content="IE=edge">
-  <meta name="viewport" content="width=device-width, initial-scale=1.0">
-  
-  
-  <link rel="shortcut icon" href="img/favicon.ico">
-  <title>Early Exit Inference - Neural Network Distiller</title>
-  <link href='https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700' rel='stylesheet' type='text/css'>
-
-  <link rel="stylesheet" href="css/theme.css" type="text/css" />
-  <link rel="stylesheet" href="css/theme_extra.css" type="text/css" />
-  <link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css">
-  <link href="extra.css" rel="stylesheet">
-  
-  <script>
-    // Current page data
-    var mkdocs_page_name = "Early Exit Inference";
-    var mkdocs_page_input_path = "earlyexit.md";
-    var mkdocs_page_url = null;
-  </script>
-  
-  <script src="js/jquery-2.1.1.min.js" defer></script>
-  <script src="js/modernizr-2.8.3.min.js" defer></script>
-  <script src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
-  <script>hljs.initHighlightingOnLoad();</script> 
-  
-</head>
-
-<body class="wy-body-for-nav" role="document">
-
-  <div class="wy-grid-for-nav">
-
-    
-    <nav data-toggle="wy-nav-shift" class="wy-nav-side stickynav">
-      <div class="wy-side-nav-search">
-        <a href="index.html" class="icon icon-home"> Neural Network Distiller</a>
-        <div role="search">
-  <form id ="rtd-search-form" class="wy-form" action="./search.html" method="get">
-    <input type="text" name="q" placeholder="Search docs" title="Type search term here" />
-  </form>
-</div>
-      </div>
-
-      <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
-	<ul class="current">
-	  
-          
-            <li class="toctree-l1">
-		
-    <a class="" href="index.html">Home</a>
-	    </li>
-          
-            <li class="toctree-l1">
-		
-    <a class="" href="install.html">Installation</a>
-	    </li>
-          
-            <li class="toctree-l1">
-		
-    <a class="" href="usage.html">Usage</a>
-	    </li>
-          
-            <li class="toctree-l1">
-		
-    <a class="" href="schedule.html">Compression Scheduling</a>
-	    </li>
-          
-            <li class="toctree-l1">
-		
-    <span class="caption-text">Compressing Models</span>
-    <ul class="subnav">
-                <li class="">
-                    
-    <a class="" href="pruning.html">Pruning</a>
-                </li>
-                <li class="">
-                    
-    <a class="" href="regularization.html">Regularization</a>
-                </li>
-                <li class="">
-                    
-    <a class="" href="quantization.html">Quantization</a>
-                </li>
-                <li class="">
-                    
-    <a class="" href="knowledge_distillation.html">Knowledge Distillation</a>
-                </li>
-                <li class="">
-                    
-    <a class="" href="conditional_computation.html">Conditional Computation</a>
-                </li>
-    </ul>
-	    </li>
-          
-            <li class="toctree-l1">
-		
-    <span class="caption-text">Algorithms</span>
-    <ul class="subnav">
-                <li class="">
-                    
-    <a class="" href="algo_pruning.html">Pruning</a>
-                </li>
-                <li class="">
-                    
-    <a class="" href="algo_quantization.html">Quantization</a>
-                </li>
-                <li class="">
-                    
-    <a class="" href="algo_earlyexit.html">Early Exit</a>
-                </li>
-    </ul>
-	    </li>
-          
-            <li class="toctree-l1">
-		
-    <a class="" href="model_zoo.html">Model Zoo</a>
-	    </li>
-          
-            <li class="toctree-l1">
-		
-    <a class="" href="jupyter.html">Jupyter Notebooks</a>
-	    </li>
-          
-            <li class="toctree-l1">
-		
-    <a class="" href="design.html">Design</a>
-	    </li>
-          
-            <li class="toctree-l1">
-		
-    <span class="caption-text">Tutorials</span>
-    <ul class="subnav">
-                <li class="">
-                    
-    <a class="" href="tutorial-struct_pruning.html">Pruning Filters and Channels</a>
-                </li>
-                <li class="">
-                    
-    <a class="" href="tutorial-lang_model.html">Pruning a Language Model</a>
-                </li>
-    </ul>
-	    </li>
-          
-        </ul>
-      </div>
-      &nbsp;
-    </nav>
-
-    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
-
-      
-      <nav class="wy-nav-top" role="navigation" aria-label="top navigation">
-        <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
-        <a href="index.html">Neural Network Distiller</a>
-      </nav>
-
-      
-      <div class="wy-nav-content">
-        <div class="rst-content">
-          <div role="navigation" aria-label="breadcrumbs navigation">
-  <ul class="wy-breadcrumbs">
-    <li><a href="index.html">Docs</a> &raquo;</li>
-    
-      
-    
-    <li>Early Exit Inference</li>
-    <li class="wy-breadcrumbs-aside">
-      
-    </li>
-  </ul>
-  <hr/>
-</div>
-          <div role="main">
-            <div class="section">
-              
-                <h1 id="early-exit-inference">Early Exit Inference</h1>
-<p>While Deep Neural Networks benefit from a large number of layers, it's often the case that many datapoints in classification tasks can be classified accurately with much less work. There have been several studies recently regarding the idea of exiting before the normal endpoint of the neural network. Panda et al in <a href="#panda">Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition</a> points out that a lot of data points can be classified easily and require less processing than some more difficult points and they view this in terms of power savings. Surat et al in <a href="#branchynet">BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks</a> look at a selective approach to exit placement and criteria for exiting early.</p>
-<h2 id="why-does-early-exit-work">Why Does Early Exit Work?</h2>
-<p>Early Exit is a strategy with a straightforward and easy to understand concept Figure #fig(boundaries) shows a simple example in a 2-D feature space. While deep networks can representative more complex and expressive boundaries between classes (assuming we’re confident of avoiding over-fitting the data), it’s also clear that much of the data can be properly classified with even the simplest of classification boundaries.</p>
-<p><img alt="Figure !fig(boundaries): Simple and more expressive classification boundaries" src="/docs-src/docs/imgs/decision_boundary.png" /></p>
-<p>Data points far from the boundary can be considered "easy to classify" and achieve a high degree of confidence quicker than do data points close to the boundary. In fact, we can think of the area between the outer straight lines as being the region that is "difficult to classify" and require the full expressiveness of the neural network to accurately classify it.</p>
-<h2 id="example-code-for-early-exit">Example code for Early Exit</h2>
-<p>Both CIFAR10 and Imagenet code comes directly from publically available examples from Pytorch. The only edits are the exits that are inserted in a methodology similar to BranchyNet work.</p>
-<p>Deeper networks can benefit from multiple exits. Our examples illustrate both a single and a pair of early exits for CIFAR10 and Imagenet, respectively.</p>
-<p>Note that this code does not actually take exits. What it does is to compute statistics of loss and accuracy assuming exits were taken when criteria are met. Actually implementing exits can be tricky and architecture dependent and we plan to address these issues.</p>
-<h3 id="heuristics">Heuristics</h3>
-<p>The insertion of the exits are ad-hoc, but there are some heuristic principals guiding their placement and parameters. The earlier exits are placed, the more agressive the exit as it essentially prunes the rest of the network at a very early stage, thus saving a lot of work. However, a diminishing percentage of data will be directed through the exit if we are to preserve accuracy.</p>
-<p>There are other benefits to adding exits in that training the modified network now has backpropagation losses coming from the exits that affect the earlier layers more substantially than the last exit. This effect mitigates problems such as vanishing gradient.</p>
-<h3 id="early-exit-hyperparameters">Early Exit Hyperparameters</h3>
-<p>There are two parameters that are required to enable early exit. Leave them undefined if you are not enabling Early Exit:</p>
-<ol>
-<li>
-<p><strong>--earlyexit_thresholds</strong> defines the
-thresholds for each of the early exits. The cross entropy measure must be <strong>less than</strong> the specified threshold to take a specific exit, otherwise the data continues along the regular path. For example, you could specify "--earlyexit_thresholds 0.9 1.2" and this implies two early exits with corresponding thresholds of 0.9 and 1.2, respectively to take those exits.</p>
-</li>
-<li>
-<p><strong>--earlyexit_lossweights</strong> provide the weights for the linear combination of losses during training to compute a signle, overall loss. We only specify weights for the early exits and assume that the sum of the weights (including final exit) are equal to 1.0. So an example of "--earlyexit_lossweights 0.2 0.3" implies two early exits weighted with values of 0.2 and 0.3, respectively and that the final exit has a value of 1.0-(0.2+0.3) = 0.5. Studies have shown that weighting the early exits more heavily will create more agressive early exits, but perhaps with a slight negative effect on accuracy.</p>
-</li>
-</ol>
-<h3 id="output-stats">Output Stats</h3>
-<p>The example code outputs various statistics regarding the loss and accuracy at each of the exits. During training, the Top1 and Top5 stats represent the accuracy should all of the data be forced out that exit (in order to compute the loss at that exit). During inference (i.e. validation and test stages), the Top1 and Top5 stats represent the accuracy for those data points that could exit because the calculated entropy at that exit was lower than the specified threshold for that exit.</p>
-<h3 id="cifar10">CIFAR10</h3>
-<p>In the case of CIFAR10, we have inserted a single exit after the first full layer grouping. The layers on the exit path itself includes a convolutional layer and a fully connected layer. If you move the exit, be sure to match the proper sizes for inputs and outputs to the exit layers.</p>
-<h3 id="imagenet">Imagenet</h3>
-<p>This supports training and inference of the imagenet dataset via several well known deep architectures. ResNet-50 is the architecture of interest in this study, however the exit is defined in the generic resnet code and could be used with other size resnets. There are two exits inserted in this example. Again, exit layers must have their sizes match properly.</p>
-<h2 id="references">References</h2>
-<p><div id="panda"></div> <strong>Priyadarshini Panda, Abhronil Sengupta, Kaushik Roy</strong>.
-    <a href="https://arxiv.org/abs/1509.08971v6"><em>Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition</em></a>, arXiv:1509.08971v6, 2017.</p>
-<div id="branchynet"></div>
-
-<p><strong>Surat Teerapittayanon, Bradley McDanel, H. T. Kung</strong>.
-    <a href="http://arxiv.org/abs/1709.01686"><em>BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks</em></a>, arXiv:1709.01686, 2017.</p>
-              
-            </div>
-          </div>
-          <footer>
-  
-
-  <hr/>
-
-  <div role="contentinfo">
-    <!-- Copyright etc -->
-    
-  </div>
-
-  Built with <a href="http://www.mkdocs.org">MkDocs</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
-</footer>
-      
-        </div>
-      </div>
-
-    </section>
-
-  </div>
-
-  <div class="rst-versions" role="note" style="cursor: pointer">
-    <span class="rst-current-version" data-toggle="rst-current-version">
-      
-      
-      
-    </span>
-</div>
-    <script>var base_url = '.';</script>
-    <script src="js/theme.js" defer></script>
-      <script src="https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML" defer></script>
-      <script src="search/main.js" defer></script>
-
-</body>
-</html>
diff --git a/docs/index.html b/docs/index.html
index 5d64125..7911218 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -273,5 +273,5 @@ And of course, if we used a sparse or compressed representation, then we are red
 
 <!--
 MkDocs version : 1.0.4
-Build Date UTC : 2019-04-01 14:59:11
+Build Date UTC : 2019-04-08 12:31:43
 -->
diff --git a/docs/schedule.html b/docs/schedule.html
index 2e8db13..559cc43 100644
--- a/docs/schedule.html
+++ b/docs/schedule.html
@@ -85,6 +85,8 @@
         
             <li><a class="toctree-l3" href="#post-training-quantization">Post-Training Quantization</a></li>
         
+            <li><a class="toctree-l3" href="#pruning-fine-control">Pruning Fine-Control</a></li>
+        
             <li><a class="toctree-l3" href="#knowledge-distillation">Knowledge Distillation</a></li>
         
         </ul>
@@ -580,6 +582,127 @@ For examples invocations of post-training quantization see <a href="https://gith
 </code></pre>
 
 <p>The generated YAML stats file can then be provided using the <code>--qe-stats-file</code> argument. An example of a generated stats file can be found <a href="https://github.com/NervanaSystems/distiller/blob/master/examples/quantization/post_training_quant/stats/resnet18_quant_stats.yaml">here</a>.</p>
+<h2 id="pruning-fine-control">Pruning Fine-Control</h2>
+<p>Sometimes the default pruning process doesn't satisfy our needs, and we require finer control over it (e.g. over masking, gradient handling, and weight updates).  Below we explain the math and nuances of fine-control configuration.</p>
+<h3 id="setting-up-the-problem">Setting up the problem</h3>
+<p>We represent the weights of a DNN as the set 
+<script type="math/tex; mode=display">
+\theta=\left\{\theta_{l} : 0 \leq l \leq L\right\}
+</script>
+where <script type="math/tex">\theta_{l}</script> represents the parameters tensor (weights and biases) of layer <script type="math/tex"> l </script> in a network having <script type="math/tex"> L </script> layers. Usually we do not prune biases, because of their small size and their relative importance to the network's accuracy.  Therefore, we will consider only the network weights (also known as network connections):
+<script type="math/tex; mode=display">
+W=\left\{W_{l} : 0 \leq l \leq L\right\}
+</script>
+We wish to optimize some objective (e.g. minimize the energy required to execute a network in inference mode) under some performance constraint (e.g. accuracy), and we do this by maximizing the sparsity of the network weights (sometimes under some chosen sparsity-pattern constraint).  </p>
+<p>We formalize pruning as a 3-step action:</p>
+<ol>
+<li>
+<p>Generating a mask - in which we define a sparsity-inducing function per layer, <script type="math/tex"> P_l </script>, such that
+   <script type="math/tex; mode=display">
+   M_{l}=P_{l}\left(W_{l}\right)
+   </script>
+<script type="math/tex"> M_{l} </script> is a binary matrix which is used to mask <script type="math/tex"> W_{l} </script>.  <script type="math/tex"> P_l</script> is implemented by subclasses of <code>distiller.pruner</code>.</p>
+</li>
+<li>
+<p>Masking the weights using the Hadamard product:
+   <script type="math/tex; mode=display">
+   \widehat{W}_{l}=M_{l} \circ W_{l}
+   </script>
+</p>
+</li>
+<li>
+<p>Updating the weights (performed by the optimizer).  By default, we compute the data-loss using the masked weights, and calculate the gradient of this loss with respect to the masked weights.  We update the weights by making a small adjustment to the <em>masked weights</em>:
+   <script type="math/tex; mode=display">
+   W_{l} \leftarrow \widehat{W}_{l}-\alpha \frac{\partial Loss(\widehat{W}_{l})}{\partial \widehat{W}_{l}}
+   </script>
+   We show below how to change this default behavior.  We also provide a more exact description of the weights update when using PyTorch's SGD optimizer.</p>
+</li>
+</ol>
+<p>The pruning regimen follows a pruning-rate schedule which, analogously to learning-rate annealing, changes the pruning rate according to a configurable strategy over time.  The schedule allows us to configure new masks either once at the beginning of epochs (most common), or at the beginning of mini-batches (for finer control).  In the former, the masks are calculated and assigned to <script type="math/tex">\{M_{l}\}</script> once, at the beginning of epochs (the specific epochs are determined by the schedule).  The pseudo-code below shows the typical training-loop with <code>CompressionScheduler</code> callbacks in bold font, and the three pruning actions described above in burgundy.</p>
+<p><center><img alt="Masking" src="imgs/pruning_algorithm_pseudo_code.png" /></center><br>
+<center><strong>Figure 1: Pruning algorithm pseudo-code</strong></center></p>
+<p>We can perform masking by adding the masking operation to the network graph.  We call this <em>in-graph masking</em>, as depicted in the bottom of Figure 2.  In the forward-pass we apply element-wise multiplication of the weights <script type="math/tex"> W_{l} </script> and the mask <script type="math/tex"> M_{l} </script> to obtain the masked weights <script type="math/tex">\widehat{W}_{l}</script>, which we feed to the convolution operation.  In the backward-pass we mask <script type="math/tex">\frac{\partial L}{\partial \widehat{W}}</script> to obtain <script type="math/tex">\frac{\partial L}{\partial W}</script>, with which we update <script type="math/tex"> W_{l} </script>.</p>
+<p><center><img alt="Masking" src="imgs/pruning_masking.png" /></center><br>
+<center><strong>Figure 2: Forward and backward weight masking</strong></center></p>
+<p>In Distiller we perform <em>out-of-graph masking</em>, in which we directly set the value of <script type="math/tex">\widehat{W}_{l}</script> by applying a mask on <script type="math/tex"> W_{l} </script>.  In the backward-pass we make sure that the weights are updated by the <em>proper</em> gradients.  In the common pruning use-case we want the optimizer to update only the unmasked weights, but we can configure this behavior using the fine-control arguments, as explained below.</p>
+<h3 id="fine-control">Fine-Control</h3>
+<p>For finer control over the behavior of the pruning process, Distiller provides a set of <code>PruningPolicy</code> arguments in the <code>args</code> field, as in the sample below.</p>
+<pre><code class="YAML">pruners:
+  random_filter_pruner:
+    class: BernoulliFilterPruner
+    desired_sparsity: 0.1
+    group_type: Filters
+    weights: [module.conv1.weight]
+
+policies:
+  - pruner:
+      instance_name: random_filter_pruner
+      args:
+        mini_batch_pruning_frequency: 16
+        discard_masks_at_minibatch_end: True
+        use_double_copies: True
+        mask_on_forward_only: True
+        mask_gradients: True
+    starting_epoch: 15
+    ending_epoch: 180
+    frequency: 1
+</code></pre>
+
+<h4 id="controls">Controls</h4>
+<ul>
+<li>
+<p><code>mini_batch_pruning_frequency</code> (default: 0): controls pruning scheduling at the mini-batch granularity.  Every <code>mini_batch_pruning_frequency</code> training steps (i.e. mini-batches) we configure a new mask.  In between mask updates, we mask mini-batches with the current mask.</p>
+</li>
+<li>
+<p><code>discard_masks_at_minibatch_end</code> (default: False): discards the pruning mask at the end of the mini-batch.  In the example YAML above, a new mask is computed once every 16 mini-batches, applied in one forward-pass, and then discarded.  In the next 15 mini-batches the mask is <code>Null</code>, so we do not mask.</p>
+</li>
+<li>
+<p><code>mask_gradients</code> (default: False): mask the weights' gradients after performing the backward-pass, and before invoking the optimizer.<br />
+  <br>
+  One way to mask the gradients in PyTorch is to register to the backward callback of the weight tensors we want to mask, and alter the gradients there.  We do this by setting <code>mask_gradients: True</code>, as in the sample YAML above.
+  <br>
+  This is sufficient if our weights optimization uses plain-vanilla SGD, because the update maintains the sparsity of the weights: <script type="math/tex">\widehat{W}_{l}</script> is sparse by definition, and the gradients are sparse because we mask them.
+  <script type="math/tex; mode=display">
+  W_{l} \leftarrow \widehat{W}_{l}-\alpha \frac{\partial Loss(\widehat{W}_{l})}{\partial \widehat{W}_{l}}
+  </script>
+  <br>
+  But this is not always the case.  For example, <a href="https://github.com/pytorch/pytorch/blob/master/torch/optim/sgd.py">PyTorch’s SGD optimizer</a> with weight-decay (<script type="math/tex">\lambda</script>), momentum (<script type="math/tex">\rho</script>) and learning-rate (<script type="math/tex">\alpha</script>) has the optimization logic listed below:
+  <br>1. <script type="math/tex"> \Delta p=\frac{\partial Loss\left(\widehat{W}_{l}^{i}\right)}{\partial \widehat{W}_{l}^{i}}+\lambda \widehat{W}_{l}^{i} </script>
+  <br>2. <script type="math/tex"> v_{i}=\begin{cases}
+    \Delta p & \text{if } i=0 \cr
+    \rho v_{i-1}+(1-dampening)\,\Delta p & \text{if } i>0
+  \end{cases} </script><br>
+  <br>3. <script type="math/tex"> W_{l}^{i+1} = \widehat{W}_{l}^{i}-\alpha v_{i} </script>
+  <br><br>
+  Let’s look at the weight optimization update at some arbitrary step (i.e. mini-batch) <em>k</em>. 
+  <br>
+  We want to show that masking the weights and gradients (<script type="math/tex">W_{l}^{i=k}</script> and <script type="math/tex">
+  \frac{\partial Loss\left(\widehat{W}_{l}^{i=k}\right)}{\partial \widehat{W}_{l}^{i=k}}
+  </script>) is not sufficient to guarantee that <script type="math/tex">W_{l}^{i=k+1}</script> is sparse.  This is easy to see: the momentum buffer <script type="math/tex">v_{k}</script> accumulates gradients from earlier steps, so in the general case <script type="math/tex">v_{k}</script> is not necessarily sparse, and therefore <script type="math/tex">W_{l}^{i=k+1}</script> is not necessarily sparse.
+  <hr>
+  <strong><em>Masking the weights in the forward-pass, and gradients in the backward-pass, is not sufficient to maintain the sparsity of the weights!</em></strong>
+  <hr>
+  This is an important insight, and it means that naïve in-graph masking is also not sufficient to guarantee sparsity of the updated weights. </p>
+</li>
+<li>
+<p><code>use_double_copies</code> (default: False): 
+  If you want to compute the gradients using the masked weights, and also to update the unmasked weights (instead of updating the masked weights, as is usual), set <code>use_double_copies = True</code>.  This changes step (3) to: 
+  <br>3. <script type="math/tex"> W_{l}^{i+1} = W_{l}^{i}-\alpha \Delta p </script>
+  <br></p>
+</li>
+<li>
+<p><code>mask_on_forward_only</code> (default: False): when set to <code>False</code>, the weights will <em>also</em> be masked after the optimizer is done updating the weights, to remove any updates that the masked gradients contributed.
+  <br>
+  If we want to guarantee the sparsity of the updated weights, we must explicitly mask the weights after step (3) above:
+  <br>4. <script type="math/tex"> {W}_{l}^{i+1} \leftarrow M_{l}^{i} \circ {W}_{l}^{i+1} </script>
+  <br>
+  This argument defaults to <code>False</code>, but you can skip step (4) by setting <code>mask_on_forward_only = True</code>.
+  <br>
+  Finally, note that <code>mask_gradients</code> and <code>not mask_on_forward_only</code> are mutually exclusive.  Simply put: if you are masking in the backward-pass, do it either via <code>mask_gradients</code> or via <code>mask_on_forward_only=False</code>, but not both.</p>
+</li>
+</ul>
 <h2 id="knowledge-distillation">Knowledge Distillation</h2>
 <p>Knowledge distillation (see <a href="knowledge_distillation.html">here</a>) is also implemented as a <code>Policy</code>, which should be added to the scheduler. However, with the current implementation, it cannot be defined within the YAML file like the rest of the policies described above.</p>
 <p>To make the integration of this method into applications a bit easier, a helper function can be used that will add a set of command-line arguments related to knowledge distillation:</p>
diff --git a/docs/search/search_index.json b/docs/search/search_index.json
index c275610..a77da70 100644
--- a/docs/search/search_index.json
+++ b/docs/search/search_index.json
@@ -1 +1 @@
-{"config":{"lang":["en"],"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"index.html","text":"Distiller Documentation What is Distiller Distiller is an open-source Python package for neural network compression research. Network compression can reduce the footprint of a neural network, increase its inference speed and save energy. Distiller provides a PyTorch environment for prototyping and analyzing compression algorithms, such as sparsity-inducing methods and low precision arithmetic. Distiller contains: A framework for integrating pruning, regularization and quantization algorithms. A set of tools for analyzing and evaluating compression performance. Example implementations of state-of-the-art compression algorithms. Motivation A sparse tensor is any tensor that contains some zeros, but sparse tensors are usually only interesting if they contain a significant number of zeros. A sparse neural network performs computations using some sparse tensors (preferably many). These tensors can be parameters (weights and biases) or activations (feature maps). Why do we care about sparsity? Present day neural networks tend to be deep, with millions of weights and activations. Refer to GoogLeNet or ResNet50, for a couple of examples. These large models are compute-intensive which means that even with dedicated acceleration hardware, the inference pass (network evaluation) will take time. You might think that latency is an issue only in certain cases, such as autonomous driving systems, but in fact, whenever we humans interact with our phones and computers, we are sensitive to the latency of the interaction. We don't like to wait for search results or for an application or web-page to load, and we are especially sensitive in realtime interactions such as speech recognition. So inference latency is often something we want to minimize. Large models are also memory-intensive with millions of parameters. Moving around all of the data required to compute inference results consumes energy, which is a problem on a mobile device as well as in a server environment. Data center server-racks are limited by their power-envelope and their ToC (total cost of ownership) is correlated to their power consumption and thermal characteristics. In the mobile device environment, we are obviously always aware of the implications of power consumption on the device battery. Inference performance in the data center is often measured using a KPI (key performance indicator) which folds latency and power considerations: inferences per second, per Watt (inferences/sec/watt). The storage and transfer of large neural networks is also a challenge in mobile device environments, because of limitations on application sizes and long application download times. For these reasons, we wish to compress the network as much as possible, to reduce the amount of bandwidth and compute required. Inducing sparseness, through regularization or pruning, in neural-network models, is one way to compress the network (quantization is another method). Sparse neural networks hold the promise of speed, small size, and energy efficiency. Smaller Sparse NN model representations can be compressed by taking advantage of the fact that the tensor elements are dominated by zeros. The compression format, if any, is very HW and SW specific, and the optimal format may be different per tensor (an obvious example: largely dense tensors should not be compressed). 
The compute hardware needs to support the compressions formats, for representation compression to be meaningful. Compression representation decisions might interact with algorithms such as the use of tiles for memory accesses. Data such as a parameter tensor is read/written from/to main system memory compressed, but the computation can be dense or sparse. In dense compute we use dense operators, so the compressed data eventually needs to be decompressed into its full, dense size. The best we can do is bring the compressed representation as close as possible to the compute engine. Sparse compute, on the other hand, operates on the sparse representation which never requires decompression (we therefore distinguish between sparse representation and compressed representation). This is not a simple matter to implement in HW, and often means lower utilization of the vectorized compute engines. Therefore, there is a third class of representations, which take advantage of specific hardware characteristics. For example, for a vectorized compute engine we can remove an entire zero-weights vector and skip its computation (this uses structured pruning or regularization). Faster Many of the layers in modern neural-networks are bandwidth-bound, which means that the execution latency is dominated by the available bandwidth. In essence, the hardware spends more time bringing data close to the compute engines, than actually performing the computations. Fully-connected layers, RNNs and LSTMs are some examples of bandwidth-dominated operations. Reducing the bandwidth required by these layers, will immediately speed them up. Some pruning algorithms prune entire kernels, filters and even layers from the network without adversely impacting the final accuracy. Depending on the hardware implementation, these methods can be leveraged to skip computations, thus reducing latency and power. More energy efficient Because we pay two orders-of-magnitude more energy to access off-chip memory (e.g. DDR) compared to on-chip memory (e.g. SRAM or cache), many hardware designs employ a multi-layered cache hierarchy. Fitting the parameters and activations of a network in these on-chip caches can make a big difference on the required bandwidth, the total inference latency, and off course reduce power consumption. And of course, if we used a sparse or compressed representation, then we are reducing the data throughput and therefore the energy consumption.","title":"Home"},{"location":"index.html#distiller-documentation","text":"","title":"Distiller Documentation"},{"location":"index.html#what-is-distiller","text":"Distiller is an open-source Python package for neural network compression research. Network compression can reduce the footprint of a neural network, increase its inference speed and save energy. Distiller provides a PyTorch environment for prototyping and analyzing compression algorithms, such as sparsity-inducing methods and low precision arithmetic. Distiller contains: A framework for integrating pruning, regularization and quantization algorithms. A set of tools for analyzing and evaluating compression performance. Example implementations of state-of-the-art compression algorithms.","title":"What is Distiller"},{"location":"index.html#motivation","text":"A sparse tensor is any tensor that contains some zeros, but sparse tensors are usually only interesting if they contain a significant number of zeros. A sparse neural network performs computations using some sparse tensors (preferably many). 
These tensors can be parameters (weights and biases) or activations (feature maps). Why do we care about sparsity? Present day neural networks tend to be deep, with millions of weights and activations. Refer to GoogLeNet or ResNet50, for a couple of examples. These large models are compute-intensive which means that even with dedicated acceleration hardware, the inference pass (network evaluation) will take time. You might think that latency is an issue only in certain cases, such as autonomous driving systems, but in fact, whenever we humans interact with our phones and computers, we are sensitive to the latency of the interaction. We don't like to wait for search results or for an application or web-page to load, and we are especially sensitive in realtime interactions such as speech recognition. So inference latency is often something we want to minimize. Large models are also memory-intensive with millions of parameters. Moving around all of the data required to compute inference results consumes energy, which is a problem on a mobile device as well as in a server environment. Data center server-racks are limited by their power-envelope and their ToC (total cost of ownership) is correlated to their power consumption and thermal characteristics. In the mobile device environment, we are obviously always aware of the implications of power consumption on the device battery. Inference performance in the data center is often measured using a KPI (key performance indicator) which folds latency and power considerations: inferences per second, per Watt (inferences/sec/watt). The storage and transfer of large neural networks is also a challenge in mobile device environments, because of limitations on application sizes and long application download times. For these reasons, we wish to compress the network as much as possible, to reduce the amount of bandwidth and compute required. Inducing sparseness, through regularization or pruning, in neural-network models, is one way to compress the network (quantization is another method). Sparse neural networks hold the promise of speed, small size, and energy efficiency.","title":"Motivation"},{"location":"index.html#smaller","text":"Sparse NN model representations can be compressed by taking advantage of the fact that the tensor elements are dominated by zeros. The compression format, if any, is very HW and SW specific, and the optimal format may be different per tensor (an obvious example: largely dense tensors should not be compressed). The compute hardware needs to support the compressions formats, for representation compression to be meaningful. Compression representation decisions might interact with algorithms such as the use of tiles for memory accesses. Data such as a parameter tensor is read/written from/to main system memory compressed, but the computation can be dense or sparse. In dense compute we use dense operators, so the compressed data eventually needs to be decompressed into its full, dense size. The best we can do is bring the compressed representation as close as possible to the compute engine. Sparse compute, on the other hand, operates on the sparse representation which never requires decompression (we therefore distinguish between sparse representation and compressed representation). This is not a simple matter to implement in HW, and often means lower utilization of the vectorized compute engines. Therefore, there is a third class of representations, which take advantage of specific hardware characteristics. 
For example, for a vectorized compute engine we can remove an entire zero-weights vector and skip its computation (this uses structured pruning or regularization).","title":"Smaller"},{"location":"index.html#faster","text":"Many of the layers in modern neural-networks are bandwidth-bound, which means that the execution latency is dominated by the available bandwidth. In essence, the hardware spends more time bringing data close to the compute engines, than actually performing the computations. Fully-connected layers, RNNs and LSTMs are some examples of bandwidth-dominated operations. Reducing the bandwidth required by these layers, will immediately speed them up. Some pruning algorithms prune entire kernels, filters and even layers from the network without adversely impacting the final accuracy. Depending on the hardware implementation, these methods can be leveraged to skip computations, thus reducing latency and power.","title":"Faster"},{"location":"index.html#more-energy-efficient","text":"Because we pay two orders-of-magnitude more energy to access off-chip memory (e.g. DDR) compared to on-chip memory (e.g. SRAM or cache), many hardware designs employ a multi-layered cache hierarchy. Fitting the parameters and activations of a network in these on-chip caches can make a big difference on the required bandwidth, the total inference latency, and off course reduce power consumption. And of course, if we used a sparse or compressed representation, then we are reducing the data throughput and therefore the energy consumption.","title":"More energy efficient"},{"location":"algo_earlyexit.html","text":"Early Exit Inference While Deep Neural Networks benefit from a large number of layers, it's often the case that many data points in classification tasks can be classified accurately with much less work. There have been several studies recently regarding the idea of exiting before the normal endpoint of the neural network. Panda et al in Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition points out that a lot of data points can be classified easily and require less processing than some more difficult points and they view this in terms of power savings. Surat et al in BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks look at a selective approach to exit placement and criteria for exiting early. Why Does Early Exit Work? Early Exit is a strategy with a straightforward and easy to understand concept Figure #fig(boundaries) shows a simple example in a 2-D feature space. While deep networks can represent more complex and expressive boundaries between classes (assuming we\u2019re confident of avoiding over-fitting the data), it\u2019s also clear that much of the data can be properly classified with even the simplest of classification boundaries. Data points far from the boundary can be considered \"easy to classify\" and achieve a high degree of confidence quicker than do data points close to the boundary. In fact, we can think of the area between the outer straight lines as being the region that is \"difficult to classify\" and require the full expressiveness of the neural network to accurately classify it. Example code for Early Exit Both CIFAR10 and ImageNet code comes directly from publicly available examples from PyTorch. The only edits are the exits that are inserted in a methodology similar to BranchyNet work. 
Note: the sample code provided for ResNet models with Early Exits has exactly one early exit for the CIFAR10 example and exactly two early exits for the ImageNet example. If you want to modify the number of early exits, you will need to make sure that the model code is updated to have a corresponding number of exits. Deeper networks can benefit from multiple exits. Our examples illustrate both a single and a pair of early exits for CIFAR10 and ImageNet, respectively. Note that this code does not actually take exits. What it does is to compute statistics of loss and accuracy assuming exits were taken when criteria are met. Actually implementing exits can be tricky and architecture dependent and we plan to address these issues. Example command lines We have provided examples for ResNets of varying sizes for both CIFAR10 and ImageNet datasets. An example command line for training for CIFAR10 is: python compress_classifier.py --arch=resnet32_cifar_earlyexit --epochs=20 -b 128 \\ --lr=0.003 --earlyexit_thresholds 0.4 --earlyexit_lossweights 0.4 -j 30 \\ --out-dir /home/ -n earlyexit /home/pcifar10 And an example command line for ImageNet is: python compress_classifier.py --arch=resnet50_earlyexit --epochs=120 -b 128 \\ --lr=0.003 --earlyexit_thresholds 1.2 0.9 --earlyexit_lossweights 0.1 0.3 \\ -j 30 --out-dir /home/ -n earlyexit /home/I1K/i1k-extracted/ Heuristics The insertion of the exits are ad-hoc, but there are some heuristic principals guiding their placement and parameters. The earlier exits are placed, the more aggressive the exit as it essentially prunes the rest of the network at a very early stage, thus saving a lot of work. However, a diminishing percentage of data will be directed through the exit if we are to preserve accuracy. There are other benefits to adding exits in that training the modified network now has back-propagation losses coming from the exits that affect the earlier layers more substantially than the last exit. This effect mitigates problems such as vanishing gradient. Early Exit Hyper-Parameters There are two parameters that are required to enable early exit. Leave them undefined if you are not enabling Early Exit: --earlyexit_thresholds defines the thresholds for each of the early exits. The cross entropy measure must be less than the specified threshold to take a specific exit, otherwise the data continues along the regular path. For example, you could specify \"--earlyexit_thresholds 0.9 1.2\" and this implies two early exits with corresponding thresholds of 0.9 and 1.2, respectively to take those exits. 12 --earlyexit_lossweights provide the weights for the linear combination of losses during training to compute a single, overall loss. We only specify weights for the early exits and assume that the sum of the weights (including final exit) are equal to 1.0. So an example of \"--earlyexit_lossweights 0.2 0.3\" implies two early exits weighted with values of 0.2 and 0.3, respectively and that the final exit has a value of 1.0-(0.2+0.3) = 0.5. Studies have shown that weighting the early exits more heavily will create more agressive early exits, but perhaps with a slight negative effect on accuracy. Output Stats The example code outputs various statistics regarding the loss and accuracy at each of the exits. During training, the Top1 and Top5 stats represent the accuracy should all of the data be forced out that exit (in order to compute the loss at that exit). During inference (i.e. 
validation and test stages), the Top1 and Top5 stats represent the accuracy for those data points that could exit because the calculated entropy at that exit was lower than the specified threshold for that exit. CIFAR10 In the case of CIFAR10, we have inserted a single exit after the first full layer grouping. The layers on the exit path itself includes a convolutional layer and a fully connected layer. If you move the exit, be sure to match the proper sizes for inputs and outputs to the exit layers. ImageNet This supports training and inference of the ImageNet dataset via several well known deep architectures. ResNet-50 is the architecture of interest in this study, however the exit is defined in the generic ResNet code and could be used with other size ResNets. There are two exits inserted in this example. Again, exit layers must have their sizes match properly. References Priyadarshini Panda, Abhronil Sengupta, Kaushik Roy . Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition , arXiv:1509.08971v6, 2017. Surat Teerapittayanon, Bradley McDanel, H. T. Kung . BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks , arXiv:1709.01686, 2017.","title":"Early Exit"},{"location":"algo_earlyexit.html#early-exit-inference","text":"While Deep Neural Networks benefit from a large number of layers, it's often the case that many data points in classification tasks can be classified accurately with much less work. There have been several studies recently regarding the idea of exiting before the normal endpoint of the neural network. Panda et al in Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition points out that a lot of data points can be classified easily and require less processing than some more difficult points and they view this in terms of power savings. Surat et al in BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks look at a selective approach to exit placement and criteria for exiting early.","title":"Early Exit Inference"},{"location":"algo_earlyexit.html#why-does-early-exit-work","text":"Early Exit is a strategy with a straightforward and easy to understand concept Figure #fig(boundaries) shows a simple example in a 2-D feature space. While deep networks can represent more complex and expressive boundaries between classes (assuming we\u2019re confident of avoiding over-fitting the data), it\u2019s also clear that much of the data can be properly classified with even the simplest of classification boundaries. Data points far from the boundary can be considered \"easy to classify\" and achieve a high degree of confidence quicker than do data points close to the boundary. In fact, we can think of the area between the outer straight lines as being the region that is \"difficult to classify\" and require the full expressiveness of the neural network to accurately classify it.","title":"Why Does Early Exit Work?"},{"location":"algo_earlyexit.html#example-code-for-early-exit","text":"Both CIFAR10 and ImageNet code comes directly from publicly available examples from PyTorch. The only edits are the exits that are inserted in a methodology similar to BranchyNet work. Note: the sample code provided for ResNet models with Early Exits has exactly one early exit for the CIFAR10 example and exactly two early exits for the ImageNet example. If you want to modify the number of early exits, you will need to make sure that the model code is updated to have a corresponding number of exits. 
Deeper networks can benefit from multiple exits. Our examples illustrate both a single and a pair of early exits for CIFAR10 and ImageNet, respectively. Note that this code does not actually take exits. What it does is to compute statistics of loss and accuracy assuming exits were taken when criteria are met. Actually implementing exits can be tricky and architecture dependent and we plan to address these issues.","title":"Example code for Early Exit"},{"location":"algo_earlyexit.html#example-command-lines","text":"We have provided examples for ResNets of varying sizes for both CIFAR10 and ImageNet datasets. An example command line for training for CIFAR10 is: python compress_classifier.py --arch=resnet32_cifar_earlyexit --epochs=20 -b 128 \\ --lr=0.003 --earlyexit_thresholds 0.4 --earlyexit_lossweights 0.4 -j 30 \\ --out-dir /home/ -n earlyexit /home/pcifar10 And an example command line for ImageNet is: python compress_classifier.py --arch=resnet50_earlyexit --epochs=120 -b 128 \\ --lr=0.003 --earlyexit_thresholds 1.2 0.9 --earlyexit_lossweights 0.1 0.3 \\ -j 30 --out-dir /home/ -n earlyexit /home/I1K/i1k-extracted/","title":"Example command lines"},{"location":"algo_earlyexit.html#heuristics","text":"The insertion of the exits are ad-hoc, but there are some heuristic principals guiding their placement and parameters. The earlier exits are placed, the more aggressive the exit as it essentially prunes the rest of the network at a very early stage, thus saving a lot of work. However, a diminishing percentage of data will be directed through the exit if we are to preserve accuracy. There are other benefits to adding exits in that training the modified network now has back-propagation losses coming from the exits that affect the earlier layers more substantially than the last exit. This effect mitigates problems such as vanishing gradient.","title":"Heuristics"},{"location":"algo_earlyexit.html#early-exit-hyper-parameters","text":"There are two parameters that are required to enable early exit. Leave them undefined if you are not enabling Early Exit: --earlyexit_thresholds defines the thresholds for each of the early exits. The cross entropy measure must be less than the specified threshold to take a specific exit, otherwise the data continues along the regular path. For example, you could specify \"--earlyexit_thresholds 0.9 1.2\" and this implies two early exits with corresponding thresholds of 0.9 and 1.2, respectively to take those exits. 12 --earlyexit_lossweights provide the weights for the linear combination of losses during training to compute a single, overall loss. We only specify weights for the early exits and assume that the sum of the weights (including final exit) are equal to 1.0. So an example of \"--earlyexit_lossweights 0.2 0.3\" implies two early exits weighted with values of 0.2 and 0.3, respectively and that the final exit has a value of 1.0-(0.2+0.3) = 0.5. Studies have shown that weighting the early exits more heavily will create more agressive early exits, but perhaps with a slight negative effect on accuracy.","title":"Early Exit Hyper-Parameters"},{"location":"algo_earlyexit.html#output-stats","text":"The example code outputs various statistics regarding the loss and accuracy at each of the exits. During training, the Top1 and Top5 stats represent the accuracy should all of the data be forced out that exit (in order to compute the loss at that exit). During inference (i.e. 
validation and test stages), the Top1 and Top5 stats represent the accuracy for those data points that could exit because the calculated entropy at that exit was lower than the specified threshold for that exit.","title":"Output Stats"},{"location":"algo_earlyexit.html#cifar10","text":"In the case of CIFAR10, we have inserted a single exit after the first full layer grouping. The layers on the exit path itself includes a convolutional layer and a fully connected layer. If you move the exit, be sure to match the proper sizes for inputs and outputs to the exit layers.","title":"CIFAR10"},{"location":"algo_earlyexit.html#imagenet","text":"This supports training and inference of the ImageNet dataset via several well known deep architectures. ResNet-50 is the architecture of interest in this study, however the exit is defined in the generic ResNet code and could be used with other size ResNets. There are two exits inserted in this example. Again, exit layers must have their sizes match properly.","title":"ImageNet"},{"location":"algo_earlyexit.html#references","text":"Priyadarshini Panda, Abhronil Sengupta, Kaushik Roy . Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition , arXiv:1509.08971v6, 2017. Surat Teerapittayanon, Bradley McDanel, H. T. Kung . BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks , arXiv:1709.01686, 2017.","title":"References"},{"location":"algo_pruning.html","text":"Weights Pruning Algorithms Magnitude Pruner This is the most basic pruner: it applies a thresholding function, \\(thresh(.)\\), on each element, \\(w_i\\), of a weights tensor. A different threshold can be used for each layer's weights tensor. Because the threshold is applied on individual elements, this pruner belongs to the element-wise pruning algorithm family. \\[ thresh(w_i)=\\left\\lbrace \\matrix{{{w_i: \\; if \\;|w_i| \\; \\gt}\\;\\lambda}\\cr {0: \\; if \\; |w_i| \\leq \\lambda} } \\right\\rbrace \\] Sensitivity Pruner Finding a threshold magnitude per layer is daunting, especially since each layer's elements have different average absolute values. We can take advantage of the fact that the weights of convolutional and fully connected layers exhibit a Gaussian distribution with a mean value roughly zero, to avoid using a direct threshold based on the values of each specific tensor. The diagram below shows the distribution the weights tensor of the first convolutional layer, and first fully-connected layer in TorchVision's pre-trained Alexnet model. You can see that they have an approximate Gaussian distribution. The distributions of Alexnet conv1 and fc1 layers We use the standard deviation of the weights tensor as a sort of normalizing factor between the different weights tensors. For example, if a tensor is Normally distributed, then about 68% of the elements have an absolute value less than the standard deviation (\\(\\sigma\\)) of the tensor. Thus, if we set the threshold to \\(s*\\sigma\\), then basically we are thresholding \\(s * 68\\%\\) of the tensor elements. \\[ thresh(w_i)=\\left\\lbrace \\matrix{{{w_i: \\; if \\;|w_i| \\; \\gt}\\;\\lambda}\\cr {0: \\; if \\; |w_i| \\leq \\lambda} } \\right\\rbrace \\] \\[ \\lambda = s * \\sigma_l \\;\\;\\; where\\; \\sigma_l\\; is \\;the \\;std \\;of \\;layer \\;l \\;as \\;measured \\;on \\;the \\;dense \\;model \\] How do we choose this \\(s\\) multiplier? 
In Learning both Weights and Connections for Efficient Neural Networks the authors write: \"We used the sensitivity results to find each layer\u2019s threshold: for example, the smallest threshold was applied to the most sensitive layer, which is the first convolutional layer... The pruning threshold is chosen as a quality parameter multiplied by the standard deviation of a layer\u2019s weights So the results of executing pruning sensitivity analysis on the tensor, gives us a good starting guess at \\(s\\). Sensitivity analysis is an empirical method, and we still have to spend time to hone in on the exact multiplier value. Method of Operation Start by running a pruning sensitivity analysis on the model. Then use the results to set and tune the threshold of each layer, but instead of using a direct threshold use a sensitivity parameter which is multiplied by the standard-deviation of the initial weight-tensor's distribution. Schedule In their paper Song Han et al. use iterative pruning and change the value of the \\(s\\) multiplier at each pruning step. Distiller's SensitivityPruner works differently: the value \\(s\\) is set once based on a one-time calculation of the standard-deviation of the tensor (the first time we prune), and relies on the fact that as the tensor is pruned, more elements are \"pulled\" toward the center of the distribution and thus more elements gets pruned. This actually works quite well as we can see in the diagram below. This is a TensorBoard screen-capture from Alexnet training, which shows how this method starts off pruning very aggressively, but then slowly reduces the pruning rate. We use a simple iterative-pruning schedule such as: Prune every second epoch starting at epoch 0, and ending at epoch 38. This excerpt from alexnet.schedule_sensitivity.yaml shows how this iterative schedule is conveyed in Distiller scheduling configuration YAML: pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2 Level Pruner Class SparsityLevelParameterPruner uses a similar method to go around specifying specific thresholding magnitudes. Instead of specifying a threshold magnitude, you specify a target sparsity level (expressed as a fraction, so 0.5 means 50% sparsity). Essentially this pruner also uses a pruning criteria based on the magnitude of each tensor element, but it has the advantage that you can aim for an exact and specific sparsity level. This pruner is much more stable compared to SensitivityPruner because the target sparsity level is not coupled to the actual magnitudes of the elements. Distiller's SensitivityPruner is unstable because the final sparsity level depends on the convergence pattern of the tensor distribution. Song Han's methodology of using several different values for the multiplier \\(s\\), and the recalculation of the standard-deviation at each pruning phase, probably gives it stability, but requires much more hyper-parameters (this is the reason we have not implemented it thus far). 
To set the target sparsity levels, you can once again use pruning sensitivity analysis to make better guesses at the correct sparsity level of each Method of Operation Sort the weights in the specified layer by their absolute values. Mask to zero the smallest magnitude weights until the desired sparsity level is reached. Splicing Pruner In Dynamic Network Surgery for Efficient DNNs Guo et. al propose that network pruning and splicing work in tandem. A SpilicingPruner is a pruner that both prunes and splices connections and works best with a Dynamic Network Surgery schedule, which, for example, configures the PruningPolicy to mask weights only during the forward pass. Automated Gradual Pruner (AGP) In To prune, or not to prune: exploring the efficacy of pruning for model compression , authors Michael Zhu and Suyog Gupta provide an algorithm to schedule a Level Pruner which Distiller implements in AutomatedGradualPruner . \"We introduce a new automated gradual pruning algorithm in which the sparsity is increased from an initial sparsity value \\(s_i\\) (usually 0) to a \ufb01nal sparsity value \\(s_f\\) over a span of n pruning steps. The intuition behind this sparsity function in equation (1) is to prune the network rapidly in the initial phase when the redundant connections are abundant and gradually reduce the number of weights being pruned each time as there are fewer and fewer weights remaining in the network.\"\" You can play with the scheduling parameters in the agp_schedule.ipynb notebook . The authors describe AGP: Our automated gradual pruning algorithm prunes the smallest magnitude weights to achieve a preset level of network sparsity. Doesn't require much hyper-parameter tuning Shown to perform well across different models Does not make any assumptions about the structure of the network or its constituent layers, and is therefore more generally applicable. RNN Pruner The authors of Exploring Sparsity in Recurrent Neural Networks , Sharan Narang, Erich Elsen, Gregory Diamos, and Shubho Sengupta, \"propose a technique to reduce the parameters of a network by pruning weights during the initial training of the network.\" They use a gradual pruning schedule which is reminiscent of the schedule used in AGP, for element-wise pruning of RNNs, which they also employ during training. They show pruning of RNN, GRU, LSTM and embedding layers. Distiller's distiller.pruning.BaiduRNNPruner class implements this pruning algorithm. Structure Pruners Element-wise pruning can create very sparse models which can be compressed to consume less memory footprint and bandwidth, but without specialized hardware that can compute using the sparse representation of the tensors, we don't gain any speedup of the computation. Structure pruners, remove entire \"structures\", such as kernels, filters, and even entire feature-maps. Structure Ranking Pruners Ranking pruners use some criterion to rank the structures in a tensor, and then prune the tensor to a specified level. In principle, these pruners perform one-shot pruning, but can be combined with automatic pruning-level scheduling, such as AGP (see below). In Pruning Filters for Efficient ConvNets the authors use filter ranking, with one-shot pruning followed by fine-tuning. 
The authors of Exploiting Sparseness in Deep Neural Networks for Large Vocabulary Speech Recognition also use a one-shot pruning schedule, for fully-connected layers, and they provide an explanation: First, after sweeping through the full training set several times the weights become relatively stable \u2014 they tend to remain either large or small magnitudes. Second, in a stabilized model, the importance of the connection is approximated well by the magnitudes of the weights (times the magnitudes of the corresponding input values, but these are relatively uniform within each layer since on the input layer, features are normalized to zero-mean and unit-variance, and hidden-layer values are probabilities) L1RankedStructureParameterPruner The L1RankedStructureParameterPruner pruner calculates the magnitude of some \"structure\", orders all of the structures based on some magnitude function and the m lowest ranking structures are pruned away. This pruner performs ranking of structures using the mean of the absolute value of the structure as the representative of the structure magnitude. The absolute mean does not depend on the size of the structure, so it is easier to use compared to just using the \\(L_1\\)-norm of the structure, and at the same time it is a good proxy of the \\(L_1\\)-norm. Basically, you can think of mean(abs(t)) as a form of normalization of the structure L1-norm by the length of the structure. L1RankedStructureParameterPruner currently prunes weight filters, channels, and rows (for linear layers). ActivationAPoZRankedFilterPruner The ActivationAPoZRankedFilterPruner pruner uses the activation channels mean APoZ (average percentage of zeros) to rank weight filters and prune a specified percentage of filters. This method is called Network Trimming from the research paper: \"Network Trimming: A Data-Driven Neuron Pruning Approach towards Efficient Deep Architectures\", Hengyuan Hu, Rui Peng, Yu-Wing Tai, Chi-Keung Tang, ICLR 2016 https://arxiv.org/abs/1607.03250 GradientRankedFilterPruner The GradientRankedFilterPruner tries to asses the importance of weight filters using the product of their gradients and the filter value. RandomRankedFilterPruner For research purposes we may want to compare the results of some structure-ranking pruner to a random structure-ranking. The RandomRankedFilterPruner pruner can be used for this purpose. Automated Gradual Pruner (AGP) for Structures The idea of a mathematical formula controlling the sparsity level growth is very useful and StructuredAGP extends the implementation to structured pruning. Pruner Compositions Pruners can be combined to create new pruning schemes. Specifically, with a few lines of code we currently marry the AGP sparsity level scheduler with our filter-ranking classes to create pruner compositions. For each of these, we use AGP to decided how many filters to prune at each step, and we choose the filters to remove using one of the filter-ranking methods: L1RankedStructureParameterPruner_AGP ActivationAPoZRankedFilterPruner_AGP GradientRankedFilterPruner_AGP RandomRankedFilterPruner_AGP Hybrid Pruning In a single schedule we can mix different pruning techniques. For example, we might mix pruning and regularization. Or structured pruning and element-wise pruning. We can even apply different methods on the same tensor. For example, we might want to perform filter pruning for a few epochs, then perform thinning and continue with element-wise pruning of the smaller network tensors. 
This technique of mixing different methods we call Hybrid Pruning, and Distiller has a few example schedules.","title":"Pruning"},{"location":"algo_pruning.html#weights-pruning-algorithms","text":"","title":"Weights Pruning Algorithms"},{"location":"algo_pruning.html#magnitude-pruner","text":"This is the most basic pruner: it applies a thresholding function, \\(thresh(.)\\), on each element, \\(w_i\\), of a weights tensor. A different threshold can be used for each layer's weights tensor. Because the threshold is applied on individual elements, this pruner belongs to the element-wise pruning algorithm family. \\[ thresh(w_i)=\\left\\lbrace \\matrix{{{w_i: \\; if \\;|w_i| \\; \\gt}\\;\\lambda}\\cr {0: \\; if \\; |w_i| \\leq \\lambda} } \\right\\rbrace \\]","title":"Magnitude Pruner"},{"location":"algo_pruning.html#sensitivity-pruner","text":"Finding a threshold magnitude per layer is daunting, especially since each layer's elements have different average absolute values. We can take advantage of the fact that the weights of convolutional and fully connected layers exhibit a Gaussian distribution with a mean value roughly zero, to avoid using a direct threshold based on the values of each specific tensor. The diagram below shows the distribution the weights tensor of the first convolutional layer, and first fully-connected layer in TorchVision's pre-trained Alexnet model. You can see that they have an approximate Gaussian distribution. The distributions of Alexnet conv1 and fc1 layers We use the standard deviation of the weights tensor as a sort of normalizing factor between the different weights tensors. For example, if a tensor is Normally distributed, then about 68% of the elements have an absolute value less than the standard deviation (\\(\\sigma\\)) of the tensor. Thus, if we set the threshold to \\(s*\\sigma\\), then basically we are thresholding \\(s * 68\\%\\) of the tensor elements. \\[ thresh(w_i)=\\left\\lbrace \\matrix{{{w_i: \\; if \\;|w_i| \\; \\gt}\\;\\lambda}\\cr {0: \\; if \\; |w_i| \\leq \\lambda} } \\right\\rbrace \\] \\[ \\lambda = s * \\sigma_l \\;\\;\\; where\\; \\sigma_l\\; is \\;the \\;std \\;of \\;layer \\;l \\;as \\;measured \\;on \\;the \\;dense \\;model \\] How do we choose this \\(s\\) multiplier? In Learning both Weights and Connections for Efficient Neural Networks the authors write: \"We used the sensitivity results to find each layer\u2019s threshold: for example, the smallest threshold was applied to the most sensitive layer, which is the first convolutional layer... The pruning threshold is chosen as a quality parameter multiplied by the standard deviation of a layer\u2019s weights So the results of executing pruning sensitivity analysis on the tensor, gives us a good starting guess at \\(s\\). Sensitivity analysis is an empirical method, and we still have to spend time to hone in on the exact multiplier value.","title":"Sensitivity Pruner"},{"location":"algo_pruning.html#method-of-operation","text":"Start by running a pruning sensitivity analysis on the model. Then use the results to set and tune the threshold of each layer, but instead of using a direct threshold use a sensitivity parameter which is multiplied by the standard-deviation of the initial weight-tensor's distribution.","title":"Method of Operation"},{"location":"algo_pruning.html#schedule","text":"In their paper Song Han et al. use iterative pruning and change the value of the \\(s\\) multiplier at each pruning step. 
Distiller's SensitivityPruner works differently: the value \\(s\\) is set once based on a one-time calculation of the standard-deviation of the tensor (the first time we prune), and relies on the fact that as the tensor is pruned, more elements are \"pulled\" toward the center of the distribution and thus more elements gets pruned. This actually works quite well as we can see in the diagram below. This is a TensorBoard screen-capture from Alexnet training, which shows how this method starts off pruning very aggressively, but then slowly reduces the pruning rate. We use a simple iterative-pruning schedule such as: Prune every second epoch starting at epoch 0, and ending at epoch 38. This excerpt from alexnet.schedule_sensitivity.yaml shows how this iterative schedule is conveyed in Distiller scheduling configuration YAML: pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2","title":"Schedule"},{"location":"algo_pruning.html#level-pruner","text":"Class SparsityLevelParameterPruner uses a similar method to go around specifying specific thresholding magnitudes. Instead of specifying a threshold magnitude, you specify a target sparsity level (expressed as a fraction, so 0.5 means 50% sparsity). Essentially this pruner also uses a pruning criteria based on the magnitude of each tensor element, but it has the advantage that you can aim for an exact and specific sparsity level. This pruner is much more stable compared to SensitivityPruner because the target sparsity level is not coupled to the actual magnitudes of the elements. Distiller's SensitivityPruner is unstable because the final sparsity level depends on the convergence pattern of the tensor distribution. Song Han's methodology of using several different values for the multiplier \\(s\\), and the recalculation of the standard-deviation at each pruning phase, probably gives it stability, but requires much more hyper-parameters (this is the reason we have not implemented it thus far). To set the target sparsity levels, you can once again use pruning sensitivity analysis to make better guesses at the correct sparsity level of each","title":"Level Pruner"},{"location":"algo_pruning.html#method-of-operation_1","text":"Sort the weights in the specified layer by their absolute values. Mask to zero the smallest magnitude weights until the desired sparsity level is reached.","title":"Method of Operation"},{"location":"algo_pruning.html#splicing-pruner","text":"In Dynamic Network Surgery for Efficient DNNs Guo et. al propose that network pruning and splicing work in tandem. A SpilicingPruner is a pruner that both prunes and splices connections and works best with a Dynamic Network Surgery schedule, which, for example, configures the PruningPolicy to mask weights only during the forward pass.","title":"Splicing Pruner"},{"location":"algo_pruning.html#automated-gradual-pruner-agp","text":"In To prune, or not to prune: exploring the efficacy of pruning for model compression , authors Michael Zhu and Suyog Gupta provide an algorithm to schedule a Level Pruner which Distiller implements in AutomatedGradualPruner . 
\"We introduce a new automated gradual pruning algorithm in which the sparsity is increased from an initial sparsity value \\(s_i\\) (usually 0) to a \ufb01nal sparsity value \\(s_f\\) over a span of n pruning steps. The intuition behind this sparsity function in equation (1) is to prune the network rapidly in the initial phase when the redundant connections are abundant and gradually reduce the number of weights being pruned each time as there are fewer and fewer weights remaining in the network.\"\" You can play with the scheduling parameters in the agp_schedule.ipynb notebook . The authors describe AGP: Our automated gradual pruning algorithm prunes the smallest magnitude weights to achieve a preset level of network sparsity. Doesn't require much hyper-parameter tuning Shown to perform well across different models Does not make any assumptions about the structure of the network or its constituent layers, and is therefore more generally applicable.","title":"Automated Gradual Pruner (AGP)"},{"location":"algo_pruning.html#rnn-pruner","text":"The authors of Exploring Sparsity in Recurrent Neural Networks , Sharan Narang, Erich Elsen, Gregory Diamos, and Shubho Sengupta, \"propose a technique to reduce the parameters of a network by pruning weights during the initial training of the network.\" They use a gradual pruning schedule which is reminiscent of the schedule used in AGP, for element-wise pruning of RNNs, which they also employ during training. They show pruning of RNN, GRU, LSTM and embedding layers. Distiller's distiller.pruning.BaiduRNNPruner class implements this pruning algorithm.","title":"RNN Pruner"},{"location":"algo_pruning.html#structure-pruners","text":"Element-wise pruning can create very sparse models which can be compressed to consume less memory footprint and bandwidth, but without specialized hardware that can compute using the sparse representation of the tensors, we don't gain any speedup of the computation. Structure pruners, remove entire \"structures\", such as kernels, filters, and even entire feature-maps.","title":"Structure Pruners"},{"location":"algo_pruning.html#structure-ranking-pruners","text":"Ranking pruners use some criterion to rank the structures in a tensor, and then prune the tensor to a specified level. In principle, these pruners perform one-shot pruning, but can be combined with automatic pruning-level scheduling, such as AGP (see below). In Pruning Filters for Efficient ConvNets the authors use filter ranking, with one-shot pruning followed by fine-tuning. The authors of Exploiting Sparseness in Deep Neural Networks for Large Vocabulary Speech Recognition also use a one-shot pruning schedule, for fully-connected layers, and they provide an explanation: First, after sweeping through the full training set several times the weights become relatively stable \u2014 they tend to remain either large or small magnitudes. 
Second, in a stabilized model, the importance of the connection is approximated well by the magnitudes of the weights (times the magnitudes of the corresponding input values, but these are relatively uniform within each layer since on the input layer, features are normalized to zero-mean and unit-variance, and hidden-layer values are probabilities)","title":"Structure Ranking Pruners"},{"location":"algo_pruning.html#l1rankedstructureparameterpruner","text":"The L1RankedStructureParameterPruner pruner calculates the magnitude of some \"structure\", orders all of the structures based on some magnitude function and the m lowest ranking structures are pruned away. This pruner performs ranking of structures using the mean of the absolute value of the structure as the representative of the structure magnitude. The absolute mean does not depend on the size of the structure, so it is easier to use compared to just using the \\(L_1\\)-norm of the structure, and at the same time it is a good proxy of the \\(L_1\\)-norm. Basically, you can think of mean(abs(t)) as a form of normalization of the structure L1-norm by the length of the structure. L1RankedStructureParameterPruner currently prunes weight filters, channels, and rows (for linear layers).","title":"L1RankedStructureParameterPruner"},{"location":"algo_pruning.html#activationapozrankedfilterpruner","text":"The ActivationAPoZRankedFilterPruner pruner uses the activation channels mean APoZ (average percentage of zeros) to rank weight filters and prune a specified percentage of filters. This method is called Network Trimming from the research paper: \"Network Trimming: A Data-Driven Neuron Pruning Approach towards Efficient Deep Architectures\", Hengyuan Hu, Rui Peng, Yu-Wing Tai, Chi-Keung Tang, ICLR 2016 https://arxiv.org/abs/1607.03250","title":"ActivationAPoZRankedFilterPruner"},{"location":"algo_pruning.html#gradientrankedfilterpruner","text":"The GradientRankedFilterPruner tries to asses the importance of weight filters using the product of their gradients and the filter value.","title":"GradientRankedFilterPruner"},{"location":"algo_pruning.html#randomrankedfilterpruner","text":"For research purposes we may want to compare the results of some structure-ranking pruner to a random structure-ranking. The RandomRankedFilterPruner pruner can be used for this purpose.","title":"RandomRankedFilterPruner"},{"location":"algo_pruning.html#automated-gradual-pruner-agp-for-structures","text":"The idea of a mathematical formula controlling the sparsity level growth is very useful and StructuredAGP extends the implementation to structured pruning.","title":"Automated Gradual Pruner (AGP) for Structures"},{"location":"algo_pruning.html#pruner-compositions","text":"Pruners can be combined to create new pruning schemes. Specifically, with a few lines of code we currently marry the AGP sparsity level scheduler with our filter-ranking classes to create pruner compositions. For each of these, we use AGP to decided how many filters to prune at each step, and we choose the filters to remove using one of the filter-ranking methods: L1RankedStructureParameterPruner_AGP ActivationAPoZRankedFilterPruner_AGP GradientRankedFilterPruner_AGP RandomRankedFilterPruner_AGP","title":"Pruner Compositions"},{"location":"algo_pruning.html#hybrid-pruning","text":"In a single schedule we can mix different pruning techniques. For example, we might mix pruning and regularization. Or structured pruning and element-wise pruning. 
We can even apply different methods on the same tensor. For example, we might want to perform filter pruning for a few epochs, then perform thinning and continue with element-wise pruning of the smaller network tensors. This technique of mixing different methods we call Hybrid Pruning, and Distiller has a few example schedules.","title":"Hybrid Pruning"},{"location":"algo_quantization.html","text":"Quantization Algorithms Note: For any of the methods below that require quantization-aware training, please see here for details on how to invoke it using Distiller's scheduling mechanism. Range-Based Linear Quantization Let's break down the terminology we use here: Linear: Means a float value is quantized by multiplying with a numeric constant (the scale factor ). Range-Based: Means that in order to calculate the scale factor, we look at the actual range of the tensor's values. In the most naive implementation, we use the actual min/max values of the tensor. Alternatively, we use some derivation based on the tensor's range / distribution to come up with a narrower min/max range, in order to remove possible outliers. This is in contrast to the other methods described here, which we could call clipping-based , as they impose an explicit clipping function on the tensors (using either a hard-coded value or a learned value). Asymmetric vs. Symmetric In this method we can use two modes - asymmetric and symmetric . Asymmetric Mode In asymmetric mode, we map the min/max in the float range to the min/max of the integer range. This is done by using a zero-point (also called quantization bias , or offset ) in addition to the scale factor. Let us denote the original floating-point tensor by x_f , the quantized tensor by x_q , the scale factor by q_x , the zero-point by zp_x and the number of bits used for quantization by n . Then, we get: x_q = round\\left ((x_f - min_{x_f})\\underbrace{\\frac{2^n - 1}{max_{x_f} - min_{x_f}}}_{q_x} \\right) = round(q_x x_f - \\underbrace{min_{x_f}q_x)}_{zp_x} = round(q_x x_f - zp_x) In practice, we actually use zp_x = round(min_{x_f}q_x) . This means that zero is exactly representable by an integer in the quantized range. This is important, for example, for layers that have zero-padding. By rounding the zero-point, we effectively \"nudge\" the min/max values in the float range a little bit, in order to gain this exact quantization of zero. Note that in the derivation above we use unsigned integer to represent the quantized range. That is, x_q \\in [0, 2^n-1] . One could use signed integer if necessary (perhaps due to HW considerations). This can be achieved by subtracting 2^{n-1} . Let's see how a convolution or fully-connected (FC) layer is quantized in asymmetric mode: (we denote input, output, weights and bias with x, y, w and b respectively) y_f = \\sum{x_f w_f} + b_f = \\sum{\\frac{x_q + zp_x}{q_x} \\frac{w_q + zp_w}{q_w}} + \\frac{b_q + zp_b}{q_b} = = \\frac{1}{q_x q_w} \\left( \\sum { (x_q + zp_x) (w_q + zp_w) + \\frac{q_x q_w}{q_b}(b_q + zp_b) } \\right) Therefore: y_q = round(q_y y_f) = round\\left(\\frac{q_y}{q_x q_w} \\left( \\sum { (x_q+zp_x) (w_q+zp_w) + \\frac{q_x q_w}{q_b}(b_q+zp_b) } \\right) \\right) Notes: We can see that the bias has to be re-scaled to match the scale of the summation. In a proper integer-only HW pipeline, we would like our main accumulation term to simply be \\sum{x_q w_q} . In order to achieve this, one needs to further develop the expression we derived above. 
For further details please refer to the gemmlowp documentation Symmetric Mode In symmetric mode, instead of mapping the exact min/max of the float range to the quantized range, we choose the maximum absolute value between min/max. In addition, we don't use a zero-point. So, the floating-point range we're effectively quantizing is symmetric with respect to zero, and so is the quantized range. Using the same notations as above, we get: x_q = round\\left (x_f \\underbrace{\\frac{2^{n-1} - 1}{\\max|x_f|}}_{q_x} \\right) = round(q_x x_f) Again, let's see how a convolution or fully-connected (FC) layer is quantized, this time in symmetric mode: y_f = \\sum{x_f w_f} + b_f = \\sum{\\frac{x_q}{q_x} \\frac{w_q}{q_w}} + \\frac{b_q}{q_b} = \\frac{1}{q_x q_w} \\left( \\sum { x_q w_q + \\frac{q_x q_w}{q_b}b_q } \\right) Therefore: y_q = round(q_y y_f) = round\\left(\\frac{q_y}{q_x q_w} \\left( \\sum { x_q w_q + \\frac{q_x q_w}{q_b}b_q } \\right) \\right) Comparing the Two Modes The main trade-off between these two modes is simplicity vs. utilization of the quantized range. When using asymmetric quantization, the quantized range is fully utilized. That is because we exactly map the min/max values from the float range to the min/max of the quantized range. Using symmetric mode, if the float range is biased towards one side, could result in a quantized range where significant dynamic range is dedicated to values that we'll never see. The most extreme example of this is after ReLU, where the entire tensor is positive. Quantizing it in symmetric mode means we're effectively losing 1 bit. On the other hand, if we look at the derviations for convolution / FC layers above, we can see that the actual implementation of symmetric mode is much simpler. In asymmetric mode, the zero-points require additional logic in HW. The cost of this extra logic in terms of latency and/or power and/or area will of course depend on the exact implementation. Other Features Removing Outliers: As discussed here , in some cases the float range of activations contains outliers. Spending dynamic range on these outliers hurts our ability to represent the values we actually care about accurately. Currently, Distiller supports clipping of activations with averaging during post-training quantization. That is - for each batch, instead of calculating global min/max values, an average of the min/max values of each sample in the batch. Scale factor scope: For weight tensors, Distiller supports per-channel quantization (per output channel). Implementation in Distiller Post-Training For post-training quantization, this method is implemented by wrapping existing modules with quantization and de-quantization operations. The wrapper implementations are in range_linear.py . The operations currently supported are: Convolution Fully connected Element-wise addition Element-wise multiplication Concatenation Embedding All other layers are unaffected and are executed using their original FP32 implementation. To automatically transform an existing model to a quantized model using this method, use the PostTrainLinearQuantizer class. For details on ways to invoke the quantizer see here . The transform performed by the Quantizer only works on sub-classes of torch.nn.Module . But operations such as element-wise addition / multiplication and concatenation do not have associated Modules in PyTorch. They are either overloaded operators, or simple functions in the torch namespace. 
To be able to quantize these operations, we've implemented very simple modules that wrap these operations here . It is necessary to manually modify your model and replace any existing operator with a corresponding module. For an example, see our slightly modified ResNet implementation . For weights and bias, the scale factor and zero-point are determined once at quantization setup (\"offline\" / \"static\"). For activations, both \"static\" and \"dynamic\" quantization is supported. Static quantization of activations requires that statistics be collected beforehand. See details on how to do that here . The calculated quantization parameters are stored as buffers within the module, so they are automatically serialized when the model checkpoint is saved. Quantization-Aware Training To apply range-based linear quantization in training, use the QuantAwareTrainRangeLinearQuantizer class. As it is now, it will apply weights quantization to convolution, FC and embedding modules. For activations quantization, it will insert instances of the FakeLinearQuantization module after ReLUs. This module follows the methodology described in Benoit et al., 2018 and uses exponential moving averages to track activation ranges. Note that the current implementation of QuantAwareTrainRangeLinearQuantizer supports training with a single GPU only . Similarly to post-training, the calculated quantization parameters (scale factors, zero-points, tracked activation ranges) are stored as buffers within their respective modules, so they're saved when a checkpoint is created. Note that converting from a quantization-aware training model to a post-training quantization model is not yet supported. Such a conversion will use the activation ranges tracked during training, so additional offline or online calculation of quantization parameters will not be required. DoReFa (As proposed in DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients ) In this method, we first define the quantization function quantize_k , which takes a real value a_f \\in [0, 1] and outputs a discrete-valued a_q \\in \\left\\{ \\frac{0}{2^k-1}, \\frac{1}{2^k-1}, ... , \\frac{2^k-1}{2^k-1} \\right\\} , where k is the number of bits used for quantization. a_q = quantize_k(a_f) = \\frac{1}{2^k-1} round \\left( \\left(2^k - 1 \\right) a_f \\right) Activations are clipped to the [0, 1] range and then quantized as follows: x_q = quantize_k(x_f) For weights, we define the following function f , which takes an unbounded real-valued input and outputs a real value in [0, 1] : f(w) = \\frac{tanh(w)}{2 max(|tanh(w)|)} + \\frac{1}{2} Now we can use quantize_k to get quantized weight values, as follows: w_q = 2 quantize_k \\left( f(w_f) \\right) - 1 This method requires training the model with quantization-aware training, as discussed here . Use the DorefaQuantizer class to transform an existing model to a model suitable for training with quantization using DoReFa. Notes: Gradients quantization as proposed in the paper is not supported yet. The paper defines special handling for binary weights which isn't supported in Distiller yet. PACT (As proposed in PACT: Parameterized Clipping Activation for Quantized Neural Networks ) This method is similar to DoReFa, but the upper clipping values, \\alpha , of the activation functions are learned parameters instead of hard-coded to 1. Note that per the paper's recommendation, \\alpha is shared per layer. This method requires training the model with quantization-aware training, as discussed here . 
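The two DoReFa formulas above translate into a few lines of PyTorch. This is a sketch of the forward arithmetic only; the trainable version must also route gradients through a straight-through estimator, which is omitted here:

```python
import torch

def quantize_k(a_f, k):
    # Maps a real value in [0, 1] onto the grid {0, 1, ..., 2^k - 1} / (2^k - 1).
    n = float(2 ** k - 1)
    return torch.round(n * a_f) / n

def dorefa_quantize_weights(w_f, k):
    # f(w) squashes unbounded weights into [0, 1]; the quantized result
    # is then stretched back to [-1, 1].
    f_w = torch.tanh(w_f) / (2 * torch.tanh(w_f).abs().max()) + 0.5
    return 2 * quantize_k(f_w, k) - 1
```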
Use the PACTQuantizer class to transform an existing model to a model suitable for training with quantization using PACT. WRPN (As proposed in WRPN: Wide Reduced-Precision Networks ) In this method, activations are clipped to [0, 1] and quantized as follows ( k is the number of bits used for quantization): x_q = \\frac{1}{2^k-1} round \\left( \\left(2^k - 1 \\right) x_f \\right) Weights are clipped to [-1, 1] and quantized as follows: w_q = \\frac{1}{2^{k-1}-1} round \\left( \\left(2^{k-1} - 1 \\right)w_f \\right) Note that k-1 bits are used to quantize weights, leaving one bit for sign. This method requires training the model with quantization-aware training, as discussed here . Use the WRPNQuantizer class to transform an existing model to a model suitable for training with quantization using WRPN. Notes: The paper proposed widening of layers as a means to reduce accuracy loss. This isn't implemented as part of WRPNQuantizer at the moment. To experiment with this, modify your model implementation to have wider layers. The paper defines special handling for binary weights which isn't supported in Distiller yet.","title":"Quantization"},{"location":"algo_quantization.html#quantization-algorithms","text":"Note: For any of the methods below that require quantization-aware training, please see here for details on how to invoke it using Distiller's scheduling mechanism.","title":"Quantization Algorithms"},{"location":"algo_quantization.html#range-based-linear-quantization","text":"Let's break down the terminology we use here: Linear: Means a float value is quantized by multiplying with a numeric constant (the scale factor ). Range-Based: Means that in order to calculate the scale factor, we look at the actual range of the tensor's values. In the most naive implementation, we use the actual min/max values of the tensor. Alternatively, we use some derivation based on the tensor's range / distribution to come up with a narrower min/max range, in order to remove possible outliers. This is in contrast to the other methods described here, which we could call clipping-based , as they impose an explicit clipping function on the tensors (using either a hard-coded value or a learned value).","title":"Range-Based Linear Quantization"},{"location":"algo_quantization.html#asymmetric-vs-symmetric","text":"In this method we can use two modes - asymmetric and symmetric .","title":"Asymmetric vs. Symmetric"},{"location":"algo_quantization.html#asymmetric-mode","text":"In asymmetric mode, we map the min/max in the float range to the min/max of the integer range. This is done by using a zero-point (also called quantization bias , or offset ) in addition to the scale factor. Let us denote the original floating-point tensor by x_f , the quantized tensor by x_q , the scale factor by q_x , the zero-point by zp_x and the number of bits used for quantization by n . Then, we get: x_q = round\\left ((x_f - min_{x_f})\\underbrace{\\frac{2^n - 1}{max_{x_f} - min_{x_f}}}_{q_x} \\right) = round(q_x x_f - \\underbrace{min_{x_f}q_x)}_{zp_x} = round(q_x x_f - zp_x) In practice, we actually use zp_x = round(min_{x_f}q_x) . This means that zero is exactly representable by an integer in the quantized range. This is important, for example, for layers that have zero-padding. By rounding the zero-point, we effectively \"nudge\" the min/max values in the float range a little bit, in order to gain this exact quantization of zero. Note that in the derivation above we use unsigned integer to represent the quantized range. 
That is, x_q \\in [0, 2^n-1] . One could use a signed integer if necessary (perhaps due to HW considerations). This can be achieved by subtracting 2^{n-1} . Let's see how a convolution or fully-connected (FC) layer is quantized in asymmetric mode: (we denote input, output, weights and bias with x, y, w and b respectively) y_f = \\sum{x_f w_f} + b_f = \\sum{\\frac{x_q + zp_x}{q_x} \\frac{w_q + zp_w}{q_w}} + \\frac{b_q + zp_b}{q_b} = \\frac{1}{q_x q_w} \\left( \\sum { (x_q + zp_x) (w_q + zp_w) + \\frac{q_x q_w}{q_b}(b_q + zp_b) } \\right) Therefore: y_q = round(q_y y_f) = round\\left(\\frac{q_y}{q_x q_w} \\left( \\sum { (x_q+zp_x) (w_q+zp_w) + \\frac{q_x q_w}{q_b}(b_q+zp_b) } \\right) \\right) Notes: We can see that the bias has to be re-scaled to match the scale of the summation. In a proper integer-only HW pipeline, we would like our main accumulation term to simply be \\sum{x_q w_q} . In order to achieve this, one needs to further develop the expression we derived above. For further details, please refer to the gemmlowp documentation","title":"Asymmetric Mode"},{"location":"algo_quantization.html#symmetric-mode","text":"In symmetric mode, instead of mapping the exact min/max of the float range to the quantized range, we choose the maximum absolute value between min/max. In addition, we don't use a zero-point. So, the floating-point range we're effectively quantizing is symmetric with respect to zero, and so is the quantized range. Using the same notations as above, we get: x_q = round\\left (x_f \\underbrace{\\frac{2^{n-1} - 1}{\\max|x_f|}}_{q_x} \\right) = round(q_x x_f) Again, let's see how a convolution or fully-connected (FC) layer is quantized, this time in symmetric mode: y_f = \\sum{x_f w_f} + b_f = \\sum{\\frac{x_q}{q_x} \\frac{w_q}{q_w}} + \\frac{b_q}{q_b} = \\frac{1}{q_x q_w} \\left( \\sum { x_q w_q + \\frac{q_x q_w}{q_b}b_q } \\right) Therefore: y_q = round(q_y y_f) = round\\left(\\frac{q_y}{q_x q_w} \\left( \\sum { x_q w_q + \\frac{q_x q_w}{q_b}b_q } \\right) \\right)","title":"Symmetric Mode"},{"location":"algo_quantization.html#comparing-the-two-modes","text":"The main trade-off between these two modes is simplicity vs. utilization of the quantized range. When using asymmetric quantization, the quantized range is fully utilized. That is because we exactly map the min/max values from the float range to the min/max of the quantized range. In symmetric mode, if the float range is biased towards one side, the result can be a quantized range where significant dynamic range is dedicated to values that we'll never see. The most extreme example of this is after ReLU, where the entire tensor is positive. Quantizing it in symmetric mode means we're effectively losing 1 bit. On the other hand, if we look at the derivations for convolution / FC layers above, we can see that the actual implementation of symmetric mode is much simpler. In asymmetric mode, the zero-points require additional logic in HW. The cost of this extra logic in terms of latency and/or power and/or area will of course depend on the exact implementation.","title":"Comparing the Two Modes"},{"location":"algo_quantization.html#other-features","text":"Removing Outliers: As discussed here , in some cases the float range of activations contains outliers. Spending dynamic range on these outliers hurts our ability to represent the values we actually care about accurately. Currently, Distiller supports clipping of activations with averaging during post-training quantization. 
That is - for each batch, instead of calculating global min/max values, we use an average of the min/max values of each sample in the batch. Scale factor scope: For weight tensors, Distiller supports per-channel quantization (per output channel).","title":"Other Features"},{"location":"algo_quantization.html#implementation-in-distiller","text":"","title":"Implementation in Distiller"},{"location":"algo_quantization.html#post-training","text":"For post-training quantization, this method is implemented by wrapping existing modules with quantization and de-quantization operations. The wrapper implementations are in range_linear.py . The operations currently supported are: Convolution Fully connected Element-wise addition Element-wise multiplication Concatenation Embedding All other layers are unaffected and are executed using their original FP32 implementation. To automatically transform an existing model to a quantized model using this method, use the PostTrainLinearQuantizer class. For details on ways to invoke the quantizer see here . The transform performed by the Quantizer only works on sub-classes of torch.nn.Module . But operations such as element-wise addition / multiplication and concatenation do not have associated Modules in PyTorch. They are either overloaded operators, or simple functions in the torch namespace. To be able to quantize these operations, we've implemented very simple modules that wrap these operations here . It is necessary to manually modify your model and replace any existing operator with a corresponding module. For an example, see our slightly modified ResNet implementation . For weights and bias, the scale factor and zero-point are determined once at quantization setup (\"offline\" / \"static\"). For activations, both \"static\" and \"dynamic\" quantization is supported. Static quantization of activations requires that statistics be collected beforehand. See details on how to do that here . The calculated quantization parameters are stored as buffers within the module, so they are automatically serialized when the model checkpoint is saved.","title":"Post-Training"},{"location":"algo_quantization.html#quantization-aware-training","text":"To apply range-based linear quantization in training, use the QuantAwareTrainRangeLinearQuantizer class. As it is now, it will apply weights quantization to convolution, FC and embedding modules. For activations quantization, it will insert instances of the FakeLinearQuantization module after ReLUs. This module follows the methodology described in Benoit et al., 2018 and uses exponential moving averages to track activation ranges. Note that the current implementation of QuantAwareTrainRangeLinearQuantizer supports training with a single GPU only . Similarly to post-training, the calculated quantization parameters (scale factors, zero-points, tracked activation ranges) are stored as buffers within their respective modules, so they're saved when a checkpoint is created. Note that converting from a quantization-aware training model to a post-training quantization model is not yet supported. 
Such a conversion will use the activation ranges tracked during training, so additional offline or online calculation of quantization parameters will not be required.","title":"Quantization-Aware Training"},{"location":"algo_quantization.html#dorefa","text":"(As proposed in DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients ) In this method, we first define the quantization function quantize_k , which takes a real value a_f \\in [0, 1] and outputs a discrete-valued a_q \\in \\left\\{ \\frac{0}{2^k-1}, \\frac{1}{2^k-1}, ... , \\frac{2^k-1}{2^k-1} \\right\\} , where k is the number of bits used for quantization. a_q = quantize_k(a_f) = \\frac{1}{2^k-1} round \\left( \\left(2^k - 1 \\right) a_f \\right) Activations are clipped to the [0, 1] range and then quantized as follows: x_q = quantize_k(x_f) For weights, we define the following function f , which takes an unbounded real valued input and outputs a real value in [0, 1] : f(w) = \\frac{tanh(w)}{2 max(|tanh(w)|)} + \\frac{1}{2} Now we can use quantize_k to get quantized weight values, as follows: w_q = 2 quantize_k \\left( f(w_f) \\right) - 1 This method requires training the model with quantization-aware training, as discussed here . Use the DorefaQuantizer class to transform an existing model to a model suitable for training with quantization using DoReFa.","title":"DoReFa"},{"location":"algo_quantization.html#notes","text":"Gradients quantization as proposed in the paper is not supported yet. The paper defines special handling for binary weights which isn't supported in Distiller yet.","title":"Notes:"},{"location":"algo_quantization.html#pact","text":"(As proposed in PACT: Parameterized Clipping Activation for Quantized Neural Networks ) This method is similar to DoReFa, but the upper clipping values, \\alpha , of the activation functions are learned parameters instead of hard coded to 1. Note that per the paper's recommendation, \\alpha is shared per layer. This method requires training the model with quantization-aware training, as discussed here . Use the PACTQuantizer class to transform an existing model to a model suitable for training with quantization using PACT.","title":"PACT"},{"location":"algo_quantization.html#wrpn","text":"(As proposed in WRPN: Wide Reduced-Precision Networks ) In this method, activations are clipped to [0, 1] and quantized as follows ( k is the number of bits used for quantization): x_q = \\frac{1}{2^k-1} round \\left( \\left(2^k - 1 \\right) x_f \\right) Weights are clipped to [-1, 1] and quantized as follows: w_q = \\frac{1}{2^{k-1}-1} round \\left( \\left(2^{k-1} - 1 \\right)w_f \\right) Note that k-1 bits are used to quantize weights, leaving one bit for sign. This method requires training the model with quantization-aware training, as discussed here . Use the WRPNQuantizer class to transform an existing model to a model suitable for training with quantization using WRPN.","title":"WRPN"},{"location":"algo_quantization.html#notes_1","text":"The paper proposed widening of layers as a means to reduce accuracy loss. This isn't implemented as part of WRPNQuantizer at the moment. To experiment with this, modify your model implementation to have wider layers. 
The paper defines special handling for binary weights which isn't supported in Distiller yet.","title":"Notes:"},{"location":"conditional_computation.html","text":"Conditional Computation Conditional Computation refers to a class of algorithms in which each input sample uses a different part of the model, such that on average the compute, latency or power (depending on our objective) is reduced. To quote Bengio et al. \"Conditional computation refers to activating only some of the units in a network, in an input-dependent fashion. For example, if we think we\u2019re looking at a car, we only need to compute the activations of the vehicle detecting units, not of all features that a network could possibly compute. The immediate effect of activating fewer units is that propagating information through the network will be faster, both at training as well as at test time. However, one needs to be able to decide in an intelligent fashion which units to turn on and off, depending on the input data. This is typically achieved with some form of gating structure, learned in parallel with the original network.\" As usual, there are several approaches to implement Conditional Computation: Sun et al. use several expert CNNs, each trained on a different task, and combine them into one large network. Zheng et al. use cascading, an idea which may be familiar to you from Viola-Jones face detection. Theodorakopoulos et al. add small layers that learn which filters to use per input sample, and then enforce that during inference (LKAM module). Ioannou et al. introduce Conditional Networks, which \"can be thought of as: i) decision trees augmented with data transformation operators, or ii) CNNs, with block-diagonal sparse weight matrices, and explicit data routing functions\" Bolukbasi et al. \"learn a system to adaptively choose the components of a deep network to be evaluated for each example. By allowing examples correctly classified using early layers of the system to exit, we avoid the computational time associated with full evaluation of the network. We extend this to learn a network selection system that adaptively selects the network to be evaluated for each example.\" Conditional Computation is especially useful for real-time, latency-sensitive applications. In Distiller we currently have implemented a variant of Early Exit. References Emmanuel Bengio, Pierre-Luc Bacon, Joelle Pineau, Doina Precup. Conditional Computation in Neural Networks for Faster Models , arXiv:1511.06297v2, 2016. Y. Sun, X.Wang, and X. Tang. Deep Convolutional Network Cascade for Facial Point Detection . In Proc. IEEE Conf. Computer Vision and Pattern Recognition (CVPR), 2014. X. Zheng, W.Ouyang, and X.Wang. Multi-Stage Contextual Deep Learning for Pedestrian Detection. In Proc. IEEE Intl Conf. on Computer Vision (ICCV), 2014. I. Theodorakopoulos, V. Pothos, D. Kastaniotis and N. Fragoulis. Parsimonious Inference on Convolutional Neural Networks: Learning and applying on-line kernel activation rules. Irida Labs S.A., January 2017. Tolga Bolukbasi, Joseph Wang, Ofer Dekel, Venkatesh Saligrama. Adaptive Neural Networks for Efficient Inference . Proceedings of the 34th International Conference on Machine Learning, PMLR 70:527-536, 2017. Yani Ioannou, Duncan Robertson, Darko Zikic, Peter Kontschieder, Jamie Shotton, Matthew Brown, Antonio Criminisi . 
Decision Forests, Convolutional Networks and the Models in-Between , arXiv:1603.01250, 2016.","title":"Conditional Computation"},{"location":"conditional_computation.html#conditional-computation","text":"Conditional Computation refers to a class of algorithms in which each input sample uses a different part of the model, such that on average the compute, latency or power (depending on our objective) is reduced. To quote Bengio et al. \"Conditional computation refers to activating only some of the units in a network, in an input-dependent fashion. For example, if we think we\u2019re looking at a car, we only need to compute the activations of the vehicle detecting units, not of all features that a network could possibly compute. The immediate effect of activating fewer units is that propagating information through the network will be faster, both at training as well as at test time. However, one needs to be able to decide in an intelligent fashion which units to turn on and off, depending on the input data. This is typically achieved with some form of gating structure, learned in parallel with the original network.\" As usual, there are several approaches to implement Conditional Computation: Sun et al. use several expert CNNs, each trained on a different task, and combine them into one large network. Zheng et al. use cascading, an idea which may be familiar to you from Viola-Jones face detection. Theodorakopoulos et al. add small layers that learn which filters to use per input sample, and then enforce that during inference (LKAM module). Ioannou et al. introduce Conditional Networks, which \"can be thought of as: i) decision trees augmented with data transformation operators, or ii) CNNs, with block-diagonal sparse weight matrices, and explicit data routing functions\" Bolukbasi et al. \"learn a system to adaptively choose the components of a deep network to be evaluated for each example. By allowing examples correctly classified using early layers of the system to exit, we avoid the computational time associated with full evaluation of the network. We extend this to learn a network selection system that adaptively selects the network to be evaluated for each example.\" Conditional Computation is especially useful for real-time, latency-sensitive applications. In Distiller we currently have implemented a variant of Early Exit.","title":"Conditional Computation"},{"location":"conditional_computation.html#references","text":"Emmanuel Bengio, Pierre-Luc Bacon, Joelle Pineau, Doina Precup. Conditional Computation in Neural Networks for Faster Models , arXiv:1511.06297v2, 2016. Y. Sun, X.Wang, and X. Tang. Deep Convolutional Network Cascade for Facial Point Detection . In Proc. IEEE Conf. Computer Vision and Pattern Recognition (CVPR), 2014. X. Zheng, W.Ouyang, and X.Wang. Multi-Stage Contextual Deep Learning for Pedestrian Detection. In Proc. IEEE Intl Conf. on Computer Vision (ICCV), 2014. I. Theodorakopoulos, V. Pothos, D. Kastaniotis and N. Fragoulis. Parsimonious Inference on Convolutional Neural Networks: Learning and applying on-line kernel activation rules. Irida Labs S.A., January 2017. Tolga Bolukbasi, Joseph Wang, Ofer Dekel, Venkatesh Saligrama. Adaptive Neural Networks for Efficient Inference . Proceedings of the 34th International Conference on Machine Learning, PMLR 70:527-536, 2017. Yani Ioannou, Duncan Robertson, Darko Zikic, Peter Kontschieder, Jamie Shotton, Matthew Brown, Antonio Criminisi . 
Decision Forests, Convolutional Networks and the Models in-Between , arXiv:1603.01250, 2016.","title":"References"},{"location":"design.html","text":"Distiller design Distiller is designed to be easily integrated into your own PyTorch research applications. It is easiest to understand this integration by examining the code of the sample application for compressing image classification models ( compress_classifier.py ). The application borrows its main flow code from torchvision's ImageNet classification training sample application (https://github.com/pytorch/examples/tree/master/imagenet). We tried to keep it similar, in order to make it familiar and easy to understand. Integrating compression is straightforward: add invocations of the appropriate compression_scheduler callbacks for each stage of the training. The training skeleton looks like the pseudo-code below. The boilerplate PyTorch classification training is speckled with invocations of CompressionScheduler. For each epoch: compression_scheduler.on_epoch_begin(epoch) train() validate() save_checkpoint() compression_scheduler.on_epoch_end(epoch) train(): For each training step: compression_scheduler.on_minibatch_begin(epoch) output = model(input_var) loss = criterion(output, target_var) compression_scheduler.before_backward_pass(epoch) loss.backward() optimizer.step() compression_scheduler.on_minibatch_end(epoch) These callbacks can be seen in the diagram below, as the arrow pointing from the Training Loop and into Distiller's Scheduler , which invokes the correct algorithm. The application also uses Distiller services to collect statistics in Summaries and logs files, which can be queried at a later time, from Jupyter notebooks or TensorBoard. Sparsification and fine-tuning The application sets up a model as normally done in PyTorch, and then instantiates a Scheduler and configures it: Scheduler configuration is defined in a YAML file The configuration specifies Policies. Each Policy is tied to a specific algorithm which controls some aspect of the training. Some types of algorithms control the actual sparsification of the model. Such types are \"pruner\" and \"regularizer\". Some algorithms control some parameter of the training process, such as the learning-rate decay scheduler ( lr_scheduler ). The parameters of each algorithm are also specified in the configuration. In addition to specifying the algorithm, each Policy specifies scheduling parameters which control when the algorithm is executed: start epoch, end epoch and frequency. The Scheduler exposes callbacks for relevant training stages: epoch start/end, mini-batch start/end and pre-backward pass. Each scheduler callback activates the policies according to the defined schedule. These callbacks are placed in the training loop. Quantization A quantized model is obtained by replacing existing operations with quantized versions. The quantized versions can be either complete replacements, or wrappers. A wrapper will use the existing modules internally and add quantization and de-quantization operations before/after as necessary. In Distiller we will provide a set of quantized versions of common operations which will enable implementation of different quantization methods. The user can write a quantized model from scratch, using the quantized operations provided. We also provide a mechanism which takes an existing model and automatically replaces required operations with quantized versions. This mechanism is exposed by the Quantizer class. 
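For readability, here is the training skeleton quoted above with its indentation restored. It remains pseudo-code: num_epochs, train_loader and the surrounding setup are stand-ins, not part of the original snippet:

```python
for epoch in range(num_epochs):
    compression_scheduler.on_epoch_begin(epoch)
    train()
    validate()
    save_checkpoint()
    compression_scheduler.on_epoch_end(epoch)

def train():
    for input_var, target_var in train_loader:  # each training step
        compression_scheduler.on_minibatch_begin(epoch)
        output = model(input_var)
        loss = criterion(output, target_var)
        compression_scheduler.before_backward_pass(epoch)
        loss.backward()
        optimizer.step()
        compression_scheduler.on_minibatch_end(epoch)
```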
Quantizer should be sub-classed for each quantization method. Model Transformation The high-level flow is as follows: Define a mapping from the module types to be replaced (e.g. Conv2D, Linear, etc.) to a function which generates the replacement module. The mapping is defined in the replacement_factory attribute of the Quantizer class. Iterate over the modules defined in the model. For each module, if its type is in the mapping, call the replacement generation function. We pass the existing module to this function to allow wrapping of it. Replace the existing module with the module returned by the function. It is important to note that the name of the module does not change, as that could break the forward function of the parent module. Different quantization methods may, obviously, use different quantized operations. In addition, different methods may employ different \"strategies\" of replacing / wrapping existing modules. For instance, some methods replace ReLU with another activation function, while others keep it. Hence, for each quantization method, a different mapping will likely be defined. Each sub-class of Quantizer should populate the replacement_factory dictionary attribute with the appropriate mapping. To execute the model transformation, call the prepare_model function of the Quantizer instance. Flexible Bit-Widths Each instance of Quantizer is parameterized by the number of bits to be used for quantization of different tensor types. The default ones are activations and weights. These are the bits_activations , bits_weights and bits_bias parameters in Quantizer 's constructor. Sub-classes may define bit-widths for other tensor types as needed. We also want to be able to override the default number of bits mentioned in the bullet above for certain layers. These could be very specific layers. However, many models are composed of building blocks (\"container\" modules, such as Sequential) which contain several modules, and it is likely we'll want to override settings for entire blocks, or for a certain module across different blocks. When such building blocks are used, the names of the internal modules usually follow some pattern. So, for this purpose, Quantizer also accepts a mapping of regular expressions to number of bits. This allows the user to override specific layers using their exact name, or a group of layers via a regular expression. This mapping is passed via the overrides parameter in the constructor. The overrides mapping is required to be an instance of collections.OrderedDict (as opposed to just a simple Python dict ). This is done in order to enable handling of overlapping name patterns. So, for example, one could define certain override parameters for a group of layers, e.g. 'conv*', but also define different parameters for specific layers in that group, e.g. 'conv1'. The patterns are evaluated eagerly - the first match wins. Therefore, the more specific patterns must come before the broad patterns. Weights Quantization The Quantizer class also provides an API to quantize the weights of all layers at once. To use it, the param_quantization_fn attribute needs to point to a function that accepts a tensor and the number of bits. During model transformation, the Quantizer class will build a list of all model parameters that need to be quantized along with their bit-width. Then, the quantize_params function can be called, which will iterate over all parameters and quantize them using param_quantization_fn . 
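As a concrete illustration of the overrides mechanism, a hypothetical mapping might look like the sketch below. The layer names, bit-widths and per-pattern value layout are ours for illustration; the exact schema expected depends on the Quantizer sub-class:

```python
from collections import OrderedDict

# The specific 'conv1' entry must come before the broad 'conv.*' pattern,
# because patterns are evaluated eagerly and the first match wins.
overrides = OrderedDict([
    ('conv1',  {'bits_activations': 8, 'bits_weights': 8}),  # one specific layer
    ('conv.*', {'bits_activations': 4, 'bits_weights': 4}),  # the rest of the group
])
```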
Quantization-Aware Training The Quantizer class supports quantization-aware training, that is - training with quantization in the loop. This requires handling of a couple of flows / scenarios: Maintaining a full precision copy of the weights, as described here . This is enabled by setting train_with_fp_copy=True in the Quantizer constructor. At model transformation, in each module that has parameters that should be quantized, a new torch.nn.Parameter is added, which will maintain the required full precision copy of the parameters. Note that this is done in-place - a new module is not created. We preferred not to sub-class the existing PyTorch modules for this purpose. In order to do this in-place, and also guarantee proper back-propagation through the weights quantization function, we employ the following \"hack\": The existing torch.nn.Parameter , e.g. weights , is replaced by a torch.nn.Parameter named float_weight . To maintain the existing functionality of the module, we then register a buffer in the module with the original name - weights . During training, float_weight will be passed to param_quantization_fn and the result will be stored in weights . In addition, some quantization methods may introduce additional learned parameters to the model. For example, in the PACT method, activations are clipped to a value \\alpha , which is a learned parameter per-layer. To support these two cases, the Quantizer class also accepts an instance of a torch.optim.Optimizer (normally this would be an instance of one of its sub-classes). The quantizer will take care of modifying the optimizer according to the changes made to the parameters. Optimizing New Parameters In cases where new parameters are required by the scheme, it is likely that they'll need to be optimized separately from the main model parameters. In that case, the sub-class for the specific method should override Quantizer._get_updated_optimizer_params_groups() , and return the proper groups plus any desired hyper-parameter overrides. Examples The base Quantizer class is implemented in distiller/quantization/quantizer.py . For a simple sub-class implementing symmetric linear quantization, see SymmetricLinearQuantizer in distiller/quantization/range_linear.py . In distiller/quantization/clipped_linear.py there are examples of lower-precision methods which use training with quantization. Specifically, see PACTQuantizer for an example of overriding Quantizer._get_updated_optimizer_params_groups() .","title":"Design"},{"location":"design.html#distiller-design","text":"Distiller is designed to be easily integrated into your own PyTorch research applications. It is easiest to understand this integration by examining the code of the sample application for compressing image classification models ( compress_classifier.py ). The application borrows its main flow code from torchvision's ImageNet classification training sample application (https://github.com/pytorch/examples/tree/master/imagenet). We tried to keep it similar, in order to make it familiar and easy to understand. Integrating compression is straightforward: add invocations of the appropriate compression_scheduler callbacks for each stage of the training. The training skeleton looks like the pseudo-code below. The boilerplate PyTorch classification training is speckled with invocations of CompressionScheduler. 
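The parameter re-registration \"hack\" described above can be sketched schematically as follows. This is an illustration of the idea only, not Distiller's actual implementation:

```python
import torch.nn as nn

def add_fp_copy(module, param_name='weights'):
    # Keep the full-precision copy as the learned parameter 'float_<name>'...
    current = getattr(module, param_name)
    module.register_parameter('float_' + param_name,
                              nn.Parameter(current.detach().clone()))
    # ...and re-register the original name as a buffer, which will hold the
    # output of param_quantization_fn at each training step.
    delattr(module, param_name)
    module.register_buffer(param_name, current.detach().clone())
```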
For each epoch: compression_scheduler.on_epoch_begin(epoch) train() validate() save_checkpoint() compression_scheduler.on_epoch_end(epoch) train(): For each training step: compression_scheduler.on_minibatch_begin(epoch) output = model(input_var) loss = criterion(output, target_var) compression_scheduler.before_backward_pass(epoch) loss.backward() optimizer.step() compression_scheduler.on_minibatch_end(epoch) These callbacks can be seen in the diagram below, as the arrow pointing from the Training Loop and into Distiller's Scheduler , which invokes the correct algorithm. The application also uses Distiller services to collect statistics in Summaries and logs files, which can be queried at a later time, from Jupyter notebooks or TensorBoard.","title":"Distiller design"},{"location":"design.html#sparsification-and-fine-tuning","text":"The application sets up a model as normally done in PyTorch. And then instantiates a Scheduler and configures it: Scheduler configuration is defined in a YAML file The configuration specifies Policies. Each Policy is tied to a specific algorithm which controls some aspect of the training. Some types of algorithms control the actual sparsification of the model. Such types are \"pruner\" and \"regularizer\". Some algorithms control some parameter of the training process, such as the learning-rate decay scheduler ( lr_scheduler ). The parameters of each algorithm are also specified in the configuration. In addition to specifying the algorithm, each Policy specifies scheduling parameters which control when the algorithm is executed: start epoch, end epoch and frequency. The Scheduler exposes callbacks for relevant training stages: epoch start/end, mini-batch start/end and pre-backward pass. Each scheduler callback activates the policies that were defined according the schedule that was defined. These callbacks are placed the training loop.","title":"Sparsification and fine-tuning"},{"location":"design.html#quantization","text":"A quantized model is obtained by replacing existing operations with quantized versions. The quantized versions can be either complete replacements, or wrappers. A wrapper will use the existing modules internally and add quantization and de-quantization operations before/after as necessary. In Distiller we will provide a set of quantized versions of common operations which will enable implementation of different quantization methods. The user can write a quantized model from scratch, using the quantized operations provided. We also provide a mechanism which takes an existing model and automatically replaces required operations with quantized versions. This mechanism is exposed by the Quantizer class. Quantizer should be sub-classed for each quantization method.","title":"Quantization"},{"location":"design.html#model-transformation","text":"The high-level flow is as follows: Define a mapping between the module types to be replaced (e.g. Conv2D, Linear, etc.) to a function which generates the replacement module. The mapping is defined in the replacement_factory attribute of the Quantizer class. Iterate over the modules defined in the model. For each module, if its type is in the mapping, call the replacement generation function. We pass the existing module to this function to allow wrapping of it. Replace the existing module with the module returned by the function. It is important to note that the name of the module does not change, as that could break the forward function of the parent module. 
Different quantization methods may, obviously, use different quantized operations. In addition, different methods may employ different \"strategies\" of replacing / wrapping existing modules. For instance, some methods replace ReLU with another activation function, while others keep it. Hence, for each quantization method, a different mapping will likely be defined. Each sub-class of Quantizer should populate the replacement_factory dictionary attribute with the appropriate mapping. To execute the model transformation, call the prepare_model function of the Quantizer instance.","title":"Model Transformation"},{"location":"design.html#flexible-bit-widths","text":"Each instance of Quantizer is parameterized by the number of bits to be used for quantization of different tensor types. The default ones are activations and weights. These are the bits_activations , bits_weights and bits_bias parameters in Quantizer 's constructor. Sub-classes may define bit-widths for other tensor types as needed. We also want to be able to override the default number of bits mentioned in the bullet above for certain layers. These could be very specific layers. However, many models are comprised of building blocks (\"container\" modules, such as Sequential) which contain several modules, and it is likely we'll want to override settings for entire blocks, or for a certain module across different blocks. When such building blocks are used, the names of the internal modules usually follow some pattern. So, for this purpose, Quantizer also accepts a mapping of regular expressions to number of bits. This allows the user to override specific layers using they're exact name, or a group of layers via a regular expression. This mapping is passed via the overrides parameter in the constructor. The overrides mapping is required to be an instance of collections.OrderedDict (as opposed to just a simple Python dict ). This is done in order to enable handling of overlapping name patterns. So, for example, one could define certain override parameters for a group of layers, e.g. 'conv*', but also define different parameters for specific layers in that group, e.g. 'conv1'. The patterns are evaluated eagerly - the first match wins. Therefore, the more specific patterns must come before the broad patterns.","title":"Flexible Bit-Widths"},{"location":"design.html#weights-quantization","text":"The Quantizer class also provides an API to quantize the weights of all layers at once. To use it, the param_quantization_fn attribute needs to point to a function that accepts a tensor and the number of bits. During model transformation, the Quantizer class will build a list of all model parameters that need to be quantized along with their bit-width. Then, the quantize_params function can be called, which will iterate over all parameters and quantize them using params_quantization_fn .","title":"Weights Quantization"},{"location":"design.html#quantization-aware-training","text":"The Quantizer class supports quantization-aware training, that is - training with quantization in the loop. This requires handling of a couple of flows / scenarios: Maintaining a full precision copy of the weights, as described here . This is enabled by setting train_with_fp_copy=True in the Quantizer constructor. At model transformation, in each module that has parameters that should be quantized, a new torch.nn.Parameter is added, which will maintain the required full precision copy of the parameters. Note that this is done in-place - a new module is not created. 
We preferred not to sub-class the existing PyTorch modules for this purpose. In order to do this in-place, and also guarantee proper back-propagation through the weights quantization function, we employ the following \"hack\": The existing torch.nn.Parameter , e.g. weights , is replaced by a torch.nn.Parameter named float_weight . To maintain the existing functionality of the module, we then register a buffer in the module with the original name - weights . During training, float_weight will be passed to param_quantization_fn and the result will be stored in weights . In addition, some quantization methods may introduce additional learned parameters to the model. For example, in the PACT method, activations are clipped to a value \\alpha , which is a learned parameter per-layer. To support these two cases, the Quantizer class also accepts an instance of a torch.optim.Optimizer (normally this would be an instance of one of its sub-classes). The quantizer will take care of modifying the optimizer according to the changes made to the parameters. Optimizing New Parameters In cases where new parameters are required by the scheme, it is likely that they'll need to be optimized separately from the main model parameters. In that case, the sub-class for the specific method should override Quantizer._get_updated_optimizer_params_groups() , and return the proper groups plus any desired hyper-parameter overrides.","title":"Quantization-Aware Training"},{"location":"design.html#examples","text":"The base Quantizer class is implemented in distiller/quantization/quantizer.py . For a simple sub-class implementing symmetric linear quantization, see SymmetricLinearQuantizer in distiller/quantization/range_linear.py . In distiller/quantization/clipped_linear.py there are examples of lower-precision methods which use training with quantization. Specifically, see PACTQuantizer for an example of overriding Quantizer._get_updated_optimizer_params_groups() .","title":"Examples"},{"location":"earlyexit.html","text":"Early Exit Inference While Deep Neural Networks benefit from a large number of layers, it's often the case that many datapoints in classification tasks can be classified accurately with much less work. There have been several studies recently regarding the idea of exiting before the normal endpoint of the neural network. Panda et al. in Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition point out that a lot of data points can be classified easily and require less processing than some more difficult points and they view this in terms of power savings. Surat et al. in BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks look at a selective approach to exit placement and criteria for exiting early. Why Does Early Exit Work? Early Exit is a strategy with a straightforward and easy-to-understand concept. Figure #fig(boundaries) shows a simple example in a 2-D feature space. While deep networks can represent more complex and expressive boundaries between classes (assuming we\u2019re confident of avoiding over-fitting the data), it\u2019s also clear that much of the data can be properly classified with even the simplest of classification boundaries. Data points far from the boundary can be considered \"easy to classify\" and achieve a high degree of confidence more quickly than data points close to the boundary. 
In fact, we can think of the area between the outer straight lines as being the region that is \"difficult to classify\" and requires the full expressiveness of the neural network to accurately classify it. Example code for Early Exit Both the CIFAR10 and ImageNet code come directly from publicly available PyTorch examples. The only edits are the exits that are inserted in a methodology similar to the BranchyNet work. Deeper networks can benefit from multiple exits. Our examples illustrate both a single and a pair of early exits for CIFAR10 and ImageNet, respectively. Note that this code does not actually take exits. What it does is to compute statistics of loss and accuracy assuming exits were taken when criteria are met. Actually implementing exits can be tricky and architecture-dependent, and we plan to address these issues. Heuristics The insertion of the exits is ad-hoc, but there are some heuristic principles guiding their placement and parameters. The earlier an exit is placed, the more aggressive it is, as it essentially prunes the rest of the network at a very early stage, thus saving a lot of work. However, a diminishing percentage of data will be directed through the exit if we are to preserve accuracy. There are other benefits to adding exits in that training the modified network now has backpropagation losses coming from the exits that affect the earlier layers more substantially than the last exit. This effect mitigates problems such as vanishing gradient. Early Exit Hyperparameters There are two parameters that are required to enable early exit. Leave them undefined if you are not enabling Early Exit: --earlyexit_thresholds defines the thresholds for each of the early exits. The cross-entropy measure must be less than the specified threshold to take a specific exit, otherwise the data continues along the regular path. For example, you could specify \"--earlyexit_thresholds 0.9 1.2\" and this implies two early exits with corresponding thresholds of 0.9 and 1.2, respectively, to take those exits. --earlyexit_lossweights provides the weights for the linear combination of losses during training to compute a single, overall loss. We only specify weights for the early exits and assume that the sum of the weights (including the final exit) is equal to 1.0. So an example of \"--earlyexit_lossweights 0.2 0.3\" implies two early exits weighted with values of 0.2 and 0.3, respectively, and that the final exit has a value of 1.0-(0.2+0.3) = 0.5. Studies have shown that weighting the early exits more heavily will create more aggressive early exits, but perhaps with a slight negative effect on accuracy. Output Stats The example code outputs various statistics regarding the loss and accuracy at each of the exits. During training, the Top1 and Top5 stats represent the accuracy should all of the data be forced out that exit (in order to compute the loss at that exit). During inference (i.e. validation and test stages), the Top1 and Top5 stats represent the accuracy for those data points that could exit because the calculated entropy at that exit was lower than the specified threshold for that exit. CIFAR10 In the case of CIFAR10, we have inserted a single exit after the first full layer grouping. The layers on the exit path itself include a convolutional layer and a fully connected layer. If you move the exit, be sure to match the proper sizes for inputs and outputs to the exit layers. Imagenet This supports training and inference of the ImageNet dataset via several well-known deep architectures. 
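The loss-weighting arithmetic from the hyperparameters above can be spelled out in a short sketch. The names are ours, and the numbers mirror the \"--earlyexit_lossweights 0.2 0.3\" example:

```python
exit_loss_weights = [0.2, 0.3]                    # weights of the two early exits
final_exit_weight = 1.0 - sum(exit_loss_weights)  # 1.0 - (0.2 + 0.3) = 0.5

def overall_loss(early_exit_losses, final_exit_loss):
    # early_exit_losses: per-exit training losses, in network order
    weighted = sum(w * l for w, l in zip(exit_loss_weights, early_exit_losses))
    return weighted + final_exit_weight * final_exit_loss
```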
ResNet-50 is the architecture of interest in this study; however, the exit is defined in the generic resnet code and could be used with other ResNet sizes. There are two exits inserted in this example. Again, exit layers must have their sizes match properly. References Priyadarshini Panda, Abhronil Sengupta, Kaushik Roy . Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition , arXiv:1509.08971v6, 2017. Surat Teerapittayanon, Bradley McDanel, H. T. Kung . BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks , arXiv:1709.01686, 2017.","title":"Early Exit Inference"},{"location":"earlyexit.html#early-exit-inference","text":"While Deep Neural Networks benefit from a large number of layers, it's often the case that many datapoints in classification tasks can be classified accurately with much less work. There have been several studies recently regarding the idea of exiting before the normal endpoint of the neural network. Panda et al. in Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition point out that a lot of data points can be classified easily and require less processing than some more difficult points and they view this in terms of power savings. Surat et al. in BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks look at a selective approach to exit placement and criteria for exiting early.","title":"Early Exit Inference"},{"location":"earlyexit.html#why-does-early-exit-work","text":"Early Exit is a strategy with a straightforward and easy-to-understand concept. Figure #fig(boundaries) shows a simple example in a 2-D feature space. While deep networks can represent more complex and expressive boundaries between classes (assuming we\u2019re confident of avoiding over-fitting the data), it\u2019s also clear that much of the data can be properly classified with even the simplest of classification boundaries. Data points far from the boundary can be considered \"easy to classify\" and achieve a high degree of confidence more quickly than data points close to the boundary. In fact, we can think of the area between the outer straight lines as being the region that is \"difficult to classify\" and requires the full expressiveness of the neural network to accurately classify it.","title":"Why Does Early Exit Work?"},{"location":"earlyexit.html#example-code-for-early-exit","text":"Both the CIFAR10 and ImageNet code come directly from publicly available PyTorch examples. The only edits are the exits that are inserted in a methodology similar to the BranchyNet work. Deeper networks can benefit from multiple exits. Our examples illustrate both a single and a pair of early exits for CIFAR10 and ImageNet, respectively. Note that this code does not actually take exits. What it does is to compute statistics of loss and accuracy assuming exits were taken when criteria are met. Actually implementing exits can be tricky and architecture-dependent, and we plan to address these issues.","title":"Example code for Early Exit"},{"location":"earlyexit.html#heuristics","text":"The insertion of the exits is ad-hoc, but there are some heuristic principles guiding their placement and parameters. The earlier an exit is placed, the more aggressive it is, as it essentially prunes the rest of the network at a very early stage, thus saving a lot of work. However, a diminishing percentage of data will be directed through the exit if we are to preserve accuracy. 
There are other benefits to adding exits in that training the modified network now has backpropagation losses coming from the exits that affect the earlier layers more substantially than the last exit. This effect mitigates problems such as vanishing gradient.","title":"Heuristics"},{"location":"earlyexit.html#early-exit-hyperparameters","text":"There are two parameters that are required to enable early exit. Leave them undefined if you are not enabling Early Exit: --earlyexit_thresholds defines the thresholds for each of the early exits. The cross-entropy measure must be less than the specified threshold to take a specific exit, otherwise the data continues along the regular path. For example, you could specify \"--earlyexit_thresholds 0.9 1.2\" and this implies two early exits with corresponding thresholds of 0.9 and 1.2, respectively, to take those exits. --earlyexit_lossweights provides the weights for the linear combination of losses during training to compute a single, overall loss. We only specify weights for the early exits and assume that the sum of the weights (including the final exit) is equal to 1.0. So an example of \"--earlyexit_lossweights 0.2 0.3\" implies two early exits weighted with values of 0.2 and 0.3, respectively, and that the final exit has a value of 1.0-(0.2+0.3) = 0.5. Studies have shown that weighting the early exits more heavily will create more aggressive early exits, but perhaps with a slight negative effect on accuracy.","title":"Early Exit Hyperparameters"},{"location":"earlyexit.html#output-stats","text":"The example code outputs various statistics regarding the loss and accuracy at each of the exits. During training, the Top1 and Top5 stats represent the accuracy should all of the data be forced out that exit (in order to compute the loss at that exit). During inference (i.e. validation and test stages), the Top1 and Top5 stats represent the accuracy for those data points that could exit because the calculated entropy at that exit was lower than the specified threshold for that exit.","title":"Output Stats"},{"location":"earlyexit.html#cifar10","text":"In the case of CIFAR10, we have inserted a single exit after the first full layer grouping. The layers on the exit path itself include a convolutional layer and a fully connected layer. If you move the exit, be sure to match the proper sizes for inputs and outputs to the exit layers.","title":"CIFAR10"},{"location":"earlyexit.html#imagenet","text":"This supports training and inference of the ImageNet dataset via several well-known deep architectures. ResNet-50 is the architecture of interest in this study; however, the exit is defined in the generic resnet code and could be used with other ResNet sizes. There are two exits inserted in this example. Again, exit layers must have their sizes match properly.","title":"Imagenet"},{"location":"earlyexit.html#references","text":"Priyadarshini Panda, Abhronil Sengupta, Kaushik Roy . Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition , arXiv:1509.08971v6, 2017. Surat Teerapittayanon, Bradley McDanel, H. T. Kung . BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks , arXiv:1709.01686, 2017.","title":"References"},{"location":"install.html","text":"Distiller Installation These instructions will help get Distiller up and running on your local machine. You may also want to refer to these resources: Dataset installation instructions. Jupyter installation instructions. 
Notes: - Distiller has only been tested on Ubuntu 16.04 LTS, and with Python 3.5. - If you are not using a GPU, you might need to make small adjustments to the code. Clone Distiller Clone the Distiller code repository from github: $ git clone https://github.com/NervanaSystems/distiller.git The rest of the documentation that follows, assumes that you have cloned your repository to a directory called distiller . Create a Python virtual environment We recommend using a Python virtual environment , but that of course, is up to you. There's nothing special about using Distiller in a virtual environment, but we provide some instructions, for completeness. Before creating the virtual environment, make sure you are located in directory distiller . After creating the environment, you should see a directory called distiller/env . Using virtualenv If you don't have virtualenv installed, you can find the installation instructions here . To create the environment, execute: $ python3 -m virtualenv env This creates a subdirectory named env where the python virtual environment is stored, and configures the current shell to use it as the default python environment. Using venv If you prefer to use venv , then begin by installing it: $ sudo apt-get install python3-venv Then create the environment: $ python3 -m venv env As with virtualenv, this creates a directory called distiller/env . Activate the environment The environment activation and deactivation commands for venv and virtualenv are the same. !NOTE: Make sure to activate the environment, before proceeding with the installation of the dependency packages: $ source env/bin/activate Install the package Finally, install the Distiller package and its dependencies using pip3 : $ cd distiller $ pip3 install -e . This installs Distiller in \"development mode\", meaning any changes made in the code are reflected in the environment without re-running the install command (so no need to re-install after pulling changes from the Git repository). PyTorch is included in the requirements.txt file, and will currently download PyTorch version 1.0.1 for CUDA 9.0. This is the setup we've used for testing Distiller.","title":"Installation"},{"location":"install.html#distiller-installation","text":"These instructions will help get Distiller up and running on your local machine. You may also want to refer to these resources: Dataset installation instructions. Jupyter installation instructions. Notes: - Distiller has only been tested on Ubuntu 16.04 LTS, and with Python 3.5. - If you are not using a GPU, you might need to make small adjustments to the code.","title":"Distiller Installation"},{"location":"install.html#clone-distiller","text":"Clone the Distiller code repository from github: $ git clone https://github.com/NervanaSystems/distiller.git The rest of the documentation that follows, assumes that you have cloned your repository to a directory called distiller .","title":"Clone Distiller"},{"location":"install.html#create-a-python-virtual-environment","text":"We recommend using a Python virtual environment , but that of course, is up to you. There's nothing special about using Distiller in a virtual environment, but we provide some instructions, for completeness. Before creating the virtual environment, make sure you are located in directory distiller . 
After creating the environment, you should see a directory called distiller/env .","title":"Create a Python virtual environment"},{"location":"install.html#using-virtualenv","text":"If you don't have virtualenv installed, you can find the installation instructions here . To create the environment, execute: $ python3 -m virtualenv env This creates a subdirectory named env where the python virtual environment is stored, and configures the current shell to use it as the default python environment.","title":"Using virtualenv"},{"location":"install.html#using-venv","text":"If you prefer to use venv , then begin by installing it: $ sudo apt-get install python3-venv Then create the environment: $ python3 -m venv env As with virtualenv, this creates a directory called distiller/env .","title":"Using venv"},{"location":"install.html#activate-the-environment","text":"The environment activation and deactivation commands for venv and virtualenv are the same. !NOTE: Make sure to activate the environment before proceeding with the installation of the dependency packages: $ source env/bin/activate","title":"Activate the environment"},{"location":"install.html#install-the-package","text":"Finally, install the Distiller package and its dependencies using pip3 : $ cd distiller $ pip3 install -e . This installs Distiller in \"development mode\", meaning any changes made in the code are reflected in the environment without re-running the install command (so there is no need to re-install after pulling changes from the Git repository). PyTorch is included in the requirements.txt file, and will currently download PyTorch version 1.0.1 for CUDA 9.0. This is the setup we've used for testing Distiller.","title":"Install the package"},{"location":"jupyter.html","text":"Jupyter environment The Jupyter notebooks environment allows us to plan our compression session and load Distiller data summaries to study and analyze compression results. Each notebook has embedded instructions and explanations, so here we provide only a brief description of each notebook. Installation Jupyter and its dependencies are included as part of the main requirements.txt file, so there is no need for a dedicated installation step. However, to use the ipywidgets extension, you will need to enable it: $ jupyter nbextension enable --py widgetsnbextension --sys-prefix You may want to refer to the ipywidgets extension installation documentation . Another extension which requires special installation handling is Qgrid . Qgrid is a Jupyter notebook widget that adds interactive features, such as sorting, to the rendering of Pandas DataFrames. To enable Qgrid: $ jupyter nbextension enable --py --sys-prefix qgrid Launching the Jupyter server There are many options you can use when launching Jupyter. The example below tells the server to listen to connections from any IP address, and not to launch the browser window, but, of course, you are free to launch Jupyter any way you want. Consult the user's guide for more details. $ jupyter-notebook --ip=0.0.0.0 --no-browser Using the Distiller notebooks The Distiller Jupyter notebooks are located in the distiller/jupyter directory. They are provided as tools that you can use to prepare your compression experiments and study their results. We welcome new ideas and implementations of Jupyter notebooks. Roughly, the notebooks can be divided into three categories. 
Theory jupyter/L1-regularization.ipynb : Experience hands-on how L1 and L2 regularization affect the solution of a toy loss-minimization problem, to get a better grasp on the interaction between regularization and sparsity. jupyter/alexnet_insights.ipynb : This notebook reviews and compares a couple of pruning sessions on Alexnet. We compare distributions, performance, statistics and show some visualizations of the weights tensors. Preparation for compression jupyter/model_summary.ipynb : Begin by getting familiar with your model. Examine the sizes and properties of layers and connections. Study which layers are compute-bound, and which are bandwidth-bound, and decide how to prune or regularize the model. jupyter/sensitivity_analysis.ipynb : If you performed pruning sensitivity analysis on your model, this notebook can help you load the results and graphically study how the layers behave. jupyter/interactive_lr_scheduler.ipynb : The learning rate decay policy affects pruning results, perhaps as much as it affects training results. Graph a few LR-decay policies to see how they behave. jupyter/agp_schedule.ipynb : If you are using the Automated Gradual Pruner, this notebook can help you tune the schedule. Reviewing experiment results jupyter/compare_executions.ipynb : This is a simple notebook to help you graphically compare the results of executions of several experiments. jupyter/compression_insights.ipynb : This notebook is packed with code, tables and graphs to help us understand the results of a compression session. Distiller provides summaries, which are Pandas dataframes that contain statistical information about your model. We chose to use Pandas dataframes because they can be sliced, queried, summarized and graphed with a few lines of code.","title":"Jupyter Notebooks"},{"location":"jupyter.html#jupyter-environment","text":"The Jupyter notebooks environment allows us to plan our compression session and load Distiller data summaries to study and analyze compression results. Each notebook has embedded instructions and explanations, so here we provide only a brief description of each notebook.","title":"Jupyter environment"},{"location":"jupyter.html#installation","text":"Jupyter and its dependencies are included as part of the main requirements.txt file, so there is no need for a dedicated installation step. However, to use the ipywidgets extension, you will need to enable it: $ jupyter nbextension enable --py widgetsnbextension --sys-prefix You may want to refer to the ipywidgets extension installation documentation . Another extension which requires special installation handling is Qgrid . Qgrid is a Jupyter notebook widget that adds interactive features, such as sorting, to the rendering of Pandas DataFrames. To enable Qgrid: $ jupyter nbextension enable --py --sys-prefix qgrid","title":"Installation"},{"location":"jupyter.html#launching-the-jupyter-server","text":"There are many options you can use when launching Jupyter. The example below tells the server to listen to connections from any IP address, and not to launch the browser window, but, of course, you are free to launch Jupyter any way you want. Consult the user's guide for more details. $ jupyter-notebook --ip=0.0.0.0 --no-browser","title":"Launching the Jupyter server"},{"location":"jupyter.html#using-the-distiller-notebooks","text":"The Distiller Jupyter notebooks are located in the distiller/jupyter directory. 
They are provided as tools that you can use to prepare your compression experiments and study their results. We welcome new ideas and implementations of Jupyter notebooks. Roughly, the notebooks can be divided into three categories.","title":"Using the Distiller notebooks"},{"location":"jupyter.html#theory","text":"jupyter/L1-regularization.ipynb : Experience hands-on how L1 and L2 regularization affect the solution of a toy loss-minimization problem, to get a better grasp on the interaction between regularization and sparsity. jupyter/alexnet_insights.ipynb : This notebook reviews and compares a couple of pruning sessions on Alexnet. We compare distributions, performance, statistics and show some visualizations of the weights tensors.","title":"Theory"},{"location":"jupyter.html#preparation-for-compression","text":"jupyter/model_summary.ipynb : Begin by getting familiar with your model. Examine the sizes and properties of layers and connections. Study which layers are compute-bound, and which are bandwidth-bound, and decide how to prune or regularize the model. jupyter/sensitivity_analysis.ipynb : If you performed pruning sensitivity analysis on your model, this notebook can help you load the results and graphically study how the layers behave. jupyter/interactive_lr_scheduler.ipynb : The learning rate decay policy affects pruning results, perhaps as much as it affects training results. Graph a few LR-decay policies to see how they behave. jupyter/agp_schedule.ipynb : If you are using the Automated Gradual Pruner, this notebook can help you tune the schedule.","title":"Preparation for compression"},{"location":"jupyter.html#reviewing-experiment-results","text":"jupyter/compare_executions.ipynb : This is a simple notebook to help you graphically compare the results of executions of several experiments. jupyter/compression_insights.ipynb : This notebook is packed with code, tables and graphs to help us understand the results of a compression session. Distiller provides summaries, which are Pandas dataframes that contain statistical information about your model. We chose to use Pandas dataframes because they can be sliced, queried, summarized and graphed with a few lines of code.","title":"Reviewing experiment results"},{"location":"knowledge_distillation.html","text":"Knowledge Distillation (For details on how to train a model with knowledge distillation in Distiller, see here ) Knowledge distillation is a model compression method in which a small model is trained to mimic a pre-trained, larger model (or ensemble of models). This training setting is sometimes referred to as \"teacher-student\", where the large model is the teacher and the small model is the student (we'll be using these terms interchangeably). The method was first proposed by Bucila et al., 2006 and generalized by Hinton et al., 2015 . The implementation in Distiller is based on the latter publication. Here we'll provide a summary of the method. For more information, the reader may refer to the paper (a video lecture with slides is also available). In distillation, knowledge is transferred from the teacher model to the student by minimizing a loss function in which the target is the distribution of class probabilities predicted by the teacher model. That is, the output of a softmax function on the teacher model's logits. However, in many cases, this probability distribution has the correct class at a very high probability, with all other class probabilities very close to 0. 
As such, it doesn't provide much information beyond the ground truth labels already provided in the dataset. To tackle this issue, Hinton et al., 2015 introduced the concept of \"softmax temperature\". The probability p_i of class i is calculated from the logits z as: p_i = \\frac{exp\\left(\\frac{z_i}{T}\\right)}{\\sum_{j} \\exp\\left(\\frac{z_j}{T}\\right)} where T is the temperature parameter. When T=1 we get the standard softmax function. As T grows, the probability distribution generated by the softmax function becomes softer, providing more information as to which classes the teacher found more similar to the predicted class. Hinton calls this the \"dark knowledge\" embedded in the teacher model, and it is this dark knowledge that we are transferring to the student model in the distillation process. When computing the loss function vs. the teacher's soft targets, we use the same value of T to compute the softmax on the student's logits. We call this loss the \"distillation loss\". Hinton et al., 2015 found that it is also beneficial to train the distilled model to produce the correct labels (based on the ground truth) in addition to the teacher's soft-labels. Hence, we also calculate the \"standard\" loss between the student's predicted class probabilities and the ground-truth labels (also called \"hard labels/targets\"). We dub this loss the \"student loss\". When calculating the class probabilities for the student loss we use T = 1 . The overall loss function, incorporating both distillation and student losses, is calculated as: \\mathcal{L}(x;W) = \\alpha * \\mathcal{H}(y, \\sigma(z_s; T=1)) + \\beta * \\mathcal{H}(\\sigma(z_t; T=\\tau), \\sigma(z_s, T=\\tau)) where x is the input, W are the student model parameters, y is the ground truth label, \\mathcal{H} is the cross-entropy loss function, \\sigma is the softmax function parameterized by the temperature T , and \\alpha and \\beta are coefficients. z_s and z_t are the logits of the student and teacher respectively. New Hyper-Parameters In general \\tau , \\alpha and \\beta are hyper parameters. In their experiments, Hinton et al., 2015 use temperature values ranging from 1 to 20. They note that empirically, when the student model is very small compared to the teacher model, lower temperatures work better. This makes sense if we consider that as we raise the temperature, the resulting soft-labels distribution becomes richer in information, and a very small model might not be able to capture all of this information. However, there's no clear way to predict up front what kind of capacity for information the student model will have. With regards to \\alpha and \\beta , Hinton et al., 2015 use a weighted average between the distillation loss and the student loss. That is, \\beta = 1 - \\alpha . They note that in general, they obtained the best results when setting \\alpha to be much smaller than \\beta (although in one of their experiments they use \\alpha = \\beta = 0.5 ). Other works which utilize knowledge distillation don't use a weighted average. Some set \\alpha = 1 while leaving \\beta tunable, while others don't set any constraints. Combining with Other Model Compression Techniques In the \"basic\" scenario, the smaller (student) model is a pre-defined architecture which just has a smaller number of parameters compared to the teacher model. For example, we could train ResNet-18 by distilling knowledge from ResNet-34. 
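As a minimal sketch of the overall loss just described (the helper name is ours and this is not Distiller's KnowledgeDistillationPolicy API; we assume batched logits and integer ground-truth labels):

```python
import torch.nn.functional as F

def kd_loss(student_logits, teacher_logits, labels, T, alpha, beta):
    # Student loss: standard cross-entropy vs. the hard labels (T = 1).
    student_loss = F.cross_entropy(student_logits, labels)
    # Distillation loss: cross-entropy between the teacher's and student's
    # softened distributions, both computed at temperature T.
    soft_targets = F.softmax(teacher_logits / T, dim=1)
    log_probs = F.log_softmax(student_logits / T, dim=1)
    distillation_loss = -(soft_targets * log_probs).sum(dim=1).mean()
    # Note: implementations often scale the distillation term by T**2,
    # since the soft-target gradients scale as 1/T**2 (Hinton et al., 2015).
    return alpha * student_loss + beta * distillation_loss
```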
But, a model with smaller capacity can also be obtained by other model compression techniques - sparsification and/or quantization. So, for example, we could train a 4-bit ResNet-18 model with some method using quantization-aware training, and use a distillation loss function as described above. In that case, the teacher model can even be a FP32 ResNet-18 model. Same goes for pruning and regularization. Tann et al., 2017 , Mishra and Marr, 2018 and Polino et al., 2018 are some works that combine knowledge distillation with quantization . Theis et al., 2018 and Ashok et al., 2018 combine distillation with pruning . References Cristian Bucila, Rich Caruana, and Alexandru Niculescu-Mizil . Model Compression. KDD, 2006 Geoffrey Hinton, Oriol Vinyals and Jeff Dean . Distilling the Knowledge in a Neural Network. arxiv:1503.02531 Hokchhay Tann, Soheil Hashemi, Iris Bahar and Sherief Reda . Hardware-Software Codesign of Accurate, Multiplier-free Deep Neural Networks. DAC, 2017 Asit Mishra and Debbie Marr . Apprentice: Using Knowledge Distillation Techniques To Improve Low-Precision Network Accuracy. ICLR, 2018 Antonio Polino, Razvan Pascanu and Dan Alistarh . Model compression via distillation and quantization. ICLR, 2018 Anubhav Ashok, Nicholas Rhinehart, Fares Beainy and Kris M. Kitani . N2N learning: Network to Network Compression via Policy Gradient Reinforcement Learning. ICLR, 2018 Lucas Theis, Iryna Korshunova, Alykhan Tejani and Ferenc Husz\u00e1r . Faster gaze prediction with dense networks and Fisher pruning. arxiv:1801.05787","title":"Knowledge Distillation"},{"location":"knowledge_distillation.html#knowledge-distillation","text":"(For details on how to train a model with knowledge distillation in Distiller, see here ) Knowledge distillation is model compression method in which a small model is trained to mimic a pre-trained, larger model (or ensemble of models). This training setting is sometimes referred to as \"teacher-student\", where the large model is the teacher and the small model is the student (we'll be using these terms interchangeably). The method was first proposed by Bucila et al., 2006 and generalized by Hinton et al., 2015 . The implementation in Distiller is based on the latter publication. Here we'll provide a summary of the method. For more information the reader may refer to the paper (a video lecture with slides is also available). In distillation, knowledge is transferred from the teacher model to the student by minimizing a loss function in which the target is the distribution of class probabilities predicted by the teacher model. That is - the output of a softmax function on the teacher model's logits. However, in many cases, this probability distribution has the correct class at a very high probability, with all other class probabilities very close to 0. As such, it doesn't provide much information beyond the ground truth labels already provided in the dataset. To tackle this issue, Hinton et al., 2015 introduced the concept of \"softmax temperature\". The probability p_i of class i is calculated from the logits z as: p_i = \\frac{exp\\left(\\frac{z_i}{T}\\right)}{\\sum_{j} \\exp\\left(\\frac{z_j}{T}\\right)} where T is the temperature parameter. When T=1 we get the standard softmax function. As T grows, the probability distribution generated by the softmax function becomes softer, providing more information as to which classes the teacher found more similar to the predicted class. 
Hinton calls this the \"dark knowledge\" embedded in the teacher model, and it is this dark knowledge that we are transferring to the student model in the distillation process. When computing the loss function vs. the teacher's soft targets, we use the same value of T to compute the softmax on the student's logits. We call this loss the \"distillation loss\". Hinton et al., 2015 found that it is also beneficial to train the distilled model to produce the correct labels (based on the ground truth) in addition to the teacher's soft-labels. Hence, we also calculate the \"standard\" loss between the student's predicted class probabilities and the ground-truth labels (also called \"hard labels/targets\"). We dub this loss the \"student loss\". When calculating the class probabilities for the student loss we use T = 1 . The overall loss function, incorporating both distillation and student losses, is calculated as: \\mathcal{L}(x;W) = \\alpha * \\mathcal{H}(y, \\sigma(z_s; T=1)) + \\beta * \\mathcal{H}(\\sigma(z_t; T=\\tau), \\sigma(z_s, T=\\tau)) where x is the input, W are the student model parameters, y is the ground truth label, \\mathcal{H} is the cross-entropy loss function, \\sigma is the softmax function parameterized by the temperature T , and \\alpha and \\beta are coefficients. z_s and z_t are the logits of the student and teacher respectively.","title":"Knowledge Distillation"},{"location":"knowledge_distillation.html#new-hyper-parameters","text":"In general \\tau , \\alpha and \\beta are hyper parameters. In their experiments, Hinton et al., 2015 use temperature values ranging from 1 to 20. They note that empirically, when the student model is very small compared to the teacher model, lower temperatures work better. This makes sense if we consider that as we raise the temperature, the resulting soft-labels distribution becomes richer in information, and a very small model might not be able to capture all of this information. However, there's no clear way to predict up front what kind of capacity for information the student model will have. With regards to \\alpha and \\beta , Hinton et al., 2015 use a weighted average between the distillation loss and the student loss. That is, \\beta = 1 - \\alpha . They note that in general, they obtained the best results when setting \\alpha to be much smaller than \\beta (although in one of their experiments they use \\alpha = \\beta = 0.5 ). Other works which utilize knowledge distillation don't use a weighted average. Some set \\alpha = 1 while leaving \\beta tunable, while others don't set any constraints.","title":"New Hyper-Parameters"},{"location":"knowledge_distillation.html#references","text":"Cristian Bucila, Rich Caruana, and Alexandru Niculescu-Mizil . Model Compression. KDD, 2006 Geoffrey Hinton, Oriol Vinyals and Jeff Dean . Distilling the Knowledge in a Neural Network. arxiv:1503.02531 Hokchhay Tann, Soheil Hashemi, Iris Bahar and Sherief Reda . Hardware-Software Codesign of Accurate, Multiplier-free Deep Neural Networks. DAC, 2017 Asit Mishra and Debbie Marr . Apprentice: Using Knowledge Distillation Techniques To Improve Low-Precision Network Accuracy. ICLR, 2018 Antonio Polino, Razvan Pascanu and Dan Alistarh . Model compression via distillation and quantization. ICLR, 2018 Anubhav Ashok, Nicholas Rhinehart, Fares Beainy and Kris M. Kitani . N2N learning: Network to Network Compression via Policy Gradient Reinforcement Learning. ICLR, 2018 Lucas Theis, Iryna Korshunova, Alykhan Tejani and Ferenc Husz\u00e1r . 
Faster gaze prediction with dense networks and Fisher pruning. arxiv:1801.05787","title":"References"},{"location":"model_zoo.html","text":"Distiller Model Zoo How to contribute models to the Model Zoo We encourage you to contribute new models to the Model Zoo. We welcome implementations of published papers or of your own work. To ensure that models and algorithms shared with others are high-quality, please commit your models with the following: Command-line arguments Log files PyTorch model Contents The Distiller model zoo is not a \"traditional\" model-zoo, because it does not necessarily contain best-in-class compressed models. Instead, the model-zoo contains a number of deep learning models that have been compressed using Distiller following some well-known research papers. These are meant to serve as examples of how Distiller can be used. Each model contains a Distiller schedule detailing how the model was compressed, a PyTorch checkpoint, text logs and TensorBoard logs. Paper Dataset Network Method & Granularity Schedule Features Learning both Weights and Connections for Efficient Neural Networks ImageNet Alexnet Element-wise pruning Iterative; Manual Magnitude thresholding based on a sensitivity quantifier. Element-wise sparsity sensitivity analysis To prune, or not to prune: exploring the efficacy of pruning for model compression ImageNet MobileNet Element-wise pruning Automated gradual; Iterative Magnitude thresholding based on target level Learning Structured Sparsity in Deep Neural Networks CIFAR10 ResNet20 Group regularization 1.Train with group-lasso 2.Remove zero groups and fine-tune Group Lasso regularization. Groups: kernels (2D), channels, filters (3D), layers (4D), vectors (rows, cols) Pruning Filters for Efficient ConvNets CIFAR10 ResNet56 Filter ranking; guided by sensitivity analysis 1.Rank filters 2.Remove filters and channels 3.Fine-tune One-shot ranking and pruning of filters; with network thinning Learning both Weights and Connections for Efficient Neural Networks This schedule is an example of \"Iterative Pruning\" for Alexnet/ImageNet, as described in chapter 3 of Song Han's PhD dissertation: Efficient Methods and Hardware for Deep Learning and in his paper Learning both Weights and Connections for Efficient Neural Networks . The Distiller schedule uses SensitivityPruner which is similar to MagnitudeParameterPruner, but instead of specifying \"raw\" thresholds, it uses a \"sensitivity parameter\". Song Han's paper says that \"the pruning threshold is chosen as a quality parameter multiplied by the standard deviation of a layer's weights,\" and this is not explained much further. In Distiller, the \"quality parameter\" is referred to as \"sensitivity\" and is based on the values learned from performing sensitivity analysis. Using a parameter that is related to the standard deviation is very helpful: under the assumption that the weights tensors are distributed normally, the standard deviation acts as a threshold normalizer. Note that Distiller's implementation deviates slightly from the algorithm Song Han describes in his PhD dissertation, in that the threshold value is set only once. In his PhD dissertation, Song Han describes a threshold that grows at each iteration. This requires n+1 hyper-parameters (n being the number of pruning iterations we use): the threshold, and the threshold increase (delta) at each pruning iteration. 
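To make the thresholding rule concrete, here is a minimal sketch (the helper name is ours; Distiller's SensitivityPruner wraps this logic in its masking machinery):

```python
import torch

def sensitivity_mask(weights, sensitivity):
    # Threshold = sensitivity * std(weights); weights whose magnitude
    # falls below the threshold are masked (set to zero).
    threshold = sensitivity * weights.std().item()
    return (weights.abs() > threshold).float()

w = torch.randn(64, 3, 11, 11)          # e.g. an AlexNet conv1-shaped tensor
mask = sensitivity_mask(w, sensitivity=0.6)
print(1.0 - mask.mean().item())          # achieved sparsity fraction
```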
Distiller's implementation takes advantage of the fact that as pruning progresses, more weights are pulled toward zero, and therefore the threshold \"traps\" more weights. Thus, we can use less hyper-parameters and achieve the same results. Distiller schedule: distiller/examples/sensitivity-pruning/alexnet.schedule_sensitivity.yaml Checkpoint file: alexnet.checkpoint.89.pth.tar Results Our reference is TorchVision's pretrained Alexnet model which has a Top1 accuracy of 56.55 and Top5=79.09. We prune away 88.44% of the parameters and achieve Top1=56.61 and Top5=79.45. Song Han prunes 89% of the parameters, which is slightly better than our results. Parameters: +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean |----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0 | features.module.0.weight | (64, 3, 11, 11) | 23232 | 13411 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 42.27359 | 0.14391 | -0.00002 | 0.08805 | | 1 | features.module.3.weight | (192, 64, 5, 5) | 307200 | 115560 | 0.00000 | 0.00000 | 0.00000 | 1.91243 | 0.00000 | 62.38281 | 0.04703 | -0.00250 | 0.02289 | | 2 | features.module.6.weight | (384, 192, 3, 3) | 663552 | 256565 | 0.00000 | 0.00000 | 0.00000 | 6.18490 | 0.00000 | 61.33445 | 0.03354 | -0.00184 | 0.01803 | | 3 | features.module.8.weight | (256, 384, 3, 3) | 884736 | 315065 | 0.00000 | 0.00000 | 0.00000 | 6.96411 | 0.00000 | 64.38881 | 0.02646 | -0.00168 | 0.01422 | | 4 | features.module.10.weight | (256, 256, 3, 3) | 589824 | 186938 | 0.00000 | 0.00000 | 0.00000 | 15.49225 | 0.00000 | 68.30614 | 0.02714 | -0.00246 | 0.01409 | | 5 | classifier.1.weight | (4096, 9216) | 37748736 | 3398881 | 0.00000 | 0.21973 | 0.00000 | 0.21973 | 0.00000 | 90.99604 | 0.00589 | -0.00020 | 0.00168 | | 6 | classifier.4.weight | (4096, 4096) | 16777216 | 1782769 | 0.21973 | 3.46680 | 0.00000 | 3.46680 | 0.00000 | 89.37387 | 0.00849 | -0.00066 | 0.00263 | | 7 | classifier.6.weight | (1000, 4096) | 4096000 | 994738 | 3.36914 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 75.71440 | 0.01718 | 0.00030 | 0.00778 | | 8 | Total sparsity: | - | 61090496 | 7063928 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 88.43694 | 0.00000 | 0.00000 | 0.00000 | +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ 2018-04-04 21:30:52,499 - Total sparsity: 88.44 2018-04-04 21:30:52,499 - --- validate (epoch=89)----------- 2018-04-04 21:30:52,499 - 128116 samples (256 per mini-batch) 2018-04-04 21:31:35,357 - ==> Top1: 51.838 Top5: 74.817 Loss: 2.150 2018-04-04 21:31:39,251 - --- test --------------------- 2018-04-04 21:31:39,252 - 50000 samples (256 per mini-batch) 2018-04-04 21:32:01,274 - ==> Top1: 56.606 Top5: 79.446 Loss: 1.893 To prune, or not to prune: exploring the efficacy of pruning for model compression In their paper Zhu and Gupta, \"compare the accuracy of large, but pruned models (large-sparse) and their smaller, but dense (small-dense) counterparts with identical memory footprint.\" They also \"propose a new gradual pruning technique that is simple 
and straightforward to apply across a variety of models/datasets with minimal tuning.\" This pruning schedule is implemented by distiller.AutomatedGradualPruner, which increases the sparsity level (expressed as a percentage of zero-valued elements) gradually over several pruning steps. Distiller's implementation only prunes elements once in an epoch (the model is fine-tuned in between pruning events), which is a small deviation from Zhu and Gupta's paper. The research paper specifies the schedule in terms of mini-batches, while our implementation specifies the schedule in terms of epochs. We feel that using epochs performs well, and is more \"stable\", since the number of mini-batches will change, if you change the batch size. ImageNet files: Distiller schedule: distiller/examples/agp-pruning/mobilenet.imagenet.schedule_agp.yaml Checkpoint file: checkpoint.pth.tar ResNet18 files: Distiller schedule: distiller/examples/agp-pruning/resnet18.schedule_agp.yaml Checkpoint file: checkpoint.pth.tar Results As our baseline we used a pretrained PyTorch MobileNet model (width=1) which has Top1=68.848 and Top5=88.740. In their paper, Zhu and Gupta prune 50% of the elements of MobileNet (width=1) with a 1.1% drop in accuracy. We pruned about 51.6% of the elements, with virtually no change in the accuracies (Top1: 68.808 and Top5: 88.656). We didn't try to prune more than this, but we do note that the baseline accuracy that we used is almost 2% lower than the accuracy published in the paper. +----+--------------------------+--------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean | |----+--------------------------+--------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0 | module.model.0.0.weight | (32, 3, 3, 3) | 864 | 864 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.14466 | 0.00103 | 0.06508 | | 1 | module.model.1.0.weight | (32, 1, 3, 3) | 288 | 288 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.32146 | 0.01020 | 0.12932 | | 2 | module.model.1.3.weight | (64, 32, 1, 1) | 2048 | 2048 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.11942 | 0.00024 | 0.03627 | | 3 | module.model.2.0.weight | (64, 1, 3, 3) | 576 | 576 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.15809 | 0.00543 | 0.11513 | | 4 | module.model.2.3.weight | (128, 64, 1, 1) | 8192 | 8192 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.08442 | -0.00031 | 0.04182 | | 5 | module.model.3.0.weight | (128, 1, 3, 3) | 1152 | 1152 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.16780 | 0.00125 | 0.10545 | | 6 | module.model.3.3.weight | (128, 128, 1, 1) | 16384 | 16384 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.07126 | -0.00197 | 0.04123 | | 7 | module.model.4.0.weight | (128, 1, 3, 3) | 1152 | 1152 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.10182 | 0.00171 | 0.08719 | | 8 | module.model.4.3.weight | (256, 128, 1, 1) | 32768 | 13108 | 0.00000 | 0.00000 | 10.15625 | 59.99756 | 12.50000 | 59.99756 | 0.05543 | -0.00002 | 0.02760 | | 9 | module.model.5.0.weight | (256, 1, 3, 3) | 2304 | 2304 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.12516 | -0.00288 | 
0.08058 | | 10 | module.model.5.3.weight | (256, 256, 1, 1) | 65536 | 26215 | 0.00000 | 0.00000 | 12.50000 | 59.99908 | 23.82812 | 59.99908 | 0.04453 | 0.00002 | 0.02271 | | 11 | module.model.6.0.weight | (256, 1, 3, 3) | 2304 | 2304 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.08024 | 0.00252 | 0.06377 | | 12 | module.model.6.3.weight | (512, 256, 1, 1) | 131072 | 52429 | 0.00000 | 0.00000 | 23.82812 | 59.99985 | 14.25781 | 59.99985 | 0.03561 | -0.00057 | 0.01779 | | 13 | module.model.7.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.11008 | -0.00018 | 0.06829 | | 14 | module.model.7.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 14.25781 | 59.99985 | 21.28906 | 59.99985 | 0.02944 | -0.00060 | 0.01515 | | 15 | module.model.8.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.08258 | 0.00370 | 0.04905 | | 16 | module.model.8.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 21.28906 | 59.99985 | 28.51562 | 59.99985 | 0.02865 | -0.00046 | 0.01465 | | 17 | module.model.9.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.07578 | 0.00468 | 0.04201 | | 18 | module.model.9.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 28.51562 | 59.99985 | 23.43750 | 59.99985 | 0.02939 | -0.00044 | 0.01511 | | 19 | module.model.10.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.07091 | 0.00014 | 0.04306 | | 20 | module.model.10.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 24.60938 | 59.99985 | 20.89844 | 59.99985 | 0.03095 | -0.00059 | 0.01672 | | 21 | module.model.11.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.05729 | -0.00518 | 0.04267 | | 22 | module.model.11.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 20.89844 | 59.99985 | 17.57812 | 59.99985 | 0.03229 | -0.00044 | 0.01797 | | 23 | module.model.12.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.04981 | -0.00136 | 0.03967 | | 24 | module.model.12.3.weight | (1024, 512, 1, 1) | 524288 | 209716 | 0.00000 | 0.00000 | 16.01562 | 59.99985 | 44.23828 | 59.99985 | 0.02514 | -0.00106 | 0.01278 | | 25 | module.model.13.0.weight | (1024, 1, 3, 3) | 9216 | 9216 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.02396 | -0.00949 | 0.01549 | | 26 | module.model.13.3.weight | (1024, 1024, 1, 1) | 1048576 | 419431 | 0.00000 | 0.00000 | 44.72656 | 59.99994 | 1.46484 | 59.99994 | 0.01801 | -0.00017 | 0.00931 | | 27 | module.fc.weight | (1000, 1024) | 1024000 | 409600 | 1.46484 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 60.00000 | 0.05078 | 0.00271 | 0.02734 | | 28 | Total sparsity: | - | 4209088 | 1726917 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 58.97171 | 0.00000 | 0.00000 | 0.00000 | +----+--------------------------+--------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ Total sparsity: 58.97 --- validate (epoch=199)----------- 128116 samples (256 per mini-batch) ==> Top1: 65.337 Top5: 84.984 Loss: 1.494 --- test --------------------- 50000 samples (256 per mini-batch) ==> Top1: 68.810 Top5: 88.626 Loss: 1.282 Learning Structured Sparsity in Deep Neural Networks This research paper from the 
University of Pittsburgh, \"proposes a Structured Sparsity Learning (SSL) method to regularize the structures (i.e., filters, channels, filter shapes, and layer depth) of DNNs. SSL can: (1) learn a compact structure from a bigger DNN to reduce computation cost; (2) obtain a hardware-friendly structured sparsity of DNN to efficiently accelerate the DNN\u2019s evaluation.\" Note that this paper does not use pruning, but instead uses group regularization during the training to force weights towards zero, as a group. We used a schedule which thresholds the regularized elements at a magnitude equal to the regularization strength. At the end of the regularization phase, we save the final sparsity masks generated by the regularization, and exit. Then we load this regularized model and remove the layers corresponding to the zeroed weight tensors (i.e. layers in which all of the elements have a zero value). Baseline training We started by training the baseline ResNet20-Cifar dense network since we didn't have a pre-trained model. Distiller schedule: distiller/examples/ssl/resnet20_cifar_baseline_training.yaml Checkpoint files: distiller/examples/ssl/checkpoints/ $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --lr=0.3 --epochs=180 --compress=../cifar10/resnet20/baseline_training.yaml -j=1 --deterministic Regularization Then we started training from scratch again, but this time we used Group Lasso regularization on entire layers: Distiller schedule: distiller/examples/ssl/ssl_4D-removal_4L_training.yaml $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --lr=0.4 --epochs=180 --compress=../ssl/ssl_4D-removal_training.yaml -j=1 --deterministic The diagram below shows the training of Resnet20/CIFAR10 using Group Lasso regularization on entire layers (in blue) vs. training Resnet20/CIFAR10 baseline (in red). You may notice several interesting things: 1. The LR-decay policy is the same, but the two sessions start with different initial LR values. 2. The data-loss of the regularized training follows the same shape as the un-regularized training (baseline), and eventually the two seem to merge. 3. We see similar behavior in the validation Top1 and Top5 accuracy results, but the regularized training eventually performs better. 4. In the top right corner we see the behavior of the regularization loss ( Reg Loss ), which actually increases for some time, until the data-loss has a sharp drop (after ~16K mini-batches), at which point the regularization loss also starts dropping. This regularization yields 5 layers with zeroed weight tensors. We load this model, remove the 5 layers, and start fine-tuning the weights. This process of layer removal is specific to ResNet for CIFAR, which we altered by adding code to skip over layers during the forward path. When you export to ONNX, the removed layers do not participate in the forward path, so they are not instantiated. We managed to remove 5 of the 16 3x3 convolution layers which dominate the computation time. It's not bad, but we probably could have done better. Fine-tuning During the fine-tuning process, because the removed layers do not participate in the forward path, they do not appear in the backward path and do not receive gradient updates; therefore they are completely disconnected from the network. We copy the checkpoint file of the regularized model to checkpoint_trained_4D_regularized_5Lremoved.pth.tar . 
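For reference, a minimal sketch of the group-lasso penalty that drives whole structures to zero in this kind of schedule (we group by filters here; the helper name is ours, and Distiller's GroupLassoRegularizer supports several group types):

```python
import torch

def group_lasso(weight):
    # Sum of the L2 norms of each filter (group); minimizing this penalty
    # drives entire filters toward zero as a group, not element by element.
    num_filters = weight.shape[0]
    return weight.view(num_filters, -1).norm(p=2, dim=1).sum()

w = torch.randn(16, 8, 3, 3)
reg_loss = 0.0005 * group_lasso(w)  # 0.0005 is an illustrative strength
```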
Distiller schedule: distiller/examples/ssl/ssl_4D-removal_finetuning.yaml $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --lr=0.1 --epochs=250 --resume=../cifar10/resnet20/checkpoint_trained_4D_regularized_5Lremoved.pth.tar --compress=../ssl/ssl_4D-removal_finetuning.yaml -j=1 --deterministic Results Our baseline results for ResNet20 Cifar are: Top1=91.450 and Top5=99.750 We used Distiller's GroupLassoRegularizer to remove 5 layers from Resnet20 (CIFAR10) with no degradation of the accuracies. The regularized model exhibits really poor classification abilities: $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --resume=../cifar10/resnet20/checkpoint_trained_4D_regularized_5Lremoved.pth.tar --evaluate => loading checkpoint ../cifar10/resnet20/checkpoint_trained_4D_regularized_5Lremoved.pth.tar best top@1: 90.620 Loaded compression schedule from checkpoint (epoch 179) Removing layer: module.layer1.0.conv1 [layer=0 block=0 conv=0] Removing layer: module.layer1.0.conv2 [layer=0 block=0 conv=1] Removing layer: module.layer1.1.conv1 [layer=0 block=1 conv=0] Removing layer: module.layer1.1.conv2 [layer=0 block=1 conv=1] Removing layer: module.layer2.2.conv2 [layer=1 block=2 conv=1] Files already downloaded and verified Files already downloaded and verified Dataset sizes: training=45000 validation=5000 test=10000 --- test --------------------- 10000 samples (256 per mini-batch) ==> Top1: 22.290 Top5: 68.940 Loss: 5.172 However, after fine-tuning, we recovered most of the accuracy loss, but not quite all of it: Top1=91.020 and Top5=99.670 We didn't spend time trying to wrestle with this network, and therefore didn't achieve SSL's published results (which showed that they managed to remove 6 layers and at the same time increase accuracies). Pruning Filters for Efficient ConvNets Quoting the authors directly: We present an acceleration method for CNNs, where we prune filters from CNNs that are identified as having a small effect on the output accuracy. By removing whole filters in the network together with their connecting feature maps, the computation costs are reduced significantly. In contrast to pruning weights, this approach does not result in sparse connectivity patterns. Hence, it does not need the support of sparse convolution libraries and can work with existing efficient BLAS libraries for dense matrix multiplications. The implementation of the research by Hao Li et al. required us to add filter-pruning sensitivity analysis, and support for \"network thinning\". After performing filter-pruning sensitivity analysis to assess which layers are more sensitive to the pruning of filters, we execute distiller.L1RankedStructureParameterPruner once in order to rank the filters of each layer by their L1-norm values, and then we prune to the schedule-prescribed sparsity level. Distiller schedule: distiller/examples/pruning_filters_for_efficient_convnets/resnet56_cifar_filter_rank.yaml Checkpoint files: checkpoint_finetuned.pth.tar The excerpt from the schedule, displayed below, shows how we declare the L1RankedStructureParameterPruner. This class currently ranks filters only, but because in the future this class may support ranking of various structures, you need to specify for each parameter both the target sparsity level and the structure type ('3D' is filter-wise pruning). 
pruners: filter_pruner: class: 'L1RankedStructureParameterPruner' reg_regims: 'module.layer1.0.conv1.weight': [0.6, '3D'] 'module.layer1.1.conv1.weight': [0.6, '3D'] 'module.layer1.2.conv1.weight': [0.6, '3D'] 'module.layer1.3.conv1.weight': [0.6, '3D'] In the policy, we specify that we want to invoke this pruner once, at epoch 180. Because we are starting from a network which was trained for 180 epochs (see Baseline training below), the filter ranking is performed right at the outset of this schedule. policies: - pruner: instance_name: filter_pruner epochs: [180] Following the pruning, we want to \"physically\" remove the pruned filters from the network, which involves reconfiguring the Convolutional layers and the parameter tensors. When we remove filters from Convolution layer n we need to perform several changes to the network: 1. Shrink layer n 's weights tensor, leaving only the \"important\" filters. 2. Configure layer n 's .out_channels member to its new, smaller, value. 3. If a BN layer follows layer n , then it also needs to be reconfigured and its scale and shift parameter vectors need to be shrunk. 4. If a Convolution layer follows the BN layer, then it will have fewer input channels, which requires reconfiguration and shrinking of its weights. All of this is performed by distiller.ResnetCifarFilterRemover which is also scheduled at epoch 180. We call this process \"network thinning\". extensions: net_thinner: class: 'FilterRemover' thinning_func_str: remove_filters arch: 'resnet56_cifar' dataset: 'cifar10' Network thinning requires us to understand the layer connectivity and data-dependency of the DNN, and we are working on a robust method to perform this. On networks with topologies similar to ResNet (residuals) and GoogLeNet (inception), which have several inputs and outputs to/from Convolution layers, there are extra details to consider. Our current implementation is specific to certain layers in ResNet and is a bit fragile. We will continue to improve and generalize this. Baseline training We started by training the baseline ResNet56-Cifar dense network (180 epochs) since we didn't have a pre-trained model. Distiller schedule: distiller/examples/pruning_filters_for_efficient_convnets/resnet56_cifar_baseline_training.yaml Checkpoint files: checkpoint.resnet56_cifar_baseline.pth.tar Results We trained a ResNet56-Cifar10 network and achieved accuracy results which are on-par with published results: Top1: 92.970 and Top5: 99.740. We used Hao Li et al.'s algorithm to remove 37.3% of the original convolution MACs, while maintaining virtually the same accuracy as the baseline: Top1: 92.830 and Top5: 99.760","title":"Model Zoo"},{"location":"model_zoo.html#distiller-model-zoo","text":"","title":"Distiller Model Zoo"},{"location":"model_zoo.html#how-to-contribute-models-to-the-model-zoo","text":"We encourage you to contribute new models to the Model Zoo. We welcome implementations of published papers or of your own work. To ensure that models and algorithms shared with others are high-quality, please commit your models with the following: Command-line arguments Log files PyTorch model","title":"How to contribute models to the Model Zoo"},{"location":"model_zoo.html#contents","text":"The Distiller model zoo is not a \"traditional\" model-zoo, because it does not necessarily contain best-in-class compressed models. Instead, the model-zoo contains a number of deep learning models that have been compressed using Distiller following some well-known research papers. 
These are meant to serve as examples of how Distiller can be used. Each model contains a Distiller schedule detailing how the model was compressed, a PyTorch checkpoint, text logs and TensorBoard logs. table, th, td { border: 1px solid black; } Paper Dataset Network Method & Granularity Schedule Features Learning both Weights and Connections for Efficient Neural Networks ImageNet Alexnet Element-wise pruning Iterative; Manual Magnitude thresholding based on a sensitivity quantifier. Element-wise sparsity sensitivity analysis To prune, or not to prune: exploring the efficacy of pruning for model compression ImageNet MobileNet Element-wise pruning Automated gradual; Iterative Magnitude thresholding based on target level Learning Structured Sparsity in Deep Neural Networks CIFAR10 ResNet20 Group regularization 1.Train with group-lasso 2.Remove zero groups and fine-tune Group Lasso regularization. Groups: kernels (2D), channels, filters (3D), layers (4D), vectors (rows, cols) Pruning Filters for Efficient ConvNets CIFAR10 ResNet56 Filter ranking; guided by sensitivity analysis 1.Rank filters 2. Remove filters and channels 3.Fine-tune One-shot ranking and pruning of filters; with network thinning","title":"Contents"},{"location":"model_zoo.html#learning-both-weights-and-connections-for-efficient-neural-networks","text":"This schedule is an example of \"Iterative Pruning\" for Alexnet/Imagent, as described in chapter 3 of Song Han's PhD dissertation: Efficient Methods and Hardware for Deep Learning and in his paper Learning both Weights and Connections for Efficient Neural Networks . The Distiller schedule uses SensitivityPruner which is similar to MagnitudeParameterPruner, but instead of specifying \"raw\" thresholds, it uses a \"sensitivity parameter\". Song Han's paper says that \"the pruning threshold is chosen as a quality parameter multiplied by the standard deviation of a layers weights,\" and this is not explained much further. In Distiller, the \"quality parameter\" is referred to as \"sensitivity\" and is based on the values learned from performing sensitivity analysis. Using a parameter that is related to the standard deviation is very helpful: under the assumption that the weights tensors are distributed normally, the standard deviation acts as a threshold normalizer. Note that Distiller's implementation deviates slightly from the algorithm Song Han describes in his PhD dissertation, in that the threshold value is set only once. In his PhD dissertation, Song Han describes a growing threshold, at each iteration. This requires n+1 hyper-parameters (n being the number of pruning iterations we use): the threshold and the threshold increase (delta) at each pruning iteration. Distiller's implementation takes advantage of the fact that as pruning progresses, more weights are pulled toward zero, and therefore the threshold \"traps\" more weights. Thus, we can use less hyper-parameters and achieve the same results. Distiller schedule: distiller/examples/sensitivity-pruning/alexnet.schedule_sensitivity.yaml Checkpoint file: alexnet.checkpoint.89.pth.tar","title":"Learning both Weights and Connections for Efficient Neural Networks"},{"location":"model_zoo.html#results","text":"Our reference is TorchVision's pretrained Alexnet model which has a Top1 accuracy of 56.55 and Top5=79.09. We prune away 88.44% of the parameters and achieve Top1=56.61 and Top5=79.45. Song Han prunes 89% of the parameters, which is slightly better than our results. 
Parameters: +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean |----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0 | features.module.0.weight | (64, 3, 11, 11) | 23232 | 13411 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 42.27359 | 0.14391 | -0.00002 | 0.08805 | | 1 | features.module.3.weight | (192, 64, 5, 5) | 307200 | 115560 | 0.00000 | 0.00000 | 0.00000 | 1.91243 | 0.00000 | 62.38281 | 0.04703 | -0.00250 | 0.02289 | | 2 | features.module.6.weight | (384, 192, 3, 3) | 663552 | 256565 | 0.00000 | 0.00000 | 0.00000 | 6.18490 | 0.00000 | 61.33445 | 0.03354 | -0.00184 | 0.01803 | | 3 | features.module.8.weight | (256, 384, 3, 3) | 884736 | 315065 | 0.00000 | 0.00000 | 0.00000 | 6.96411 | 0.00000 | 64.38881 | 0.02646 | -0.00168 | 0.01422 | | 4 | features.module.10.weight | (256, 256, 3, 3) | 589824 | 186938 | 0.00000 | 0.00000 | 0.00000 | 15.49225 | 0.00000 | 68.30614 | 0.02714 | -0.00246 | 0.01409 | | 5 | classifier.1.weight | (4096, 9216) | 37748736 | 3398881 | 0.00000 | 0.21973 | 0.00000 | 0.21973 | 0.00000 | 90.99604 | 0.00589 | -0.00020 | 0.00168 | | 6 | classifier.4.weight | (4096, 4096) | 16777216 | 1782769 | 0.21973 | 3.46680 | 0.00000 | 3.46680 | 0.00000 | 89.37387 | 0.00849 | -0.00066 | 0.00263 | | 7 | classifier.6.weight | (1000, 4096) | 4096000 | 994738 | 3.36914 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 75.71440 | 0.01718 | 0.00030 | 0.00778 | | 8 | Total sparsity: | - | 61090496 | 7063928 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 88.43694 | 0.00000 | 0.00000 | 0.00000 | +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ 2018-04-04 21:30:52,499 - Total sparsity: 88.44 2018-04-04 21:30:52,499 - --- validate (epoch=89)----------- 2018-04-04 21:30:52,499 - 128116 samples (256 per mini-batch) 2018-04-04 21:31:35,357 - ==> Top1: 51.838 Top5: 74.817 Loss: 2.150 2018-04-04 21:31:39,251 - --- test --------------------- 2018-04-04 21:31:39,252 - 50000 samples (256 per mini-batch) 2018-04-04 21:32:01,274 - ==> Top1: 56.606 Top5: 79.446 Loss: 1.893","title":"Results"},{"location":"model_zoo.html#to-prune-or-not-to-prune-exploring-the-efficacy-of-pruning-for-model-compression","text":"In their paper Zhu and Gupta, \"compare the accuracy of large, but pruned models (large-sparse) and their smaller, but dense (small-dense) counterparts with identical memory footprint.\" They also \"propose a new gradual pruning technique that is simple and straightforward to apply across a variety of models/datasets with minimal tuning.\" This pruning schedule is implemented by distiller.AutomatedGradualPruner, which increases the sparsity level (expressed as a percentage of zero-valued elements) gradually over several pruning steps. Distiller's implementation only prunes elements once in an epoch (the model is fine-tuned in between pruning events), which is a small deviation from Zhu and Gupta's paper. The research paper specifies the schedule in terms of mini-batches, while our implementation specifies the schedule in terms of epochs. 
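For reference, the gradual schedule in Zhu and Gupta's paper ramps the sparsity from an initial level s_i to a final level s_f with a cubic function over n pruning steps; here is a minimal sketch (the variable names are ours):

```python
def agp_sparsity(t, t0, n, dt, s_i, s_f):
    # Cubic sparsity ramp from Zhu & Gupta (2017): sparsity grows quickly
    # at first, then more slowly as it approaches the final level s_f.
    progress = min(max((t - t0) / (n * dt), 0.0), 1.0)
    return s_f + (s_i - s_f) * (1.0 - progress) ** 3

# e.g. ramping from 0% to 50% sparsity over 30 pruning steps:
for step in (0, 15, 30):
    print(round(agp_sparsity(step, t0=0, n=30, dt=1, s_i=0.0, s_f=0.5), 3))
```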
We feel that using epochs performs well, and is more \"stable\", since the number of mini-batches will change, if you change the batch size. ImageNet files: Distiller schedule: distiller/examples/agp-pruning/mobilenet.imagenet.schedule_agp.yaml Checkpoint file: checkpoint.pth.tar ResNet18 files: Distiller schedule: distiller/examples/agp-pruning/resnet18.schedule_agp.yaml Checkpoint file: checkpoint.pth.tar","title":"To prune, or not to prune: exploring the efficacy of pruning for model compression"},{"location":"model_zoo.html#results_1","text":"As our baseline we used a pretrained PyTorch MobileNet model (width=1) which has Top1=68.848 and Top5=88.740. In their paper, Zhu and Gupta prune 50% of the elements of MobileNet (width=1) with a 1.1% drop in accuracy. We pruned about 51.6% of the elements, with virtually no change in the accuracies (Top1: 68.808 and Top5: 88.656). We didn't try to prune more than this, but we do note that the baseline accuracy that we used is almost 2% lower than the accuracy published in the paper. +----+--------------------------+--------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean | |----+--------------------------+--------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0 | module.model.0.0.weight | (32, 3, 3, 3) | 864 | 864 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.14466 | 0.00103 | 0.06508 | | 1 | module.model.1.0.weight | (32, 1, 3, 3) | 288 | 288 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.32146 | 0.01020 | 0.12932 | | 2 | module.model.1.3.weight | (64, 32, 1, 1) | 2048 | 2048 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.11942 | 0.00024 | 0.03627 | | 3 | module.model.2.0.weight | (64, 1, 3, 3) | 576 | 576 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.15809 | 0.00543 | 0.11513 | | 4 | module.model.2.3.weight | (128, 64, 1, 1) | 8192 | 8192 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.08442 | -0.00031 | 0.04182 | | 5 | module.model.3.0.weight | (128, 1, 3, 3) | 1152 | 1152 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.16780 | 0.00125 | 0.10545 | | 6 | module.model.3.3.weight | (128, 128, 1, 1) | 16384 | 16384 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.07126 | -0.00197 | 0.04123 | | 7 | module.model.4.0.weight | (128, 1, 3, 3) | 1152 | 1152 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.10182 | 0.00171 | 0.08719 | | 8 | module.model.4.3.weight | (256, 128, 1, 1) | 32768 | 13108 | 0.00000 | 0.00000 | 10.15625 | 59.99756 | 12.50000 | 59.99756 | 0.05543 | -0.00002 | 0.02760 | | 9 | module.model.5.0.weight | (256, 1, 3, 3) | 2304 | 2304 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.12516 | -0.00288 | 0.08058 | | 10 | module.model.5.3.weight | (256, 256, 1, 1) | 65536 | 26215 | 0.00000 | 0.00000 | 12.50000 | 59.99908 | 23.82812 | 59.99908 | 0.04453 | 0.00002 | 0.02271 | | 11 | module.model.6.0.weight | (256, 1, 3, 3) | 2304 | 2304 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.08024 | 0.00252 | 0.06377 | | 12 | module.model.6.3.weight | (512, 256, 1, 1) | 131072 | 52429 | 0.00000 | 0.00000 | 23.82812 | 59.99985 | 14.25781 | 59.99985 | 
0.03561 | -0.00057 | 0.01779 | | 13 | module.model.7.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.11008 | -0.00018 | 0.06829 | | 14 | module.model.7.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 14.25781 | 59.99985 | 21.28906 | 59.99985 | 0.02944 | -0.00060 | 0.01515 | | 15 | module.model.8.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.08258 | 0.00370 | 0.04905 | | 16 | module.model.8.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 21.28906 | 59.99985 | 28.51562 | 59.99985 | 0.02865 | -0.00046 | 0.01465 | | 17 | module.model.9.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.07578 | 0.00468 | 0.04201 | | 18 | module.model.9.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 28.51562 | 59.99985 | 23.43750 | 59.99985 | 0.02939 | -0.00044 | 0.01511 | | 19 | module.model.10.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.07091 | 0.00014 | 0.04306 | | 20 | module.model.10.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 24.60938 | 59.99985 | 20.89844 | 59.99985 | 0.03095 | -0.00059 | 0.01672 | | 21 | module.model.11.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.05729 | -0.00518 | 0.04267 | | 22 | module.model.11.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 20.89844 | 59.99985 | 17.57812 | 59.99985 | 0.03229 | -0.00044 | 0.01797 | | 23 | module.model.12.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.04981 | -0.00136 | 0.03967 | | 24 | module.model.12.3.weight | (1024, 512, 1, 1) | 524288 | 209716 | 0.00000 | 0.00000 | 16.01562 | 59.99985 | 44.23828 | 59.99985 | 0.02514 | -0.00106 | 0.01278 | | 25 | module.model.13.0.weight | (1024, 1, 3, 3) | 9216 | 9216 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.02396 | -0.00949 | 0.01549 | | 26 | module.model.13.3.weight | (1024, 1024, 1, 1) | 1048576 | 419431 | 0.00000 | 0.00000 | 44.72656 | 59.99994 | 1.46484 | 59.99994 | 0.01801 | -0.00017 | 0.00931 | | 27 | module.fc.weight | (1000, 1024) | 1024000 | 409600 | 1.46484 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 60.00000 | 0.05078 | 0.00271 | 0.02734 | | 28 | Total sparsity: | - | 4209088 | 1726917 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 58.97171 | 0.00000 | 0.00000 | 0.00000 | +----+--------------------------+--------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ Total sparsity: 58.97 --- validate (epoch=199)----------- 128116 samples (256 per mini-batch) ==> Top1: 65.337 Top5: 84.984 Loss: 1.494 --- test --------------------- 50000 samples (256 per mini-batch) ==> Top1: 68.810 Top5: 88.626 Loss: 1.282","title":"Results"},{"location":"model_zoo.html#learning-structured-sparsity-in-deep-neural-networks","text":"This research paper from the University of Pittsburgh, \"proposes a Structured Sparsity Learning (SSL) method to regularize the structures (i.e., filters, channels, filter shapes, and layer depth) of DNNs. 
SSL can: (1) learn a compact structure from a bigger DNN to reduce computation cost; (2) obtain a hardware-friendly structured sparsity of DNN to efficiently accelerate the DNN\u2019s evaluation.\" Note that this paper does not use pruning, but instead uses group regularization during training to force groups of weights towards zero. We used a schedule which thresholds the regularized elements at a magnitude equal to the regularization strength. At the end of the regularization phase, we save the final sparsity masks generated by the regularization, and exit. Then we load this regularized model and remove the layers corresponding to the zeroed weight tensors (i.e. layers in which all of the weight elements are zero).","title":"Learning Structured Sparsity in Deep Neural Networks"},{"location":"model_zoo.html#baseline-training","text":"We started by training the baseline ResNet20-Cifar dense network since we didn't have a pre-trained model. Distiller schedule: distiller/examples/ssl/resnet20_cifar_baseline_training.yaml Checkpoint files: distiller/examples/ssl/checkpoints/ $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --lr=0.3 --epochs=180 --compress=../cifar10/resnet20/baseline_training.yaml -j=1 --deterministic","title":"Baseline training"},{"location":"model_zoo.html#regularization","text":"Then we started training from scratch again, but this time we used Group Lasso regularization on entire layers: Distiller schedule: distiller/examples/ssl/ssl_4D-removal_4L_training.yaml $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --lr=0.4 --epochs=180 --compress=../ssl/ssl_4D-removal_training.yaml -j=1 --deterministic The diagram below shows the training of Resnet20/CIFAR10 using Group Lasso regularization on entire layers (in blue) vs. training Resnet20/CIFAR10 baseline (in red). You may notice several interesting things: 1. The LR-decay policy is the same, but the two sessions start with different initial LR values. 2. The data-loss of the regularized training follows the same shape as the un-regularized training (baseline), and eventually the two seem to merge. 3. We see similar behavior in the validation Top1 and Top5 accuracy results, but the regularized training eventually performs better. 4. In the top right corner we see the behavior of the regularization loss ( Reg Loss ), which actually increases for some time, until the data-loss has a sharp drop (after ~16K mini-batches), at which point the regularization loss also starts dropping. This regularization yields 5 layers with zeroed weight tensors. We load this model, remove the 5 layers, and start fine-tuning the weights. This process of layer removal is specific to ResNet for CIFAR, which we altered by adding code to skip over layers during the forward path. When you export to ONNX, the removed layers do not participate in the forward path, so they are not instantiated. We managed to remove 5 of the 16 3x3 convolution layers which dominate the computation time. It's not bad, but we probably could have done better.","title":"Regularization"},{"location":"model_zoo.html#fine-tuning","text":"During the fine-tuning process, because the removed layers do not participate in the forward path, they do not appear in the backward path and are not back-propagated; therefore they are completely disconnected from the network. We copy the checkpoint file of the regularized model to checkpoint_trained_4D_regularized_5Lremoved.pth.tar . 
Distiller schedule: distiller/examples/ssl/ssl_4D-removal_finetuning.yaml $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --lr=0.1 --epochs=250 --resume=../cifar10/resnet20/checkpoint_trained_4D_regularized_5Lremoved.pth.tar --compress=../ssl/ssl_4D-removal_finetuning.yaml -j=1 --deterministic","title":"Fine-tuning"},{"location":"model_zoo.html#results_2","text":"Our baseline results for ResNet20 Cifar are: Top1=91.450 and Top5=99.750 We used Distiller's GroupLassoRegularizer to remove 5 layers from Resnet20 (CIFAR10) with no degradation in accuracy. The regularized model exhibits really poor classification abilities: $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --resume=../cifar10/resnet20/checkpoint_trained_4D_regularized_5Lremoved.pth.tar --evaluate => loading checkpoint ../cifar10/resnet20/checkpoint_trained_4D_regularized_5Lremoved.pth.tar best top@1: 90.620 Loaded compression schedule from checkpoint (epoch 179) Removing layer: module.layer1.0.conv1 [layer=0 block=0 conv=0] Removing layer: module.layer1.0.conv2 [layer=0 block=0 conv=1] Removing layer: module.layer1.1.conv1 [layer=0 block=1 conv=0] Removing layer: module.layer1.1.conv2 [layer=0 block=1 conv=1] Removing layer: module.layer2.2.conv2 [layer=1 block=2 conv=1] Files already downloaded and verified Files already downloaded and verified Dataset sizes: training=45000 validation=5000 test=10000 --- test --------------------- 10000 samples (256 per mini-batch) ==> Top1: 22.290 Top5: 68.940 Loss: 5.172 However, after fine-tuning, we recovered most of the accuracy loss, but not quite all of it: Top1=91.020 and Top5=99.670 We didn't spend time trying to wrestle with this network, and therefore didn't achieve SSL's published results (which showed that they managed to remove 6 layers while increasing accuracy).","title":"Results"},{"location":"model_zoo.html#pruning-filters-for-efficient-convnets","text":"Quoting the authors directly: We present an acceleration method for CNNs, where we prune filters from CNNs that are identified as having a small effect on the output accuracy. By removing whole filters in the network together with their connecting feature maps, the computation costs are reduced significantly. In contrast to pruning weights, this approach does not result in sparse connectivity patterns. Hence, it does not need the support of sparse convolution libraries and can work with existing efficient BLAS libraries for dense matrix multiplications. The implementation of the research by Hao et al. required us to add filter-pruning sensitivity analysis and support for \"network thinning\". After performing filter-pruning sensitivity analysis to assess which layers are more sensitive to the pruning of filters, we execute distiller.L1RankedStructureParameterPruner once in order to rank the filters of each layer by their L1-norm values, and then we prune to the schedule-prescribed sparsity level. Distiller schedule: distiller/examples/pruning_filters_for_efficient_convnets/resnet56_cifar_filter_rank.yaml Checkpoint files: checkpoint_finetuned.pth.tar The excerpt from the schedule, displayed below, shows how we declare the L1RankedStructureParameterPruner. This class currently ranks filters only, but because in the future it may support ranking of other structures, you need to specify, for each parameter, both the target sparsity level and the structure type ('3D' means filter-wise pruning). 
pruners: filter_pruner: class: 'L1RankedStructureParameterPruner' reg_regims: 'module.layer1.0.conv1.weight': [0.6, '3D'] 'module.layer1.1.conv1.weight': [0.6, '3D'] 'module.layer1.2.conv1.weight': [0.6, '3D'] 'module.layer1.3.conv1.weight': [0.6, '3D'] In the policy, we specify that we want to invoke this pruner once, at epoch 180. Because we are starting from a network which was trained for 180 epochs (see Baseline training below), the filter ranking is performed right at the outset of this schedule. policies: - pruner: instance_name: filter_pruner epochs: [180] Following the pruning, we want to \"physically\" remove the pruned filters from the network, which involves reconfiguring the Convolutional layers and the parameter tensors. When we remove filters from Convolution layer n we need to perform several changes to the network: 1. Shrink layer n 's weights tensor, leaving only the \"important\" filters. 2. Configure layer n 's .out_channels member to its new, smaller, value. 3. If a BN layer follows layer n , then it also needs to be reconfigured and its scale and shift parameter vectors need to be shrunk. 4. If a Convolution layer follows the BN layer, then it will have fewer input channels, which requires reconfiguration and shrinking of its weights. All of this is performed by distiller.ResnetCifarFilterRemover, which is also scheduled at epoch 180. We call this process \"network thinning\". extensions: net_thinner: class: 'FilterRemover' thinning_func_str: remove_filters arch: 'resnet56_cifar' dataset: 'cifar10' Network thinning requires us to understand the layer connectivity and data-dependency of the DNN, and we are working on a robust method to perform this. On networks with topologies similar to ResNet (residuals) and GoogLeNet (inception), which have several inputs and outputs to/from Convolution layers, there are extra details to consider. Our current implementation is specific to certain layers in ResNet and is a bit fragile. We will continue to improve and generalize this.","title":"Pruning Filters for Efficient ConvNets"},{"location":"model_zoo.html#baseline-training_1","text":"We started by training the baseline ResNet56-Cifar dense network (180 epochs) since we didn't have a pre-trained model. Distiller schedule: distiller/examples/pruning_filters_for_efficient_convnets/resnet56_cifar_baseline_training.yaml Checkpoint files: checkpoint.resnet56_cifar_baseline.pth.tar","title":"Baseline training"},{"location":"model_zoo.html#results_3","text":"We trained a ResNet56-Cifar10 network and achieved accuracy results which are on par with published results: Top1: 92.970 and Top5: 99.740. We used Hao et al.'s algorithm to remove 37.3% of the original convolution MACs, while maintaining virtually the same accuracy as the baseline: Top1: 92.830 and Top5: 99.760","title":"Results"},{"location":"pruning.html","text":"Pruning A common methodology for inducing sparsity in weights and activations is called pruning . Pruning is the application of a binary criterion to decide which weights to prune: weights which match the pruning criterion are assigned a value of zero. Pruned elements are \"trimmed\" from the model: we zero their values and also make sure they don't take part in the back-propagation process. We can prune weights, biases, and activations. Biases are few and their contribution to a layer's output is relatively large, so there is little incentive to prune them. 
We usually see sparse activations following a ReLU layer, because ReLU quenches negative activations to exact zero (\\(ReLU(x): max(0,x)\\)). Sparsity in weights is less common, as weights tend to be very small, but are often not exact zeros. Let's define sparsity Sparsity is a measure of how many elements in a tensor are exact zeros, relative to the tensor size. A tensor is considered sparse if \"most\" of its elements are zero. How much is \"most\" is not strictly defined, but when you see a sparse tensor you know it ;-) The \\(l_0\\)-\"norm\" function measures how many non-zero elements are in a tensor x : \\[\\lVert x \\rVert_0\\;=\\;|x_1|^0 + |x_2|^0 + ... + |x_n|^0 \\] In other words, an element contributes either a value of 1 or 0 to \\(l_0\\). Anything but an exact zero contributes a value of 1 - that's pretty cool. Sometimes it helps to think about density, the fraction of non-zero elements (NNZ divided by the tensor size), which is sparsity's complement: \\[ density = 1 - sparsity \\] You can use distiller.sparsity and distiller.density to query a PyTorch tensor's sparsity and density. What is weights pruning? Weights pruning, or model pruning, is a set of methods to increase the sparsity (amount of zero-valued elements in a tensor) of a network's weights. In general, the term 'parameters' refers to both weights and bias tensors of a model. Biases are rarely, if ever, pruned because there are very few bias elements compared to weights elements, and it is just not worth the trouble. Pruning requires a criterion for choosing which elements to prune - this is called the pruning criterion . The most common pruning criterion is the absolute value of each element: the element's absolute value is compared to some threshold value, and if it is below the threshold the element is set to zero (i.e. pruned) . This is implemented by the distiller.MagnitudeParameterPruner class. The idea behind this method is that weights with small \\(l_1\\)-norms (absolute value) contribute little to the final result (low saliency), so they are less important and can be removed. A related idea motivating pruning is that models are over-parametrized and contain redundant logic and features. Therefore, some of these redundancies can be removed by setting their weights to zero. And yet another way to think of pruning is to phrase it as a search for a set of weights with as many zeros as possible, which still produces acceptable inference accuracies compared to the dense-model (non-pruned model). Another way to look at it is to imagine that because of the very high-dimensionality of the parameter space, the immediate space around the dense-model's solution likely contains some sparse solutions, and we want to find these sparse solutions. Pruning schedule The most straightforward way to prune is to take a trained model and prune it once; this is also called one-shot pruning . In Learning both Weights and Connections for Efficient Neural Networks Song Han et al. show that this is surprisingly effective, but also leaves a lot of potential sparsity untapped. The surprise is what they call the \"free lunch\" effect: \"reducing 2x the connections without losing accuracy even without retraining.\" However, they also note that when employing a pruning-followed-by-retraining regimen, they can achieve much better results (higher sparsity at no accuracy loss). This is called iterative pruning , and the retraining that follows pruning is often referred to as fine-tuning . 
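To make the distinction concrete, here is a minimal sketch of an iterative magnitude prune-and-retrain loop. This is an illustration only, not Distiller's scheduler: train_one_epoch is a hypothetical stand-in for your training code, and the sparsity targets are arbitrary.

import torch

def magnitude_mask(weight, sparsity):
    # Choose a threshold such that roughly `sparsity` of the elements fall below it.
    k = int(sparsity * weight.numel())
    if k == 0:
        return torch.ones_like(weight)
    threshold = weight.abs().flatten().kthvalue(k).values
    return (weight.abs() > threshold).float()

def iterative_pruning(model, sparsity_targets=(0.3, 0.5, 0.7), retrain_epochs=3):
    masks = {}
    for target in sparsity_targets:                    # each iteration prunes more weights
        for name, param in model.named_parameters():
            if param.dim() < 2:                        # skip biases; they are rarely pruned
                continue
            masks[name] = magnitude_mask(param.data, target)
            param.data.mul_(masks[name])               # zero the pruned elements
        for _ in range(retrain_epochs):                # fine-tune, reapplying the masks so
            train_one_epoch(model)                     # pruned weights stay at exact zero
            for name, param in model.named_parameters():
                if name in masks:
                    param.data.mul_(masks[name])
    return model

A one-shot schedule is simply the special case of a single pruning iteration with no retraining afterwards.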
How the pruning criterion changes between iterations, how many iterations we perform and how often, and which tensors are pruned - this is collectively called the pruning schedule . We can think of iterative pruning as repeatedly learning which weights are important, removing the least important ones based on some importance criterion, and then retraining the model to let it \"recover\" from the pruning by adjusting the remaining weights. At each iteration, we prune more weights. The decision of when to stop pruning is also expressed in the schedule, and it depends on the pruning algorithm. For example, if we are trying to achieve a specific sparsity level, then we stop when the pruning achieves that level. And if we are pruning weight structures in order to reduce the required compute budget, then we stop the pruning when this compute reduction is achieved. Distiller supports expressing the pruning schedule as a YAML file (which is then executed by an instance of a PruningScheduler). Pruning granularity Pruning individual weight elements is called element-wise pruning , and it is also sometimes referred to as fine-grained pruning. Coarse-grained pruning - also referred to as structured pruning , group pruning , or block pruning - is pruning entire groups of elements which have some significance. Groups come in various shapes and sizes, but an easy-to-visualize group-pruning is filter-pruning, in which entire filters are removed. Sensitivity analysis The hard part about inducing sparsity via pruning is determining what threshold, or sparsity level, to use for each layer's tensors. Sensitivity analysis is a method that tries to help us rank the tensors by their sensitivity to pruning. The idea is to set the pruning level (percentage) of a specific layer, and then to prune once, run an evaluation on the test dataset and record the accuracy score. We do this for all of the parameterized layers, and for each layer we examine several sparsity levels. This should teach us about the \"sensitivity\" of each of the layers to pruning. The evaluated model should be trained to maximum accuracy before running the analysis, because we aim to understand the behavior of the trained model's performance in relation to pruning of a specific weights tensor. Much as we can prune structures, we can also perform sensitivity analysis on structures. Distiller implements element-wise pruning sensitivity analysis using the \\(l_1\\)-norm of individual elements, and filter-wise pruning sensitivity analysis using the mean \\(l_1\\)-norm of filters. The authors of Pruning Filters for Efficient ConvNets describe how they do sensitivity analysis: \"To understand the sensitivity of each layer, we prune each layer independently and evaluate the resulting pruned network\u2019s accuracy on the validation set. Figure 2(b) shows that layers that maintain their accuracy as filters are pruned away correspond to layers with larger slopes in Figure 2(a). On the contrary, layers with relatively flat slopes are more sensitive to pruning. We empirically determine the number of filters to prune for each layer based on their sensitivity to pruning. For deep networks such as VGG-16 or ResNets, we observe that layers in the same stage (with the same feature map size) have a similar sensitivity to pruning. To avoid introducing layer-wise meta-parameters, we use the same pruning ratio for all layers in the same stage. 
For layers that are sensitive to pruning, we prune a smaller percentage of these layers or completely skip pruning them.\" The diagram below shows the results of running an element-wise sensitivity analysis on Alexnet, using Distiller's perform_sensitivity_analysis utility function. As reported by Song Han, and exhibited in the diagram, in Alexnet the feature-detecting layers (convolution layers) are more sensitive to pruning, and their sensitivity drops the deeper they are. The fully-connected layers are much less sensitive, which is great, because that's where most of the parameters are. References Song Han, Jeff Pool, John Tran, William J. Dally . Learning both Weights and Connections for Efficient Neural Networks , arXiv:1607.04381v2, 2015. Hao Li, Asim Kadav, Igor Durdanovic, Hanan Samet, Hans Peter Graf . Pruning Filters for Efficient ConvNets , arXiv:1608.08710v3, 2017.","title":"Pruning"},{"location":"pruning.html#pruning","text":"A common methodology for inducing sparsity in weights and activations is called pruning . Pruning is the application of a binary criterion to decide which weights to prune: weights which match the pruning criterion are assigned a value of zero. Pruned elements are \"trimmed\" from the model: we zero their values and also make sure they don't take part in the back-propagation process. We can prune weights, biases, and activations. Biases are few and their contribution to a layer's output is relatively large, so there is little incentive to prune them. We usually see sparse activations following a ReLU layer, because ReLU quenches negative activations to exact zero (\\(ReLU(x): max(0,x)\\)). Sparsity in weights is less common, as weights tend to be very small, but are often not exact zeros.","title":"Pruning"},{"location":"pruning.html#lets-define-sparsity","text":"Sparsity is a measure of how many elements in a tensor are exact zeros, relative to the tensor size. A tensor is considered sparse if \"most\" of its elements are zero. How much is \"most\" is not strictly defined, but when you see a sparse tensor you know it ;-) The \\(l_0\\)-\"norm\" function measures how many non-zero elements are in a tensor x : \\[\\lVert x \\rVert_0\\;=\\;|x_1|^0 + |x_2|^0 + ... + |x_n|^0 \\] In other words, an element contributes either a value of 1 or 0 to \\(l_0\\). Anything but an exact zero contributes a value of 1 - that's pretty cool. Sometimes it helps to think about density, the fraction of non-zero elements (NNZ divided by the tensor size), which is sparsity's complement: \\[ density = 1 - sparsity \\] You can use distiller.sparsity and distiller.density to query a PyTorch tensor's sparsity and density.","title":"Let's define sparsity"},{"location":"pruning.html#what-is-weights-pruning","text":"Weights pruning, or model pruning, is a set of methods to increase the sparsity (amount of zero-valued elements in a tensor) of a network's weights. In general, the term 'parameters' refers to both weights and bias tensors of a model. Biases are rarely, if ever, pruned because there are very few bias elements compared to weights elements, and it is just not worth the trouble. Pruning requires a criterion for choosing which elements to prune - this is called the pruning criterion . The most common pruning criterion is the absolute value of each element: the element's absolute value is compared to some threshold value, and if it is below the threshold the element is set to zero (i.e. pruned) . This is implemented by the distiller.MagnitudeParameterPruner class. 
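As a toy illustration of the magnitude criterion (a sketch, not the MagnitudeParameterPruner implementation; the threshold value is arbitrary):

import torch

weight = torch.randn(64, 128)
threshold = 0.1                             # arbitrary magnitude threshold
weight[weight.abs() < threshold] = 0.0      # elements below the threshold are pruned

# Sparsity and density as defined earlier: the fraction of exact zeros, and its complement.
sparsity = (weight == 0).float().mean().item()
density = 1.0 - sparsity
print(sparsity, density)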
The idea behind this method is that weights with small \\(l_1\\)-norms (absolute value) contribute little to the final result (low saliency), so they are less important and can be removed. A related idea motivating pruning is that models are over-parametrized and contain redundant logic and features. Therefore, some of these redundancies can be removed by setting their weights to zero. And yet another way to think of pruning is to phrase it as a search for a set of weights with as many zeros as possible, which still produces acceptable inference accuracies compared to the dense-model (non-pruned model). Another way to look at it is to imagine that because of the very high-dimensionality of the parameter space, the immediate space around the dense-model's solution likely contains some sparse solutions, and we want to find these sparse solutions.","title":"What is weights pruning?"},{"location":"pruning.html#pruning-schedule","text":"The most straightforward way to prune is to take a trained model and prune it once; this is also called one-shot pruning . In Learning both Weights and Connections for Efficient Neural Networks Song Han et al. show that this is surprisingly effective, but also leaves a lot of potential sparsity untapped. The surprise is what they call the \"free lunch\" effect: \"reducing 2x the connections without losing accuracy even without retraining.\" However, they also note that when employing a pruning-followed-by-retraining regimen, they can achieve much better results (higher sparsity at no accuracy loss). This is called iterative pruning , and the retraining that follows pruning is often referred to as fine-tuning . How the pruning criterion changes between iterations, how many iterations we perform and how often, and which tensors are pruned - this is collectively called the pruning schedule . We can think of iterative pruning as repeatedly learning which weights are important, removing the least important ones based on some importance criterion, and then retraining the model to let it \"recover\" from the pruning by adjusting the remaining weights. At each iteration, we prune more weights. The decision of when to stop pruning is also expressed in the schedule, and it depends on the pruning algorithm. For example, if we are trying to achieve a specific sparsity level, then we stop when the pruning achieves that level. And if we are pruning weight structures in order to reduce the required compute budget, then we stop the pruning when this compute reduction is achieved. Distiller supports expressing the pruning schedule as a YAML file (which is then executed by an instance of a PruningScheduler).","title":"Pruning schedule"},{"location":"pruning.html#pruning-granularity","text":"Pruning individual weight elements is called element-wise pruning , and it is also sometimes referred to as fine-grained pruning. Coarse-grained pruning - also referred to as structured pruning , group pruning , or block pruning - is pruning entire groups of elements which have some significance. Groups come in various shapes and sizes, but an easy-to-visualize group-pruning is filter-pruning, in which entire filters are removed.","title":"Pruning granularity"},{"location":"pruning.html#sensitivity-analysis","text":"The hard part about inducing sparsity via pruning is determining what threshold, or sparsity level, to use for each layer's tensors. Sensitivity analysis is a method that tries to help us rank the tensors by their sensitivity to pruning. 
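In pseudo-Python, the per-layer analysis described next amounts to the loop below. This is a sketch only: prune_layer_to and evaluate_top1 are hypothetical helpers, and the sparsity grid is arbitrary.

import copy

def sensitivity_analysis(model, layer_names, sparsities=(0.1, 0.3, 0.5, 0.7, 0.9)):
    # For each layer, prune it alone to several sparsity levels and record the accuracy.
    results = {}
    for name in layer_names:
        for sparsity in sparsities:
            pruned = copy.deepcopy(model)             # leave all other layers intact
            prune_layer_to(pruned, name, sparsity)    # hypothetical pruning helper
            results[(name, sparsity)] = evaluate_top1(pruned)  # hypothetical evaluation helper
    return results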
The idea is to set the pruning level (percentage) of a specific layer, and then to prune once, run an evaluation on the test dataset and record the accuracy score. We do this for all of the parameterized layers, and for each layer we examine several sparsity levels. This should teach us about the \"sensitivity\" of each of the layers to pruning. The evaluated model should be trained to maximum accuracy before running the analysis, because we aim to understand the behavior of the trained model's performance in relation to pruning of a specific weights tensor. Much as we can prune structures, we can also perform sensitivity analysis on structures. Distiller implements element-wise pruning sensitivity analysis using the \\(l_1\\)-norm of individual elements, and filter-wise pruning sensitivity analysis using the mean \\(l_1\\)-norm of filters. The authors of Pruning Filters for Efficient ConvNets describe how they do sensitivity analysis: \"To understand the sensitivity of each layer, we prune each layer independently and evaluate the resulting pruned network\u2019s accuracy on the validation set. Figure 2(b) shows that layers that maintain their accuracy as filters are pruned away correspond to layers with larger slopes in Figure 2(a). On the contrary, layers with relatively flat slopes are more sensitive to pruning. We empirically determine the number of filters to prune for each layer based on their sensitivity to pruning. For deep networks such as VGG-16 or ResNets, we observe that layers in the same stage (with the same feature map size) have a similar sensitivity to pruning. To avoid introducing layer-wise meta-parameters, we use the same pruning ratio for all layers in the same stage. For layers that are sensitive to pruning, we prune a smaller percentage of these layers or completely skip pruning them.\" The diagram below shows the results of running an element-wise sensitivity analysis on Alexnet, using Distiller's perform_sensitivity_analysis utility function. As reported by Song Han, and exhibited in the diagram, in Alexnet the feature-detecting layers (convolution layers) are more sensitive to pruning, and their sensitivity drops the deeper they are. The fully-connected layers are much less sensitive, which is great, because that's where most of the parameters are.","title":"Sensitivity analysis"},{"location":"pruning.html#references","text":"Song Han, Jeff Pool, John Tran, William J. Dally . Learning both Weights and Connections for Efficient Neural Networks , arXiv:1607.04381v2, 2015. Hao Li, Asim Kadav, Igor Durdanovic, Hanan Samet, Hans Peter Graf . Pruning Filters for Efficient ConvNets , arXiv:1608.08710v3, 2017.","title":"References"},{"location":"quantization.html","text":"Quantization Quantization refers to the process of reducing the number of bits that represent a number. In the context of deep learning, the predominant numerical format used for research and for deployment has so far been 32-bit floating point, or FP32. However, the desire for reduced bandwidth and compute requirements of deep learning models has driven research into using lower-precision numerical formats. It has been extensively demonstrated that weights and activations can be represented using 8-bit integers (or INT8) without incurring significant loss in accuracy. The use of even lower bit-widths, such as 4/2/1-bits, is an active field of research that has also shown great progress. Note that this discussion is on quantization only in the context of more efficient inference. 
Using lower-precision numerics for more efficient training is currently out of scope. Motivation: Overall Efficiency The most obvious benefit from quantization is significantly reduced bandwidth and storage . For instance, using INT8 for weights and activations consumes 4x less overall bandwidth compared to FP32. Additionally, integer compute is faster than floating point compute. It is also much more area and energy efficient : INT8 Operation Energy Saving vs FP32 Area Saving vs FP32 Add 30x 116x Multiply 18.5x 27x ( Dally, 2015 ) Note that very aggressive quantization can yield even more efficiency. If weights are binary (-1, 1) or ternary (-1, 0, 1 using 2-bits), then convolution and fully-connected layers can be computed with additions and subtractions only, removing multiplications completely. If activations are binary as well, then additions can also be removed, in favor of bitwise operations ( Rastegari et al., 2016 ). Integer vs. FP32 There are two main attributes when discussing a numerical format. The first is dynamic range , which refers to the range of representable numbers. The second one is how many values can be represented within the dynamic range, which in turn determines the precision / resolution of the format (the distance between two numbers). For all integer formats, the dynamic range is \\([-2^{n-1} .. 2^{n-1}-1]\\) , where \\(n\\) is the number of bits. So for INT8 the range is \\([-128 .. 127]\\) , and for INT4 it is \\([-8 .. 7]\\) (we're limiting ourselves to signed integers for now). The number of representable values is \\(2^n\\) . Contrast that with FP32, where the dynamic range is \\(\\pm 3.4 \\times 10^{38}\\) , and approximately \\(4.2 \\times 10^9\\) values can be represented. We can immediately see that FP32 is much more versatile , in that it is able to represent a wide range of distributions accurately. This is a nice property for deep learning models, where the distributions of weights and activations are usually very different (at least in dynamic range). In addition, the dynamic range can differ between layers in the model. In order to be able to represent these different distributions with an integer format, a scale factor is used to map the dynamic range of the tensor to the integer format range. But we are still left with the issue of having a significantly lower number of representable values, that is - much lower resolution. Note that this scale factor is, in most cases, a floating-point number. Hence, even when using integer numerics, some floating-point computations remain. Courbariaux et al., 2014 scale using only shifts, eliminating the floating point operation. In GEMMLWOP , the FP32 scale factor is approximated using an integer or fixed-point multiplication followed by a shift operation. In many cases the effect of this approximation on accuracy is negligible. Avoiding Overflows Convolution and fully connected layers involve storing intermediate results in accumulators. Due to the limited dynamic range of integer formats, if we used the same bit-width for the weights and activations, and for the accumulators, we would likely overflow very quickly. Therefore, accumulators are usually implemented with higher bit-widths. The result of multiplying two \\(n\\)-bit integers is, at most, a \\(2n\\)-bit number. In convolution layers, such multiplications are accumulated \\(c \\cdot k^2\\) times, where \\(c\\) is the number of input channels and \\(k\\) is the kernel width (assuming a square kernel). Hence, to avoid overflowing, the accumulator should be \\(2n + M\\) bits wide, where \\(M\\) is at least \\(log_2(c \\cdot k^2)\\) . 
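For example, under the assumption of an INT8 layer with 256 input channels and 3x3 kernels (arbitrary numbers, chosen only for illustration):

import math

n, c, k = 8, 256, 3                     # bit-width, input channels, kernel width
M = math.ceil(math.log2(c * k ** 2))    # log2(256 * 9) = log2(2304) ~ 11.17, so M = 12
acc_bits = 2 * n + M                    # 16 + 12 = 28 bits; a 32-bit accumulator suffices
print(M, acc_bits)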
In many cases 32-bit accumulators are used, however for INT4 and lower it might be possible to use fewer than 32 bits, depending on the expected use cases and layer widths. \"Conservative\" Quantization: INT8 In many cases, taking a model trained for FP32 and directly quantizing it to INT8, without any re-training, can result in a relatively low loss of accuracy (which may or may not be acceptable, depending on the use case). Some fine-tuning can further improve the accuracy ( Gysel et al., 2018 ). As mentioned above, a scale factor is used to adapt the dynamic range of the tensor at hand to that of the integer format. This scale factor needs to be calculated per-layer per-tensor. The simplest way is to map the min/max values of the float tensor to the min/max of the integer format. For weights and biases this is easy, as they are set once training is complete. For activations, the min/max float values can be obtained \"online\" during inference, or \"offline\". Offline means gathering activations statistics before deploying the model, either during training or by running a few \"calibration\" batches on the trained FP32 model. Based on these gathered statistics, the scale factors are calculated and are fixed once the model is deployed. This method has the risk of encountering values outside the previously observed ranges at runtime. These values will be clipped, which might lead to accuracy degradation. Online means calculating the min/max values for each tensor dynamically during runtime. In this method clipping cannot occur, however the added computation resources required to calculate the min/max values at runtime might be prohibitive. It is important to note, however, that the full float range of an activations tensor usually includes elements which are statistically outliers. These values can be discarded by using a narrower min/max range, effectively allowing some clipping to occur in favor of increasing the resolution provided to the part of the distribution containing most of the information. A simple method which can yield nice results is to simply use an average of the observed min/max values instead of the actual values. Alternatively, statistical measures can be used to intelligently select where to clip the original range in order to preserve as much information as possible ( Migacz, 2017 ). Going further, Banner et al., 2018 have proposed a method for analytically computing the clipping value under certain conditions. Another possible optimization point is scale-factor scope . The most common way is to use a single scale-factor per-layer, but it is also possible to calculate a scale-factor per-channel. This can be beneficial if the weight distributions vary greatly between channels. When used to directly quantize a model without re-training, as described so far, this method is commonly referred to as post-training quantization . However, recent publications have shown that there are cases where post-training quantization to INT8 doesn't preserve accuracy ( Benoit et al., 2018 , Krishnamoorthi, 2018 ). Namely, smaller models such as MobileNet seem to not respond as well to post-training quantization, presumably due to their smaller representational capacity. In such cases, quantization-aware training is used. \"Aggressive\" Quantization: INT4 and Lower Naively quantizing an FP32 model to INT4 and lower usually incurs significant accuracy degradation. Many works have tried to mitigate this effect. 
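Before turning to those methods, the min/max mapping described in the INT8 section above can be sketched as follows. This is an illustration of the general idea only, not Distiller's quantizer:

import torch

def quantize_minmax(x, num_bits=8):
    qmin, qmax = -(2 ** (num_bits - 1)), 2 ** (num_bits - 1) - 1
    scale = (x.max() - x.min()) / (qmax - qmin)         # FP32 scale factor
    zero_point = qmin - torch.round(x.min() / scale)    # shifts x.min onto qmin
    q = torch.clamp(torch.round(x / scale) + zero_point, qmin, qmax)
    return q, scale, zero_point

def dequantize(q, scale, zero_point):
    return (q - zero_point) * scale                     # back to (approximate) float

x = torch.randn(1000)
q, s, z = quantize_minmax(x)
print((dequantize(q, s, z) - x).abs().max())            # worst-case quantization error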
Such works usually employ one or more of the following concepts in order to improve model accuracy: Training / Re-Training : For INT4 and lower, training is required in order to obtain reasonable accuracy. The training loop is modified to take quantization into account. See details in the next section . Zhou S et al., 2016 have shown that bootstrapping the quantized model with trained FP32 weights leads to higher accuracy, as opposed to training from scratch. Other methods require a trained FP32 model, either as a starting point ( Zhou A et al., 2017 ), or as a teacher network in a knowledge distillation training setup (see here ). Replacing the activation function : The most common activation function in vision models is ReLU, which is unbounded. That is - its dynamic range is not limited for positive inputs. This is very problematic for INT4 and below due to the very limited range and resolution. Therefore, most methods replace ReLU with another function which is bounded. In some cases a clipping function with hard-coded values is used ( Zhou S et al., 2016 , Mishra et al., 2018 ). Another method learns the clipping value per layer, with better results ( Choi et al., 2018 ). Once the clipping value is set, the scale factor used for quantization is also set, and no further calibration steps are required (as opposed to INT8 methods described above). Modifying network structure : Mishra et al., 2018 try to compensate for the loss of information due to quantization by using wider layers (more channels). Lin et al., 2017 proposed a binary quantization method in which a single FP32 convolution is replaced with multiple binary convolutions, each scaled to represent a different \"base\", covering a larger dynamic range overall. First and last layer : Many methods do not quantize the first and last layer of the model. It has been observed by Han et al., 2015 that the first convolutional layer is more sensitive to weights pruning, and some quantization works cite the same reason and show it empirically ( Zhou S et al., 2016 , Choi et al., 2018 ). Some works also note that these layers usually constitute a very small portion of the overall computation within the model, further reducing the motivation to quantize them ( Rastegari et al., 2016 ). Most methods keep the first and last layers at FP32. However, Choi et al., 2018 showed that \"conservative\" quantization of these layers, e.g. to INT8, does not reduce accuracy. Iterative quantization : Most methods quantize the entire model at once. Zhou A et al., 2017 employ an iterative method, which starts with a trained FP32 baseline, and quantizes only a portion of the model at a time, followed by several epochs of re-training to recover the accuracy loss from quantization. Mixed Weights and Activations Precision : It has been observed that activations are more sensitive to quantization than weights ( Zhou S et al., 2016 ). Hence it is not uncommon to see experiments with activations quantized to a higher precision compared to weights. Some works have focused solely on quantizing weights, keeping the activations at FP32 ( Li et al., 2016 , Zhu et al., 2016 ). Quantization-Aware Training As mentioned above, in order to minimize the loss of accuracy from \"aggressive\" quantization, many methods that target INT4 and lower (and in some cases for INT8 as well) involve training the model in a way that considers the quantization. This means training with quantization of weights and activations \"baked\" into the training procedure. 
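One way to sketch this \"baking in\" is a fake-quantization op whose backward pass uses the straight-through estimator discussed further down. This is an illustration under that assumption, not Distiller's implementation:

import torch

class FakeQuantSTE(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, num_bits):
        qmax = 2 ** (num_bits - 1) - 1
        scale = x.abs().max() / qmax             # simple symmetric scale factor
        return torch.round(x / scale) * scale    # quantize, then dequantize (simulated quantization)

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output, None                 # STE: pass the gradient through round() as-is

weights_fp = torch.randn(10, requires_grad=True)  # the full-precision master copy
weights_q = FakeQuantSTE.apply(weights_fp, 8)     # discrete-valued weights used by the layer
loss = weights_q.sum()
loss.backward()                                   # weights_fp.grad is all ones, thanks to the STE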
The training graph usually looks like this: A full-precision copy of the weights is maintained throughout the training process (\"weights_fp\" in the diagram). Its purpose is to accumulate the small changes from the gradients without loss of precision (Note that the quantization of the weights is an integral part of the training graph, meaning that we back-propagate through it as well). Once the model is trained, only the quantized weights are used for inference. In the diagram we show \"layer N\" as the conv + batch-norm + activation combination, but the same applies to fully-connected layers, element-wise operations, etc. During training, the operations within \"layer N\" can still run in full precision, with the \"quantize\" operations in the boundaries ensuring discrete-valued weights and activations. This is sometimes called \"simulated quantization\". Straight-Through Estimator An important question in this context is how to back-propagate through the quantization functions. These functions are discrete-valued, hence their derivative is 0 almost everywhere. So, using their gradients as-is would severely hinder the learning process. An approximation commonly used to overcome this issue is the \"straight-through estimator\" (STE) ( Hinton et al., 2012 , Bengio, 2013 ), which simply passes the gradient through these functions as-is. References William Dally . High-Performance Hardware for Machine Learning. Tutorial, NIPS, 2015 Mohammad Rastegari, Vicente Ordonez, Joseph Redmon and Ali Farhadi . XNOR-Net: ImageNet Classification Using Binary Convolutional Neural Networks. ECCV, 2016 Matthieu Courbariaux, Yoshua Bengio and Jean-Pierre David . Training deep neural networks with low precision multiplications. arxiv:1412.7024 Philipp Gysel, Jon Pimentel, Mohammad Motamedi and Soheil Ghiasi . Ristretto: A Framework for Empirical Study of Resource-Efficient Inference in Convolutional Neural Networks. IEEE Transactions on Neural Networks and Learning Systems, 2018 Szymon Migacz . 8-bit Inference with TensorRT. GTC San Jose, 2017 Shuchang Zhou, Zekun Ni, Xinyu Zhou, He Wen, Yuxin Wu and Yuheng Zou . DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients. arxiv:1606.06160 Aojun Zhou, Anbang Yao, Yiwen Guo, Lin Xu and Yurong Chen . Incremental Network Quantization: Towards Lossless CNNs with Low-precision Weights. ICLR, 2017 Asit Mishra, Eriko Nurvitadhi, Jeffrey J Cook and Debbie Marr . WRPN: Wide Reduced-Precision Networks. ICLR, 2018 Jungwook Choi, Zhuo Wang, Swagath Venkataramani, Pierce I-Jen Chuang, Vijayalakshmi Srinivasan and Kailash Gopalakrishnan . PACT: Parameterized Clipping Activation for Quantized Neural Networks. arxiv:1805.06085 Xiaofan Lin, Cong Zhao and Wei Pan . Towards Accurate Binary Convolutional Neural Network. NIPS, 2017 Song Han, Jeff Pool, John Tran and William Dally . Learning both Weights and Connections for Efficient Neural Network. NIPS, 2015 Fengfu Li, Bo Zhang and Bin Liu . Ternary Weight Networks. arxiv:1605.04711 Chenzhuo Zhu, Song Han, Huizi Mao and William J. Dally . Trained Ternary Quantization. arxiv:1612.01064 Yoshua Bengio, Nicholas Leonard and Aaron Courville . Estimating or Propagating Gradients Through Stochastic Neurons for Conditional Computation. arxiv:1308.3432 Geoffrey Hinton, Nitish Srivastava, Kevin Swersky, Tijmen Tieleman and Abdelrahman Mohamed . Neural Networks for Machine Learning. 
Coursera, video lectures, 2012 Benoit Jacob, Skirmantas Kligys, Bo Chen, Menglong Zhu, Matthew Tang, Andrew Howard, Hartwig Adam and Dmitry Kalenichenko . Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference. ECCV, 2018 Raghuraman Krishnamoorthi . Quantizing deep convolutional networks for efficient inference: A whitepaper arxiv:1806.08342 Ron Banner, Yury Nahshan, Elad Hoffer and Daniel Soudry . ACIQ: Analytical Clipping for Integer Quantization of neural networks arxiv:1810.05723","title":"Quantization"},{"location":"quantization.html#quantization","text":"Quantization refers to the process of reducing the number of bits that represent a number. In the context of deep learning, the predominant numerical format used for research and for deployment has so far been 32-bit floating point, or FP32. However, the desire for reduced bandwidth and compute requirements of deep learning models has driven research into using lower-precision numerical formats. It has been extensively demonstrated that weights and activations can be represented using 8-bit integers (or INT8) without incurring significant loss in accuracy. The use of even lower bit-widths, such as 4/2/1-bits, is an active field of research that has also shown great progress. Note that this discussion is on quantization only in the context of more efficient inference. Using lower-precision numerics for more efficient training is currently out of scope.","title":"Quantization"},{"location":"quantization.html#motivation-overall-efficiency","text":"The most obvious benefit from quantization is significantly reduced bandwidth and storage . For instance, using INT8 for weights and activations consumes 4x less overall bandwidth compared to FP32. Additionally, integer compute is faster than floating point compute. It is also much more area and energy efficient : INT8 Operation Energy Saving vs FP32 Area Saving vs FP32 Add 30x 116x Multiply 18.5x 27x ( Dally, 2015 ) Note that very aggressive quantization can yield even more efficiency. If weights are binary (-1, 1) or ternary (-1, 0, 1 using 2-bits), then convolution and fully-connected layers can be computed with additions and subtractions only, removing multiplications completely. If activations are binary as well, then additions can also be removed, in favor of bitwise operations ( Rastegari et al., 2016 ).","title":"Motivation: Overall Efficiency"},{"location":"quantization.html#integer-vs-fp32","text":"There are two main attributes when discussing a numerical format. The first is dynamic range , which refers to the range of representable numbers. The second one is how many values can be represented within the dynamic range, which in turn determines the precision / resolution of the format (the distance between two numbers). For all integer formats, the dynamic range is \\([-2^{n-1} .. 2^{n-1}-1]\\) , where \\(n\\) is the number of bits. So for INT8 the range is \\([-128 .. 127]\\) , and for INT4 it is \\([-8 .. 7]\\) (we're limiting ourselves to signed integers for now). The number of representable values is \\(2^n\\) . Contrast that with FP32, where the dynamic range is \\(\\pm 3.4 \\times 10^{38}\\) , and approximately \\(4.2 \\times 10^9\\) values can be represented. We can immediately see that FP32 is much more versatile , in that it is able to represent a wide range of distributions accurately. This is a nice property for deep learning models, where the distributions of weights and activations are usually very different (at least in dynamic range). 
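These ranges are easy to verify (an illustration only):

def int_range(n):                  # signed n-bit integer dynamic range
    return -(2 ** (n - 1)), 2 ** (n - 1) - 1

print(int_range(8))                # (-128, 127)
print(int_range(4))                # (-8, 7)
print(2 ** 8, 2 ** 4)              # representable values: 256 and 16
print(2 ** 32)                     # 4294967296, i.e. roughly 4.2 x 10^9 FP32 bit patterns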
In addition, the dynamic range can differ between layers in the model. In order to be able to represent these different distributions with an integer format, a scale factor is used to map the dynamic range of the tensor to the integer format range. But we are still left with the issue of having a significantly lower number of representable values, that is - much lower resolution. Note that this scale factor is, in most cases, a floating-point number. Hence, even when using integer numerics, some floating-point computations remain. Courbariaux et al., 2014 scale using only shifts, eliminating the floating point operation. In GEMMLWOP , the FP32 scale factor is approximated using an integer or fixed-point multiplication followed by a shift operation. In many cases the effect of this approximation on accuracy is negligible.","title":"Integer vs. FP32"},{"location":"quantization.html#avoiding-overflows","text":"Convolution and fully connected layers involve storing intermediate results in accumulators. Due to the limited dynamic range of integer formats, if we used the same bit-width for the weights and activations, and for the accumulators, we would likely overflow very quickly. Therefore, accumulators are usually implemented with higher bit-widths. The result of multiplying two \\(n\\)-bit integers is, at most, a \\(2n\\)-bit number. In convolution layers, such multiplications are accumulated \\(c \\cdot k^2\\) times, where \\(c\\) is the number of input channels and \\(k\\) is the kernel width (assuming a square kernel). Hence, to avoid overflowing, the accumulator should be \\(2n + M\\) bits wide, where \\(M\\) is at least \\(log_2(c \\cdot k^2)\\) . In many cases 32-bit accumulators are used, however for INT4 and lower it might be possible to use fewer than 32 bits, depending on the expected use cases and layer widths.","title":"Avoiding Overflows"},{"location":"quantization.html#conservative-quantization-int8","text":"In many cases, taking a model trained for FP32 and directly quantizing it to INT8, without any re-training, can result in a relatively low loss of accuracy (which may or may not be acceptable, depending on the use case). Some fine-tuning can further improve the accuracy ( Gysel et al., 2018 ). As mentioned above, a scale factor is used to adapt the dynamic range of the tensor at hand to that of the integer format. This scale factor needs to be calculated per-layer per-tensor. The simplest way is to map the min/max values of the float tensor to the min/max of the integer format. For weights and biases this is easy, as they are set once training is complete. For activations, the min/max float values can be obtained \"online\" during inference, or \"offline\". Offline means gathering activations statistics before deploying the model, either during training or by running a few \"calibration\" batches on the trained FP32 model. Based on these gathered statistics, the scale factors are calculated and are fixed once the model is deployed. This method has the risk of encountering values outside the previously observed ranges at runtime. These values will be clipped, which might lead to accuracy degradation. Online means calculating the min/max values for each tensor dynamically during runtime. In this method clipping cannot occur, however the added computation resources required to calculate the min/max values at runtime might be prohibitive. It is important to note, however, that the full float range of an activations tensor usually includes elements which are statistically outliers. 
These values can be discarded by using a narrower min/max range, effectively allowing some clipping to occur in favor of increasing the resolution provided to the part of the distribution containing most of the information. A simple method which can yield nice results is to simply use an average of the observed min/max values instead of the actual values. Alternatively, statistical measures can be used to intelligently select where to clip the original range in order to preserve as much information as possible ( Migacz, 2017 ). Going further, Banner et al., 2018 have proposed a method for analytically computing the clipping value under certain conditions. Another possible optimization point is scale-factor scope . The most common way is to use a single scale-factor per-layer, but it is also possible to calculate a scale-factor per-channel. This can be beneficial if the weight distributions vary greatly between channels. When used to directly quantize a model without re-training, as described so far, this method is commonly referred to as post-training quantization . However, recent publications have shown that there are cases where post-training quantization to INT8 doesn't preserve accuracy ( Benoit et al., 2018 , Krishnamoorthi, 2018 ). Namely, smaller models such as MobileNet seem to not respond as well to post-training quantization, presumably due to their smaller representational capacity. In such cases, quantization-aware training is used.","title":"\"Conservative\" Quantization: INT8"},{"location":"quantization.html#aggressive-quantization-int4-and-lower","text":"Naively quantizing an FP32 model to INT4 and lower usually incurs significant accuracy degradation. Many works have tried to mitigate this effect. They usually employ one or more of the following concepts in order to improve model accuracy: Training / Re-Training : For INT4 and lower, training is required in order to obtain reasonable accuracy. The training loop is modified to take quantization into account. See details in the next section . Zhou S et al., 2016 have shown that bootstrapping the quantized model with trained FP32 weights leads to higher accuracy, as opposed to training from scratch. Other methods require a trained FP32 model, either as a starting point ( Zhou A et al., 2017 ), or as a teacher network in a knowledge distillation training setup (see here ). Replacing the activation function : The most common activation function in vision models is ReLU, which is unbounded. That is - its dynamic range is not limited for positive inputs. This is very problematic for INT4 and below due to the very limited range and resolution. Therefore, most methods replace ReLU with another function which is bounded. In some cases a clipping function with hard-coded values is used ( Zhou S et al., 2016 , Mishra et al., 2018 ). Another method learns the clipping value per layer, with better results ( Choi et al., 2018 ). Once the clipping value is set, the scale factor used for quantization is also set, and no further calibration steps are required (as opposed to INT8 methods described above). Modifying network structure : Mishra et al., 2018 try to compensate for the loss of information due to quantization by using wider layers (more channels). Lin et al., 2017 proposed a binary quantization method in which a single FP32 convolution is replaced with multiple binary convolutions, each scaled to represent a different \"base\", covering a larger dynamic range overall. 
First and last layer : Many methods do not quantize the first and last layer of the model. It has been observed by Han et al., 2015 that the first convolutional layer is more sensitive to weights pruning, and some quantization works cite the same reason and show it empirically ( Zhou S et al., 2016 , Choi et al., 2018 ). Some works also note that these layers usually constitute a very small portion of the overall computation within the model, further reducing the motivation to quantize them ( Rastegari et al., 2016 ). Most methods keep the first and last layers at FP32. However, Choi et al., 2018 showed that \"conservative\" quantization of these layers, e.g. to INT8, does not reduce accuracy. Iterative quantization : Most methods quantize the entire model at once. Zhou A et al., 2017 employ an iterative method, which starts with a trained FP32 baseline, and quantizes only a portion of the model at a time, followed by several epochs of re-training to recover the accuracy loss from quantization. Mixed Weights and Activations Precision : It has been observed that activations are more sensitive to quantization than weights ( Zhou S et al., 2016 ). Hence it is not uncommon to see experiments with activations quantized to a higher precision compared to weights. Some works have focused solely on quantizing weights, keeping the activations at FP32 ( Li et al., 2016 , Zhu et al., 2016 ).","title":"\"Aggressive\" Quantization: INT4 and Lower"},{"location":"quantization.html#quantization-aware-training","text":"As mentioned above, in order to minimize the loss of accuracy from \"aggressive\" quantization, many methods that target INT4 and lower (and in some cases for INT8 as well) involve training the model in a way that considers the quantization. This means training with quantization of weights and activations \"baked\" into the training procedure. The training graph usually looks like this: A full-precision copy of the weights is maintained throughout the training process (\"weights_fp\" in the diagram). Its purpose is to accumulate the small changes from the gradients without loss of precision (Note that the quantization of the weights is an integral part of the training graph, meaning that we back-propagate through it as well). Once the model is trained, only the quantized weights are used for inference. In the diagram we show \"layer N\" as the conv + batch-norm + activation combination, but the same applies to fully-connected layers, element-wise operations, etc. During training, the operations within \"layer N\" can still run in full precision, with the \"quantize\" operations in the boundaries ensuring discrete-valued weights and activations. This is sometimes called \"simulated quantization\".","title":"Quantization-Aware Training"},{"location":"quantization.html#straight-through-estimator","text":"An important question in this context is how to back-propagate through the quantization functions. These functions are discrete-valued, hence their derivative is 0 almost everywhere. So, using their gradients as-is would severely hinder the learning process. An approximation commonly used to overcome this issue is the \"straight-through estimator\" (STE) ( Hinton et al., 2012 , Bengio, 2013 ), which simply passes the gradient through these functions as-is.","title":"Straight-Through Estimator"},{"location":"quantization.html#references","text":"William Dally . High-Performance Hardware for Machine Learning. Tutorial, NIPS, 2015 Mohammad Rastegari, Vicente Ordonez, Joseph Redmon and Ali Farhadi . 
XNOR-Net: ImageNet Classification Using Binary Convolutional Neural Networks. ECCV, 2016 Matthieu Courbariaux, Yoshua Bengio and Jean-Pierre David . Training deep neural networks with low precision multiplications. arxiv:1412.7024 Philipp Gysel, Jon Pimentel, Mohammad Motamedi and Soheil Ghiasi . Ristretto: A Framework for Empirical Study of Resource-Efficient Inference in Convolutional Neural Networks. IEEE Transactions on Neural Networks and Learning Systems, 2018 Szymon Migacz . 8-bit Inference with TensorRT. GTC San Jose, 2017 Shuchang Zhou, Zekun Ni, Xinyu Zhou, He Wen, Yuxin Wu and Yuheng Zou . DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients. arxiv:1606.06160 Aojun Zhou, Anbang Yao, Yiwen Guo, Lin Xu and Yurong Chen . Incremental Network Quantization: Towards Lossless CNNs with Low-precision Weights. ICLR, 2017 Asit Mishra, Eriko Nurvitadhi, Jeffrey J Cook and Debbie Marr . WRPN: Wide Reduced-Precision Networks. ICLR, 2018 Jungwook Choi, Zhuo Wang, Swagath Venkataramani, Pierce I-Jen Chuang, Vijayalakshmi Srinivasan and Kailash Gopalakrishnan . PACT: Parameterized Clipping Activation for Quantized Neural Networks. arxiv:1805.06085 Xiaofan Lin, Cong Zhao and Wei Pan . Towards Accurate Binary Convolutional Neural Network. NIPS, 2017 Song Han, Jeff Pool, John Tran and William Dally . Learning both Weights and Connections for Efficient Neural Network. NIPS, 2015 Fengfu Li, Bo Zhang and Bin Liu . Ternary Weight Networks. arxiv:1605.04711 Chenzhuo Zhu, Song Han, Huizi Mao and William J. Dally . Trained Ternary Quantization. arxiv:1612.01064 Yoshua Bengio, Nicholas Leonard and Aaron Courville . Estimating or Propagating Gradients Through Stochastic Neurons for Conditional Computation. arxiv:1308.3432 Geoffrey Hinton, Nitish Srivastava, Kevin Swersky, Tijmen Tieleman and Abdelrahman Mohamed . Neural Networks for Machine Learning. Coursera, video lectures, 2012 Benoit Jacob, Skirmantas Kligys, Bo Chen, Menglong Zhu, Matthew Tang, Andrew Howard, Hartwig Adam and Dmitry Kalenichenko . Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference. ECCV, 2018 Raghuraman Krishnamoorthi . Quantizing deep convolutional networks for efficient inference: A whitepaper arxiv:1806.08342 Ron Banner, Yury Nahshan, Elad Hoffer and Daniel Soudry . ACIQ: Analytical Clipping for Integer Quantization of neural networks arxiv:1810.05723","title":"References"},{"location":"regularization.html","text":"Regularization In their book Deep Learning Ian Goodfellow et al. define regularization as \"any modification we make to a learning algorithm that is intended to reduce its generalization error, but not its training error.\" PyTorch's optimizers use \\(l_2\\) parameter regularization to limit the capacity of models (i.e. reduce the variance). In general, we can write this as: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R R(W) \\] And specifically, \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R \\lVert W \\rVert_2^2 \\] Where W is the collection of all weight elements in the network (i.e. this is model.parameters()), \\(loss(W;x;y)\\) is the total training loss, and \\(loss_D(W)\\) is the data loss (i.e. the error of the objective function, also called the loss function, or criterion in the Distiller sample image classifier compression application). optimizer = optim.SGD(model.parameters(), lr = 0.01, momentum=0.9, weight_decay=0.0001) criterion = nn.CrossEntropyLoss() ... 
for input, target in dataset: optimizer.zero_grad() output = model(input) loss = criterion(output, target) loss.backward() optimizer.step() \\(\\lambda_R\\) is a scalar called the regularization strength , and it balances the data error and the regularization error. In PyTorch, this is the weight_decay argument. \\(\\lVert W \\rVert_2^2\\) is the square of the \\(l_2\\)-norm of W, and as such it is a magnitude , or sizing, of the weights tensor. \\[ \\lVert W \\rVert_2^2 = \\sum_{l=1}^{L} \\sum_{i=1}^{n} |w_{l,i}|^2 \\;\\;where \\;n = torch.numel(w_l) \\] \\(L\\) is the number of layers in the network, and the notation above uses 1-based numbering to keep the presentation simple. The qualitative differences between the \\(l_2\\)-norm and the squared \\(l_2\\)-norm are explained in Deep Learning . Sparsity and Regularization We mention regularization because there is an interesting interaction between regularization and some DNN sparsity-inducing methods. In Dense-Sparse-Dense (DSD) , Song Han et al. use pruning as a regularizer to improve a model's accuracy: \"Sparsity is a powerful form of regularization. Our intuition is that, once the network arrives at a local minimum given the sparsity constraint, relaxing the constraint gives the network more freedom to escape the saddle point and arrive at a higher-accuracy local minimum.\" Regularization can also be used to induce sparsity. To induce element-wise sparsity we can use the \\(l_1\\)-norm, \\(\\lVert W \\rVert_1\\). \\[ \\lVert W \\rVert_1 = l_1(W) = \\sum_{i=1}^{|W|} |w_i| \\] \\(l_2\\)-norm regularization reduces overfitting and improves a model's accuracy by shrinking large parameters, but it does not force these parameters to absolute zero. \\(l_1\\)-norm regularization sets some of the parameter elements to zero, therefore limiting the model's capacity while making the model simpler. This is sometimes referred to as feature selection and gives us another interpretation of pruning. One of Distiller's Jupyter notebooks explains how the \\(l_1\\)-norm regularizer induces sparsity, and how it interacts with \\(l_2\\)-norm regularization. If we configure weight_decay to zero and use \\(l_1\\)-norm regularization, then we have: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R \\lVert W \\rVert_1 \\] If we use both regularizers, we have: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_{R_2} \\lVert W \\rVert_2^2 + \\lambda_{R_1} \\lVert W \\rVert_1 \\] Class distiller.L1Regularizer implements \\(l_1\\)-norm regularization, and of course, you can also schedule regularization. l1_regularizer = distiller.L1Regularizer(model.parameters()) ... loss = criterion(output, target) + lambda * l1_regularizer() Group Regularization In Group Regularization, we penalize entire groups of parameter elements, instead of individual elements. Therefore, entire groups are either sparsified (i.e. all of the group elements have a value of zero) or not. The group structures have to be pre-defined. To the data loss, and the element-wise regularization (if any), we can add a group-wise regularization penalty. We represent all of the parameter groups in layer \\(l\\) as \\( W_l^{(G)} \\), and we add the penalty of all groups for all layers. It gets a bit messy, but not overly complicated: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R R(W) + \\lambda_g \\sum_{l=1}^{L} R_g(W_l^{(G)}) \\] Let's denote all of the weight elements in group \\(g\\) as \\(w^{(g)}\\). 
\\[ R_g(w^{(g)}) = \\sum_{g=1}^{G} \\lVert w^{(g)} \\rVert_g = \\sum_{g=1}^{G} \\sum_{i=1}^{|w^{(g)}|} {(w_i^{(g)})}^2 \\] where \\(w^{(g)} \\in w^{(l)} \\) and \\( |w^{(g)}| \\) is the number of elements in \\( w^{(g)} \\). \\( \\lambda_g \\sum_{l=1}^{L} R_g(W_l^{(G)}) \\) is called the Group Lasso regularizer. Much as in \\(l_1\\)-norm regularization we sum the magnitudes of all tensor elements, in Group Lasso we sum the magnitudes of element structures (i.e. groups). Group Regularization is also called Block Regularization, Structured Regularization, or coarse-grained sparsity (remember that element-wise sparsity is sometimes referred to as fine-grained sparsity). Group sparsity exhibits regularity (i.e. its shape is regular), and therefore it can be beneficial to improve inference speed. Huizi-et-al-2017 provides an overview of some of the different groups: kernel, channel, filter, layers. Fiber structures such as matrix columns and rows, as well as various shaped structures (block sparsity), and even intra-kernel strided sparsity can also be used. distiller.GroupLassoRegularizer currently implements most of these groups, and you can easily add new groups. References Ian Goodfellow, Yoshua Bengio and Aaron Courville . Deep Learning , MIT Press, 2016. Song Han, Jeff Pool, Sharan Narang, Huizi Mao, Enhao Gong, Shijian Tang, Erich Elsen, Peter Vajda, Manohar Paluri, John Tran, Bryan Catanzaro, William J. Dally . DSD: Dense-Sparse-Dense Training for Deep Neural Networks , arXiv:1607.04381v2, 2017. Huizi Mao, Song Han, Jeff Pool, Wenshuo Li, Xingyu Liu, Yu Wang, William J. Dally . Exploring the Regularity of Sparse Structure in Convolutional Neural Networks , arXiv:1705.08922v3, 2017. Sajid Anwar, Kyuyeon Hwang, and Wonyong Sung . Structured pruning of deep convolutional neural networks , arXiv:1512.08571, 2015","title":"Regularization"},{"location":"regularization.html#regularization","text":"In their book Deep Learning Ian Goodfellow et al. define regularization as \"any modification we make to a learning algorithm that is intended to reduce its generalization error, but not its training error.\" PyTorch's optimizers use \\(l_2\\) parameter regularization to limit the capacity of models (i.e. reduce the variance). In general, we can write this as: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R R(W) \\] And specifically, \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R \\lVert W \\rVert_2^2 \\] Where W is the collection of all weight elements in the network (i.e. this is model.parameters()), \\(loss(W;x;y)\\) is the total training loss, and \\(loss_D(W)\\) is the data loss (i.e. the error of the objective function, also called the loss function, or criterion in the Distiller sample image classifier compression application). optimizer = optim.SGD(model.parameters(), lr = 0.01, momentum=0.9, weight_decay=0.0001) criterion = nn.CrossEntropyLoss() ... for input, target in dataset: optimizer.zero_grad() output = model(input) loss = criterion(output, target) loss.backward() optimizer.step() \\(\\lambda_R\\) is a scalar called the regularization strength , and it balances the data error and the regularization error. In PyTorch, this is the weight_decay argument. \\(\\lVert W \\rVert_2^2\\) is the square of the \\(l_2\\)-norm of W, and as such it is a magnitude , or sizing, of the weights tensor. 
\\[ \\lVert W \\rVert_2^2 = \\sum_{l=1}^{L} \\sum_{i=1}^{n} |w_{l,i}|^2 \\;\\;where \\;n = torch.numel(w_l) \\] \\(L\\) is the number of layers in the network, and the notation above uses 1-based numbering to keep the presentation simple. The qualitative differences between the \\(l_2\\)-norm and the squared \\(l_2\\)-norm are explained in Deep Learning .","title":"Regularization"},{"location":"regularization.html#sparsity-and-regularization","text":"We mention regularization because there is an interesting interaction between regularization and some DNN sparsity-inducing methods. In Dense-Sparse-Dense (DSD) , Song Han et al. use pruning as a regularizer to improve a model's accuracy: \"Sparsity is a powerful form of regularization. Our intuition is that, once the network arrives at a local minimum given the sparsity constraint, relaxing the constraint gives the network more freedom to escape the saddle point and arrive at a higher-accuracy local minimum.\" Regularization can also be used to induce sparsity. To induce element-wise sparsity we can use the \\(l_1\\)-norm, \\(\\lVert W \\rVert_1\\). \\[ \\lVert W \\rVert_1 = l_1(W) = \\sum_{i=1}^{|W|} |w_i| \\] \\(l_2\\)-norm regularization reduces overfitting and improves a model's accuracy by shrinking large parameters, but it does not force these parameters to absolute zero. \\(l_1\\)-norm regularization sets some of the parameter elements to zero, therefore limiting the model's capacity while making the model simpler. This is sometimes referred to as feature selection and gives us another interpretation of pruning. One of Distiller's Jupyter notebooks explains how the \\(l_1\\)-norm regularizer induces sparsity, and how it interacts with \\(l_2\\)-norm regularization. If we configure weight_decay to zero and use \\(l_1\\)-norm regularization, then we have: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R \\lVert W \\rVert_1 \\] If we use both regularizers, we have: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_{R_2} \\lVert W \\rVert_2^2 + \\lambda_{R_1} \\lVert W \\rVert_1 \\] Class distiller.L1Regularizer implements \\(l_1\\)-norm regularization, and of course, you can also schedule regularization. l1_regularizer = distiller.L1Regularizer(model.parameters()) ... loss = criterion(output, target) + lambda * l1_regularizer()","title":"Sparsity and Regularization"},{"location":"regularization.html#group-regularization","text":"In Group Regularization, we penalize entire groups of parameter elements, instead of individual elements. Therefore, entire groups are either sparsified (i.e. all of the group elements have a value of zero) or not. The group structures have to be pre-defined. To the data loss, and the element-wise regularization (if any), we can add a group-wise regularization penalty. We represent all of the parameter groups in layer \\(l\\) as \\( W_l^{(G)} \\), and we add the penalty of all groups for all layers. It gets a bit messy, but not overly complicated: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R R(W) + \\lambda_g \\sum_{l=1}^{L} R_g(W_l^{(G)}) \\] Let's denote all of the weight elements in group \\(g\\) as \\(w^{(g)}\\). \\[ R_g(w^{(g)}) = \\sum_{g=1}^{G} \\lVert w^{(g)} \\rVert_g = \\sum_{g=1}^{G} \\sum_{i=1}^{|w^{(g)}|} {(w_i^{(g)})}^2 \\] where \\(w^{(g)} \\in w^{(l)} \\) and \\( |w^{(g)}| \\) is the number of elements in \\( w^{(g)} \\). \\( \\lambda_g \\sum_{l=1}^{L} R_g(W_l^{(G)}) \\) is called the Group Lasso regularizer. 
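To make the group penalty concrete, here is a minimal PyTorch sketch of a filter-wise ('3D') group penalty. It is an illustration, not Distiller's GroupLassoRegularizer, and it uses the conventional unsquared per-group \\(l_2\\)-norm:

import torch

def group_lasso_penalty(weight):
    # weight shape: (num_filters, channels, kH, kW); each filter is one '3D' group
    groups = weight.view(weight.size(0), -1)  # one row per group
    return groups.norm(p=2, dim=1).sum()      # sum of per-group l2 norms

conv_weight = torch.randn(16, 8, 3, 3, requires_grad=True)
loss = 0.0005 * group_lasso_penalty(conv_weight)  # 0.0005 plays the role of lambda_g
loss.backward()
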
Much as in \\(l_1\\)-norm regularization we sum the magnitudes of all tensor elements, in Group Lasso we sum the magnitudes of element structures (i.e. groups). Group Regularization is also called Block Regularization, Structured Regularization, or coarse-grained sparsity (remember that element-wise sparsity is sometimes referred to as fine-grained sparsity). Group sparsity exhibits regularity (i.e. its shape is regular), and therefore it can be beneficial to improve inference speed. Huizi-et-al-2017 provides an overview of some of the different groups: kernel, channel, filter, layers. Fiber structures such as matrix columns and rows, as well as various shaped structures (block sparsity), and even intra-kernel strided sparsity can also be used. distiller.GroupLassoRegularizer currently implements most of these groups, and you can easily add new groups.","title":"Group Regularization"},{"location":"regularization.html#references","text":"Ian Goodfellow, Yoshua Bengio and Aaron Courville . Deep Learning , MIT Press, 2016. Song Han, Jeff Pool, Sharan Narang, Huizi Mao, Enhao Gong, Shijian Tang, Erich Elsen, Peter Vajda, Manohar Paluri, John Tran, Bryan Catanzaro, William J. Dally . DSD: Dense-Sparse-Dense Training for Deep Neural Networks , arXiv:1607.04381v2, 2017. Huizi Mao, Song Han, Jeff Pool, Wenshuo Li, Xingyu Liu, Yu Wang, William J. Dally . Exploring the Regularity of Sparse Structure in Convolutional Neural Networks , arXiv:1705.08922v3, 2017. Sajid Anwar, Kyuyeon Hwang, and Wonyong Sung . Structured pruning of deep convolutional neural networks , arXiv:1512.08571, 2015","title":"References"},{"location":"schedule.html","text":"Compression scheduler In iterative pruning, we create some kind of pruning regimen that specifies how to prune, and what to prune, at every stage of the pruning and training process. This motivated the design of CompressionScheduler : it needed to be part of the training loop, and to be able to make and implement pruning, regularization and quantization decisions. We wanted to be able to change the particulars of the compression schedule, without touching the code, and settled on using YAML as a container for this specification. We found that when we make many experiments on the same code base, it is easier to maintain all of these experiments if we decouple the differences from the code-base. Therefore, we added to the scheduler support for learning-rate decay scheduling because, again, we wanted the freedom to change the LR-decay policy without changing code. High level overview Let's briefly discuss the main mechanisms and abstractions: A schedule specification is composed of a list of sections defining instances of Pruners, Regularizers, Quantizers, LR-scheduler and Policies. Pruners, Regularizers and Quantizers are very similar: They each implement a pruning, regularization or quantization algorithm, respectively. An LR-scheduler specifies the LR-decay algorithm. These define the what part of the schedule. The Policies define the when part of the schedule: at which epoch to start applying the Pruner/Regularizer/Quantizer/LR-decay, the epoch to end, and how often to invoke the policy (frequency of application). A policy also defines the instance of Pruner/Regularizer/Quantizer/LR-decay it is managing. The CompressionScheduler is configured from a YAML file or from a dictionary, but you can also manually create Policies, Pruners, Regularizers and Quantizers from code. 
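Since the overview ends by noting that everything can also be created from code, here is a rough sketch of that route. CompressionScheduler(model) and add_policy(...) appear elsewhere in this documentation, but the import paths and the SensitivityPruner / PruningPolicy constructor arguments below are assumptions - check distiller/policy.py and the pruner implementations for the exact signatures:

import torch.nn as nn
import distiller
from distiller.policy import PruningPolicy        # import path is an assumption
from distiller.pruning import SensitivityPruner   # import path is an assumption

model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU())  # toy model; its conv weight is named '0.weight'
scheduler = distiller.CompressionScheduler(model)
pruner = SensitivityPruner('my_pruner', sensitivities={'0.weight': 0.25})  # assumed signature
policy = PruningPolicy(pruner, pruner_args=None)                           # assumed signature
scheduler.add_policy(policy, starting_epoch=0, ending_epoch=38, frequency=2)
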
Syntax through example We'll use alexnet.schedule_agp.yaml to explain some of the YAML syntax for configuring Sensitivity Pruning of Alexnet. version: 1 pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 lr_schedulers: pruning_lr: class: ExponentialLR gamma: 0.9 policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2 - lr_scheduler: instance_name: pruning_lr starting_epoch: 24 ending_epoch: 200 frequency: 1 There is only one version of the YAML syntax, and the version number is not verified at the moment. However, to be future-proof it is probably better to let the YAML parser know that you are using version-1 syntax, in case there is ever a version 2. version: 1 In the pruners section, we define the instances of pruners we want the scheduler to instantiate and use. We define a single pruner instance, named my_pruner , of algorithm SensitivityPruner . We will refer to this instance in the Policies section. Then we list the sensitivity multipliers, \\(s\\), of each of the weight tensors. You may list as many Pruners as you want in this section, as long as each has a unique name. You can use several types of pruners in one schedule. pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 Next, we want to specify the learning-rate decay scheduling in the lr_schedulers section. We assign a name to this instance: pruning_lr . As in the pruners section, you may use any name, as long as all LR-schedulers have a unique name. At the moment, only one instance of LR-scheduler is allowed. The LR-scheduler must be a subclass of PyTorch's _LRScheduler . You can use any of the schedulers defined in torch.optim.lr_scheduler (see here ). In addition, we've implemented some additional schedulers in Distiller (see here ). The keyword arguments (kwargs) are passed directly to the LR-scheduler's constructor, so that as new LR-schedulers are added to torch.optim.lr_scheduler , they can be used without changing the application code. lr_schedulers: pruning_lr: class: ExponentialLR gamma: 0.9 Finally, we define the policies section which defines the actual scheduling. A Policy manages an instance of a Pruner , Regularizer , Quantizer , or LRScheduler , by naming the instance. In the example below, a PruningPolicy uses the pruner instance named my_pruner : it activates it at a frequency of 2 epochs (i.e. every other epoch), starting at epoch 0, and ending at epoch 38. policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2 - lr_scheduler: instance_name: pruning_lr starting_epoch: 24 ending_epoch: 200 frequency: 1 This is iterative pruning : Train Connectivity Prune Connections Retrain Weights Goto 2 It is described in Learning both Weights and Connections for Efficient Neural Networks : \"Our method prunes redundant connections using a three-step method. First, we train the network to learn which connections are important. Next, we prune the unimportant connections. 
Finally, we retrain the network to fine tune the weights of the remaining connections...After an initial training phase, we remove all connections whose weight is lower than a threshold. This pruning converts a dense, fully-connected layer to a sparse layer. This first phase learns the topology of the networks \u2014 learning which connections are important and removing the unimportant connections. We then retrain the sparse network so the remaining connections can compensate for the connections that have been removed. The phases of pruning and retraining may be repeated iteratively to further reduce network complexity.\" Regularization You can also define and schedule regularization. L1 regularization Format (this is an informal specification, not a valid ABNF specification): regularizers: <REGULARIZER_NAME_STR>: class: L1Regularizer reg_regims: <PYTORCH_PARAM_NAME_STR>: <STRENGTH_FLOAT> ... <PYTORCH_PARAM_NAME_STR>: <STRENGTH_FLOAT> threshold_criteria: [Mean_Abs | Max] For example: version: 1 regularizers: my_L1_reg: class: L1Regularizer reg_regims: 'module.layer3.1.conv1.weight': 0.000002 'module.layer3.1.conv2.weight': 0.000002 'module.layer3.1.conv3.weight': 0.000002 'module.layer3.2.conv1.weight': 0.000002 threshold_criteria: Mean_Abs policies: - regularizer: instance_name: my_L1_reg starting_epoch: 0 ending_epoch: 60 frequency: 1 Group regularization Format (informal specification): regularizers: <REGULARIZER_NAME_STR>: class: GroupLassoRegularizer reg_regims: <PYTORCH_PARAM_NAME_STR>: [<STRENGTH_FLOAT>, <'2D' | '3D' | '4D' | 'Channels' | 'Cols' | 'Rows'>] <PYTORCH_PARAM_NAME_STR>: [<STRENGTH_FLOAT>, <'2D' | '3D' | '4D' | 'Channels' | 'Cols' | 'Rows'>] threshold_criteria: [Mean_Abs | Max] For example: version: 1 regularizers: my_filter_regularizer: class: GroupLassoRegularizer reg_regims: 'module.layer3.1.conv1.weight': [0.00005, '3D'] 'module.layer3.1.conv2.weight': [0.00005, '3D'] 'module.layer3.1.conv3.weight': [0.00005, '3D'] 'module.layer3.2.conv1.weight': [0.00005, '3D'] threshold_criteria: Mean_Abs policies: - regularizer: instance_name: my_filter_regularizer starting_epoch: 0 ending_epoch: 60 frequency: 1 Mixing it up You can mix pruning and regularization. version: 1 pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 regularizers: 2d_groups_regularizer: class: GroupLassoRegularizer reg_regims: 'features.module.0.weight': [0.000012, '2D'] 'features.module.3.weight': [0.000012, '2D'] 'features.module.6.weight': [0.000012, '2D'] 'features.module.8.weight': [0.000012, '2D'] 'features.module.10.weight': [0.000012, '2D'] lr_schedulers: # Learning rate decay scheduler pruning_lr: class: ExponentialLR gamma: 0.9 policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2 - regularizer: instance_name: '2d_groups_regularizer' starting_epoch: 0 ending_epoch: 38 frequency: 1 - lr_scheduler: instance_name: pruning_lr starting_epoch: 24 ending_epoch: 200 frequency: 1 Quantization-Aware Training Similarly to pruners and regularizers, specifying a quantizer in the scheduler YAML follows the constructor arguments of the Quantizer class (see details here ). Note that only a single quantizer instance may be defined per YAML. 
Let's see an example: quantizers: dorefa_quantizer: class: DorefaQuantizer bits_activations: 8 bits_weights: 4 overrides: conv1: bits_weights: null bits_activations: null relu1: bits_weights: null bits_activations: null final_relu: bits_weights: null bits_activations: null fc: bits_weights: null bits_activations: null The specific quantization method we're instantiating here is DorefaQuantizer . Then we define the default bit-widths for activations and weights, in this case 8 and 4-bits, respectively. Then, we define the overrides mapping. In the example above, we choose not to quantize the first and last layer of the model. In the case of DorefaQuantizer , the weights are quantized as part of the convolution / FC layers, but the activations are quantized in separate layers, which replace the ReLU layers in the original model (remember - even though we replaced the ReLU modules with our own quantization modules, the name of the modules isn't changed). So, in all, we need to reference the first layer with parameters conv1 , the first activation layer relu1 , the last activation layer final_relu and the last layer with parameters fc . Specifying null means \"do not quantize\". Note that for quantizers, we reference names of modules, not names of parameters as we do for pruners and regularizers. Defining overrides for groups of layers using regular expressions Suppose we have a sub-module in our model named block1 , which contains multiple convolution layers which we would like to quantize to, say, 2-bits. The convolution layers are named conv1 , conv2 and so on. In that case we would define the following: overrides: 'block1\\.conv*': bits_weights: 2 bits_activations: null RegEx Note : Remember that the dot ( . ) is a meta-character (i.e. a reserved character) in regular expressions. So, to match the actual dot characters which separate sub-modules in PyTorch module names, we need to escape it: \\. Overlapping patterns are also possible, which allows defining an override for a group of layers while also \"singling out\" specific layers for different overrides. For example, let's take the last example and configure a different override for block1.conv1 : overrides: 'block1\\.conv1': bits_weights: 4 bits_activations: null 'block1\\.conv*': bits_weights: 2 bits_activations: null Important Note : The patterns are evaluated eagerly - first match wins (see the small matching sketch below). So, to properly quantize a model using \"broad\" patterns and more \"specific\" patterns as just shown, make sure the specific pattern is listed before the broad one. The QuantizationPolicy , which controls the quantization procedure during training, is actually quite simplistic. All it does is call the prepare_model() function of the Quantizer when it's initialized, followed by the first call to quantize_params() . Then, at the end of each epoch, after the float copy of the weights has been updated, it calls the quantize_params() function again. policies: - quantizer: instance_name: dorefa_quantizer starting_epoch: 0 ending_epoch: 200 frequency: 1 Important Note : As mentioned here , since the quantizer modifies the model's parameters (assuming training with quantization in the loop is used), the call to prepare_model() must be performed before an optimizer is called. Therefore, currently, the starting epoch for a quantization policy must be 0, otherwise the quantization process will not work as expected. 
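The \"first match wins\" rule is easy to check outside of Distiller with a small, self-contained sketch of the matching semantics (the overrides list and the find_override helper are hypothetical, not Distiller APIs):

import re

overrides = [
    (r'block1\\.conv1', {'bits_weights': 4}),  # specific pattern listed first
    (r'block1\\.conv*', {'bits_weights': 2}),  # broad pattern listed second
]

def find_override(module_name):
    for pattern, config in overrides:
        if re.match(pattern, module_name):
            return config  # patterns are evaluated eagerly: first match wins
    return None

assert find_override('block1.conv1') == {'bits_weights': 4}
assert find_override('block1.conv2') == {'bits_weights': 2}
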
If one wishes to do a \"warm-startup\" (or \"boot-strapping\"), training for a few epochs with full precision and only then starting to quantize, the only way to do this right now is to execute a separate run to generate the boot-strapped weights, and then execute a second run which resumes from the checkpoint containing the boot-strapped weights. Post-Training Quantization Post-training quantization differs from the other techniques described here. Since it is not executed during training, it does not require any Policies or a Scheduler. Currently, the only method implemented for post-training quantization is range-based linear quantization . Quantizing a model using this method requires adding two lines of code: quantizer = distiller.quantization.PostTrainLinearQuantizer(model, <quantizer arguments>) quantizer.prepare_model() # Execute evaluation on model as usual See the documentation for PostTrainLinearQuantizer in range_linear.py for details on the available arguments. In addition to directly instantiating the quantizer with arguments, it can also be configured from a YAML file. The syntax for the YAML file is exactly the same as seen in the quantization-aware training section above. Not surprisingly, the class defined must be PostTrainLinearQuantizer , and any other components or policies defined in the YAML file are ignored. We'll see how to create the quantizer in this manner below. If more configurability is needed, a helper function can be used that will add a set of command-line arguments to configure the quantizer: parser = argparse.ArgumentParser() distiller.quantization.add_post_train_quant_args(parser) args = parser.parse_args() These are the available command line arguments: Arguments controlling quantization at evaluation time (\"post-training quantization\"): --quantize-eval, --qe Apply linear quantization to model before evaluation. Applicable only if --evaluate is also set --qe-calibration PORTION_OF_TEST_SET Run the model in evaluation mode on the specified portion of the test dataset and collect statistics. 
Ignores all other 'qe--*' arguments --qe-mode QE_MODE, --qem QE_MODE Linear quantization mode. Choices: sym | asym_s | asym_u --qe-bits-acts NUM_BITS, --qeba NUM_BITS Number of bits for quantization of activations --qe-bits-wts NUM_BITS, --qebw NUM_BITS Number of bits for quantization of weights --qe-bits-accum NUM_BITS Number of bits for quantization of the accumulator --qe-clip-acts, --qeca Enable clipping of activations using min/max values averaging over batch --qe-no-clip-layers LAYER_NAME [LAYER_NAME ...], --qencl LAYER_NAME [LAYER_NAME ...] List of layer names for which not to clip activations. Applicable only if --qe-clip-acts is also set --qe-per-channel, --qepc Enable per-channel quantization of weights (per output channel) --qe-stats-file PATH Path to YAML file with calibration stats. If not given, dynamic quantization will be run (Note that not all layer types are supported for dynamic quantization) --qe-config-file PATH Path to YAML file containing configuration for PostTrainLinearQuantizer (if present, all other --qe* arguments are ignored) (Note that --quantize-eval and --qe-calibration are mutually exclusive.) When using these command line arguments, the quantizer can be invoked as follows: if args.quantize_eval: if args.qe_config_file: quantizer = distiller.config_component_from_file_by_class(model, args.qe_config_file, 'PostTrainLinearQuantizer') else: quantizer = quantization.PostTrainLinearQuantizer(model, args.qe_bits_acts, args.qe_bits_wts, args.qe_bits_accum, None, args.qe_mode, args.qe_clip_acts, args.qe_no_clip_layers, args.qe_per_channel, args.qe_stats_file) quantizer.prepare_model() # Execute evaluation on model as usual Note that the command-line arguments don't expose the overrides parameter of the quantizer, which allows fine-grained control over how each layer is quantized. To utilize this functionality, configure with a YAML file. To see integration of these command line arguments in use, see the image classification example . For example invocations of post-training quantization, see here . Collecting Statistics for Quantization To generate statistics that can be used for static quantization of activations, do the following (shown here assuming the command line argument --qe-calibration shown above is used, which specifies the number of batches to use for statistics generation): if args.qe_calibration: distiller.utils.assign_layer_fq_names(model) msglogger.info(\"Generating quantization calibration stats based on {0} users\".format(args.qe_calibration)) collector = distiller.data_loggers.QuantCalibrationStatsCollector(model) with collector_context(collector): # Here call your model evaluation function, making sure to execute only # the portion of the dataset specified by the qe_calibration argument yaml_path = 'some/dir/quantization_stats.yaml' collector.save(yaml_path) The generated YAML stats file can then be provided using the --qe-stats-file argument. An example of a generated stats file can be found here . Knowledge Distillation Knowledge distillation (see here ) is also implemented as a Policy , which should be added to the scheduler. However, with the current implementation, it cannot be defined within the YAML file like the rest of the policies described above. To make the integration of this method into applications a bit easier, a helper function can be used that will add a set of command-line arguments related to knowledge distillation: import argparse import distiller parser = argparse.ArgumentParser() distiller.knowledge_distillation.add_distillation_args(parser) (The add_distillation_args function accepts some optional arguments, see its implementation at distiller/knowledge_distillation.py for details) These are the command line arguments exposed by this function: Knowledge Distillation Training Arguments: --kd-teacher ARCH Model architecture for teacher model --kd-pretrained Use pre-trained model for teacher --kd-resume PATH Path to checkpoint from which to load teacher weights --kd-temperature TEMP, --kd-temp TEMP Knowledge distillation softmax temperature --kd-distill-wt WEIGHT, --kd-dw WEIGHT Weight for distillation loss (student vs. teacher soft targets) --kd-student-wt WEIGHT, --kd-sw WEIGHT Weight for student vs. labels loss --kd-teacher-wt WEIGHT, --kd-tw WEIGHT Weight for teacher vs. 
labels loss --kd-start-epoch EPOCH_NUM Epoch from which to enable distillation Once arguments have been parsed, some initialization code is required, similar to the following: # Assuming: # \"args\" variable holds command line arguments # \"model\" variable holds the model we're going to train, that is - the student model # \"compression_scheduler\" variable holds a CompressionScheduler instance args.kd_policy = None if args.kd_teacher: # Create teacher model - replace this with your model creation code teacher = create_model(args.kd_pretrained, args.dataset, args.kd_teacher, device_ids=args.gpus) if args.kd_resume: teacher, _, _ = apputils.load_checkpoint(teacher, chkpt_file=args.kd_resume) # Create policy and add to scheduler dlw = distiller.DistillationLossWeights(args.kd_distill_wt, args.kd_student_wt, args.kd_teacher_wt) args.kd_policy = distiller.KnowledgeDistillationPolicy(model, teacher, args.kd_temp, dlw) compression_scheduler.add_policy(args.kd_policy, starting_epoch=args.kd_start_epoch, ending_epoch=args.epochs, frequency=1) Finally, during the training loop, we need to perform forward propagation through the teacher model as well. The KnowledgeDistillationPolicy class keeps a reference to both the student and teacher models, and exposes a forward function that performs forward propagation on both of them. Since this is not one of the standard policy callbacks, we need to call this function manually from our training loop, as follows: if args.kd_policy is None: # Revert to a \"normal\" forward-prop call if no knowledge distillation policy is present output = model(input_var) else: output = args.kd_policy.forward(input_var) To see this integration in action, take a look at the image classification sample at examples/classifier_compression/compress_classifier.py .","title":"Compression Scheduling"},{"location":"schedule.html#compression-scheduler","text":"In iterative pruning, we create some kind of pruning regimen that specifies how to prune, and what to prune, at every stage of the pruning and training process. This motivated the design of CompressionScheduler : it needed to be part of the training loop, and to be able to make and implement pruning, regularization and quantization decisions. We wanted to be able to change the particulars of the compression schedule, without touching the code, and settled on using YAML as a container for this specification. We found that when we make many experiments on the same code base, it is easier to maintain all of these experiments if we decouple the differences from the code-base. Therefore, we added to the scheduler support for learning-rate decay scheduling because, again, we wanted the freedom to change the LR-decay policy without changing code.","title":"Compression scheduler"},{"location":"schedule.html#high-level-overview","text":"Let's briefly discuss the main mechanisms and abstractions: A schedule specification is composed of a list of sections defining instances of Pruners, Regularizers, Quantizers, LR-scheduler and Policies. Pruners, Regularizers and Quantizers are very similar: They each implement a pruning, regularization or quantization algorithm, respectively. An LR-scheduler specifies the LR-decay algorithm. These define the what part of the schedule. The Policies define the when part of the schedule: at which epoch to start applying the Pruner/Regularizer/Quantizer/LR-decay, the epoch to end, and how often to invoke the policy (frequency of application). 
A policy also defines the instance of Pruner/Regularizer/Quantizer/LR-decay it is managing. The CompressionScheduler is configured from a YAML file or from a dictionary, but you can also manually create Policies, Pruners, Regularizers and Quantizers from code.","title":"High level overview"},{"location":"schedule.html#syntax-through-example","text":"We'll use alexnet.schedule_agp.yaml to explain some of the YAML syntax for configuring Sensitivity Pruning of Alexnet. version: 1 pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 lr_schedulers: pruning_lr: class: ExponentialLR gamma: 0.9 policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2 - lr_scheduler: instance_name: pruning_lr starting_epoch: 24 ending_epoch: 200 frequency: 1 There is only one version of the YAML syntax, and the version number is not verified at the moment. However, to be future-proof it is probably better to let the YAML parser know that you are using version-1 syntax, in case there is ever a version 2. version: 1 In the pruners section, we define the instances of pruners we want the scheduler to instantiate and use. We define a single pruner instance, named my_pruner , of algorithm SensitivityPruner . We will refer to this instance in the Policies section. Then we list the sensitivity multipliers, \\(s\\), of each of the weight tensors. You may list as many Pruners as you want in this section, as long as each has a unique name. You can use several types of pruners in one schedule. pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 Next, we want to specify the learning-rate decay scheduling in the lr_schedulers section. We assign a name to this instance: pruning_lr . As in the pruners section, you may use any name, as long as all LR-schedulers have a unique name. At the moment, only one instance of LR-scheduler is allowed. The LR-scheduler must be a subclass of PyTorch's _LRScheduler . You can use any of the schedulers defined in torch.optim.lr_scheduler (see here ). In addition, we've implemented some additional schedulers in Distiller (see here ). The keyword arguments (kwargs) are passed directly to the LR-scheduler's constructor, so that as new LR-schedulers are added to torch.optim.lr_scheduler , they can be used without changing the application code. lr_schedulers: pruning_lr: class: ExponentialLR gamma: 0.9 Finally, we define the policies section which defines the actual scheduling. A Policy manages an instance of a Pruner , Regularizer , Quantizer , or LRScheduler , by naming the instance. In the example below, a PruningPolicy uses the pruner instance named my_pruner : it activates it at a frequency of 2 epochs (i.e. every other epoch), starting at epoch 0, and ending at epoch 38. 
policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2 - lr_scheduler: instance_name: pruning_lr starting_epoch: 24 ending_epoch: 200 frequency: 1 This is iterative pruning : Train Connectivity Prune Connections Retrain Weights Goto 2 It is described in Learning both Weights and Connections for Efficient Neural Networks : \"Our method prunes redundant connections using a three-step method. First, we train the network to learn which connections are important. Next, we prune the unimportant connections. Finally, we retrain the network to fine tune the weights of the remaining connections...After an initial training phase, we remove all connections whose weight is lower than a threshold. This pruning converts a dense, fully-connected layer to a sparse layer. This first phase learns the topology of the networks \u2014 learning which connections are important and removing the unimportant connections. We then retrain the sparse network so the remaining connections can compensate for the connections that have been removed. The phases of pruning and retraining may be repeated iteratively to further reduce network complexity.\"","title":"Syntax through example"},{"location":"schedule.html#regularization","text":"You can also define and schedule regularization.","title":"Regularization"},{"location":"schedule.html#l1-regularization","text":"Format (this is an informal specification, not a valid ABNF specification): regularizers: <REGULARIZER_NAME_STR>: class: L1Regularizer reg_regims: <PYTORCH_PARAM_NAME_STR>: <STRENGTH_FLOAT> ... <PYTORCH_PARAM_NAME_STR>: <STRENGTH_FLOAT> threshold_criteria: [Mean_Abs | Max] For example: version: 1 regularizers: my_L1_reg: class: L1Regularizer reg_regims: 'module.layer3.1.conv1.weight': 0.000002 'module.layer3.1.conv2.weight': 0.000002 'module.layer3.1.conv3.weight': 0.000002 'module.layer3.2.conv1.weight': 0.000002 threshold_criteria: Mean_Abs policies: - regularizer: instance_name: my_L1_reg starting_epoch: 0 ending_epoch: 60 frequency: 1","title":"L1 regularization"},{"location":"schedule.html#group-regularization","text":"Format (informal specification): regularizers: <REGULARIZER_NAME_STR>: class: GroupLassoRegularizer reg_regims: <PYTORCH_PARAM_NAME_STR>: [<STRENGTH_FLOAT>, <'2D' | '3D' | '4D' | 'Channels' | 'Cols' | 'Rows'>] <PYTORCH_PARAM_NAME_STR>: [<STRENGTH_FLOAT>, <'2D' | '3D' | '4D' | 'Channels' | 'Cols' | 'Rows'>] threshold_criteria: [Mean_Abs | Max] For example: version: 1 regularizers: my_filter_regularizer: class: GroupLassoRegularizer reg_regims: 'module.layer3.1.conv1.weight': [0.00005, '3D'] 'module.layer3.1.conv2.weight': [0.00005, '3D'] 'module.layer3.1.conv3.weight': [0.00005, '3D'] 'module.layer3.2.conv1.weight': [0.00005, '3D'] threshold_criteria: Mean_Abs policies: - regularizer: instance_name: my_filter_regularizer starting_epoch: 0 ending_epoch: 60 frequency: 1","title":"Group regularization"},{"location":"schedule.html#mixing-it-up","text":"You can mix pruning and regularization. 
version: 1 pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 regularizers: 2d_groups_regularizer: class: GroupLassoRegularizer reg_regims: 'features.module.0.weight': [0.000012, '2D'] 'features.module.3.weight': [0.000012, '2D'] 'features.module.6.weight': [0.000012, '2D'] 'features.module.8.weight': [0.000012, '2D'] 'features.module.10.weight': [0.000012, '2D'] lr_schedulers: # Learning rate decay scheduler pruning_lr: class: ExponentialLR gamma: 0.9 policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2 - regularizer: instance_name: '2d_groups_regularizer' starting_epoch: 0 ending_epoch: 38 frequency: 1 - lr_scheduler: instance_name: pruning_lr starting_epoch: 24 ending_epoch: 200 frequency: 1","title":"Mixing it up"},{"location":"schedule.html#quantization-aware-training","text":"Similarly to pruners and regularizers, specifying a quantizer in the scheduler YAML follows the constructor arguments of the Quantizer class (see details here ). Note that only a single quantizer instance may be defined per YAML. Let's see an example: quantizers: dorefa_quantizer: class: DorefaQuantizer bits_activations: 8 bits_weights: 4 overrides: conv1: bits_weights: null bits_activations: null relu1: bits_weights: null bits_activations: null final_relu: bits_weights: null bits_activations: null fc: bits_weights: null bits_activations: null The specific quantization method we're instantiating here is DorefaQuantizer . Then we define the default bit-widths for activations and weights, in this case 8 and 4-bits, respectively. Then, we define the overrides mapping. In the example above, we choose not to quantize the first and last layer of the model. In the case of DorefaQuantizer , the weights are quantized as part of the convolution / FC layers, but the activations are quantized in separate layers, which replace the ReLU layers in the original model (remember - even though we replaced the ReLU modules with our own quantization modules, the name of the modules isn't changed). So, in all, we need to reference the first layer with parameters conv1 , the first activation layer relu1 , the last activation layer final_relu and the last layer with parameters fc . Specifying null means \"do not quantize\". Note that for quantizers, we reference names of modules, not names of parameters as we do for pruners and regularizers.","title":"Quantization-Aware Training"},{"location":"schedule.html#defining-overrides-for-groups-of-layers-using-regular-expressions","text":"Suppose we have a sub-module in our model named block1 , which contains multiple convolution layers which we would like to quantize to, say, 2-bits. The convolution layers are named conv1 , conv2 and so on. In that case we would define the following: overrides: 'block1\\.conv*': bits_weights: 2 bits_activations: null RegEx Note : Remember that the dot ( . ) is a meta-character (i.e. a reserved character) in regular expressions. So, to match the actual dot characters which separate sub-modules in PyTorch module names, we need to escape it: \\. Overlapping patterns are also possible, which allows defining an override for a group of layers while also \"singling out\" specific layers for different overrides. 
For example, let's take the last example and configure a different override for block1.conv1 : overrides: 'block1\\.conv1': bits_weights: 4 bits_activations: null 'block1\\.conv*': bits_weights: 2 bits_activations: null Important Note : The patterns are evaluated eagerly - first match wins. So, to properly quantize a model using \"broad\" patterns and more \"specific\" patterns as just shown, make sure the specific pattern is listed before the broad one. The QuantizationPolicy , which controls the quantization procedure during training, is actually quite simplistic. All it does is call the prepare_model() function of the Quantizer when it's initialized, followed by the first call to quantize_params() . Then, at the end of each epoch, after the float copy of the weights has been updated, it calls the quantize_params() function again. policies: - quantizer: instance_name: dorefa_quantizer starting_epoch: 0 ending_epoch: 200 frequency: 1 Important Note : As mentioned here , since the quantizer modifies the model's parameters (assuming training with quantization in the loop is used), the call to prepare_model() must be performed before an optimizer is called. Therefore, currently, the starting epoch for a quantization policy must be 0, otherwise the quantization process will not work as expected. If one wishes to do a \"warm-startup\" (or \"boot-strapping\"), training for a few epochs with full precision and only then starting to quantize, the only way to do this right now is to execute a separate run to generate the boot-strapped weights, and then execute a second run which resumes from the checkpoint containing the boot-strapped weights.","title":"Defining overrides for groups of layers using regular expressions"},{"location":"schedule.html#post-training-quantization","text":"Post-training quantization differs from the other techniques described here. Since it is not executed during training, it does not require any Policies or a Scheduler. Currently, the only method implemented for post-training quantization is range-based linear quantization . Quantizing a model using this method requires adding two lines of code: quantizer = distiller.quantization.PostTrainLinearQuantizer(model, <quantizer arguments>) quantizer.prepare_model() # Execute evaluation on model as usual See the documentation for PostTrainLinearQuantizer in range_linear.py for details on the available arguments. In addition to directly instantiating the quantizer with arguments, it can also be configured from a YAML file. The syntax for the YAML file is exactly the same as seen in the quantization-aware training section above. Not surprisingly, the class defined must be PostTrainLinearQuantizer , and any other components or policies defined in the YAML file are ignored. We'll see how to create the quantizer in this manner below. If more configurability is needed, a helper function can be used that will add a set of command-line arguments to configure the quantizer: parser = argparse.ArgumentParser() distiller.quantization.add_post_train_quant_args(parser) args = parser.parse_args() These are the available command line arguments: Arguments controlling quantization at evaluation time (\"post-training quantization\"): --quantize-eval, --qe Apply linear quantization to model before evaluation. Applicable only if --evaluate is also set --qe-calibration PORTION_OF_TEST_SET Run the model in evaluation mode on the specified portion of the test dataset and collect statistics. 
Ignores all other 'qe--*' arguments --qe-mode QE_MODE, --qem QE_MODE Linear quantization mode. Choices: sym | asym_s | asym_u --qe-bits-acts NUM_BITS, --qeba NUM_BITS Number of bits for quantization of activations --qe-bits-wts NUM_BITS, --qebw NUM_BITS Number of bits for quantization of weights --qe-bits-accum NUM_BITS Number of bits for quantization of the accumulator --qe-clip-acts, --qeca Enable clipping of activations using min/max values averaging over batch --qe-no-clip-layers LAYER_NAME [LAYER_NAME ...], --qencl LAYER_NAME [LAYER_NAME ...] List of layer names for which not to clip activations. Applicable only if --qe-clip-acts is also set --qe-per-channel, --qepc Enable per-channel quantization of weights (per output channel) --qe-stats-file PATH Path to YAML file with calibration stats. If not given, dynamic quantization will be run (Note that not all layer types are supported for dynamic quantization) --qe-config-file PATH Path to YAML file containing configuration for PostTrainLinearQuantizer (if present, all other --qe* arguments are ignored) (Note that --quantize-eval and --qe-calibration are mutually exclusive.) When using these command line arguments, the quantizer can be invoked as follows: if args.quantize_eval: if args.qe_config_file: quantizer = distiller.config_component_from_file_by_class(model, args.qe_config_file, 'PostTrainLinearQuantizer') else: quantizer = quantization.PostTrainLinearQuantizer(model, args.qe_bits_acts, args.qe_bits_wts, args.qe_bits_accum, None, args.qe_mode, args.qe_clip_acts, args.qe_no_clip_layers, args.qe_per_channel, args.qe_stats_file) quantizer.prepare_model() # Execute evaluation on model as usual Note that the command-line arguments don't expose the overrides parameter of the quantizer, which allows fine-grained control over how each layer is quantized. To utilize this functionality, configure with a YAML file. To see integration of these command line arguments in use, see the image classification example . For example invocations of post-training quantization, see here .","title":"Post-Training Quantization"},{"location":"schedule.html#collecting-statistics-for-quantization","text":"To generate statistics that can be used for static quantization of activations, do the following (shown here assuming the command line argument --qe-calibration shown above is used, which specifies the number of batches to use for statistics generation): if args.qe_calibration: distiller.utils.assign_layer_fq_names(model) msglogger.info(\"Generating quantization calibration stats based on {0} users\".format(args.qe_calibration)) collector = distiller.data_loggers.QuantCalibrationStatsCollector(model) with collector_context(collector): # Here call your model evaluation function, making sure to execute only # the portion of the dataset specified by the qe_calibration argument yaml_path = 'some/dir/quantization_stats.yaml' collector.save(yaml_path) The generated YAML stats file can then be provided using the --qe-stats-file argument. An example of a generated stats file can be found here .","title":"Collecting Statistics for Quantization"},{"location":"schedule.html#knowledge-distillation","text":"Knowledge distillation (see here ) is also implemented as a Policy , which should be added to the scheduler. However, with the current implementation, it cannot be defined within the YAML file like the rest of the policies described above. 
To make the integration of this method into applications a bit easier, a helper function can be used that will add a set of command-line arguments related to knowledge distillation: import argparse import distiller parser = argparse.ArgumentParser() distiller.knowledge_distillation.add_distillation_args(parser) (The add_distillation_args function accepts some optional arguments, see its implementation at distiller/knowledge_distillation.py for details) These are the command line arguments exposed by this function: Knowledge Distillation Training Arguments: --kd-teacher ARCH Model architecture for teacher model --kd-pretrained Use pre-trained model for teacher --kd-resume PATH Path to checkpoint from which to load teacher weights --kd-temperature TEMP, --kd-temp TEMP Knowledge distillation softmax temperature --kd-distill-wt WEIGHT, --kd-dw WEIGHT Weight for distillation loss (student vs. teacher soft targets) --kd-student-wt WEIGHT, --kd-sw WEIGHT Weight for student vs. labels loss --kd-teacher-wt WEIGHT, --kd-tw WEIGHT Weight for teacher vs. labels loss --kd-start-epoch EPOCH_NUM Epoch from which to enable distillation Once arguments have been parsed, some initialization code is required, similar to the following: # Assuming: # \"args\" variable holds command line arguments # \"model\" variable holds the model we're going to train, that is - the student model # \"compression_scheduler\" variable holds a CompressionScheduler instance args.kd_policy = None if args.kd_teacher: # Create teacher model - replace this with your model creation code teacher = create_model(args.kd_pretrained, args.dataset, args.kd_teacher, device_ids=args.gpus) if args.kd_resume: teacher, _, _ = apputils.load_checkpoint(teacher, chkpt_file=args.kd_resume) # Create policy and add to scheduler dlw = distiller.DistillationLossWeights(args.kd_distill_wt, args.kd_student_wt, args.kd_teacher_wt) args.kd_policy = distiller.KnowledgeDistillationPolicy(model, teacher, args.kd_temp, dlw) compression_scheduler.add_policy(args.kd_policy, starting_epoch=args.kd_start_epoch, ending_epoch=args.epochs, frequency=1) Finally, during the training loop, we need to perform forward propagation through the teacher model as well. The KnowledgeDistillationPolicy class keeps a reference to both the student and teacher models, and exposes a forward function that performs forward propagation on both of them. Since this is not one of the standard policy callbacks, we need to call this function manually from our training loop, as follows: if args.kd_policy is None: # Revert to a \"normal\" forward-prop call if no knowledge distillation policy is present output = model(input_var) else: output = args.kd_policy.forward(input_var) To see this integration in action, take a look at the image classification sample at examples/classifier_compression/compress_classifier.py .","title":"Knowledge Distillation"},{"location":"tutorial-lang_model.html","text":"Using Distiller to prune a PyTorch language model Contents Introduction Setup Preparing the code Training-loop Creating compression baselines Compressing the language model What are we compressing? How are we compressing? When are we compressing? Until next time Introduction In this tutorial I'll show you how to compress a word-level language model using Distiller . Specifically, we use PyTorch\u2019s word-level language model sample code as the code-base of our example, weave in some Distiller code, and show how we compress the model using two different element-wise pruning algorithms. 
To make things manageable, I've divided the tutorial into two parts: in the first we will set up the sample application and prune using AGP . In the second part I'll show how I've added Baidu's RNN pruning algorithm and then use it to prune the same word-level language model. The completed code is available here , and the results are displayed below. Note that we can improve the results by training longer, since the loss curves are usually still decreasing at the end of epoch 40. However, for demonstration purposes we don\u2019t need to do this. Type Sparsity NNZ Validation Test Command line Small 0% 7,135,600 101.13 96.29 time python3 main.py --cuda --epochs 40 --tied --wd=1e-6 Medium 0% 28,390,700 88.17 84.21 time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied --wd=1e-6 Large 0% 85,917,000 87.49 83.85 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --wd=1e-6 Large 70% 25,487,550 90.67 85.96 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70.schedule_agp.yaml Large 70% 25,487,550 90.59 85.84 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70.schedule_agp.yaml --wd=1e-6 Large 70% 25,487,550 87.40 82.93 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70B.schedule_agp.yaml --wd=1e-6 Large 80.4% 16,847,550 89.31 83.64 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_80.schedule_agp.yaml --wd=1e-6 Large 90% 8,591,700 90.70 85.67 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_90.schedule_agp.yaml --wd=1e-6 Large 95% 4,295,850 98.42 92.79 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_95.schedule_agp.yaml --wd=1e-6 Table 1: AGP language model pruning results. NNZ stands for number of non-zero coefficients (embeddings are counted once, because they are tied). Figure 1: Perplexity vs model size (lower perplexity is better). The model is composed of an Encoder embedding, two LSTMs, and a Decoder embedding. The Encoder and decoder embeddings (projections) are tied to improve perplexity results (per https://arxiv.org/pdf/1611.01462.pdf), so in the sparsity statistics we account for only one of the encoder/decoder embeddings. We used the WikiText2 dataset (twice as large as PTB). We compared three model sizes: small (7.1M; 14M), medium (28M; 50M), large: (86M; 136M) \u2013 reported as (#parameters net/tied; #parameters gross). The results reported below use a preset seed (for reproducibility), and we expect results can be improved if we allow \u201ctrue\u201d pseudo-randomness. We limited our tests to 40 epochs, even though validation perplexity was still trending down. 
Essentially, this recreates the language model experiment in the AGP paper, and validates its conclusions: \u201cWe see that sparse models are able to outperform dense models which have significantly more parameters.\u201d The 80% sparse large model (which has 16.9M parameters and a perplexity of 83.64) is able to outperform the dense medium (which has 28.4M parameters and a perplexity of 84.21), a model which has 1.7 times more parameters. It also outperforms the dense large model, which exemplifies how pruning can act as a regularizer. \u201cOur results show that pruning works very well not only on the dense LSTM weights and dense softmax layer but also the dense embedding matrix. This suggests that during the optimization procedure the neural network can find a good sparse embedding for the words in the vocabulary that works well together with the sparse connectivity structure of the LSTM weights and softmax layer.\u201d Setup We start by cloning PyTorch\u2019s example repository . I\u2019ve copied the language model code to distiller\u2019s examples/word_language_model directory, so I\u2019ll use that for the rest of the tutorial. Next, let\u2019s create and activate a virtual environment, as explained in Distiller's README file. Now we can turn our attention to main.py , which contains the training application. Preparing the code We begin by adding code to invoke Distiller in file main.py . This involves a bit of mechanics, because we did not pip install Distiller in our environment (we don\u2019t have a setup.py script for Distiller as of yet). To make Distiller library functions accessible from main.py , we modify sys.path to include the distiller root directory by taking the current directory and pointing two directories up. This is very specific to the location of this example code, and it will break if you\u2019ve placed the code elsewhere \u2013 so be aware. import os import sys script_dir = os.path.dirname(__file__) module_path = os.path.abspath(os.path.join(script_dir, '..', '..')) if module_path not in sys.path: sys.path.append(module_path) import distiller import apputils from distiller.data_loggers import TensorBoardLogger, PythonLogger Next, we augment the application arguments with two Distiller-specific arguments. The first, --summary , gives us the ability to do simple compression instrumentation (e.g. log sparsity statistics). The second argument, --compress , is how we tell the application where the compression scheduling file is located. We also add two arguments - momentum and weight-decay - for the SGD optimizer. As I explain later, I replaced the original code's optimizer with SGD, so we need these extra arguments. # Distiller-related arguments SUMMARY_CHOICES = ['sparsity', 'model', 'modules', 'png', 'percentile'] parser.add_argument('--summary', type=str, choices=SUMMARY_CHOICES, help='print a summary of the model, and exit - options: ' + ' | '.join(SUMMARY_CHOICES)) parser.add_argument('--compress', dest='compress', type=str, nargs='?', action='store', help='configuration file for pruning the model (default is to use hard-coded schedule)') parser.add_argument('--momentum', default=0., type=float, metavar='M', help='momentum') parser.add_argument('--weight-decay', '--wd', default=0., type=float, metavar='W', help='weight decay (default: 0.)') We add code to handle the --summary application argument. It can be as simple as forwarding to distiller.model_summary or more complex, as in the Distiller sample. 
if args.summary: distiller.model_summary(model, None, args.summary, 'wikitext2') exit(0) Similarly, we add code to handle the --compress argument, which creates a CompressionScheduler and configures it from a YAML schedule file: if args.compress: source = args.compress compression_scheduler = distiller.CompressionScheduler(model) distiller.config.fileConfig(model, None, compression_scheduler, args.compress, msglogger) We also create the optimizer, and the learning-rate decay policy scheduler. The original PyTorch example manually manages the optimization and LR decay process, but I think that having a standard optimizer and LR-decay schedule gives us the flexibility to experiment with these during the training process. Using an SGD optimizer configured with momentum=0 and weight_decay=0 , and a ReduceLROnPlateau LR-decay policy with patience=0 and factor=0.5 will give the same behavior as in the original PyTorch example. From there, we can experiment with the optimizer and LR-decay configuration. optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay) lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=0, verbose=True, factor=0.5) Next, we add code to set up the logging backends: a Python logger backend which reads its configuration from file and logs messages to the console and log file ( pylogger ); and a TensorBoard backend logger which logs statistics to a TensorBoard data file ( tflogger ). I configured the TensorBoard backend to log gradients because RNNs suffer from vanishing and exploding gradients, so we might want to take a look in case the training experiences a sudden failure. This code is not strictly required, but it is quite useful to be able to log the session progress, and to export logs to TensorBoard for realtime visualization of the training progress. # Distiller loggers msglogger = apputils.config_pylogger('logging.conf', None) tflogger = TensorBoardLogger(msglogger.logdir) tflogger.log_gradients = True pylogger = PythonLogger(msglogger) Training loop Now we scroll down all the way to the train() function. We'll change its signature to include the epoch , optimizer , and compression_scheduler . We'll soon see why we need these. def train(epoch, optimizer, compression_scheduler=None) Function train() is responsible for training the network in batches for one epoch, and in its batch loop we want to perform compression. The CompressionScheduler invokes ScheduledTrainingPolicy instances per the scheduling specification that was programmed in the CompressionScheduler instance. There are four main SchedulingPolicy types: PruningPolicy , RegularizationPolicy , LRPolicy , and QuantizationPolicy . We'll be using PruningPolicy , which is triggered on_epoch_begin (to invoke the Pruners ), and on_minibatch_begin (to mask the weights). Later we will create a YAML scheduling file, and specify the schedule of AutomatedGradualPruner instances. Because we are writing a single application, which can be used with various Policies in the future (e.g. group-lasso regularization), we should add code to invoke all of the CompressionScheduler 's callbacks, not just the mandatory on_epoch_begin callback. We invoke on_minibatch_begin before running the forward-pass, before_backward_pass after computing the loss, and on_minibatch_end after completing the backward-pass. def train(epoch, optimizer, compression_scheduler=None): ... 
# The line below was fixed as per: https://github.com/pytorch/examples/issues/214 for batch, i in enumerate(range(0, train_data.size(0), args.bptt)): data, targets = get_batch(train_data, i) # Starting each batch, we detach the hidden state from how it was previously produced. # If we didn't, the model would try backpropagating all the way to start of the dataset. hidden = repackage_hidden(hidden) if compression_scheduler: compression_scheduler.on_minibatch_begin(epoch, minibatch_id=batch, minibatches_per_epoch=steps_per_epoch) output, hidden = model(data, hidden) loss = criterion(output.view(-1, ntokens), targets) if compression_scheduler: compression_scheduler.before_backward_pass(epoch, minibatch_id=batch, minibatches_per_epoch=steps_per_epoch, loss=loss) optimizer.zero_grad() loss.backward() # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs. torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip) optimizer.step() total_loss += loss.item() if compression_scheduler: compression_scheduler.on_minibatch_end(epoch, minibatch_id=batch, minibatches_per_epoch=steps_per_epoch) The rest of the code could stay as in the original PyTorch sample, but I wanted to use an SGD optimizer, so I replaced: for p in model.parameters(): p.data.add_(-lr, p.grad.data) with: optimizer.step() The rest of the code in function train() logs to a text file and a TensorBoard backend. Again, such code is not mandatory, but a few lines give us a lot of visibility: we have training progress information saved to log, and we can monitor the training progress in realtime on TensorBoard. That's a lot for a few lines of code ;-) if batch % args.log_interval == 0 and batch > 0: cur_loss = total_loss / args.log_interval elapsed = time.time() - start_time lr = optimizer.param_groups[0]['lr'] msglogger.info( '| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.4f} | ms/batch {:5.2f} ' '| loss {:5.2f} | ppl {:8.2f}'.format( epoch, batch, len(train_data) // args.bptt, lr, elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss))) total_loss = 0 start_time = time.time() stats = ('Performance/Training/', OrderedDict([ ('Loss', cur_loss), ('Perplexity', math.exp(cur_loss)), ('LR', lr), ('Batch Time', elapsed * 1000)]) ) steps_completed = batch + 1 distiller.log_training_progress(stats, model.named_parameters(), epoch, steps_completed, steps_per_epoch, args.log_interval, [tflogger]) Finally, we get to the outer training-loop which loops on args.epochs . We add the two final CompressionScheduler callbacks: on_epoch_begin , at the start of the loop, and on_epoch_end after running evaluate on the model and updating the learning-rate. try: for epoch in range(0, args.epochs): epoch_start_time = time.time() if compression_scheduler: compression_scheduler.on_epoch_begin(epoch) train(epoch, optimizer, compression_scheduler) val_loss = evaluate(val_data) lr_scheduler.step(val_loss) if compression_scheduler: compression_scheduler.on_epoch_end(epoch) And that's it! The language model sample is ready for compression. 
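One loose end: the train() snippets above pass steps_per_epoch to the scheduler callbacks without ever showing its definition. A reasonable way to compute it (an assumption on my part, matching how the batch loop strides over train_data in BPTT-sized slices) is:

import math

# Assumed helper, not shown in the sample: the number of mini-batches in one
# epoch, given that the loop advances through train_data in strides of
# args.bptt along dimension 0.
steps_per_epoch = math.ceil(train_data.size(0) / args.bptt)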
Creating compression baselines In To prune, or not to prune: exploring the efficacy of pruning for model compression , Zhu and Gupta \"compare the accuracy of large, but pruned models (large-sparse) and their smaller, but dense (small-dense) counterparts with identical memory footprint.\" They also \"propose a new gradual pruning technique that is simple and straightforward to apply across a variety of models/datasets with minimal tuning.\" This pruning schedule is implemented by distiller.AutomatedGradualPruner (AGP), which increases the sparsity level (expressed as a percentage of zero-valued elements) gradually over several pruning steps. Distiller's implementation only prunes elements once in an epoch (the model is fine-tuned in between pruning events), which is a small deviation from Zhu and Gupta's paper. The research paper specifies the schedule in terms of mini-batches, while our implementation specifies the schedule in terms of epochs. We feel that using epochs performs well, and is more \"stable\", since the number of mini-batches will change if you change the batch size. Before we start compressing stuff ;-), we need to create baselines so we have something to benchmark against. Let's prepare small, medium, and large baseline models, like Table 3 of To prune, or Not to Prune . These will provide baseline perplexity results that we'll compare the compressed models against. I chose to use tied input/output embeddings, and constrained the training to 40 epochs. The table below shows the model sizes, where we are interested in the tied version (biases are ignored due to their small size and because we don't prune them). Size Number of Weights (untied) Number of Weights (tied) Small 13,951,200 7,295,600 Medium 50,021,400 28,390,700 Large 135,834,000 85,917,000 I started experimenting with the optimizer setup like in the PyTorch example, but I added some L2 regularization when I noticed that the training was overfitting. The two right columns show the perplexity results (lower is better) of each of the models with no L2 regularization and with 1e-5 and 1e-6. In all three model sizes using the smaller L2 regularization (1e-6) gave the best results. BTW, I'm not showing here experiments with even lower regularization because that did not help. Type Command line Validation Test Small time python3 main.py --cuda --epochs 40 --tied 105.23 99.53 Small time python3 main.py --cuda --epochs 40 --tied --wd=1e-6 101.13 96.29 Small time python3 main.py --cuda --epochs 40 --tied --wd=1e-5 109.49 103.53 Medium time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied 90.93 86.20 Medium time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied --wd=1e-6 88.17 84.21 Medium time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied --wd=1e-5 97.75 93.06 Large time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied 88.23 84.21 Large time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --wd=1e-6 87.49 83.85 Large time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --wd=1e-5 99.22 94.28 Compressing the language model OK, so now let's recreate the results of the language model experiment from section 4.2 of the paper. We're using PyTorch's sample, so the language model we implement is not exactly like the one in the AGP paper (and uses a different dataset), but it's close enough, so if everything goes well, we should see similar compression results. 
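Since AGP's gradual schedule is central to everything that follows, here is a small illustrative sketch of the cubic sparsity ramp it uses. The formula is taken from Zhu and Gupta's paper; the function below is my own illustration, not Distiller's implementation:

def agp_sparsity(epoch, starting_epoch, ending_epoch, initial_sparsity, final_sparsity):
    # Cubic ramp from initial_sparsity up to final_sparsity (Zhu & Gupta);
    # flat before starting_epoch and after ending_epoch.
    if epoch < starting_epoch:
        return 0.0
    progress = min(1.0, (epoch - starting_epoch) / (ending_epoch - starting_epoch))
    return final_sparsity + (initial_sparsity - final_sparsity) * (1.0 - progress) ** 3

# The 70% schedule discussed later ramps from 5% to 70% between epochs 2 and 20:
for epoch in [2, 5, 10, 15, 20]:
    print(epoch, round(agp_sparsity(epoch, 2, 20, 0.05, 0.70), 3))

Note how the sparsity grows quickly at first, when there are many redundant weights, and slowly near the end, when the surviving weights matter most.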
What are we compressing? To gain insight into the model parameters, we can use the command-line to produce a weights-sparsity table: $ python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --summary=sparsity Parameters: +---------+------------------+---------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean | |---------+------------------+---------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0.00000 | encoder.weight | (33278, 1500) | 49917000 | 49916999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.05773 | -0.00000 | 0.05000 | | 1.00000 | rnn.weight_ih_l0 | (6000, 1500) | 9000000 | 9000000 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.01491 | 0.00001 | 0.01291 | | 2.00000 | rnn.weight_hh_l0 | (6000, 1500) | 9000000 | 8999999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00001 | 0.01491 | 0.00000 | 0.01291 | | 3.00000 | rnn.weight_ih_l1 | (6000, 1500) | 9000000 | 8999999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00001 | 0.01490 | -0.00000 | 0.01291 | | 4.00000 | rnn.weight_hh_l1 | (6000, 1500) | 9000000 | 9000000 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.01491 | -0.00000 | 0.01291 | | 5.00000 | decoder.weight | (33278, 1500) | 49917000 | 49916999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.05773 | -0.00000 | 0.05000 | | 6.00000 | Total sparsity: | - | 135834000 | 135833996 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | +---------+------------------+---------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ Total sparsity: 0.00 So what's going on here? encoder.weight and decoder.weight are the input and output embeddings, respectively. Remember that in the configuration I chose for the three model sizes these embeddings are tied, which means that we only have one copy of the parameters, shared between the encoder and decoder. We also have two pairs of RNN (LSTM really) parameters. There is a pair because the model uses the command-line argument args.nlayers to decide how many instances of RNN (or LSTM or GRU) cells to use, and it defaults to 2. The recurrent cells are LSTM cells, because this is the default of args.model , which is used in the initialization of RNNModel . Let's look at the parameters of the first RNN: rnn.weight_ih_l0 and rnn.weight_hh_l0 : what are these? Recall the LSTM equations that PyTorch implements. In the equations, there are 8 instances of vector-matrix multiplication (when batch=1). These can be combined into a single matrix-matrix multiplication (GEMM), but PyTorch groups these into two GEMM operations: one GEMM multiplies the inputs ( rnn.weight_ih_l0 ), and the other multiplies the hidden-state ( rnn.weight_hh_l0 ). How are we compressing? Let's turn to the configurations of the Large language model compression schedules for 70%, 80%, 90% and 95% sparsity. Using AGP it is easy to configure the pruning schedule to produce an exact sparsity of the compressed model. I'll use the 70% schedule to show a concrete example. The YAML file has two sections: pruners and policies . 
Section pruners defines instances of ParameterPruner - in our case we define three instances of AutomatedGradualPruner : for the weights of the first RNN ( l0_rnn_pruner ), the second RNN ( l1_rnn_pruner ) and the embedding layer ( embedding_pruner ). These names are arbitrary, and serve as name-handles which bind Policies to Pruners - so you can use whatever names you want. Each AutomatedGradualPruner is configured with an initial_sparsity and final_sparsity . For example, the l0_rnn_pruner below is configured to prune 5% of the weights as soon as it starts working, and finish when 70% of the weights have been pruned. The weights parameter tells the Pruner which weight tensors to prune. pruners: l0_rnn_pruner: class: AutomatedGradualPruner initial_sparsity : 0.05 final_sparsity: 0.70 weights: [rnn.weight_ih_l0, rnn.weight_hh_l0] l1_rnn_pruner: class: AutomatedGradualPruner initial_sparsity : 0.05 final_sparsity: 0.70 weights: [rnn.weight_ih_l1, rnn.weight_hh_l1] embedding_pruner: class: AutomatedGradualPruner initial_sparsity : 0.05 final_sparsity: 0.70 weights: [encoder.weight] When are we compressing? If the pruners section defines \"what-to-do\", the policies section defines \"when-to-do\". This part is harder, because we define the pruning schedule, which requires us to try a few different schedules until we understand which schedule works best. Below we define three PruningPolicy instances. The first two instances start operating at epoch 2 ( starting_epoch ), end at epoch 20 ( ending_epoch ), and operate once every epoch ( frequency ; as I explained above, Distiller's Pruning scheduling operates only at on_epoch_begin ). In between pruning operations, the pruned model is fine-tuned. policies: - pruner: instance_name : l0_rnn_pruner starting_epoch: 2 ending_epoch: 20 frequency: 1 - pruner: instance_name : l1_rnn_pruner starting_epoch: 2 ending_epoch: 20 frequency: 1 - pruner: instance_name : embedding_pruner starting_epoch: 3 ending_epoch: 21 frequency: 1 We invoke the compression as follows: $ time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70.schedule_agp.yaml Table 1 above shows that we can make a negligible improvement when adding L2 regularization. I did some experimenting with the sparsity distribution between the layers and the scheduling frequency, and noticed that the embedding layers are much less sensitive to pruning than the RNN cells. I didn't notice any difference between the RNN cells, but I also didn't invest in this exploration. A new 70% sparsity schedule prunes the RNNs only to 50% sparsity, but prunes the embedding to 85% sparsity, and achieves almost a 3-point improvement in the test perplexity results. We provide similar pruning schedules for the other compression rates. Until next time This concludes the first part of the tutorial on pruning a PyTorch language model. In the next installment, I'll explain how we added an implementation of Baidu Research's Exploring Sparsity in Recurrent Neural Networks paper, and applied it to this language model. Geek On.","title":"Pruning a Language Model"},{"location":"tutorial-lang_model.html#using-distiller-to-prune-a-pytorch-language-model","text":"","title":"Using Distiller to prune a PyTorch language model"},{"location":"tutorial-lang_model.html#contents","text":"Introduction Setup Preparing the code Training-loop Creating compression baselines Compressing the language model What are we compressing? How are we compressing? 
When are we compressing? Until next time","title":"Contents"},{"location":"tutorial-lang_model.html#introduction","text":"In this tutorial I'll show you how to compress a word-level language model using Distiller . Specifically, we use PyTorch\u2019s word-level language model sample code as the code-base of our example, weave in some Distiller code, and show how we compress the model using two different element-wise pruning algorithms. To make things manageable, I've divided the tutorial into two parts: in the first we will set up the sample application and prune using AGP . In the second part I'll show how I've added Baidu's RNN pruning algorithm and then use it to prune the same word-level language model. The completed code is available here . The results are displayed below and the code is available here . Note that we can improve the results by training longer, since the loss curves are usually still decreasing at the end of epoch 40. However, for demonstration purposes we don\u2019t need to do this. Type Sparsity NNZ Validation Test Command line Small 0% 7,135,600 101.13 96.29 time python3 main.py --cuda --epochs 40 --tied --wd=1e-6 Medium 0% 28,390,700 88.17 84.21 time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied --wd=1e-6 Large 0% 85,917,000 87.49 83.85 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --wd=1e-6 Large 70% 25,487,550 90.67 85.96 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70.schedule_agp.yaml Large 70% 25,487,550 90.59 85.84 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70.schedule_agp.yaml --wd=1e-6 Large 70% 25,487,550 87.40 82.93 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70B.schedule_agp.yaml --wd=1e-6 Large 80.4% 16,847,550 89.31 83.64 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_80.schedule_agp.yaml --wd=1e-6 Large 90% 8,591,700 90.70 85.67 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_90.schedule_agp.yaml --wd=1e-6 Large 95% 4,295,850 98.42 92.79 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_95.schedule_agp.yaml --wd=1e-6 Table 1: AGP language model pruning results. NNZ stands for number of non-zero coefficients (embeddings are counted once, because they are tied). Figure 1: Perplexity vs model size (lower perplexity is better). The model is composed of an Encoder embedding, two LSTMs, and a Decoder embedding. The Encoder and Decoder embeddings (projections) are tied to improve perplexity results (per https://arxiv.org/pdf/1611.01462.pdf), so in the sparsity statistics we account for only one of the encoder/decoder embeddings. We used the WikiText2 dataset (twice as large as PTB). We compared three model sizes: small (7.1M; 14M), medium (28M; 50M), large (86M; 136M) \u2013 reported as (#parameters net/tied; #parameters gross). The results reported below use a preset seed (for reproducibility), and we expect results can be improved if we allow \u201ctrue\u201d pseudo-randomness. 
We limited our tests to 40 epochs, even though validation perplexity was still trending down. Essentially, this recreates the language model experiment in the AGP paper, and validates its conclusions: \u201cWe see that sparse models are able to outperform dense models which have significantly more parameters.\u201d The 80% sparse large model (which has 16.9M parameters and a perplexity of 83.64) is able to outperform the dense medium (which has 28.4M parameters and a perplexity of 84.21), a model which has 1.7 times more parameters. It also outperforms the dense large model, which exemplifies how pruning can act as a regularizer. \u201cOur results show that pruning works very well not only on the dense LSTM weights and dense softmax layer but also the dense embedding matrix. This suggests that during the optimization procedure the neural network can find a good sparse embedding for the words in the vocabulary that works well together with the sparse connectivity structure of the LSTM weights and softmax layer.\u201d","title":"Introduction"},{"location":"tutorial-lang_model.html#setup","text":"We start by cloning PyTorch\u2019s example repository . I\u2019ve copied the language model code to distiller\u2019s examples/word_language_model directory, so I\u2019ll use that for the rest of the tutorial. Next, let\u2019s create and activate a virtual environment, as explained in Distiller's README file. Now we can turn our attention to main.py , which contains the training application.","title":"Setup"},{"location":"tutorial-lang_model.html#preparing-the-code","text":"We begin by adding code to invoke Distiller in file main.py . This involves a bit of mechanics, because we did not pip install Distiller in our environment (we don\u2019t have a setup.py script for Distiller as of yet). To make Distiller library functions accessible from main.py , we modify sys.path to include the distiller root directory by taking the current directory and pointing two directories up. This is very specific to the location of this example code, and it will break if you\u2019ve placed the code elsewhere \u2013 so be aware. import os import sys script_dir = os.path.dirname(__file__) module_path = os.path.abspath(os.path.join(script_dir, '..', '..')) if module_path not in sys.path: sys.path.append(module_path) import distiller import apputils from distiller.data_loggers import TensorBoardLogger, PythonLogger Next, we augment the application arguments with two Distiller-specific arguments. The first, --summary , gives us the ability to do simple compression instrumentation (e.g. log sparsity statistics). The second argument, --compress , is how we tell the application where the compression scheduling file is located. We also add two arguments - momentum and weight-decay - for the SGD optimizer. As I explain later, I replaced the original code's optimizer with SGD, so we need these extra arguments. 
# Distiller-related arguments SUMMARY_CHOICES = ['sparsity', 'model', 'modules', 'png', 'percentile'] parser.add_argument('--summary', type=str, choices=SUMMARY_CHOICES, help='print a summary of the model, and exit - options: ' + ' | '.join(SUMMARY_CHOICES)) parser.add_argument('--compress', dest='compress', type=str, nargs='?', action='store', help='configuration file for pruning the model (default is to use hard-coded schedule)') parser.add_argument('--momentum', default=0., type=float, metavar='M', help='momentum') parser.add_argument('--weight-decay', '--wd', default=0., type=float, metavar='W', help='weight decay (default: 0.)') We add code to handle the --summary application argument. It can be as simple as forwarding to distiller.model_summary or more complex, as in the Distiller sample. if args.summary: distiller.model_summary(model, None, args.summary, 'wikitext2') exit(0) Similarly, we add code to handle the --compress argument, which creates a CompressionScheduler and configures it from a YAML schedule file: if args.compress: source = args.compress compression_scheduler = distiller.CompressionScheduler(model) distiller.config.fileConfig(model, None, compression_scheduler, args.compress, msglogger) We also create the optimizer, and the learning-rate decay policy scheduler. The original PyTorch example manually manages the optimization and LR decay process, but I think that having a standard optimizer and LR-decay schedule gives us the flexibility to experiment with these during the training process. Using an SGD optimizer configured with momentum=0 and weight_decay=0 , and a ReduceLROnPlateau LR-decay policy with patience=0 and factor=0.5 will give the same behavior as in the original PyTorch example. From there, we can experiment with the optimizer and LR-decay configuration. optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay) lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=0, verbose=True, factor=0.5) Next, we add code to set up the logging backends: a Python logger backend which reads its configuration from file and logs messages to the console and log file ( pylogger ); and a TensorBoard backend logger which logs statistics to a TensorBoard data file ( tflogger ). I configured the TensorBoard backend to log gradients because RNNs suffer from vanishing and exploding gradients, so we might want to take a look in case the training experiences a sudden failure. This code is not strictly required, but it is quite useful to be able to log the session progress, and to export logs to TensorBoard for realtime visualization of the training progress. # Distiller loggers msglogger = apputils.config_pylogger('logging.conf', None) tflogger = TensorBoardLogger(msglogger.logdir) tflogger.log_gradients = True pylogger = PythonLogger(msglogger)","title":"Preparing the code"},{"location":"tutorial-lang_model.html#training-loop","text":"Now we scroll down all the way to the train() function. We'll change its signature to include the epoch , optimizer , and compression_scheduler . We'll soon see why we need these. def train(epoch, optimizer, compression_scheduler=None) Function train() is responsible for training the network in batches for one epoch, and in its batch loop we want to perform compression. The CompressionScheduler invokes ScheduledTrainingPolicy instances per the scheduling specification that was programmed in the CompressionScheduler instance. 
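Conceptually, this dispatch amounts to fanning each callback out to the policies whose schedule is active at the current epoch. The sketch below is illustrative only - the class name is made up, and the real logic (which also handles mini-batch granularity) lives in distiller/scheduler.py:

class SketchScheduler:
    # Illustrative stand-in for CompressionScheduler's policy dispatch.
    def __init__(self):
        self.policies = []  # (policy, starting_epoch, ending_epoch, frequency)

    def add_policy(self, policy, starting_epoch, ending_epoch, frequency):
        self.policies.append((policy, starting_epoch, ending_epoch, frequency))

    def on_epoch_begin(self, epoch):
        for policy, start, end, freq in self.policies:
            # A policy is active within its epoch range, firing every `freq` epochs
            if start <= epoch < end and (epoch - start) % freq == 0:
                policy.on_epoch_begin(epoch)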
There are four main SchedulingPolicy types: PruningPolicy , RegularizationPolicy , LRPolicy , and QuantizationPolicy . We'll be using PruningPolicy , which is triggered on_epoch_begin (to invoke the Pruners ), and on_minibatch_begin (to mask the weights). Later we will create a YAML scheduling file, and specify the schedule of AutomatedGradualPruner instances. Because we are writing a single application, which can be used with various Policies in the future (e.g. group-lasso regularization), we should add code to invoke all of the CompressionScheduler 's callbacks, not just the mandatory on_epoch_begin callback. We invoke on_minibatch_begin before running the forward-pass, before_backward_pass after computing the loss, and on_minibatch_end after completing the backward-pass. def train(epoch, optimizer, compression_scheduler=None): ... # The line below was fixed as per: https://github.com/pytorch/examples/issues/214 for batch, i in enumerate(range(0, train_data.size(0), args.bptt)): data, targets = get_batch(train_data, i) # Starting each batch, we detach the hidden state from how it was previously produced. # If we didn't, the model would try backpropagating all the way to start of the dataset. hidden = repackage_hidden(hidden) if compression_scheduler: compression_scheduler.on_minibatch_begin(epoch, minibatch_id=batch, minibatches_per_epoch=steps_per_epoch) output, hidden = model(data, hidden) loss = criterion(output.view(-1, ntokens), targets) if compression_scheduler: compression_scheduler.before_backward_pass(epoch, minibatch_id=batch, minibatches_per_epoch=steps_per_epoch, loss=loss) optimizer.zero_grad() loss.backward() # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs. torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip) optimizer.step() total_loss += loss.item() if compression_scheduler: compression_scheduler.on_minibatch_end(epoch, minibatch_id=batch, minibatches_per_epoch=steps_per_epoch) The rest of the code could stay as in the original PyTorch sample, but I wanted to use an SGD optimizer, so I replaced: for p in model.parameters(): p.data.add_(-lr, p.grad.data) with: optimizer.step() The rest of the code in function train() logs to a text file and a TensorBoard backend. Again, such code is not mandatory, but a few lines give us a lot of visibility: we have training progress information saved to log, and we can monitor the training progress in realtime on TensorBoard. That's a lot for a few lines of code ;-) if batch % args.log_interval == 0 and batch > 0: cur_loss = total_loss / args.log_interval elapsed = time.time() - start_time lr = optimizer.param_groups[0]['lr'] msglogger.info( '| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.4f} | ms/batch {:5.2f} ' '| loss {:5.2f} | ppl {:8.2f}'.format( epoch, batch, len(train_data) // args.bptt, lr, elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss))) total_loss = 0 start_time = time.time() stats = ('Performance/Training/', OrderedDict([ ('Loss', cur_loss), ('Perplexity', math.exp(cur_loss)), ('LR', lr), ('Batch Time', elapsed * 1000)]) ) steps_completed = batch + 1 distiller.log_training_progress(stats, model.named_parameters(), epoch, steps_completed, steps_per_epoch, args.log_interval, [tflogger]) Finally, we get to the outer training-loop which loops on args.epochs . We add the two final CompressionScheduler callbacks: on_epoch_begin , at the start of the loop, and on_epoch_end after running evaluate on the model and updating the learning-rate. 
try: for epoch in range(0, args.epochs): epoch_start_time = time.time() if compression_scheduler: compression_scheduler.on_epoch_begin(epoch) train(epoch, optimizer, compression_scheduler) val_loss = evaluate(val_data) lr_scheduler.step(val_loss) if compression_scheduler: compression_scheduler.on_epoch_end(epoch) And that's it! The language model sample is ready for compression.","title":"Training loop"},{"location":"tutorial-lang_model.html#creating-compression-baselines","text":"In To prune, or not to prune: exploring the efficacy of pruning for model compression , Zhu and Gupta \"compare the accuracy of large, but pruned models (large-sparse) and their smaller, but dense (small-dense) counterparts with identical memory footprint.\" They also \"propose a new gradual pruning technique that is simple and straightforward to apply across a variety of models/datasets with minimal tuning.\" This pruning schedule is implemented by distiller.AutomatedGradualPruner (AGP), which increases the sparsity level (expressed as a percentage of zero-valued elements) gradually over several pruning steps. Distiller's implementation only prunes elements once in an epoch (the model is fine-tuned in between pruning events), which is a small deviation from Zhu and Gupta's paper. The research paper specifies the schedule in terms of mini-batches, while our implementation specifies the schedule in terms of epochs. We feel that using epochs performs well, and is more \"stable\", since the number of mini-batches will change if you change the batch size. Before we start compressing stuff ;-), we need to create baselines so we have something to benchmark against. Let's prepare small, medium, and large baseline models, like Table 3 of To prune, or Not to Prune . These will provide baseline perplexity results that we'll compare the compressed models against. I chose to use tied input/output embeddings, and constrained the training to 40 epochs. The table below shows the model sizes, where we are interested in the tied version (biases are ignored due to their small size and because we don't prune them). Size Number of Weights (untied) Number of Weights (tied) Small 13,951,200 7,295,600 Medium 50,021,400 28,390,700 Large 135,834,000 85,917,000 I started experimenting with the optimizer setup like in the PyTorch example, but I added some L2 regularization when I noticed that the training was overfitting. The two right columns show the perplexity results (lower is better) of each of the models with no L2 regularization and with 1e-5 and 1e-6. In all three model sizes using the smaller L2 regularization (1e-6) gave the best results. BTW, I'm not showing here experiments with even lower regularization because that did not help. 
Type Command line Validation Test Small time python3 main.py --cuda --epochs 40 --tied 105.23 99.53 Small time python3 main.py --cuda --epochs 40 --tied --wd=1e-6 101.13 96.29 Small time python3 main.py --cuda --epochs 40 --tied --wd=1e-5 109.49 103.53 Medium time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied 90.93 86.20 Medium time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied --wd=1e-6 88.17 84.21 Medium time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied --wd=1e-5 97.75 93.06 Large time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied 88.23 84.21 Large time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --wd=1e-6 87.49 83.85 Large time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --wd=1e-5 99.22 94.28","title":"Creating compression baselines"},{"location":"tutorial-lang_model.html#compressing-the-language-model","text":"OK, so now let's recreate the results of the language model experiment from section 4.2 of the paper. We're using PyTorch's sample, so the language model we implement is not exactly like the one in the AGP paper (and uses a different dataset), but it's close enough, so if everything goes well, we should see similar compression results.","title":"Compressing the language model"},{"location":"tutorial-lang_model.html#what-are-we-compressing","text":"To gain insight into the model parameters, we can use the command-line to produce a weights-sparsity table: $ python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --summary=sparsity Parameters: +---------+------------------+---------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean | |---------+------------------+---------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0.00000 | encoder.weight | (33278, 1500) | 49917000 | 49916999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.05773 | -0.00000 | 0.05000 | | 1.00000 | rnn.weight_ih_l0 | (6000, 1500) | 9000000 | 9000000 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.01491 | 0.00001 | 0.01291 | | 2.00000 | rnn.weight_hh_l0 | (6000, 1500) | 9000000 | 8999999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00001 | 0.01491 | 0.00000 | 0.01291 | | 3.00000 | rnn.weight_ih_l1 | (6000, 1500) | 9000000 | 8999999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00001 | 0.01490 | -0.00000 | 0.01291 | | 4.00000 | rnn.weight_hh_l1 | (6000, 1500) | 9000000 | 9000000 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.01491 | -0.00000 | 0.01291 | | 5.00000 | decoder.weight | (33278, 1500) | 49917000 | 49916999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.05773 | -0.00000 | 0.05000 | | 6.00000 | Total sparsity: | - | 135834000 | 135833996 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | +---------+------------------+---------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ Total sparsity: 0.00 So what's going on here? encoder.weight and decoder.weight are the input and output embeddings, respectively. 
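Mechanically, tying the embeddings just means the two modules share a single parameter tensor. A minimal, self-contained sketch (the dimensions are those of the Large model; this is an illustration, not the sample's model.py):

import torch.nn as nn

ntokens, emsize = 33278, 1500            # Large model dimensions
encoder = nn.Embedding(ntokens, emsize)  # input embedding
decoder = nn.Linear(emsize, ntokens)     # output projection (the "softmax layer")
decoder.weight = encoder.weight          # tying: one tensor, two roles
print(decoder.weight is encoder.weight)  # True - a single copy of the parameters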
Remember that in the configuration I chose for the three model sizes these embeddings are tied, which means that we only have one copy of the parameters, shared between the encoder and decoder. We also have two pairs of RNN (LSTM really) parameters. There is a pair because the model uses the command-line argument args.nlayers to decide how many instances of RNN (or LSTM or GRU) cells to use, and it defaults to 2. The recurrent cells are LSTM cells, because this is the default of args.model , which is used in the initialization of RNNModel . Let's look at the parameters of the first RNN: rnn.weight_ih_l0 and rnn.weight_hh_l0 : what are these? Recall the LSTM equations that PyTorch implements. In the equations, there are 8 instances of vector-matrix multiplication (when batch=1). These can be combined into a single matrix-matrix multiplication (GEMM), but PyTorch groups these into two GEMM operations: one GEMM multiplies the inputs ( rnn.weight_ih_l0 ), and the other multiplies the hidden-state ( rnn.weight_hh_l0 ).","title":"What are we compressing?"},{"location":"tutorial-lang_model.html#how-are-we-compressing","text":"Let's turn to the configurations of the Large language model compression schedules for 70%, 80%, 90% and 95% sparsity. Using AGP it is easy to configure the pruning schedule to produce an exact sparsity of the compressed model. I'll use the 70% schedule to show a concrete example. The YAML file has two sections: pruners and policies . Section pruners defines instances of ParameterPruner - in our case we define three instances of AutomatedGradualPruner : for the weights of the first RNN ( l0_rnn_pruner ), the second RNN ( l1_rnn_pruner ) and the embedding layer ( embedding_pruner ). These names are arbitrary, and serve as name-handles which bind Policies to Pruners - so you can use whatever names you want. Each AutomatedGradualPruner is configured with an initial_sparsity and final_sparsity . For example, the l0_rnn_pruner below is configured to prune 5% of the weights as soon as it starts working, and finish when 70% of the weights have been pruned. The weights parameter tells the Pruner which weight tensors to prune. pruners: l0_rnn_pruner: class: AutomatedGradualPruner initial_sparsity : 0.05 final_sparsity: 0.70 weights: [rnn.weight_ih_l0, rnn.weight_hh_l0] l1_rnn_pruner: class: AutomatedGradualPruner initial_sparsity : 0.05 final_sparsity: 0.70 weights: [rnn.weight_ih_l1, rnn.weight_hh_l1] embedding_pruner: class: AutomatedGradualPruner initial_sparsity : 0.05 final_sparsity: 0.70 weights: [encoder.weight]","title":"How are we compressing?"},{"location":"tutorial-lang_model.html#when-are-we-compressing","text":"If the pruners section defines \"what-to-do\", the policies section defines \"when-to-do\". This part is harder, because we define the pruning schedule, which requires us to try a few different schedules until we understand which schedule works best. Below we define three PruningPolicy instances. The first two instances start operating at epoch 2 ( starting_epoch ), end at epoch 20 ( ending_epoch ), and operate once every epoch ( frequency ; as I explained above, Distiller's Pruning scheduling operates only at on_epoch_begin ). In between pruning operations, the pruned model is fine-tuned. 
policies: - pruner: instance_name : l0_rnn_pruner starting_epoch: 2 ending_epoch: 20 frequency: 1 - pruner: instance_name : l1_rnn_pruner starting_epoch: 2 ending_epoch: 20 frequency: 1 - pruner: instance_name : embedding_pruner starting_epoch: 3 ending_epoch: 21 frequency: 1 We invoke the compression as follows: $ time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70.schedule_agp.yaml Table 1 above shows that we can make a negligible improvement when adding L2 regularization. I did some experimenting with the sparsity distribution between the layers and the scheduling frequency, and noticed that the embedding layers are much less sensitive to pruning than the RNN cells. I didn't notice any difference between the RNN cells, but I also didn't invest in this exploration. A new 70% sparsity schedule prunes the RNNs only to 50% sparsity, but prunes the embedding to 85% sparsity, and achieves almost a 3-point improvement in the test perplexity results. We provide similar pruning schedules for the other compression rates.","title":"When are we compressing?"},{"location":"tutorial-lang_model.html#until-next-time","text":"This concludes the first part of the tutorial on pruning a PyTorch language model. In the next installment, I'll explain how we added an implementation of Baidu Research's Exploring Sparsity in Recurrent Neural Networks paper, and applied it to this language model. Geek On.","title":"Until next time"},{"location":"tutorial-struct_pruning.html","text":"Pruning Filters & Channels Introduction Channel and filter pruning are examples of structured-pruning which create compressed models that do not require special hardware to execute. This latter fact makes this form of structured pruning particularly interesting and popular. In networks that have serial data dependencies, it is pretty straightforward to understand and define how to prune channels and filters. However, in more complex models, with parallel-data dependencies (paths) - such as ResNets (skip connections) and GoogLeNet (Inception layers) \u2013 things become increasingly complex and require a deeper understanding of the data flow in the model, in order to define the pruning schedule. This post explains channel and filter pruning, the challenges, and how to define a Distiller pruning schedule for these structures. The details of the implementation are left for a separate post. Before we dive into pruning, let\u2019s level-set on the terminology, because different people (and even research papers) do not always agree on the nomenclature. This reflects my understanding of the nomenclature, and therefore these are the names used in Distiller. I\u2019ll restrict this discussion to Convolution layers in CNNs, to contain the scope of the topic I\u2019ll be covering, although Distiller supports pruning of other structures such as matrix columns and rows. PyTorch describes torch.nn.Conv2d as applying \u201ca 2D convolution over an input signal composed of several input planes.\u201d We call each of these input planes a feature-map (or FM, for short). Another name is input channel , as in the R/G/B channels of an image. Some people refer to feature-maps as activations (i.e. the activation of neurons), although I think strictly speaking activations are the output of an activation layer that was fed a group of feature-maps. 
Because it is very common, and because the use of an activation is orthogonal to our discussion, I will use activations to refer to the output of a Convolution layer (i.e. 3D stack of feature-maps). In the PyTorch documentation Convolution outputs have shape (N, C out , H out , W out ) where N is a batch size, C out denotes a number of output channels, H out is a height of output planes in pixels, and W out is width in pixels. We won\u2019t be paying much attention to the batch-size since it\u2019s not important to our discussion, so without loss of generality we can set N=1. I\u2019m also assuming the most common Convolutions having groups==1 . Convolution weights are 4D: (F, C, K, K) where F is the number of filters, C is the number of channels, and K is the kernel size (we can assume the kernel height and width are equal for simplicity). A kernel is a 2D matrix (K, K) that is part of a 3D feature detector. This feature detector is called a filter and it is basically a stack of 2D kernels . Each kernel is convolved with a 2D input channel (i.e. feature-map) so if there are C in channels in the input, then there are C in kernels in a filter (C == C in ). Each filter is convolved with the entire input to create a single output channel (i.e. feature-map). If there are C out output channels, then there are C out filters (F == C out ). Filter Pruning Filter pruning and channel pruning are very similar, and I\u2019ll expand on that similarity later on \u2013 but for now let\u2019s focus on filter pruning. In filter pruning we use some criterion to determine which filters are important and which are not. Researchers came up with all sorts of pruning criteria: the L1-magnitude of the filters (citation), the entropy of the activations (citation), and the classification accuracy reduction (citation) are just some examples. Disregarding how we chose the filters to prune, let\u2019s imagine that in the diagram below, we chose to prune (remove) the green and orange filters (the circle with the \u201c*\u201d designates a Convolution operation). Since we have two fewer filters operating on the input, we must have two fewer output feature-maps. So when we prune filters, besides changing the physical size of the weight tensors, we also need to reconfigure the immediate Convolution layer (change its out_channels ) and the following Convolution layer (change its in_channels ). And finally, because the next layer\u2019s input is now smaller (has fewer channels), we should also shrink the next layer\u2019s weights tensors, by removing the channels corresponding to the filters we pruned. We say that there is a data-dependency between the two Convolution layers. I didn\u2019t make any mention of the activation function that usually follows Convolution, because these functions are parameter-less and are not sensitive to the shape of their input. There are some other dependencies that Distiller resolves (such as Optimizer parameters tightly-coupled to the weights) that I won\u2019t discuss here, because they are implementation details. The scheduler YAML syntax for this example is pasted below. We use L1-norm ranking of weight filters, and the pruning-rate is set by the AGP algorithm (Automatic Gradual Pruning). The Convolution layers are conveniently named conv1 and conv2 in this example. 
pruners: example_pruner: class: L1RankedStructureParameterPruner_AGP initial_sparsity : 0.10 final_sparsity: 0.50 group_type: Filters weights: [module.conv1.weight] Now let\u2019s add a Batch Normalization layer between the two convolutions: The Batch Normalization layer is parameterized by a couple of tensors that contain information per input-channel (i.e. scale and shift). Because our Convolution produces fewer output FMs, and these are the input to the Batch Normalization layer, we also need to reconfigure the Batch Normalization layer. And we also need to physically shrink the Batch Normalization layer\u2019s scale and shift tensors, which are coefficients in the BN input transformation. Moreover, the scale and shift coefficients that we remove from the tensors, must correspond to the filters (or output feature-maps channels) that we removed from the Convolution weight tensors. This small nuance will prove to be a large pain, but we\u2019ll get to that in later examples. The presence of a Batch Normalization layer in the example above is transparent to us, and in fact, the YAML schedule does not change. Distiller detects the presence of Batch Normalization layers and adjusts their parameters automatically. Let\u2019s look at another example, with non-serial data-dependencies. Here, the output of conv1 is the input for conv2 and conv3 . This is an example of parallel data-dependency, since both conv2 and conv3 depend on conv1 . Note that the Distiller YAML schedule is unchanged from the previous two examples, since we are still only explicitly pruning the weight filters of conv1 . The weight channels of conv2 and conv3 are pruned implicitly by Distiller in a process called \u201cThinning\u201d (on which I will expand in a different post). Next, let\u2019s look at another example also involving three Convolutions, but this time we want to prune the filters of two convolutional layers, whose outputs are element-wise-summed and fed into a third Convolution. In this example conv3 is dependent on both conv1 and conv2 , and there are two implications to this dependency. The first, and more obvious implication, is that we need to prune the same number of filters from both conv1 and conv2 . Since we apply element-wise addition on the outputs of conv1 and conv2 , they must have the same shape - and they can only have the same shape if conv1 and conv2 prune the same number of filters. The second implication of this triangular data-dependency is that both conv1 and conv2 must prune the same filters! Let\u2019s imagine for a moment, that we ignore this second constraint. The diagram below illustrates the dilemma that arises: how should we prune the channels of the weights of conv3 ? Obviously, we can\u2019t. We must apply the second constraint \u2013 and that means that we now need to be proactive: we need to decide whether to prune conv1 and conv2 according to the filter-pruning choices of conv1 or of conv2 . The diagram below illustrates the pruning scheme after deciding to follow the pruning choices of conv1 . The YAML compression schedule syntax needs to be able to express the two dependencies (or constraints) discussed above. First we need to tell the Filter Pruner that there is a dependency of type Leader . This means that all of the tensors listed in the weights field are pruned together, to the same extent at each iteration, and that to prune the filters we will use the pruning decisions of the first tensor listed. 
In the example below module.conv1.weight and module.conv2.weight are pruned together according to the pruning choices for module.conv1.weight . pruners: example_pruner: class: L1RankedStructureParameterPruner_AGP initial_sparsity : 0.10 final_sparsity: 0.50 group_type: Filters group_dependency: Leader weights: [module.conv1.weight, module.conv2.weight] When we turn to filter-pruning ResNets we see some pretty long dependency chains because of the skip-connections. If you don\u2019t pay attention, you can easily under-specify (or mis-specify) dependency chains and Distiller will exit with an exception. The exception does not explain the specification error and this needs to be improved. Channel Pruning Channel pruning is very similar to Filter pruning with all the details of dependencies reversed. Look again at example #1, but this time imagine that we\u2019ve changed our schedule to prune the channels of module.conv2.weight . pruners: example_pruner: class: L1RankedStructureParameterPruner_AGP initial_sparsity : 0.10 final_sparsity: 0.50 group_type: Channels weights: [module.conv2.weight] As the diagram shows, conv1 is now dependent on conv2 and its weight filters will be implicitly pruned according to the channels removed from the weights of conv2 . Geek On.","title":"Pruning Filters and Channels"},{"location":"tutorial-struct_pruning.html#pruning-filters-channels","text":"","title":"Pruning Filters &amp; Channels"},{"location":"tutorial-struct_pruning.html#introduction","text":"Channel and filter pruning are examples of structured-pruning which create compressed models that do not require special hardware to execute. This latter fact makes this form of structured pruning particularly interesting and popular. In networks that have serial data dependencies, it is pretty straightforward to understand and define how to prune channels and filters. However, in more complex models, with parallel-data dependencies (paths) - such as ResNets (skip connections) and GoogLeNet (Inception layers) \u2013 things become increasingly complex and require a deeper understanding of the data flow in the model, in order to define the pruning schedule. This post explains channel and filter pruning, the challenges, and how to define a Distiller pruning schedule for these structures. The details of the implementation are left for a separate post. Before we dive into pruning, let\u2019s level-set on the terminology, because different people (and even research papers) do not always agree on the nomenclature. This reflects my understanding of the nomenclature, and therefore these are the names used in Distiller. I\u2019ll restrict this discussion to Convolution layers in CNNs, to contain the scope of the topic I\u2019ll be covering, although Distiller supports pruning of other structures such as matrix columns and rows. PyTorch describes torch.nn.Conv2d as applying \u201ca 2D convolution over an input signal composed of several input planes.\u201d We call each of these input planes a feature-map (or FM, for short). Another name is input channel , as in the R/G/B channels of an image. Some people refer to feature-maps as activations (i.e. the activation of neurons), although I think strictly speaking activations are the output of an activation layer that was fed a group of feature-maps. Because it is very common, and because the use of an activation is orthogonal to our discussion, I will use activations to refer to the output of a Convolution layer (i.e. 3D stack of feature-maps). 
In the PyTorch documentation Convolution outputs have shape (N, C out , H out , W out ) where N is a batch size, C out denotes a number of output channels, H out is a height of output planes in pixels, and W out is width in pixels. We won\u2019t be paying much attention to the batch-size since it\u2019s not important to our discussion, so without loss of generality we can set N=1. I\u2019m also assuming the most common Convolutions having groups==1 . Convolution weights are 4D: (F, C, K, K) where F is the number of filters, C is the number of channels, and K is the kernel size (we can assume the kernel height and width are equal for simplicity). A kernel is a 2D matrix (K, K) that is part of a 3D feature detector. This feature detector is called a filter and it is basically a stack of 2D kernels . Each kernel is convolved with a 2D input channel (i.e. feature-map) so if there are C in channels in the input, then there are C in kernels in a filter (C == C in ). Each filter is convolved with the entire input to create a single output channel (i.e. feature-map). If there are C out output channels, then there are C out filters (F == C out ).","title":"Introduction"},{"location":"tutorial-struct_pruning.html#filter-pruning","text":"Filter pruning and channel pruning are very similar, and I\u2019ll expand on that similarity later on \u2013 but for now let\u2019s focus on filter pruning. In filter pruning we use some criterion to determine which filters are important and which are not. Researchers came up with all sorts of pruning criteria: the L1-magnitude of the filters (citation), the entropy of the activations (citation), and the classification accuracy reduction (citation) are just some examples. Regardless of how we chose the filters to prune, let\u2019s imagine that in the diagram below, we chose to prune (remove) the green and orange filters (the circle with the \u201c*\u201d designates a Convolution operation). Since we have two fewer filters operating on the input, we must have two fewer output feature-maps. So when we prune filters, besides changing the physical size of the weight tensors, we also need to reconfigure the immediate Convolution layer (change its out_channels ) and the following Convolution layer (change its in_channels ). And finally, because the next layer\u2019s input is now smaller (has fewer channels), we should also shrink the next layer\u2019s weights tensors, by removing the channels corresponding to the filters we pruned. We say that there is a data-dependency between the two Convolution layers. I didn\u2019t make any mention of the activation function that usually follows Convolution, because these functions are parameter-less and are not sensitive to the shape of their input. There are some other dependencies that Distiller resolves (such as Optimizer parameters tightly-coupled to the weights) that I won\u2019t discuss here, because they are implementation details. The scheduler YAML syntax for this example is pasted below. We use L1-norm ranking of weight filters, and the pruning-rate is set by the AGP algorithm (Automatic Gradual Pruning). The Convolution layers are conveniently named conv1 and conv2 in this example.
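Before the YAML itself, a brief aside on what AGP does with the initial_sparsity/final_sparsity pair that appears in the schedule below. The cubic interpolation sketched here follows the AGP paper (Zhu and Gupta, "To prune, or not to prune"); treat it as an illustration of the idea, not as Distiller's exact code.

def agp_target_sparsity(step, start_step, end_step,
                        initial_sparsity=0.10, final_sparsity=0.50):
    # Cubic schedule: starts at initial_sparsity, ramps quickly at first,
    # then flattens out as it approaches final_sparsity.
    progress = min(max((step - start_step) / float(end_step - start_step), 0.0), 1.0)
    return final_sparsity + (initial_sparsity - final_sparsity) * (1.0 - progress) ** 3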
pruners: example_pruner: class: L1RankedStructureParameterPruner_AGP initial_sparsity : 0.10 final_sparsity: 0.50 group_type: Filters weights: [module.conv1.weight] Now let\u2019s add a Batch Normalization layer between the two convolutions: The Batch Normalization layer is parameterized by a couple of tensors that contain information per input-channel (i.e. scale and shift). Because our Convolution produces fewer output FMs, and these are the input to the Batch Normalization layer, we also need to reconfigure the Batch Normalization layer. And we also need to physically shrink the Batch Normalization layer\u2019s scale and shift tensors, which are coefficients in the BN input transformation. Moreover, the scale and shift coefficients that we remove from the tensors must correspond to the filters (or output feature-maps channels) that we removed from the Convolution weight tensors. This small nuance will prove to be a large pain, but we\u2019ll get to that in later examples. The presence of a Batch Normalization layer in the example above is transparent to us, and in fact, the YAML schedule does not change. Distiller detects the presence of Batch Normalization layers and adjusts their parameters automatically. Let\u2019s look at another example, with non-serial data-dependencies. Here, the output of conv1 is the input for conv2 and conv3 . This is an example of parallel data-dependency, since both conv2 and conv3 depend on conv1 . Note that the Distiller YAML schedule is unchanged from the previous two examples, since we are still only explicitly pruning the weight filters of conv1 . The weight channels of conv2 and conv3 are pruned implicitly by Distiller in a process called \u201cThinning\u201d (on which I will expand in a different post). Next, let\u2019s look at another example also involving three Convolutions, but this time we want to prune the filters of two convolutional layers, whose outputs are element-wise-summed and fed into a third Convolution. In this example conv3 is dependent on both conv1 and conv2 , and there are two implications to this dependency. The first, and more obvious implication, is that we need to prune the same number of filters from both conv1 and conv2 . Since we apply element-wise addition on the outputs of conv1 and conv2 , they must have the same shape - and they can only have the same shape if conv1 and conv2 prune the same number of filters. The second implication of this triangular data-dependency is that both conv1 and conv2 must prune the same filters! Let\u2019s imagine for a moment that we ignore this second constraint. The diagram below illustrates the dilemma that arises: how should we prune the channels of the weights of conv3 ? Obviously, we can\u2019t. We must apply the second constraint \u2013 and that means that we now need to be proactive: we need to decide whether to prune conv1 and conv2 according to the filter-pruning choices of conv1 or of conv2 . The diagram below illustrates the pruning scheme after deciding to follow the pruning choices of conv1 . The YAML compression schedule syntax needs to be able to express the two dependencies (or constraints) discussed above. First we need to tell the Filter Pruner that there is a dependency of type Leader . This means that all of the tensors listed in the weights field are pruned together, to the same extent at each iteration, and that to prune the filters we will use the pruning decisions of the first tensor listed.
In the example below module.conv1.weight and module.conv2.weight are pruned together according to the pruning choices for module.conv1.weight . pruners: example_pruner: class: L1RankedStructureParameterPruner_AGP initial_sparsity : 0.10 final_sparsity: 0.50 group_type: Filters group_dependency: Leader weights: [module.conv1.weight, module.conv2.weight] When we turn to filter-pruning ResNets we see some pretty long dependency chains because of the skip-connections. If you don\u2019t pay attention, you can easily under-specify (or mis-specify) dependency chains and Distiller will exit with an exception. The exception does not explain the specification error and this needs to be improved.","title":"Filter Pruning"},{"location":"tutorial-struct_pruning.html#channel-pruning","text":"Channel pruning is very similar to Filter pruning with all the details of dependencies reversed. Look again at example #1, but this time imagine that we\u2019ve changed our schedule to prune the channels of module.conv2.weight . pruners: example_pruner: class: L1RankedStructureParameterPruner_AGP initial_sparsity : 0.10 final_sparsity: 0.50 group_type: Channels weights: [module.conv2.weight] As the diagram shows, conv1 is now dependent on conv2 and its weight filters will be implicitly pruned according to the channels removed from the weights of conv2 . Geek On.","title":"Channel Pruning"},{"location":"usage.html","text":"Using the sample application The Distiller repository contains a sample application, distiller/examples/classifier_compression/compress_classifier.py , and a set of scheduling files which demonstrate Distiller's features. Following is a brief discussion of how to use this application and the accompanying schedules. You might also want to refer to the following resources: An explanation of the scheduler file format. An in-depth discussion of how we used these schedule files to implement several state-of-the-art DNN compression research papers. The sample application supports various features for compression of image classification DNNs, and gives an example of how to integrate distiller in your own application. The code is documented and should be considered the best source of documentation, but we provide some elaboration here. This diagram shows where compress_classifier.py fits in the compression workflow, and how we integrate the Jupyter notebooks as part of our research work.
Command line arguments To get help on the command line arguments, invoke: $ python3 compress_classifier.py --help For example: $ time python3 compress_classifier.py -a alexnet --lr 0.005 -p 50 ../../../data.imagenet -j 44 --epochs 90 --pretrained --compress=../sensitivity-pruning/alexnet.schedule_sensitivity.yaml Parameters: +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean | |----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0 | features.module.0.weight | (64, 3, 11, 11) | 23232 | 13411 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 42.27359 | 0.14391 | -0.00002 | 0.08805 | | 1 | features.module.3.weight | (192, 64, 5, 5) | 307200 | 115560 | 0.00000 | 0.00000 | 0.00000 | 1.91243 | 0.00000 | 62.38281 | 0.04703 | -0.00250 | 0.02289 | | 2 | features.module.6.weight | (384, 192, 3, 3) | 663552 | 256565 | 0.00000 | 0.00000 | 0.00000 | 6.18490 | 0.00000 | 61.33445 | 0.03354 | -0.00184 | 0.01803 | | 3 | features.module.8.weight | (256, 384, 3, 3) | 884736 | 315065 | 0.00000 | 0.00000 | 0.00000 | 6.96411 | 0.00000 | 64.38881 | 0.02646 | -0.00168 | 0.01422 | | 4 | features.module.10.weight | (256, 256, 3, 3) | 589824 | 186938 | 0.00000 | 0.00000 | 0.00000 | 15.49225 | 0.00000 | 68.30614 | 0.02714 | -0.00246 | 0.01409 | | 5 | classifier.1.weight | (4096, 9216) | 37748736 | 3398881 | 0.00000 | 0.21973 | 0.00000 | 0.21973 | 0.00000 | 90.99604 | 0.00589 | -0.00020 | 0.00168 | | 6 | classifier.4.weight | (4096, 4096) | 16777216 | 1782769 | 0.21973 | 3.46680 | 0.00000 | 3.46680 | 0.00000 | 89.37387 | 0.00849 | -0.00066 | 0.00263 | | 7 | classifier.6.weight | (1000, 4096) | 4096000 | 994738 | 3.36914 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 75.71440 | 0.01718 | 0.00030 | 0.00778 | | 8 | Total sparsity: | - | 61090496 | 7063928 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 88.43694 | 0.00000 | 0.00000 | 0.00000 | +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ 2018-04-04 21:30:52,499 - Total sparsity: 88.44 2018-04-04 21:30:52,499 - --- validate (epoch=89)----------- 2018-04-04 21:30:52,499 - 128116 samples (256 per mini-batch) 2018-04-04 21:31:04,646 - Epoch: [89][ 50/ 500] Loss 2.175988 Top1 51.289063 Top5 74.023438 2018-04-04 21:31:06,427 - Epoch: [89][ 100/ 500] Loss 2.171564 Top1 51.175781 Top5 74.308594 2018-04-04 21:31:11,432 - Epoch: [89][ 150/ 500] Loss 2.159347 Top1 51.546875 Top5 74.473958 2018-04-04 21:31:14,364 - Epoch: [89][ 200/ 500] Loss 2.156857 Top1 51.585938 Top5 74.568359 2018-04-04 21:31:18,381 - Epoch: [89][ 250/ 500] Loss 2.152790 Top1 51.707813 Top5 74.681250 2018-04-04 21:31:22,195 - Epoch: [89][ 300/ 500] Loss 2.149962 Top1 51.791667 Top5 74.755208 2018-04-04 21:31:25,508 - Epoch: [89][ 350/ 500] Loss 2.150936 Top1 51.827009 Top5 74.767857 2018-04-04 21:31:29,538 - Epoch: [89][ 400/ 500] Loss 2.150853 Top1 51.781250 Top5 74.763672 2018-04-04 21:31:32,842 - Epoch: [89][ 450/ 500] Loss 2.150156 Top1 51.828125 Top5 74.821181 2018-04-04 21:31:35,338 - Epoch: [89][ 500/ 500] Loss 2.150417 Top1 51.833594 Top5 74.817187 
2018-04-04 21:31:35,357 - ==> Top1: 51.838 Top5: 74.817 Loss: 2.150 2018-04-04 21:31:35,364 - Saving checkpoint 2018-04-04 21:31:39,251 - --- test --------------------- 2018-04-04 21:31:39,252 - 50000 samples (256 per mini-batch) 2018-04-04 21:31:51,512 - Test: [ 50/ 195] Loss 1.487607 Top1 63.273438 Top5 85.695312 2018-04-04 21:31:55,015 - Test: [ 100/ 195] Loss 1.638043 Top1 60.636719 Top5 83.664062 2018-04-04 21:31:58,732 - Test: [ 150/ 195] Loss 1.833214 Top1 57.619792 Top5 80.447917 2018-04-04 21:32:01,274 - ==> Top1: 56.606 Top5: 79.446 Loss: 1.893 Let's look at the command line again: $ time python3 compress_classifier.py -a alexnet --lr 0.005 -p 50 ../../../data.imagenet -j 44 --epochs 90 --pretrained --compress=../sensitivity-pruning/alexnet.schedule_sensitivity.yaml In this example, we prune a TorchVision pre-trained AlexNet network, using the following configuration: Learning-rate of 0.005 Print progress every 50 mini-batches. Use 44 worker threads to load data (make sure to use something suitable for your machine). Run for 90 epochs. Torchvision's pre-trained models did not store the epoch metadata, so pruning starts at epoch 0. When you train and prune your own networks, the last training epoch is saved as metadata with the model. Therefore, when you load such models, the first epoch is not 0, but it is the last training epoch. The pruning schedule is provided in alexnet.schedule_sensitivity.yaml Log files are written to directory logs . Examples Distiller comes with several example schedules which can be used together with compress_classifier.py . These example schedule (YAML) files contain the command line used to invoke the schedule (so that you can easily recreate the results in your environment), together with the results of the pruning or regularization. The results usually contain a table showing the sparsity of each of the model parameters, together with the validation and test top1, top5 and loss scores. For more details on the example schedules, you can refer to the coverage of the Model Zoo . examples/agp-pruning : Automated Gradual Pruning (AGP) on MobileNet and ResNet18 (ImageNet dataset) examples/hybrid : AlexNet AGP with 2D (kernel) regularization (ImageNet dataset) AlexNet sensitivity pruning with 2D regularization examples/network_slimming : ResNet20 Network Slimming (this is work-in-progress) examples/pruning_filters_for_efficient_convnets : ResNet56 baseline training (CIFAR10 dataset) ResNet56 filter removal using filter ranking examples/sensitivity_analysis : Element-wise pruning sensitivity-analysis: AlexNet (ImageNet) MobileNet (ImageNet) ResNet18 (ImageNet) ResNet20 (CIFAR10) ResNet34 (ImageNet) Filter-wise pruning sensitivity-analysis: ResNet20 (CIFAR10) ResNet56 (CIFAR10) examples/sensitivity-pruning : AlexNet sensitivity pruning with Iterative Pruning AlexNet sensitivity pruning with One-Shot Pruning examples/ssl : ResNet20 baseline training (CIFAR10 dataset) Structured Sparsity Learning (SSL) with layer removal on ResNet20 SSL with channels removal on ResNet20 examples/quantization : AlexNet w. Batch-Norm (base FP32 + DoReFa) Pre-activation ResNet20 on CIFAR10 (base FP32 + DoReFa) Pre-activation ResNet18 on ImageNet (base FP32 + DoReFa) Experiment reproducibility Experiment reproducibility is sometimes important. Pete Warden recently expounded on this in his blog .
PyTorch's support for deterministic execution requires us to use only one thread for loading data (otherwise the multi-threaded execution of the data loaders can create random order and change the results), and to set the seed of the CPU and GPU PRNGs. Using the --deterministic command-line flag and setting j=1 will produce reproducible results (for the same PyTorch version). Performing pruning sensitivity analysis Distiller supports element-wise and filter-wise pruning sensitivity analysis. In both cases, L1-norm is used to rank which elements or filters to prune. For example, when running filter-pruning sensitivity analysis, the L1-norms of the filters of each layer's weights tensor are calculated, and the bottom x% are set to zero. The analysis process is quite long, because currently we use the entire test dataset to assess the accuracy performance at each pruning level of each weights tensor. Using a small dataset for this would save much time and we plan on assessing if this will provide sufficient results. Results are output as a CSV file ( sensitivity.csv ) and PNG file ( sensitivity.png ). The implementation is in distiller/sensitivity.py and it contains further details about the process and the format of the CSV file. The example below performs element-wise pruning sensitivity analysis on ResNet20 for CIFAR10: $ python3 compress_classifier.py -a resnet20_cifar ../../../data.cifar10/ -j=1 --resume=../cifar10/resnet20/checkpoint_trained_dense.pth.tar --sense=element The sense command-line argument can be set to either element or filter , depending on the type of analysis you want done. There is also a Jupyter notebook with example invocations, outputs and explanations. Post-Training Quantization The following example quantizes ResNet18 for ImageNet: $ python3 compress_classifier.py -a resnet18 ../../../data.imagenet --pretrained --quantize-eval --evaluate See here for more details on how to invoke post-training quantization from the command line. A checkpoint with the quantized model will be dumped in the run directory. It will contain the quantized model parameters (the data type will still be FP32, but the values will be integers). The calculated quantization parameters (scale and zero-point) are stored as well in each quantized layer. For more examples of post-training quantization see here . Summaries You can use the sample compression application to generate model summary reports, such as the attributes and compute summary report (see screen capture below). You can log sparsity statistics (written to console and CSV file), performance, optimizer and model information, and also create a PNG image of the DNN. Creating a PNG image is an experimental feature (it relies on features which are not available on PyTorch 0.3.1 and that we hope will be available in PyTorch's next release), so to use it you will need to compile the PyTorch master branch, and hope for the best ;-).
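As an aside, the MACs column in the compute summary shown below can be reproduced by hand: for a Conv2d layer, MACs = C_out * C_in * K_h * K_w * H_out * W_out. The arithmetic below checks the first row of the table that follows ( module.conv1 , which maps (1, 3, 32, 32) to (1, 16, 32, 32) with a 3x3 kernel); this is plain arithmetic, not Distiller code.

c_in, c_out, k, h_out, w_out = 3, 16, 3, 32, 32
weights_volume = c_out * c_in * k * k     # 432, matching the "Weights volume" column
macs = weights_volume * h_out * w_out     # 442368, matching the "MACs" column
print(weights_volume, macs)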
$ python3 compress_classifier.py --resume=../ssl/checkpoints/checkpoint_trained_ch_regularized_dense.pth.tar -a=resnet20_cifar ../../../data.cifar10 --summary=compute Generates: +----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------+ | | Name | Type | Attrs | IFM | IFM volume | OFM | OFM volume | Weights volume | MACs | |----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------| | 0 | module.conv1 | Conv2d | k=(3, 3) | (1, 3, 32, 32) | 3072 | (1, 16, 32, 32) | 16384 | 432 | 442368 | | 1 | module.layer1.0.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 2 | module.layer1.0.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 3 | module.layer1.1.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 4 | module.layer1.1.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 5 | module.layer1.2.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 6 | module.layer1.2.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 7 | module.layer2.0.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 32, 16, 16) | 8192 | 4608 | 1179648 | | 8 | module.layer2.0.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 9 | module.layer2.0.downsample.0 | Conv2d | k=(1, 1) | (1, 16, 32, 32) | 16384 | (1, 32, 16, 16) | 8192 | 512 | 131072 | | 10 | module.layer2.1.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 11 | module.layer2.1.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 12 | module.layer2.2.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 13 | module.layer2.2.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 14 | module.layer3.0.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 64, 8, 8) | 4096 | 18432 | 1179648 | | 15 | module.layer3.0.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 16 | module.layer3.0.downsample.0 | Conv2d | k=(1, 1) | (1, 32, 16, 16) | 8192 | (1, 64, 8, 8) | 4096 | 2048 | 131072 | | 17 | module.layer3.1.conv1 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 18 | module.layer3.1.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 19 | module.layer3.2.conv1 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 20 | module.layer3.2.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 21 | module.fc | Linear | | (1, 64) | 64 | (1, 10) | 10 | 640 | 640 | +----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------+ Total MACs: 40,813,184 Using TensorBoard Google's TensorBoard is an excellent tool for visualizing the progress of DNN training. 
Distiller's logger supports writing performance indicators and parameter statistics in a file format that can be read by TensorBoard (Distiller uses TensorFlow's APIs in order to do this, which is why Distiller requires the installation of TensorFlow). To view the graphs, invoke the TensorBoard server. For example: $ tensorboard --logdir=logs Distiller's setup (requirements.txt) installs TensorFlow for CPU. If you want a different installation, please follow the TensorFlow installation instructions . Collecting activations statistics In CNNs with ReLU layers, ReLU activations (feature-maps) also exhibit a nice level of sparsity (50-60% sparsity is typical). You can collect activation statistics using the --act-stats command-line flag. For example: $ python3 compress_classifier.py -a=resnet56_cifar -p=50 ../../../data.cifar10 --resume=checkpoint.resnet56_cifar_baseline.pth.tar --act-stats=test -e The test parameter indicates that, in this example, we want to collect activation statistics during the test phase. Note that we also used the -e command-line argument to indicate that we want to run a test phase. The other two legal parameter values are train and valid which collect activation statistics during the training and validation phases, respectively. Collectors and their collaterals An instance of a subclass of ActivationStatsCollector can be used to collect activation statistics. Currently, ActivationStatsCollector has two types of subclasses: SummaryActivationStatsCollector and RecordsActivationStatsCollector . Instances of SummaryActivationStatsCollector compute the mean of some statistic of the activation. It is rather light-weight and quicker than collecting a record per activation. The statistic function is configured in the constructor. In the sample compression application, compress_classifier.py , we create a dictionary of collectors. For example: SummaryActivationStatsCollector(model, \"sparsity\", lambda t: 100 * distiller.utils.sparsity(t)) The lambda expression is invoked per activation encountered during forward passes, and the value it returns (in this case, the sparsity of the activation tensors, multiplied by 100) is stored in module.sparsity ( \"sparsity\" is this collector's name). To access the statistics, you can invoke collector.value() , or you can access each module's data directly. Another type of collector is RecordsActivationStatsCollector which computes a hard-coded set of activations statistics and collects a record per activation . For obvious reasons, this is slower than instances of SummaryActivationStatsCollector . ActivationStatsCollector defaults to collecting activations statistics only on the output activations of ReLU layers, but we can choose any layer type we want. In the example below we collect statistics from outputs of torch.nn.Conv2d layers. RecordsActivationStatsCollector(model, classes=[torch.nn.Conv2d]) Collectors can write their data to Excel workbooks (which are named using the collector's name), by invoking collector.to_xlsx(path_to_workbook) . In compress_classifier.py we currently create four different collectors which you can selectively disable. You can also add other statistics collectors and use a different function to compute your new statistic.
collectors = missingdict({ \"sparsity\": SummaryActivationStatsCollector(model, \"sparsity\", lambda t: 100 * distiller.utils.sparsity(t)), \"l1_channels\": SummaryActivationStatsCollector(model, \"l1_channels\", distiller.utils.activation_channels_l1), \"apoz_channels\": SummaryActivationStatsCollector(model, \"apoz_channels\", distiller.utils.activation_channels_apoz), \"records\": RecordsActivationStatsCollector(model, classes=[torch.nn.Conv2d])}) By default, these Collectors write their data to files in the active log directory. You can use a utility function, distiller.log_activation_statsitics , to log the data of an ActivationStatsCollector instance to one of the backend-loggers. For example, the code below logs the \"sparsity\" collector to a TensorBoard log file. distiller.log_activation_statsitics(epoch, \"train\", loggers=[tflogger], collector=collectors[\"sparsity\"]) Caveats Distiller collects activations statistics using PyTorch's forward-hooks mechanism. Collectors iteratively register the modules' forward-hooks, and collectors are called during the forward traversal and get exposed to activation data. Registering for forward callbacks is performed like this: module.register_forward_hook This makes apparent two limitations of this mechanism: We can only register on PyTorch modules. This means that we can't register on the forward hooks of functionals such as torch.nn.functional.relu and torch.nn.functional.max_pool2d . Therefore, you may need to replace functionals with their module alternative. For example: class MadeUpNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 6, 5) def forward(self, x): x = F.relu(self.conv1(x)) return x Can be changed to: class MadeUpNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.relu(self.conv1(x)) return x We can only use a module instance once in our models. If we use the same module several times, then we can't determine which node in the graph has invoked the callback, because the PyTorch callback signature def hook(module, input, output) doesn't provide enough contextual information.
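A few lines of plain PyTorch make this ambiguity concrete (a demonstration only, unrelated to Distiller's code):

import torch
import torch.nn as nn

relu = nn.ReLU()
seen = []
relu.register_forward_hook(lambda module, inp, out: seen.append(id(module)))

x = torch.randn(4)
y = relu(relu(x))            # two distinct call sites in the computation...
print(seen[0] == seen[1])    # True: the hook saw the very same module object both times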
TorchVision's ResNet is an example of a model that uses the same instance of nn.ReLU multiple times: class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) # <================ out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) # <================ return out In Distiller we changed ResNet to use multiple instances of nn.ReLU, and each instance is used only once: class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu1 = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.relu2 = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu1(out) # <================ out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu2(out) # <================ return out Using the Jupyter notebooks The Jupyter notebooks contain many examples of how to use the statistics summaries generated by Distiller. They are explained in a separate page. Generating this documentation Install mkdocs and the required packages by executing: $ pip3 install -r doc-requirements.txt To build the project documentation run: $ cd distiller/docs-src $ mkdocs build --clean This will create a folder named 'site' which contains the documentation website. Open distiller/docs/site/index.html to view the documentation home page.","title":"Usage"},{"location":"usage.html#using-the-sample-application","text":"The Distiller repository contains a sample application, distiller/examples/classifier_compression/compress_classifier.py , and a set of scheduling files which demonstrate Distiller's features. Following is a brief discussion of how to use this application and the accompanying schedules. You might also want to refer to the following resources: An explanation of the scheduler file format. An in-depth discussion of how we used these schedule files to implement several state-of-the-art DNN compression research papers. The sample application supports various features for compression of image classification DNNs, and gives an example of how to integrate distiller in your own application. The code is documented and should be considered the best source of documentation, but we provide some elaboration here. 
This diagram shows where compress_classifier.py fits in the compression workflow, and how we integrate the Jupyter notebooks as part of our research work.","title":"Using the sample application"},{"location":"usage.html#command-line-arguments","text":"To get help on the command line arguments, invoke: $ python3 compress_classifier.py --help For example: $ time python3 compress_classifier.py -a alexnet --lr 0.005 -p 50 ../../../data.imagenet -j 44 --epochs 90 --pretrained --compress=../sensitivity-pruning/alexnet.schedule_sensitivity.yaml Parameters: +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean | |----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0 | features.module.0.weight | (64, 3, 11, 11) | 23232 | 13411 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 42.27359 | 0.14391 | -0.00002 | 0.08805 | | 1 | features.module.3.weight | (192, 64, 5, 5) | 307200 | 115560 | 0.00000 | 0.00000 | 0.00000 | 1.91243 | 0.00000 | 62.38281 | 0.04703 | -0.00250 | 0.02289 | | 2 | features.module.6.weight | (384, 192, 3, 3) | 663552 | 256565 | 0.00000 | 0.00000 | 0.00000 | 6.18490 | 0.00000 | 61.33445 | 0.03354 | -0.00184 | 0.01803 | | 3 | features.module.8.weight | (256, 384, 3, 3) | 884736 | 315065 | 0.00000 | 0.00000 | 0.00000 | 6.96411 | 0.00000 | 64.38881 | 0.02646 | -0.00168 | 0.01422 | | 4 | features.module.10.weight | (256, 256, 3, 3) | 589824 | 186938 | 0.00000 | 0.00000 | 0.00000 | 15.49225 | 0.00000 | 68.30614 | 0.02714 | -0.00246 | 0.01409 | | 5 | classifier.1.weight | (4096, 9216) | 37748736 | 3398881 | 0.00000 | 0.21973 | 0.00000 | 0.21973 | 0.00000 | 90.99604 | 0.00589 | -0.00020 | 0.00168 | | 6 | classifier.4.weight | (4096, 4096) | 16777216 | 1782769 | 0.21973 | 3.46680 | 0.00000 | 3.46680 | 0.00000 | 89.37387 | 0.00849 | -0.00066 | 0.00263 | | 7 | classifier.6.weight | (1000, 4096) | 4096000 | 994738 | 3.36914 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 75.71440 | 0.01718 | 0.00030 | 0.00778 | | 8 | Total sparsity: | - | 61090496 | 7063928 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 88.43694 | 0.00000 | 0.00000 | 0.00000 | +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ 2018-04-04 21:30:52,499 - Total sparsity: 88.44 2018-04-04 21:30:52,499 - --- validate (epoch=89)----------- 2018-04-04 21:30:52,499 - 128116 samples (256 per mini-batch) 2018-04-04 21:31:04,646 - Epoch: [89][ 50/ 500] Loss 2.175988 Top1 51.289063 Top5 74.023438 2018-04-04 21:31:06,427 - Epoch: [89][ 100/ 500] Loss 2.171564 Top1 51.175781 Top5 74.308594 2018-04-04 21:31:11,432 - Epoch: [89][ 150/ 500] Loss 2.159347 Top1 51.546875 Top5 74.473958 2018-04-04 21:31:14,364 - Epoch: [89][ 200/ 500] Loss 2.156857 Top1 51.585938 Top5 74.568359 2018-04-04 21:31:18,381 - Epoch: [89][ 250/ 500] Loss 2.152790 Top1 51.707813 Top5 74.681250 2018-04-04 21:31:22,195 - Epoch: [89][ 300/ 500] Loss 2.149962 Top1 51.791667 Top5 74.755208 2018-04-04 21:31:25,508 - Epoch: [89][ 350/ 500] Loss 2.150936 Top1 51.827009 Top5 74.767857 2018-04-04 21:31:29,538 - Epoch: [89][ 400/ 500] 
Loss 2.150853 Top1 51.781250 Top5 74.763672 2018-04-04 21:31:32,842 - Epoch: [89][ 450/ 500] Loss 2.150156 Top1 51.828125 Top5 74.821181 2018-04-04 21:31:35,338 - Epoch: [89][ 500/ 500] Loss 2.150417 Top1 51.833594 Top5 74.817187 2018-04-04 21:31:35,357 - ==> Top1: 51.838 Top5: 74.817 Loss: 2.150 2018-04-04 21:31:35,364 - Saving checkpoint 2018-04-04 21:31:39,251 - --- test --------------------- 2018-04-04 21:31:39,252 - 50000 samples (256 per mini-batch) 2018-04-04 21:31:51,512 - Test: [ 50/ 195] Loss 1.487607 Top1 63.273438 Top5 85.695312 2018-04-04 21:31:55,015 - Test: [ 100/ 195] Loss 1.638043 Top1 60.636719 Top5 83.664062 2018-04-04 21:31:58,732 - Test: [ 150/ 195] Loss 1.833214 Top1 57.619792 Top5 80.447917 2018-04-04 21:32:01,274 - ==> Top1: 56.606 Top5: 79.446 Loss: 1.893 Let's look at the command line again: $ time python3 compress_classifier.py -a alexnet --lr 0.005 -p 50 ../../../data.imagenet -j 44 --epochs 90 --pretrained --compress=../sensitivity-pruning/alexnet.schedule_sensitivity.yaml In this example, we prune a TorchVision pre-trained AlexNet network, using the following configuration: Learning-rate of 0.005 Print progress every 50 mini-batches. Use 44 worker threads to load data (make sure to use something suitable for your machine). Run for 90 epochs. Torchvision's pre-trained models did not store the epoch metadata, so pruning starts at epoch 0. When you train and prune your own networks, the last training epoch is saved as metadata with the model. Therefore, when you load such models, the first epoch is not 0, but it is the last training epoch. The pruning schedule is provided in alexnet.schedule_sensitivity.yaml Log files are written to directory logs .","title":"Command line arguments"},{"location":"usage.html#examples","text":"Distiller comes with several example schedules which can be used together with compress_classifier.py . These example schedule (YAML) files contain the command line used to invoke the schedule (so that you can easily recreate the results in your environment), together with the results of the pruning or regularization. The results usually contain a table showing the sparsity of each of the model parameters, together with the validation and test top1, top5 and loss scores. For more details on the example schedules, you can refer to the coverage of the Model Zoo . examples/agp-pruning : Automated Gradual Pruning (AGP) on MobileNet and ResNet18 (ImageNet dataset) examples/hybrid : AlexNet AGP with 2D (kernel) regularization (ImageNet dataset) AlexNet sensitivity pruning with 2D regularization examples/network_slimming : ResNet20 Network Slimming (this is work-in-progress) examples/pruning_filters_for_efficient_convnets : ResNet56 baseline training (CIFAR10 dataset) ResNet56 filter removal using filter ranking examples/sensitivity_analysis : Element-wise pruning sensitivity-analysis: AlexNet (ImageNet) MobileNet (ImageNet) ResNet18 (ImageNet) ResNet20 (CIFAR10) ResNet34 (ImageNet) Filter-wise pruning sensitivity-analysis: ResNet20 (CIFAR10) ResNet56 (CIFAR10) examples/sensitivity-pruning : AlexNet sensitivity pruning with Iterative Pruning AlexNet sensitivity pruning with One-Shot Pruning examples/ssl : ResNet20 baseline training (CIFAR10 dataset) Structured Sparsity Learning (SSL) with layer removal on ResNet20 SSL with channels removal on ResNet20 examples/quantization : AlexNet w. 
Batch-Norm (base FP32 + DoReFa) Pre-activation ResNet20 on CIFAR10 (base FP32 + DoReFa) Pre-activation ResNet18 on ImageNet (base FP32 + DoReFa)","title":"Examples"},{"location":"usage.html#experiment-reproducibility","text":"Experiment reproducibility is sometimes important. Pete Warden recently expounded on this in his blog . PyTorch's support for deterministic execution requires us to use only one thread for loading data (otherwise the multi-threaded execution of the data loaders can create random order and change the results), and to set the seed of the CPU and GPU PRNGs. Using the --deterministic command-line flag and setting j=1 will produce reproducible results (for the same PyTorch version).","title":"Experiment reproducibility"},{"location":"usage.html#performing-pruning-sensitivity-analysis","text":"Distiller supports element-wise and filter-wise pruning sensitivity analysis. In both cases, L1-norm is used to rank which elements or filters to prune. For example, when running filter-pruning sensitivity analysis, the L1-norms of the filters of each layer's weights tensor are calculated, and the bottom x% are set to zero. The analysis process is quite long, because currently we use the entire test dataset to assess the accuracy performance at each pruning level of each weights tensor. Using a small dataset for this would save much time and we plan on assessing if this will provide sufficient results. Results are output as a CSV file ( sensitivity.csv ) and PNG file ( sensitivity.png ). The implementation is in distiller/sensitivity.py and it contains further details about the process and the format of the CSV file. The example below performs element-wise pruning sensitivity analysis on ResNet20 for CIFAR10: $ python3 compress_classifier.py -a resnet20_cifar ../../../data.cifar10/ -j=1 --resume=../cifar10/resnet20/checkpoint_trained_dense.pth.tar --sense=element The sense command-line argument can be set to either element or filter , depending on the type of analysis you want done. There is also a Jupyter notebook with example invocations, outputs and explanations.","title":"Performing pruning sensitivity analysis"},{"location":"usage.html#post-training-quantization","text":"The following example quantizes ResNet18 for ImageNet: $ python3 compress_classifier.py -a resnet18 ../../../data.imagenet --pretrained --quantize-eval --evaluate See here for more details on how to invoke post-training quantization from the command line. A checkpoint with the quantized model will be dumped in the run directory. It will contain the quantized model parameters (the data type will still be FP32, but the values will be integers). The calculated quantization parameters (scale and zero-point) are stored as well in each quantized layer. For more examples of post-training quantization see here .","title":"Post-Training Quantization"},{"location":"usage.html#summaries","text":"You can use the sample compression application to generate model summary reports, such as the attributes and compute summary report (see screen capture below). You can log sparsity statistics (written to console and CSV file), performance, optimizer and model information, and also create a PNG image of the DNN. Creating a PNG image is an experimental feature (it relies on features which are not available on PyTorch 0.3.1 and that we hope will be available in PyTorch's next release), so to use it you will need to compile the PyTorch master branch, and hope for the best ;-). 
$ python3 compress_classifier.py --resume=../ssl/checkpoints/checkpoint_trained_ch_regularized_dense.pth.tar -a=resnet20_cifar ../../../data.cifar10 --summary=compute Generates: +----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------+ | | Name | Type | Attrs | IFM | IFM volume | OFM | OFM volume | Weights volume | MACs | |----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------| | 0 | module.conv1 | Conv2d | k=(3, 3) | (1, 3, 32, 32) | 3072 | (1, 16, 32, 32) | 16384 | 432 | 442368 | | 1 | module.layer1.0.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 2 | module.layer1.0.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 3 | module.layer1.1.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 4 | module.layer1.1.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 5 | module.layer1.2.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 6 | module.layer1.2.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 7 | module.layer2.0.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 32, 16, 16) | 8192 | 4608 | 1179648 | | 8 | module.layer2.0.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 9 | module.layer2.0.downsample.0 | Conv2d | k=(1, 1) | (1, 16, 32, 32) | 16384 | (1, 32, 16, 16) | 8192 | 512 | 131072 | | 10 | module.layer2.1.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 11 | module.layer2.1.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 12 | module.layer2.2.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 13 | module.layer2.2.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 14 | module.layer3.0.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 64, 8, 8) | 4096 | 18432 | 1179648 | | 15 | module.layer3.0.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 16 | module.layer3.0.downsample.0 | Conv2d | k=(1, 1) | (1, 32, 16, 16) | 8192 | (1, 64, 8, 8) | 4096 | 2048 | 131072 | | 17 | module.layer3.1.conv1 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 18 | module.layer3.1.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 19 | module.layer3.2.conv1 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 20 | module.layer3.2.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 21 | module.fc | Linear | | (1, 64) | 64 | (1, 10) | 10 | 640 | 640 | +----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------+ Total MACs: 40,813,184","title":"Summaries"},{"location":"usage.html#using-tensorboard","text":"Google's TensorBoard is an excellent tool for visualizing the progress of DNN training. 
Distiller's logger supports writing performance indicators and parameter statistics in a file format that can be read by TensorBoard (Distiller uses TensorFlow's APIs in order to do this, which is why Distiller requires the installation of TensorFlow). To view the graphs, invoke the TensorBoard server. For example: $ tensorboard --logdir=logs Distiller's setup (requirements.txt) installs TensorFlow for CPU. If you want a different installation, please follow the TensorFlow installation instructions .","title":"Using TensorBoard"},{"location":"usage.html#collecting-activations-statistics","text":"In CNNs with ReLU layers, ReLU activations (feature-maps) also exhibit a nice level of sparsity (50-60% sparsity is typical). You can collect activation statistics using the --act-stats command-line flag. For example: $ python3 compress_classifier.py -a=resnet56_cifar -p=50 ../../../data.cifar10 --resume=checkpoint.resnet56_cifar_baseline.pth.tar --act-stats=test -e The test parameter indicates that, in this example, we want to collect activation statistics during the test phase. Note that we also used the -e command-line argument to indicate that we want to run a test phase. The other two legal parameter values are train and valid which collect activation statistics during the training and validation phases, respectively.","title":"Collecting activations statistics"},{"location":"usage.html#collectors-and-their-collaterals","text":"An instance of a subclass of ActivationStatsCollector can be used to collect activation statistics. Currently, ActivationStatsCollector has two types of subclasses: SummaryActivationStatsCollector and RecordsActivationStatsCollector . Instances of SummaryActivationStatsCollector compute the mean of some statistic of the activation. It is rather light-weight and quicker than collecting a record per activation. The statistic function is configured in the constructor. In the sample compression application, compress_classifier.py , we create a dictionary of collectors. For example: SummaryActivationStatsCollector(model, \"sparsity\", lambda t: 100 * distiller.utils.sparsity(t)) The lambda expression is invoked per activation encountered during forward passes, and the value it returns (in this case, the sparsity of the activation tensors, multiplied by 100) is stored in module.sparsity ( \"sparsity\" is this collector's name). To access the statistics, you can invoke collector.value() , or you can access each module's data directly. Another type of collector is RecordsActivationStatsCollector which computes a hard-coded set of activations statistics and collects a record per activation . For obvious reasons, this is slower than instances of SummaryActivationStatsCollector . ActivationStatsCollector defaults to collecting activations statistics only on the output activations of ReLU layers, but we can choose any layer type we want. In the example below we collect statistics from outputs of torch.nn.Conv2d layers. RecordsActivationStatsCollector(model, classes=[torch.nn.Conv2d]) Collectors can write their data to Excel workbooks (which are named using the collector's name), by invoking collector.to_xlsx(path_to_workbook) . In compress_classifier.py we currently create four different collectors which you can selectively disable. You can also add other statistics collectors and use a different function to compute your new statistic. 
collectors = missingdict({ \"sparsity\": SummaryActivationStatsCollector(model, \"sparsity\", lambda t: 100 * distiller.utils.sparsity(t)), \"l1_channels\": SummaryActivationStatsCollector(model, \"l1_channels\", distiller.utils.activation_channels_l1), \"apoz_channels\": SummaryActivationStatsCollector(model, \"apoz_channels\", distiller.utils.activation_channels_apoz), \"records\": RecordsActivationStatsCollector(model, classes=[torch.nn.Conv2d])}) By default, these Collectors write their data to files in the active log directory. You can use a utility function, distiller.log_activation_statsitics , to log the data of an ActivationStatsCollector instance to one of the backend-loggers. For example, the code below logs the \"sparsity\" collector to a TensorBoard log file. distiller.log_activation_statsitics(epoch, \"train\", loggers=[tflogger], collector=collectors[\"sparsity\"])","title":"Collectors and their collaterals"},{"location":"usage.html#caveats","text":"Distiller collects activations statistics using PyTorch's forward-hooks mechanism. Collectors iteratively register the modules' forward-hooks, and collectors are called during the forward traversal and get exposed to activation data. Registering for forward callbacks is performed like this: module.register_forward_hook This makes apparent two limitations of this mechanism: We can only register on PyTorch modules. This means that we can't register on the forward hooks of functionals such as torch.nn.functional.relu and torch.nn.functional.max_pool2d . Therefore, you may need to replace functionals with their module alternative. For example: class MadeUpNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 6, 5) def forward(self, x): x = F.relu(self.conv1(x)) return x Can be changed to: class MadeUpNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.relu(self.conv1(x)) return x We can only use a module instance once in our models. If we use the same module several times, then we can't determine which node in the graph has invoked the callback, because the PyTorch callback signature def hook(module, input, output) doesn't provide enough contextual information. 
TorchVision's ResNet is an example of a model that uses the same instance of nn.ReLU multiple times: class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) # <================ out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) # <================ return out In Distiller we changed ResNet to use multiple instances of nn.ReLU, and each instance is used only once: class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu1 = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.relu2 = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu1(out) # <================ out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu2(out) # <================ return out","title":"Caveats"},{"location":"usage.html#using-the-jupyter-notebooks","text":"The Jupyter notebooks contain many examples of how to use the statistics summaries generated by Distiller. They are explained in a separate page.","title":"Using the Jupyter notebooks"},{"location":"usage.html#generating-this-documentation","text":"Install mkdocs and the required packages by executing: $ pip3 install -r doc-requirements.txt To build the project documentation run: $ cd distiller/docs-src $ mkdocs build --clean This will create a folder named 'site' which contains the documentation website. Open distiller/docs/site/index.html to view the documentation home page.","title":"Generating this documentation"}]}
\ No newline at end of file
+{"config":{"lang":["en"],"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"index.html","text":"Distiller Documentation What is Distiller Distiller is an open-source Python package for neural network compression research. Network compression can reduce the footprint of a neural network, increase its inference speed and save energy. Distiller provides a PyTorch environment for prototyping and analyzing compression algorithms, such as sparsity-inducing methods and low precision arithmetic. Distiller contains: A framework for integrating pruning, regularization and quantization algorithms. A set of tools for analyzing and evaluating compression performance. Example implementations of state-of-the-art compression algorithms. Motivation A sparse tensor is any tensor that contains some zeros, but sparse tensors are usually only interesting if they contain a significant number of zeros. A sparse neural network performs computations using some sparse tensors (preferably many). These tensors can be parameters (weights and biases) or activations (feature maps). Why do we care about sparsity? Present day neural networks tend to be deep, with millions of weights and activations. Refer to GoogLeNet or ResNet50, for a couple of examples. These large models are compute-intensive which means that even with dedicated acceleration hardware, the inference pass (network evaluation) will take time. You might think that latency is an issue only in certain cases, such as autonomous driving systems, but in fact, whenever we humans interact with our phones and computers, we are sensitive to the latency of the interaction. We don't like to wait for search results or for an application or web-page to load, and we are especially sensitive in realtime interactions such as speech recognition. So inference latency is often something we want to minimize. Large models are also memory-intensive with millions of parameters. Moving around all of the data required to compute inference results consumes energy, which is a problem on a mobile device as well as in a server environment. Data center server-racks are limited by their power-envelope and their ToC (total cost of ownership) is correlated to their power consumption and thermal characteristics. In the mobile device environment, we are obviously always aware of the implications of power consumption on the device battery. Inference performance in the data center is often measured using a KPI (key performance indicator) which folds latency and power considerations: inferences per second, per Watt (inferences/sec/watt). The storage and transfer of large neural networks is also a challenge in mobile device environments, because of limitations on application sizes and long application download times. For these reasons, we wish to compress the network as much as possible, to reduce the amount of bandwidth and compute required. Inducing sparseness, through regularization or pruning, in neural-network models, is one way to compress the network (quantization is another method). Sparse neural networks hold the promise of speed, small size, and energy efficiency. Smaller Sparse NN model representations can be compressed by taking advantage of the fact that the tensor elements are dominated by zeros. The compression format, if any, is very HW and SW specific, and the optimal format may be different per tensor (an obvious example: largely dense tensors should not be compressed). 
The compute hardware needs to support the compression formats, for representation compression to be meaningful. Compression representation decisions might interact with algorithms such as the use of tiles for memory accesses. Data such as a parameter tensor is read/written from/to main system memory compressed, but the computation can be dense or sparse. In dense compute we use dense operators, so the compressed data eventually needs to be decompressed into its full, dense size. The best we can do is bring the compressed representation as close as possible to the compute engine. Sparse compute, on the other hand, operates on the sparse representation which never requires decompression (we therefore distinguish between sparse representation and compressed representation). This is not a simple matter to implement in HW, and often means lower utilization of the vectorized compute engines. Therefore, there is a third class of representations, which take advantage of specific hardware characteristics. For example, for a vectorized compute engine we can remove an entire zero-weights vector and skip its computation (this uses structured pruning or regularization). Faster Many of the layers in modern neural-networks are bandwidth-bound, which means that the execution latency is dominated by the available bandwidth. In essence, the hardware spends more time bringing data close to the compute engines, than actually performing the computations. Fully-connected layers, RNNs and LSTMs are some examples of bandwidth-dominated operations. Reducing the bandwidth required by these layers will immediately speed them up. Some pruning algorithms prune entire kernels, filters and even layers from the network without adversely impacting the final accuracy. Depending on the hardware implementation, these methods can be leveraged to skip computations, thus reducing latency and power. More energy efficient Because we pay two orders-of-magnitude more energy to access off-chip memory (e.g. DDR) compared to on-chip memory (e.g. SRAM or cache), many hardware designs employ a multi-layered cache hierarchy. Fitting the parameters and activations of a network in these on-chip caches can make a big difference on the required bandwidth, the total inference latency, and of course reduce power consumption. And of course, if we use a sparse or compressed representation, then we reduce the data throughput and therefore the energy consumption.","title":"Home"},{"location":"index.html#distiller-documentation","text":"","title":"Distiller Documentation"},{"location":"index.html#what-is-distiller","text":"Distiller is an open-source Python package for neural network compression research. Network compression can reduce the footprint of a neural network, increase its inference speed and save energy. Distiller provides a PyTorch environment for prototyping and analyzing compression algorithms, such as sparsity-inducing methods and low precision arithmetic. Distiller contains: A framework for integrating pruning, regularization and quantization algorithms. A set of tools for analyzing and evaluating compression performance. Example implementations of state-of-the-art compression algorithms.","title":"What is Distiller"},{"location":"index.html#motivation","text":"A sparse tensor is any tensor that contains some zeros, but sparse tensors are usually only interesting if they contain a significant number of zeros. A sparse neural network performs computations using some sparse tensors (preferably many). 
These tensors can be parameters (weights and biases) or activations (feature maps). Why do we care about sparsity? Present day neural networks tend to be deep, with millions of weights and activations. Refer to GoogLeNet or ResNet50, for a couple of examples. These large models are compute-intensive which means that even with dedicated acceleration hardware, the inference pass (network evaluation) will take time. You might think that latency is an issue only in certain cases, such as autonomous driving systems, but in fact, whenever we humans interact with our phones and computers, we are sensitive to the latency of the interaction. We don't like to wait for search results or for an application or web-page to load, and we are especially sensitive in realtime interactions such as speech recognition. So inference latency is often something we want to minimize. Large models are also memory-intensive with millions of parameters. Moving around all of the data required to compute inference results consumes energy, which is a problem on a mobile device as well as in a server environment. Data center server-racks are limited by their power-envelope and their ToC (total cost of ownership) is correlated to their power consumption and thermal characteristics. In the mobile device environment, we are obviously always aware of the implications of power consumption on the device battery. Inference performance in the data center is often measured using a KPI (key performance indicator) which folds latency and power considerations: inferences per second, per Watt (inferences/sec/watt). The storage and transfer of large neural networks is also a challenge in mobile device environments, because of limitations on application sizes and long application download times. For these reasons, we wish to compress the network as much as possible, to reduce the amount of bandwidth and compute required. Inducing sparseness, through regularization or pruning, in neural-network models, is one way to compress the network (quantization is another method). Sparse neural networks hold the promise of speed, small size, and energy efficiency.","title":"Motivation"},{"location":"index.html#smaller","text":"Sparse NN model representations can be compressed by taking advantage of the fact that the tensor elements are dominated by zeros. The compression format, if any, is very HW and SW specific, and the optimal format may be different per tensor (an obvious example: largely dense tensors should not be compressed). The compute hardware needs to support the compressions formats, for representation compression to be meaningful. Compression representation decisions might interact with algorithms such as the use of tiles for memory accesses. Data such as a parameter tensor is read/written from/to main system memory compressed, but the computation can be dense or sparse. In dense compute we use dense operators, so the compressed data eventually needs to be decompressed into its full, dense size. The best we can do is bring the compressed representation as close as possible to the compute engine. Sparse compute, on the other hand, operates on the sparse representation which never requires decompression (we therefore distinguish between sparse representation and compressed representation). This is not a simple matter to implement in HW, and often means lower utilization of the vectorized compute engines. Therefore, there is a third class of representations, which take advantage of specific hardware characteristics. 
For example, for a vectorized compute engine we can remove an entire zero-weights vector and skip its computation (this uses structured pruning or regularization).","title":"Smaller"},{"location":"index.html#faster","text":"Many of the layers in modern neural-networks are bandwidth-bound, which means that the execution latency is dominated by the available bandwidth. In essence, the hardware spends more time bringing data close to the compute engines than actually performing the computations. Fully-connected layers, RNNs and LSTMs are some examples of bandwidth-dominated operations. Reducing the bandwidth required by these layers will immediately speed them up. Some pruning algorithms prune entire kernels, filters and even layers from the network without adversely impacting the final accuracy. Depending on the hardware implementation, these methods can be leveraged to skip computations, thus reducing latency and power.","title":"Faster"},{"location":"index.html#more-energy-efficient","text":"Because we pay two orders-of-magnitude more energy to access off-chip memory (e.g. DDR) compared to on-chip memory (e.g. SRAM or cache), many hardware designs employ a multi-layered cache hierarchy. Fitting the parameters and activations of a network in these on-chip caches can make a big difference in the required bandwidth and the total inference latency, and of course reduce power consumption. And of course, if we use a sparse or compressed representation, then we are reducing the data throughput and therefore the energy consumption.","title":"More energy efficient"},{"location":"algo_earlyexit.html","text":"Early Exit Inference While Deep Neural Networks benefit from a large number of layers, it's often the case that many data points in classification tasks can be classified accurately with much less work. There have been several studies recently regarding the idea of exiting before the normal endpoint of the neural network. Panda et al. in Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition point out that a lot of data points can be classified easily and require less processing than some more difficult points, and they view this in terms of power savings. Surat et al. in BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks look at a selective approach to exit placement and criteria for exiting early. Why Does Early Exit Work? Early Exit is a strategy with a straightforward and easy-to-understand concept. The figure below shows a simple example in a 2-D feature space. While deep networks can represent more complex and expressive boundaries between classes (assuming we\u2019re confident of avoiding over-fitting the data), it\u2019s also clear that much of the data can be properly classified with even the simplest of classification boundaries. Data points far from the boundary can be considered \"easy to classify\" and achieve a high degree of confidence more quickly than data points close to the boundary. In fact, we can think of the area between the outer straight lines as being the region that is \"difficult to classify\" and requires the full expressiveness of the neural network to classify it accurately. Example code for Early Exit Both the CIFAR10 and ImageNet code come directly from publicly available examples from PyTorch. The only edits are the exits that are inserted in a methodology similar to the BranchyNet work. 
Note: the sample code provided for ResNet models with Early Exits has exactly one early exit for the CIFAR10 example and exactly two early exits for the ImageNet example. If you want to modify the number of early exits, you will need to make sure that the model code is updated to have a corresponding number of exits. Deeper networks can benefit from multiple exits. Our examples illustrate both a single and a pair of early exits for CIFAR10 and ImageNet, respectively. Note that this code does not actually take exits. What it does is to compute statistics of loss and accuracy assuming exits were taken when criteria are met. Actually implementing exits can be tricky and architecture-dependent, and we plan to address these issues. Example command lines We have provided examples for ResNets of varying sizes for both CIFAR10 and ImageNet datasets. An example command line for training for CIFAR10 is: python compress_classifier.py --arch=resnet32_cifar_earlyexit --epochs=20 -b 128 \\ --lr=0.003 --earlyexit_thresholds 0.4 --earlyexit_lossweights 0.4 -j 30 \\ --out-dir /home/ -n earlyexit /home/pcifar10 And an example command line for ImageNet is: python compress_classifier.py --arch=resnet50_earlyexit --epochs=120 -b 128 \\ --lr=0.003 --earlyexit_thresholds 1.2 0.9 --earlyexit_lossweights 0.1 0.3 \\ -j 30 --out-dir /home/ -n earlyexit /home/I1K/i1k-extracted/ Heuristics The insertion of the exits is ad hoc, but there are some heuristic principles guiding their placement and parameters. The earlier an exit is placed, the more aggressive it is, as it essentially prunes the rest of the network at a very early stage, thus saving a lot of work. However, a diminishing percentage of data will be directed through the exit if we are to preserve accuracy. There are other benefits to adding exits, in that training the modified network now has back-propagation losses coming from the exits that affect the earlier layers more substantially than the last exit. This effect mitigates problems such as vanishing gradient. Early Exit Hyper-Parameters There are two parameters that are required to enable early exit. Leave them undefined if you are not enabling Early Exit: --earlyexit_thresholds defines the thresholds for each of the early exits. The cross entropy measure must be less than the specified threshold to take a specific exit, otherwise the data continues along the regular path. For example, you could specify \"--earlyexit_thresholds 0.9 1.2\" and this implies two early exits with corresponding thresholds of 0.9 and 1.2, respectively, to take those exits. --earlyexit_lossweights provides the weights for the linear combination of losses during training to compute a single, overall loss. We only specify weights for the early exits and assume that the sum of the weights (including the final exit) is equal to 1.0. So an example of \"--earlyexit_lossweights 0.2 0.3\" implies two early exits weighted with values of 0.2 and 0.3, respectively, and that the final exit has a value of 1.0-(0.2+0.3) = 0.5. Studies have shown that weighting the early exits more heavily will create more aggressive early exits, but perhaps with a slight negative effect on accuracy. Output Stats The example code outputs various statistics regarding the loss and accuracy at each of the exits. During training, the Top1 and Top5 stats represent the accuracy if all of the data were forced out through that exit (in order to compute the loss at that exit). During inference (i.e. 
validation and test stages), the Top1 and Top5 stats represent the accuracy for those data points that could exit because the calculated entropy at that exit was lower than the specified threshold for that exit. CIFAR10 In the case of CIFAR10, we have inserted a single exit after the first full layer grouping. The layers on the exit path itself include a convolutional layer and a fully connected layer. If you move the exit, be sure to match the proper sizes for inputs and outputs to the exit layers. ImageNet This supports training and inference of the ImageNet dataset via several well-known deep architectures. ResNet-50 is the architecture of interest in this study, however the exit is defined in the generic ResNet code and could be used with other size ResNets. There are two exits inserted in this example. Again, exit layers must have their sizes match properly. References Priyadarshini Panda, Abhronil Sengupta, Kaushik Roy . Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition , arXiv:1509.08971v6, 2017. Surat Teerapittayanon, Bradley McDanel, H. T. Kung . BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks , arXiv:1709.01686, 2017.","title":"Early Exit"},{"location":"algo_earlyexit.html#early-exit-inference","text":"While Deep Neural Networks benefit from a large number of layers, it's often the case that many data points in classification tasks can be classified accurately with much less work. There have been several studies recently regarding the idea of exiting before the normal endpoint of the neural network. Panda et al. in Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition point out that a lot of data points can be classified easily and require less processing than some more difficult points, and they view this in terms of power savings. Surat et al. in BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks look at a selective approach to exit placement and criteria for exiting early.","title":"Early Exit Inference"},{"location":"algo_earlyexit.html#why-does-early-exit-work","text":"Early Exit is a strategy with a straightforward and easy-to-understand concept. The figure below shows a simple example in a 2-D feature space. While deep networks can represent more complex and expressive boundaries between classes (assuming we\u2019re confident of avoiding over-fitting the data), it\u2019s also clear that much of the data can be properly classified with even the simplest of classification boundaries. Data points far from the boundary can be considered \"easy to classify\" and achieve a high degree of confidence more quickly than data points close to the boundary. In fact, we can think of the area between the outer straight lines as being the region that is \"difficult to classify\" and requires the full expressiveness of the neural network to classify it accurately.","title":"Why Does Early Exit Work?"},{"location":"algo_earlyexit.html#example-code-for-early-exit","text":"Both the CIFAR10 and ImageNet code come directly from publicly available examples from PyTorch. The only edits are the exits that are inserted in a methodology similar to the BranchyNet work. Note: the sample code provided for ResNet models with Early Exits has exactly one early exit for the CIFAR10 example and exactly two early exits for the ImageNet example. If you want to modify the number of early exits, you will need to make sure that the model code is updated to have a corresponding number of exits. 
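To make the --earlyexit_lossweights semantics discussed on this page concrete, here is a sketch of the weighted loss combination (a hypothetical helper, not Distiller's implementation):

```python
def overall_loss(exit_losses, final_loss, exit_weights):
    """Linear combination of per-exit losses during training. The weights
    cover the early exits only; the final exit implicitly receives
    1 - sum(exit_weights)."""
    w_final = 1.0 - sum(exit_weights)
    assert w_final > 0.0, "early-exit weights must sum to less than 1.0"
    loss = w_final * final_loss
    for w, l in zip(exit_weights, exit_losses):
        loss = loss + w * l
    return loss

# --earlyexit_lossweights 0.2 0.3  ->  0.2*L_exit1 + 0.3*L_exit2 + 0.5*L_final
```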
Deeper networks can benefit from multiple exits. Our examples illustrate both a single and a pair of early exits for CIFAR10 and ImageNet, respectively. Note that this code does not actually take exits. What it does is to compute statistics of loss and accuracy assuming exits were taken when criteria are met. Actually implementing exits can be tricky and architecture-dependent, and we plan to address these issues.","title":"Example code for Early Exit"},{"location":"algo_earlyexit.html#example-command-lines","text":"We have provided examples for ResNets of varying sizes for both CIFAR10 and ImageNet datasets. An example command line for training for CIFAR10 is: python compress_classifier.py --arch=resnet32_cifar_earlyexit --epochs=20 -b 128 \\ --lr=0.003 --earlyexit_thresholds 0.4 --earlyexit_lossweights 0.4 -j 30 \\ --out-dir /home/ -n earlyexit /home/pcifar10 And an example command line for ImageNet is: python compress_classifier.py --arch=resnet50_earlyexit --epochs=120 -b 128 \\ --lr=0.003 --earlyexit_thresholds 1.2 0.9 --earlyexit_lossweights 0.1 0.3 \\ -j 30 --out-dir /home/ -n earlyexit /home/I1K/i1k-extracted/","title":"Example command lines"},{"location":"algo_earlyexit.html#heuristics","text":"The insertion of the exits is ad hoc, but there are some heuristic principles guiding their placement and parameters. The earlier an exit is placed, the more aggressive it is, as it essentially prunes the rest of the network at a very early stage, thus saving a lot of work. However, a diminishing percentage of data will be directed through the exit if we are to preserve accuracy. There are other benefits to adding exits, in that training the modified network now has back-propagation losses coming from the exits that affect the earlier layers more substantially than the last exit. This effect mitigates problems such as vanishing gradient.","title":"Heuristics"},{"location":"algo_earlyexit.html#early-exit-hyper-parameters","text":"There are two parameters that are required to enable early exit. Leave them undefined if you are not enabling Early Exit: --earlyexit_thresholds defines the thresholds for each of the early exits. The cross entropy measure must be less than the specified threshold to take a specific exit, otherwise the data continues along the regular path. For example, you could specify \"--earlyexit_thresholds 0.9 1.2\" and this implies two early exits with corresponding thresholds of 0.9 and 1.2, respectively, to take those exits. --earlyexit_lossweights provides the weights for the linear combination of losses during training to compute a single, overall loss. We only specify weights for the early exits and assume that the sum of the weights (including the final exit) is equal to 1.0. So an example of \"--earlyexit_lossweights 0.2 0.3\" implies two early exits weighted with values of 0.2 and 0.3, respectively, and that the final exit has a value of 1.0-(0.2+0.3) = 0.5. Studies have shown that weighting the early exits more heavily will create more aggressive early exits, but perhaps with a slight negative effect on accuracy.","title":"Early Exit Hyper-Parameters"},{"location":"algo_earlyexit.html#output-stats","text":"The example code outputs various statistics regarding the loss and accuracy at each of the exits. During training, the Top1 and Top5 stats represent the accuracy if all of the data were forced out through that exit (in order to compute the loss at that exit). During inference (i.e. 
validation and test stages), the Top1 and Top5 stats represent the accuracy for those data points that could exit because the calculated entropy at that exit was lower than the specified threshold for that exit.","title":"Output Stats"},{"location":"algo_earlyexit.html#cifar10","text":"In the case of CIFAR10, we have inserted a single exit after the first full layer grouping. The layers on the exit path itself includes a convolutional layer and a fully connected layer. If you move the exit, be sure to match the proper sizes for inputs and outputs to the exit layers.","title":"CIFAR10"},{"location":"algo_earlyexit.html#imagenet","text":"This supports training and inference of the ImageNet dataset via several well known deep architectures. ResNet-50 is the architecture of interest in this study, however the exit is defined in the generic ResNet code and could be used with other size ResNets. There are two exits inserted in this example. Again, exit layers must have their sizes match properly.","title":"ImageNet"},{"location":"algo_earlyexit.html#references","text":"Priyadarshini Panda, Abhronil Sengupta, Kaushik Roy . Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition , arXiv:1509.08971v6, 2017. Surat Teerapittayanon, Bradley McDanel, H. T. Kung . BranchyNet: Fast Inference via Early Exiting from Deep Neural Networks , arXiv:1709.01686, 2017.","title":"References"},{"location":"algo_pruning.html","text":"Weights Pruning Algorithms Magnitude Pruner This is the most basic pruner: it applies a thresholding function, \\(thresh(.)\\), on each element, \\(w_i\\), of a weights tensor. A different threshold can be used for each layer's weights tensor. Because the threshold is applied on individual elements, this pruner belongs to the element-wise pruning algorithm family. \\[ thresh(w_i)=\\left\\lbrace \\matrix{{{w_i: \\; if \\;|w_i| \\; \\gt}\\;\\lambda}\\cr {0: \\; if \\; |w_i| \\leq \\lambda} } \\right\\rbrace \\] Sensitivity Pruner Finding a threshold magnitude per layer is daunting, especially since each layer's elements have different average absolute values. We can take advantage of the fact that the weights of convolutional and fully connected layers exhibit a Gaussian distribution with a mean value roughly zero, to avoid using a direct threshold based on the values of each specific tensor. The diagram below shows the distribution the weights tensor of the first convolutional layer, and first fully-connected layer in TorchVision's pre-trained Alexnet model. You can see that they have an approximate Gaussian distribution. The distributions of Alexnet conv1 and fc1 layers We use the standard deviation of the weights tensor as a sort of normalizing factor between the different weights tensors. For example, if a tensor is Normally distributed, then about 68% of the elements have an absolute value less than the standard deviation (\\(\\sigma\\)) of the tensor. Thus, if we set the threshold to \\(s*\\sigma\\), then basically we are thresholding \\(s * 68\\%\\) of the tensor elements. \\[ thresh(w_i)=\\left\\lbrace \\matrix{{{w_i: \\; if \\;|w_i| \\; \\gt}\\;\\lambda}\\cr {0: \\; if \\; |w_i| \\leq \\lambda} } \\right\\rbrace \\] \\[ \\lambda = s * \\sigma_l \\;\\;\\; where\\; \\sigma_l\\; is \\;the \\;std \\;of \\;layer \\;l \\;as \\;measured \\;on \\;the \\;dense \\;model \\] How do we choose this \\(s\\) multiplier? 
In Learning both Weights and Connections for Efficient Neural Networks the authors write: \"We used the sensitivity results to find each layer\u2019s threshold: for example, the smallest threshold was applied to the most sensitive layer, which is the first convolutional layer... The pruning threshold is chosen as a quality parameter multiplied by the standard deviation of a layer\u2019s weights.\" So the results of executing pruning sensitivity analysis on the tensor give us a good starting guess at \\(s\\). Sensitivity analysis is an empirical method, and we still have to spend time to home in on the exact multiplier value. Method of Operation Start by running a pruning sensitivity analysis on the model. Then use the results to set and tune the threshold of each layer, but instead of using a direct threshold use a sensitivity parameter which is multiplied by the standard-deviation of the initial weight-tensor's distribution. Schedule In their paper Song Han et al. use iterative pruning and change the value of the \\(s\\) multiplier at each pruning step. Distiller's SensitivityPruner works differently: the value \\(s\\) is set once based on a one-time calculation of the standard-deviation of the tensor (the first time we prune), and relies on the fact that as the tensor is pruned, more elements are \"pulled\" toward the center of the distribution and thus more elements get pruned. This actually works quite well as we can see in the diagram below. This is a TensorBoard screen-capture from Alexnet training, which shows how this method starts off pruning very aggressively, but then slowly reduces the pruning rate. We use a simple iterative-pruning schedule such as: Prune every second epoch starting at epoch 0, and ending at epoch 38. This excerpt from alexnet.schedule_sensitivity.yaml shows how this iterative schedule is conveyed in Distiller scheduling configuration YAML: pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2 Level Pruner Class SparsityLevelParameterPruner uses a similar method to get around specifying explicit threshold magnitudes. Instead of specifying a threshold magnitude, you specify a target sparsity level (expressed as a fraction, so 0.5 means 50% sparsity). Essentially this pruner also uses a pruning criterion based on the magnitude of each tensor element, but it has the advantage that you can aim for an exact and specific sparsity level. This pruner is much more stable compared to SensitivityPruner because the target sparsity level is not coupled to the actual magnitudes of the elements. Distiller's SensitivityPruner is unstable because the final sparsity level depends on the convergence pattern of the tensor distribution. Song Han's methodology of using several different values for the multiplier \\(s\\), and the recalculation of the standard-deviation at each pruning phase, probably gives it stability, but requires many more hyper-parameters (this is the reason we have not implemented it thus far). 
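A sketch of the sensitivity-based thresholding just described (illustration only, not Distiller's SensitivityPruner code): the threshold is \(\lambda = s \sigma\), with \(\sigma\) computed once on the dense tensor, and elements below it are masked to zero:

```python
import torch

def sensitivity_mask(weights: torch.Tensor, s: float) -> torch.Tensor:
    """Element-wise mask for the threshold lambda = s * sigma, where sigma
    is the std of the dense weights (computed once, the first time we
    prune)."""
    lam = s * weights.std()
    return (weights.abs() > lam).to(weights.dtype)

# pruned_weights = weights * sensitivity_mask(weights, s=0.25)
```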
To set the target sparsity levels, you can once again use pruning sensitivity analysis to make better guesses at the correct sparsity level of each layer. Method of Operation Sort the weights in the specified layer by their absolute values. Mask to zero the smallest magnitude weights until the desired sparsity level is reached. Splicing Pruner In Dynamic Network Surgery for Efficient DNNs Guo et al. propose that network pruning and splicing work in tandem. A SplicingPruner is a pruner that both prunes and splices connections and works best with a Dynamic Network Surgery schedule, which, for example, configures the PruningPolicy to mask weights only during the forward pass. Automated Gradual Pruner (AGP) In To prune, or not to prune: exploring the efficacy of pruning for model compression , authors Michael Zhu and Suyog Gupta provide an algorithm to schedule a Level Pruner which Distiller implements in AutomatedGradualPruner . \"We introduce a new automated gradual pruning algorithm in which the sparsity is increased from an initial sparsity value \\(s_i\\) (usually 0) to a final sparsity value \\(s_f\\) over a span of n pruning steps. The intuition behind this sparsity function in equation (1) is to prune the network rapidly in the initial phase when the redundant connections are abundant and gradually reduce the number of weights being pruned each time as there are fewer and fewer weights remaining in the network.\" You can play with the scheduling parameters in the agp_schedule.ipynb notebook . The authors describe AGP: Our automated gradual pruning algorithm prunes the smallest magnitude weights to achieve a preset level of network sparsity. Doesn't require much hyper-parameter tuning Shown to perform well across different models Does not make any assumptions about the structure of the network or its constituent layers, and is therefore more generally applicable. RNN Pruner The authors of Exploring Sparsity in Recurrent Neural Networks , Sharan Narang, Erich Elsen, Gregory Diamos, and Shubho Sengupta, \"propose a technique to reduce the parameters of a network by pruning weights during the initial training of the network.\" They use a gradual pruning schedule which is reminiscent of the schedule used in AGP, for element-wise pruning of RNNs, which they also employ during training. They show pruning of RNN, GRU, LSTM and embedding layers. Distiller's distiller.pruning.BaiduRNNPruner class implements this pruning algorithm. Structure Pruners Element-wise pruning can create very sparse models which can be compressed to consume a smaller memory footprint and less bandwidth, but without specialized hardware that can compute using the sparse representation of the tensors, we don't gain any speedup of the computation. Structure pruners remove entire \"structures\", such as kernels, filters, and even entire feature-maps. Structure Ranking Pruners Ranking pruners use some criterion to rank the structures in a tensor, and then prune the tensor to a specified level. In principle, these pruners perform one-shot pruning, but can be combined with automatic pruning-level scheduling, such as AGP (see below). In Pruning Filters for Efficient ConvNets the authors use filter ranking, with one-shot pruning followed by fine-tuning. 
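The Level Pruner's method of operation above reduces to a few lines; a sketch (not Distiller's implementation):

```python
import torch

def level_mask(weights: torch.Tensor, sparsity: float) -> torch.Tensor:
    """Zero the smallest-magnitude elements until `sparsity` (a fraction,
    e.g. 0.5 for 50%) is reached."""
    k = int(sparsity * weights.numel())
    if k == 0:
        return torch.ones_like(weights)
    # k-th smallest absolute value becomes the pruning threshold
    threshold = weights.abs().flatten().kthvalue(k).values
    return (weights.abs() > threshold).to(weights.dtype)
```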
The authors of Exploiting Sparseness in Deep Neural Networks for Large Vocabulary Speech Recognition also use a one-shot pruning schedule, for fully-connected layers, and they provide an explanation: First, after sweeping through the full training set several times the weights become relatively stable \u2014 they tend to remain either large or small magnitudes. Second, in a stabilized model, the importance of the connection is approximated well by the magnitudes of the weights (times the magnitudes of the corresponding input values, but these are relatively uniform within each layer since on the input layer, features are normalized to zero-mean and unit-variance, and hidden-layer values are probabilities). L1RankedStructureParameterPruner The L1RankedStructureParameterPruner pruner calculates the magnitude of some \"structure\", orders all of the structures based on some magnitude function, and the m lowest-ranking structures are pruned away. This pruner performs ranking of structures using the mean of the absolute value of the structure as the representative of the structure magnitude. The absolute mean does not depend on the size of the structure, so it is easier to use compared to just using the \\(L_1\\)-norm of the structure, and at the same time it is a good proxy of the \\(L_1\\)-norm. Basically, you can think of mean(abs(t)) as a form of normalization of the structure L1-norm by the length of the structure. L1RankedStructureParameterPruner currently prunes weight filters, channels, and rows (for linear layers). ActivationAPoZRankedFilterPruner The ActivationAPoZRankedFilterPruner pruner uses the activation channels' mean APoZ (average percentage of zeros) to rank weight filters and prune a specified percentage of filters. This method is called Network Trimming from the research paper: \"Network Trimming: A Data-Driven Neuron Pruning Approach towards Efficient Deep Architectures\", Hengyuan Hu, Rui Peng, Yu-Wing Tai, Chi-Keung Tang, ICLR 2016 https://arxiv.org/abs/1607.03250 GradientRankedFilterPruner The GradientRankedFilterPruner tries to assess the importance of weight filters using the product of their gradients and the filter value. RandomRankedFilterPruner For research purposes we may want to compare the results of some structure-ranking pruner to a random structure-ranking. The RandomRankedFilterPruner pruner can be used for this purpose. Automated Gradual Pruner (AGP) for Structures The idea of a mathematical formula controlling the sparsity level growth is very useful, and StructuredAGP extends the implementation to structured pruning. Pruner Compositions Pruners can be combined to create new pruning schemes. Specifically, with a few lines of code we currently marry the AGP sparsity level scheduler with our filter-ranking classes to create pruner compositions. For each of these, we use AGP to decide how many filters to prune at each step, and we choose the filters to remove using one of the filter-ranking methods: L1RankedStructureParameterPruner_AGP ActivationAPoZRankedFilterPruner_AGP GradientRankedFilterPruner_AGP RandomRankedFilterPruner_AGP Hybrid Pruning In a single schedule we can mix different pruning techniques. For example, we might mix pruning and regularization, or structured pruning and element-wise pruning. We can even apply different methods on the same tensor. For example, we might want to perform filter pruning for a few epochs, then perform thinning and continue with element-wise pruning of the smaller network tensors. 
This technique of mixing different methods we call Hybrid Pruning, and Distiller has a few example schedules.","title":"Pruning"},{"location":"algo_pruning.html#weights-pruning-algorithms","text":"","title":"Weights Pruning Algorithms"},{"location":"algo_pruning.html#magnitude-pruner","text":"This is the most basic pruner: it applies a thresholding function, \\(thresh(.)\\), on each element, \\(w_i\\), of a weights tensor. A different threshold can be used for each layer's weights tensor. Because the threshold is applied on individual elements, this pruner belongs to the element-wise pruning algorithm family. \\[ thresh(w_i)=\\left\\lbrace \\matrix{{{w_i: \\; if \\;|w_i| \\; \\gt}\\;\\lambda}\\cr {0: \\; if \\; |w_i| \\leq \\lambda} } \\right\\rbrace \\]","title":"Magnitude Pruner"},{"location":"algo_pruning.html#sensitivity-pruner","text":"Finding a threshold magnitude per layer is daunting, especially since each layer's elements have different average absolute values. We can take advantage of the fact that the weights of convolutional and fully connected layers exhibit a Gaussian distribution with a mean value roughly zero, to avoid using a direct threshold based on the values of each specific tensor. The diagram below shows the distribution the weights tensor of the first convolutional layer, and first fully-connected layer in TorchVision's pre-trained Alexnet model. You can see that they have an approximate Gaussian distribution. The distributions of Alexnet conv1 and fc1 layers We use the standard deviation of the weights tensor as a sort of normalizing factor between the different weights tensors. For example, if a tensor is Normally distributed, then about 68% of the elements have an absolute value less than the standard deviation (\\(\\sigma\\)) of the tensor. Thus, if we set the threshold to \\(s*\\sigma\\), then basically we are thresholding \\(s * 68\\%\\) of the tensor elements. \\[ thresh(w_i)=\\left\\lbrace \\matrix{{{w_i: \\; if \\;|w_i| \\; \\gt}\\;\\lambda}\\cr {0: \\; if \\; |w_i| \\leq \\lambda} } \\right\\rbrace \\] \\[ \\lambda = s * \\sigma_l \\;\\;\\; where\\; \\sigma_l\\; is \\;the \\;std \\;of \\;layer \\;l \\;as \\;measured \\;on \\;the \\;dense \\;model \\] How do we choose this \\(s\\) multiplier? In Learning both Weights and Connections for Efficient Neural Networks the authors write: \"We used the sensitivity results to find each layer\u2019s threshold: for example, the smallest threshold was applied to the most sensitive layer, which is the first convolutional layer... The pruning threshold is chosen as a quality parameter multiplied by the standard deviation of a layer\u2019s weights So the results of executing pruning sensitivity analysis on the tensor, gives us a good starting guess at \\(s\\). Sensitivity analysis is an empirical method, and we still have to spend time to hone in on the exact multiplier value.","title":"Sensitivity Pruner"},{"location":"algo_pruning.html#method-of-operation","text":"Start by running a pruning sensitivity analysis on the model. Then use the results to set and tune the threshold of each layer, but instead of using a direct threshold use a sensitivity parameter which is multiplied by the standard-deviation of the initial weight-tensor's distribution.","title":"Method of Operation"},{"location":"algo_pruning.html#schedule","text":"In their paper Song Han et al. use iterative pruning and change the value of the \\(s\\) multiplier at each pruning step. 
Distiller's SensitivityPruner works differently: the value \\(s\\) is set once based on a one-time calculation of the standard-deviation of the tensor (the first time we prune), and relies on the fact that as the tensor is pruned, more elements are \"pulled\" toward the center of the distribution and thus more elements gets pruned. This actually works quite well as we can see in the diagram below. This is a TensorBoard screen-capture from Alexnet training, which shows how this method starts off pruning very aggressively, but then slowly reduces the pruning rate. We use a simple iterative-pruning schedule such as: Prune every second epoch starting at epoch 0, and ending at epoch 38. This excerpt from alexnet.schedule_sensitivity.yaml shows how this iterative schedule is conveyed in Distiller scheduling configuration YAML: pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2","title":"Schedule"},{"location":"algo_pruning.html#level-pruner","text":"Class SparsityLevelParameterPruner uses a similar method to go around specifying specific thresholding magnitudes. Instead of specifying a threshold magnitude, you specify a target sparsity level (expressed as a fraction, so 0.5 means 50% sparsity). Essentially this pruner also uses a pruning criteria based on the magnitude of each tensor element, but it has the advantage that you can aim for an exact and specific sparsity level. This pruner is much more stable compared to SensitivityPruner because the target sparsity level is not coupled to the actual magnitudes of the elements. Distiller's SensitivityPruner is unstable because the final sparsity level depends on the convergence pattern of the tensor distribution. Song Han's methodology of using several different values for the multiplier \\(s\\), and the recalculation of the standard-deviation at each pruning phase, probably gives it stability, but requires much more hyper-parameters (this is the reason we have not implemented it thus far). To set the target sparsity levels, you can once again use pruning sensitivity analysis to make better guesses at the correct sparsity level of each","title":"Level Pruner"},{"location":"algo_pruning.html#method-of-operation_1","text":"Sort the weights in the specified layer by their absolute values. Mask to zero the smallest magnitude weights until the desired sparsity level is reached.","title":"Method of Operation"},{"location":"algo_pruning.html#splicing-pruner","text":"In Dynamic Network Surgery for Efficient DNNs Guo et. al propose that network pruning and splicing work in tandem. A SpilicingPruner is a pruner that both prunes and splices connections and works best with a Dynamic Network Surgery schedule, which, for example, configures the PruningPolicy to mask weights only during the forward pass.","title":"Splicing Pruner"},{"location":"algo_pruning.html#automated-gradual-pruner-agp","text":"In To prune, or not to prune: exploring the efficacy of pruning for model compression , authors Michael Zhu and Suyog Gupta provide an algorithm to schedule a Level Pruner which Distiller implements in AutomatedGradualPruner . 
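The schedule the paper proposes (quoted below as equation (1)) is commonly written as a cubic function of the pruning step; a sketch based on the paper, not Distiller's exact code:

```python
def agp_sparsity(step: int, n_steps: int, s_i: float = 0.0, s_f: float = 0.9) -> float:
    """Target sparsity at pruning step `step`, rising from s_i to s_f over
    n_steps steps (Zhu & Gupta's cubic schedule): the network is pruned
    rapidly at first, then the rate tapers off."""
    assert 0 <= step <= n_steps
    progress = step / n_steps
    return s_f + (s_i - s_f) * (1.0 - progress) ** 3
```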
\"We introduce a new automated gradual pruning algorithm in which the sparsity is increased from an initial sparsity value \\(s_i\\) (usually 0) to a \ufb01nal sparsity value \\(s_f\\) over a span of n pruning steps. The intuition behind this sparsity function in equation (1) is to prune the network rapidly in the initial phase when the redundant connections are abundant and gradually reduce the number of weights being pruned each time as there are fewer and fewer weights remaining in the network.\"\" You can play with the scheduling parameters in the agp_schedule.ipynb notebook . The authors describe AGP: Our automated gradual pruning algorithm prunes the smallest magnitude weights to achieve a preset level of network sparsity. Doesn't require much hyper-parameter tuning Shown to perform well across different models Does not make any assumptions about the structure of the network or its constituent layers, and is therefore more generally applicable.","title":"Automated Gradual Pruner (AGP)"},{"location":"algo_pruning.html#rnn-pruner","text":"The authors of Exploring Sparsity in Recurrent Neural Networks , Sharan Narang, Erich Elsen, Gregory Diamos, and Shubho Sengupta, \"propose a technique to reduce the parameters of a network by pruning weights during the initial training of the network.\" They use a gradual pruning schedule which is reminiscent of the schedule used in AGP, for element-wise pruning of RNNs, which they also employ during training. They show pruning of RNN, GRU, LSTM and embedding layers. Distiller's distiller.pruning.BaiduRNNPruner class implements this pruning algorithm.","title":"RNN Pruner"},{"location":"algo_pruning.html#structure-pruners","text":"Element-wise pruning can create very sparse models which can be compressed to consume less memory footprint and bandwidth, but without specialized hardware that can compute using the sparse representation of the tensors, we don't gain any speedup of the computation. Structure pruners, remove entire \"structures\", such as kernels, filters, and even entire feature-maps.","title":"Structure Pruners"},{"location":"algo_pruning.html#structure-ranking-pruners","text":"Ranking pruners use some criterion to rank the structures in a tensor, and then prune the tensor to a specified level. In principle, these pruners perform one-shot pruning, but can be combined with automatic pruning-level scheduling, such as AGP (see below). In Pruning Filters for Efficient ConvNets the authors use filter ranking, with one-shot pruning followed by fine-tuning. The authors of Exploiting Sparseness in Deep Neural Networks for Large Vocabulary Speech Recognition also use a one-shot pruning schedule, for fully-connected layers, and they provide an explanation: First, after sweeping through the full training set several times the weights become relatively stable \u2014 they tend to remain either large or small magnitudes. 
Second, in a stabilized model, the importance of the connection is approximated well by the magnitudes of the weights (times the magnitudes of the corresponding input values, but these are relatively uniform within each layer since on the input layer, features are normalized to zero-mean and unit-variance, and hidden-layer values are probabilities)","title":"Structure Ranking Pruners"},{"location":"algo_pruning.html#l1rankedstructureparameterpruner","text":"The L1RankedStructureParameterPruner pruner calculates the magnitude of some \"structure\", orders all of the structures based on some magnitude function and the m lowest ranking structures are pruned away. This pruner performs ranking of structures using the mean of the absolute value of the structure as the representative of the structure magnitude. The absolute mean does not depend on the size of the structure, so it is easier to use compared to just using the \\(L_1\\)-norm of the structure, and at the same time it is a good proxy of the \\(L_1\\)-norm. Basically, you can think of mean(abs(t)) as a form of normalization of the structure L1-norm by the length of the structure. L1RankedStructureParameterPruner currently prunes weight filters, channels, and rows (for linear layers).","title":"L1RankedStructureParameterPruner"},{"location":"algo_pruning.html#activationapozrankedfilterpruner","text":"The ActivationAPoZRankedFilterPruner pruner uses the activation channels mean APoZ (average percentage of zeros) to rank weight filters and prune a specified percentage of filters. This method is called Network Trimming from the research paper: \"Network Trimming: A Data-Driven Neuron Pruning Approach towards Efficient Deep Architectures\", Hengyuan Hu, Rui Peng, Yu-Wing Tai, Chi-Keung Tang, ICLR 2016 https://arxiv.org/abs/1607.03250","title":"ActivationAPoZRankedFilterPruner"},{"location":"algo_pruning.html#gradientrankedfilterpruner","text":"The GradientRankedFilterPruner tries to asses the importance of weight filters using the product of their gradients and the filter value.","title":"GradientRankedFilterPruner"},{"location":"algo_pruning.html#randomrankedfilterpruner","text":"For research purposes we may want to compare the results of some structure-ranking pruner to a random structure-ranking. The RandomRankedFilterPruner pruner can be used for this purpose.","title":"RandomRankedFilterPruner"},{"location":"algo_pruning.html#automated-gradual-pruner-agp-for-structures","text":"The idea of a mathematical formula controlling the sparsity level growth is very useful and StructuredAGP extends the implementation to structured pruning.","title":"Automated Gradual Pruner (AGP) for Structures"},{"location":"algo_pruning.html#pruner-compositions","text":"Pruners can be combined to create new pruning schemes. Specifically, with a few lines of code we currently marry the AGP sparsity level scheduler with our filter-ranking classes to create pruner compositions. For each of these, we use AGP to decided how many filters to prune at each step, and we choose the filters to remove using one of the filter-ranking methods: L1RankedStructureParameterPruner_AGP ActivationAPoZRankedFilterPruner_AGP GradientRankedFilterPruner_AGP RandomRankedFilterPruner_AGP","title":"Pruner Compositions"},{"location":"algo_pruning.html#hybrid-pruning","text":"In a single schedule we can mix different pruning techniques. For example, we might mix pruning and regularization. Or structured pruning and element-wise pruning. 
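The mean(abs(t)) ranking described above for L1RankedStructureParameterPruner, sketched for 4-D convolution weights (illustration only; `frac` is a hypothetical parameter name):

```python
import torch

def rank_filters_l1(conv_weight: torch.Tensor, frac: float) -> torch.Tensor:
    """Rank conv filters (shape: out_ch, in_ch, kH, kW) by mean(abs(.))
    and return the indices of the lowest-ranked fraction of them."""
    scores = conv_weight.abs().mean(dim=(1, 2, 3))  # one score per filter
    n_prune = int(frac * scores.numel())
    return torch.argsort(scores)[:n_prune]
```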
We can even apply different methods on the same tensor. For example, we might want to perform filter pruning for a few epochs, then perform thinning and continue with element-wise pruning of the smaller network tensors. This technique of mixing different methods we call Hybrid Pruning, and Distiller has a few example schedules.","title":"Hybrid Pruning"},{"location":"algo_quantization.html","text":"Quantization Algorithms Note: For any of the methods below that require quantization-aware training, please see here for details on how to invoke it using Distiller's scheduling mechanism. Range-Based Linear Quantization Let's break down the terminology we use here: Linear: Means a float value is quantized by multiplying with a numeric constant (the scale factor ). Range-Based: Means that in order to calculate the scale factor, we look at the actual range of the tensor's values. In the most naive implementation, we use the actual min/max values of the tensor. Alternatively, we use some derivation based on the tensor's range / distribution to come up with a narrower min/max range, in order to remove possible outliers. This is in contrast to the other methods described here, which we could call clipping-based , as they impose an explicit clipping function on the tensors (using either a hard-coded value or a learned value). Asymmetric vs. Symmetric In this method we can use two modes - asymmetric and symmetric . Asymmetric Mode In asymmetric mode, we map the min/max in the float range to the min/max of the integer range. This is done by using a zero-point (also called quantization bias , or offset ) in addition to the scale factor. Let us denote the original floating-point tensor by x_f , the quantized tensor by x_q , the scale factor by q_x , the zero-point by zp_x and the number of bits used for quantization by n . Then, we get: x_q = round\\left ((x_f - min_{x_f})\\underbrace{\\frac{2^n - 1}{max_{x_f} - min_{x_f}}}_{q_x} \\right) = round(q_x x_f - \\underbrace{min_{x_f}q_x)}_{zp_x} = round(q_x x_f - zp_x) In practice, we actually use zp_x = round(min_{x_f}q_x) . This means that zero is exactly representable by an integer in the quantized range. This is important, for example, for layers that have zero-padding. By rounding the zero-point, we effectively \"nudge\" the min/max values in the float range a little bit, in order to gain this exact quantization of zero. Note that in the derivation above we use unsigned integer to represent the quantized range. That is, x_q \\in [0, 2^n-1] . One could use signed integer if necessary (perhaps due to HW considerations). This can be achieved by subtracting 2^{n-1} . Let's see how a convolution or fully-connected (FC) layer is quantized in asymmetric mode: (we denote input, output, weights and bias with x, y, w and b respectively) y_f = \\sum{x_f w_f} + b_f = \\sum{\\frac{x_q + zp_x}{q_x} \\frac{w_q + zp_w}{q_w}} + \\frac{b_q + zp_b}{q_b} = = \\frac{1}{q_x q_w} \\left( \\sum { (x_q + zp_x) (w_q + zp_w) + \\frac{q_x q_w}{q_b}(b_q + zp_b) } \\right) Therefore: y_q = round(q_y y_f) = round\\left(\\frac{q_y}{q_x q_w} \\left( \\sum { (x_q+zp_x) (w_q+zp_w) + \\frac{q_x q_w}{q_b}(b_q+zp_b) } \\right) \\right) Notes: We can see that the bias has to be re-scaled to match the scale of the summation. In a proper integer-only HW pipeline, we would like our main accumulation term to simply be \\sum{x_q w_q} . In order to achieve this, one needs to further develop the expression we derived above. 
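A sketch of the asymmetric mapping derived above, covering quantize/dequantize only (no integer-only-pipeline details):

```python
import torch

def asymmetric_quantize(x_f: torch.Tensor, n_bits: int = 8):
    """Asymmetric range-based linear quantization: map [min, max] onto
    [0, 2^n - 1] using scale q_x and rounded zero-point zp_x, per
    x_q = round(q_x * x_f - zp_x)."""
    q_x = (2 ** n_bits - 1) / float(x_f.max() - x_f.min())
    zp_x = torch.round(x_f.min() * q_x)  # zero stays exactly representable
    x_q = torch.round(q_x * x_f - zp_x).clamp(0, 2 ** n_bits - 1)
    return x_q, q_x, zp_x

def dequantize(x_q, q_x, zp_x):
    return (x_q + zp_x) / q_x
```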
For further details please refer to the gemmlowp documentation Symmetric Mode In symmetric mode, instead of mapping the exact min/max of the float range to the quantized range, we choose the maximum absolute value between min/max. In addition, we don't use a zero-point. So, the floating-point range we're effectively quantizing is symmetric with respect to zero, and so is the quantized range. Using the same notations as above, we get: x_q = round\\left (x_f \\underbrace{\\frac{2^{n-1} - 1}{\\max|x_f|}}_{q_x} \\right) = round(q_x x_f) Again, let's see how a convolution or fully-connected (FC) layer is quantized, this time in symmetric mode: y_f = \\sum{x_f w_f} + b_f = \\sum{\\frac{x_q}{q_x} \\frac{w_q}{q_w}} + \\frac{b_q}{q_b} = \\frac{1}{q_x q_w} \\left( \\sum { x_q w_q + \\frac{q_x q_w}{q_b}b_q } \\right) Therefore: y_q = round(q_y y_f) = round\\left(\\frac{q_y}{q_x q_w} \\left( \\sum { x_q w_q + \\frac{q_x q_w}{q_b}b_q } \\right) \\right) Comparing the Two Modes The main trade-off between these two modes is simplicity vs. utilization of the quantized range. When using asymmetric quantization, the quantized range is fully utilized. That is because we exactly map the min/max values from the float range to the min/max of the quantized range. Using symmetric mode when the float range is biased towards one side could result in a quantized range where significant dynamic range is dedicated to values that we'll never see. The most extreme example of this is after ReLU, where the entire tensor is positive. Quantizing it in symmetric mode means we're effectively losing 1 bit. On the other hand, if we look at the derivations for convolution / FC layers above, we can see that the actual implementation of symmetric mode is much simpler. In asymmetric mode, the zero-points require additional logic in HW. The cost of this extra logic in terms of latency and/or power and/or area will of course depend on the exact implementation. Other Features Removing Outliers: As discussed here , in some cases the float range of activations contains outliers. Spending dynamic range on these outliers hurts our ability to represent the values we actually care about accurately. Currently, Distiller supports clipping of activations with averaging during post-training quantization. That is - for each batch, instead of calculating global min/max values, we use an average of the min/max values of each sample in the batch. Scale factor scope: For weight tensors, Distiller supports per-channel quantization (per output channel). Implementation in Distiller Post-Training For post-training quantization, this method is implemented by wrapping existing modules with quantization and de-quantization operations. The wrapper implementations are in range_linear.py . The operations currently supported are: Convolution Fully connected Element-wise addition Element-wise multiplication Concatenation Embedding All other layers are unaffected and are executed using their original FP32 implementation. To automatically transform an existing model to a quantized model using this method, use the PostTrainLinearQuantizer class. For details on ways to invoke the quantizer see here . The transform performed by the Quantizer only works on sub-classes of torch.nn.Module . But operations such as element-wise addition / multiplication and concatenation do not have associated Modules in PyTorch. They are either overloaded operators or simple functions in the torch namespace. 
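Distiller's answer, described next, is to wrap such operations in trivial modules so a Quantizer can find and replace them; a sketch of the idea (names illustrative, not necessarily Distiller's exact classes):

```python
import torch.nn as nn

class EltwiseAdd(nn.Module):
    """A trivial module wrapping element-wise addition so that a
    module-based quantizer can locate and replace the operation."""
    def forward(self, *inputs):
        out = inputs[0]
        for t in inputs[1:]:
            out = out + t
        return out

# In a model: replace `out = x + residual` with
# `self.add = EltwiseAdd()` and `out = self.add(x, residual)`.
```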
To be able to quantize these operations, we've implemented very simple modules that wrap these operations here . It is necessary to manually modify your model and replace any existing operator with a corresponding module. For an example, see our slightly modified ResNet implementation . For weights and bias, the scale factor and zero-point are determined once at quantization setup (\"offline\" / \"static\"). For activations, both \"static\" and \"dynamic\" quantization is supported. Static quantization of activations requires that statistics be collected beforehand. See details on how to do that here . The calculated quantization parameters are stored as buffers within the module, so they are automatically serialized when the model checkpoint is saved. Quantization-Aware Training To apply range-based linear quantization in training, use the QuantAwareTrainRangeLinearQuantizer class. As it is now, it will apply weights quantization to convolution, FC and embedding modules. For activations quantization, it will insert instances of the FakeLinearQuantization module after ReLUs. This module follows the methodology described in Benoit et al., 2018 and uses exponential moving averages to track activation ranges. Note that the current implementation of QuantAwareTrainRangeLinearQuantizer supports training with single GPU only . Similarly to post-training, the calculated quantization parameters (scale factors, zero-points, tracked activation ranges) are stored as buffers within their respective modules, so they're saved when a checkpoint is created. Note that converting from a quantization-aware training model to a post-training quantization model is not yet supported. Such a conversion will use the activation ranges tracked during training, so additional offline or online calculation of quantization parameters will not be required. DoReFa (As proposed in DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients ) In this method, we first define the quantization function quantize_k , which takes a real value a_f \\in [0, 1] and outputs a discrete-valued a_q \\in \\left\\{ \\frac{0}{2^k-1}, \\frac{1}{2^k-1}, ... , \\frac{2^k-1}{2^k-1} \\right\\} , where k is the number of bits used for quantization. a_q = quantize_k(a_f) = \\frac{1}{2^k-1} round \\left( \\left(2^k - 1 \\right) a_f \\right) Activations are clipped to the [0, 1] range and then quantized as follows: x_q = quantize_k(x_f) For weights, we define the following function f , which takes an unbounded real valued input and outputs a real value in [0, 1] : f(w) = \\frac{tanh(w)}{2 max(|tanh(w)|)} + \\frac{1}{2} Now we can use quantize_k to get quantized weight values, as follows: w_q = 2 quantize_k \\left( f(w_f) \\right) - 1 This method requires training the model with quantization-aware training, as discussed here . Use the DorefaQuantizer class to transform an existing model to a model suitable for training with quantization using DoReFa. Notes: Gradient quantization as proposed in the paper is not supported yet. The paper defines special handling for binary weights, which isn't supported in Distiller yet. PACT (As proposed in PACT: Parameterized Clipping Activation for Quantized Neural Networks ) This method is similar to DoReFa, but the upper clipping values, \\alpha , of the activation functions are learned parameters instead of hard-coded to 1. Note that per the paper's recommendation, \\alpha is shared per layer. This method requires training the model with quantization-aware training, as discussed here . 
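A sketch of the PACT idea just described: a learnable clipping bound \alpha (shared per layer) followed by k-bit linear quantization. This omits straight-through-estimator details, and the initial \alpha of 6.0 is an arbitrary assumption:

```python
import torch
import torch.nn as nn

class PACTClip(nn.Module):
    """Clip activations to [0, alpha] with a learnable alpha, then
    quantize linearly to k bits."""
    def __init__(self, k: int = 4, alpha: float = 6.0):
        super().__init__()
        self.k = k
        self.alpha = nn.Parameter(torch.tensor(alpha))

    def forward(self, x):
        y = torch.relu(x)
        y = torch.where(y > self.alpha, self.alpha, y)  # broadcasts alpha
        n = float(2 ** self.k - 1)
        return torch.round(y / self.alpha * n) / n * self.alpha
```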
Use the PACTQuantizer class to transform an existing model to a model suitable for training with quantization using PACT. WRPN (As proposed in WRPN: Wide Reduced-Precision Networks ) In this method, activations are clipped to [0, 1] and quantized as follows ( k is the number of bits used for quantization): x_q = \\frac{1}{2^k-1} round \\left( \\left(2^k - 1 \\right) x_f \\right) Weights are clipped to [-1, 1] and quantized as follows: w_q = \\frac{1}{2^{k-1}-1} round \\left( \\left(2^{k-1} - 1 \\right)w_f \\right) Note that k-1 bits are used to quantize weights, leaving one bit for sign. This method requires training the model with quantization-aware training, as discussed here . Use the WRPNQuantizer class to transform an existing model to a model suitable for training with quantization using WRPN. Notes: The paper proposed widening of layers as a means to reduce accuracy loss. This isn't implemented as part of WRPNQuantizer at the moment. To experiment with this, modify your model implementation to have wider layers. The paper defines special handling for binary weights which isn't supported in Distiller yet.","title":"Quantization"},{"location":"algo_quantization.html#quantization-algorithms","text":"Note: For any of the methods below that require quantization-aware training, please see here for details on how to invoke it using Distiller's scheduling mechanism.","title":"Quantization Algorithms"},{"location":"algo_quantization.html#range-based-linear-quantization","text":"Let's break down the terminology we use here: Linear: Means a float value is quantized by multiplying with a numeric constant (the scale factor ). Range-Based: Means that in order to calculate the scale factor, we look at the actual range of the tensor's values. In the most naive implementation, we use the actual min/max values of the tensor. Alternatively, we use some derivation based on the tensor's range / distribution to come up with a narrower min/max range, in order to remove possible outliers. This is in contrast to the other methods described here, which we could call clipping-based , as they impose an explicit clipping function on the tensors (using either a hard-coded value or a learned value).","title":"Range-Based Linear Quantization"},{"location":"algo_quantization.html#asymmetric-vs-symmetric","text":"In this method we can use two modes - asymmetric and symmetric .","title":"Asymmetric vs. Symmetric"},{"location":"algo_quantization.html#asymmetric-mode","text":"In asymmetric mode, we map the min/max in the float range to the min/max of the integer range. This is done by using a zero-point (also called quantization bias , or offset ) in addition to the scale factor. Let us denote the original floating-point tensor by x_f , the quantized tensor by x_q , the scale factor by q_x , the zero-point by zp_x and the number of bits used for quantization by n . Then, we get: x_q = round\\left ((x_f - min_{x_f})\\underbrace{\\frac{2^n - 1}{max_{x_f} - min_{x_f}}}_{q_x} \\right) = round(q_x x_f - \\underbrace{min_{x_f}q_x)}_{zp_x} = round(q_x x_f - zp_x) In practice, we actually use zp_x = round(min_{x_f}q_x) . This means that zero is exactly representable by an integer in the quantized range. This is important, for example, for layers that have zero-padding. By rounding the zero-point, we effectively \"nudge\" the min/max values in the float range a little bit, in order to gain this exact quantization of zero. Note that in the derivation above we use unsigned integer to represent the quantized range. 
That is, x_q \\in [0, 2^n-1] . One could use a signed integer if necessary (perhaps due to HW considerations). This can be achieved by subtracting 2^{n-1} . Let's see how a convolution or fully-connected (FC) layer is quantized in asymmetric mode: (we denote input, output, weights and bias with x, y, w and b respectively) y_f = \\sum{x_f w_f} + b_f = \\sum{\\frac{x_q + zp_x}{q_x} \\frac{w_q + zp_w}{q_w}} + \\frac{b_q + zp_b}{q_b} = \\frac{1}{q_x q_w} \\left( \\sum { (x_q + zp_x) (w_q + zp_w) + \\frac{q_x q_w}{q_b}(b_q + zp_b) } \\right) Therefore: y_q = round(q_y y_f) = round\\left(\\frac{q_y}{q_x q_w} \\left( \\sum { (x_q+zp_x) (w_q+zp_w) + \\frac{q_x q_w}{q_b}(b_q+zp_b) } \\right) \\right) Notes: We can see that the bias has to be re-scaled to match the scale of the summation. In a proper integer-only HW pipeline, we would like our main accumulation term to simply be \\sum{x_q w_q} . In order to achieve this, one needs to further develop the expression we derived above. For further details please refer to the gemmlowp documentation","title":"Asymmetric Mode"},{"location":"algo_quantization.html#symmetric-mode","text":"In symmetric mode, instead of mapping the exact min/max of the float range to the quantized range, we choose the maximum absolute value between min/max. In addition, we don't use a zero-point. So, the floating-point range we're effectively quantizing is symmetric with respect to zero, and so is the quantized range. Using the same notations as above, we get: x_q = round\\left (x_f \\underbrace{\\frac{2^{n-1} - 1}{\\max|x_f|}}_{q_x} \\right) = round(q_x x_f) Again, let's see how a convolution or fully-connected (FC) layer is quantized, this time in symmetric mode: y_f = \\sum{x_f w_f} + b_f = \\sum{\\frac{x_q}{q_x} \\frac{w_q}{q_w}} + \\frac{b_q}{q_b} = \\frac{1}{q_x q_w} \\left( \\sum { x_q w_q + \\frac{q_x q_w}{q_b}b_q } \\right) Therefore: y_q = round(q_y y_f) = round\\left(\\frac{q_y}{q_x q_w} \\left( \\sum { x_q w_q + \\frac{q_x q_w}{q_b}b_q } \\right) \\right)","title":"Symmetric Mode"},{"location":"algo_quantization.html#comparing-the-two-modes","text":"The main trade-off between these two modes is simplicity vs. utilization of the quantized range. When using asymmetric quantization, the quantized range is fully utilized. That is because we exactly map the min/max values from the float range to the min/max of the quantized range. Using symmetric mode, if the float range is biased towards one side, this could result in a quantized range where significant dynamic range is dedicated to values that we'll never see. The most extreme example of this is after ReLU, where the entire tensor is positive. Quantizing it in symmetric mode means we're effectively losing 1 bit. On the other hand, if we look at the derivations for convolution / FC layers above, we can see that the actual implementation of symmetric mode is much simpler. In asymmetric mode, the zero-points require additional logic in HW. The cost of this extra logic in terms of latency and/or power and/or area will of course depend on the exact implementation.","title":"Comparing the Two Modes"},{"location":"algo_quantization.html#other-features","text":"Removing Outliers: As discussed here , in some cases the float range of activations contains outliers. Spending dynamic range on these outliers hurts our ability to represent the values we actually care about accurately. Currently, Distiller supports clipping of activations with averaging during post-training quantization. 
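To make the asymmetric-mode derivation above concrete, here is a minimal NumPy sketch of computing the scale factor and zero-point and quantizing a tensor. This is an illustration written for this document, not Distiller's range_linear.py implementation, and the function names are made up:

import numpy as np

def asymmetric_quantize(x_f, n_bits=8):
    # Scale factor q_x maps [min, max] onto [0, 2^n - 1], per the derivation above.
    min_x, max_x = float(x_f.min()), float(x_f.max())
    q_x = (2 ** n_bits - 1) / (max_x - min_x)
    # The zero-point is rounded so that 0.0 is exactly representable.
    zp_x = np.round(min_x * q_x)
    x_q = np.clip(np.round(q_x * x_f - zp_x), 0, 2 ** n_bits - 1)
    return x_q, q_x, zp_x

def dequantize(x_q, q_x, zp_x):
    # Recover an approximation of the original float values.
    return (x_q + zp_x) / q_x

Symmetric mode, as derived above, would drop the zero-point and use a scale of (2^(n-1) - 1) / max|x_f| instead.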
With this averaging technique, for each batch, instead of calculating global min/max values, we use an average of the min/max values of each sample in the batch. Scale factor scope: For weight tensors, Distiller supports per-channel quantization (per output channel).","title":"Other Features"},{"location":"algo_quantization.html#implementation-in-distiller","text":"","title":"Implementation in Distiller"},{"location":"algo_quantization.html#post-training","text":"For post-training quantization, this method is implemented by wrapping existing modules with quantization and de-quantization operations. The wrapper implementations are in range_linear.py . The operations currently supported are: Convolution Fully connected Element-wise addition Element-wise multiplication Concatenation Embedding All other layers are unaffected and are executed using their original FP32 implementation. To automatically transform an existing model to a quantized model using this method, use the PostTrainLinearQuantizer class. For details on ways to invoke the quantizer see here . The transform performed by the Quantizer only works on sub-classes of torch.nn.Module . But operations such as element-wise addition / multiplication and concatenation do not have associated Modules in PyTorch. They are either overloaded operators, or simple functions in the torch namespace. To be able to quantize these operations, we've implemented very simple modules that wrap these operations here . It is necessary to manually modify your model and replace any existing operator with a corresponding module. For an example, see our slightly modified ResNet implementation . For weights and bias, the scale factor and zero-point are determined once at quantization setup (\"offline\" / \"static\"). For activations, both \"static\" and \"dynamic\" quantization are supported. Static quantization of activations requires that statistics be collected beforehand. See details on how to do that here . The calculated quantization parameters are stored as buffers within the module, so they are automatically serialized when the model checkpoint is saved.","title":"Post-Training"},{"location":"algo_quantization.html#quantization-aware-training","text":"To apply range-based linear quantization in training, use the QuantAwareTrainRangeLinearQuantizer class. As it is now, it will apply weights quantization to convolution, FC and embedding modules. For activations quantization, it will insert instances of the FakeLinearQuantization module after ReLUs. This module follows the methodology described in Benoit et al., 2018 and uses exponential moving averages to track activation ranges. Note that the current implementation of QuantAwareTrainRangeLinearQuantizer supports training with a single GPU only . Similarly to post-training, the calculated quantization parameters (scale factors, zero-points, tracked activation ranges) are stored as buffers within their respective modules, so they're saved when a checkpoint is created. Note that converting from a quantization-aware training model to a post-training quantization model is not yet supported. 
Such a conversion will use the activation ranges tracked during training, so additional offline or online calculation of quantization parameters will not be required.","title":"Quantization-Aware Training"},{"location":"algo_quantization.html#dorefa","text":"(As proposed in DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients ) In this method, we first define the quantization function quantize_k , which takes a real value a_f \\in [0, 1] and outputs a discrete-valued a_q \\in \\left\\{ \\frac{0}{2^k-1}, \\frac{1}{2^k-1}, ... , \\frac{2^k-1}{2^k-1} \\right\\} , where k is the number of bits used for quantization. a_q = quantize_k(a_f) = \\frac{1}{2^k-1} round \\left( \\left(2^k - 1 \\right) a_f \\right) Activations are clipped to the [0, 1] range and then quantized as follows: x_q = quantize_k(x_f) For weights, we define the following function f , which takes an unbounded real-valued input and outputs a real value in [0, 1] : f(w) = \\frac{tanh(w)}{2 max(|tanh(w)|)} + \\frac{1}{2} Now we can use quantize_k to get quantized weight values, as follows: w_q = 2 quantize_k \\left( f(w_f) \\right) - 1 This method requires training the model with quantization-aware training, as discussed here . Use the DorefaQuantizer class to transform an existing model to a model suitable for training with quantization using DoReFa.","title":"DoReFa"},{"location":"algo_quantization.html#notes","text":"Gradient quantization as proposed in the paper is not supported yet. The paper defines special handling for binary weights which isn't supported in Distiller yet.","title":"Notes:"},{"location":"algo_quantization.html#pact","text":"(As proposed in PACT: Parameterized Clipping Activation for Quantized Neural Networks ) This method is similar to DoReFa, but the upper clipping values, \\alpha , of the activation functions are learned parameters instead of hard-coded to 1. Note that per the paper's recommendation, \\alpha is shared per layer. This method requires training the model with quantization-aware training, as discussed here . Use the PACTQuantizer class to transform an existing model to a model suitable for training with quantization using PACT.","title":"PACT"},{"location":"algo_quantization.html#wrpn","text":"(As proposed in WRPN: Wide Reduced-Precision Networks ) In this method, activations are clipped to [0, 1] and quantized as follows ( k is the number of bits used for quantization): x_q = \\frac{1}{2^k-1} round \\left( \\left(2^k - 1 \\right) x_f \\right) Weights are clipped to [-1, 1] and quantized as follows: w_q = \\frac{1}{2^{k-1}-1} round \\left( \\left(2^{k-1} - 1 \\right)w_f \\right) Note that k-1 bits are used to quantize weights, leaving one bit for sign. This method requires training the model with quantization-aware training, as discussed here . Use the WRPNQuantizer class to transform an existing model to a model suitable for training with quantization using WRPN.","title":"WRPN"},{"location":"algo_quantization.html#notes_1","text":"The paper proposed widening of layers as a means to reduce accuracy loss. This isn't implemented as part of WRPNQuantizer at the moment. To experiment with this, modify your model implementation to have wider layers. 
The paper defines special handling for binary weights which isn't supported in Distiller yet.","title":"Notes:"},{"location":"conditional_computation.html","text":"Conditional Computation Conditional Computation refers to a class of algorithms in which each input sample uses a different part of the model, such that on average the compute, latency or power (depending on our objective) is reduced. To quote Bengio et al. \"Conditional computation refers to activating only some of the units in a network, in an input-dependent fashion. For example, if we think we\u2019re looking at a car, we only need to compute the activations of the vehicle detecting units, not of all features that a network could possibly compute. The immediate effect of activating fewer units is that propagating information through the network will be faster, both at training as well as at test time. However, one needs to be able to decide in an intelligent fashion which units to turn on and off, depending on the input data. This is typically achieved with some form of gating structure, learned in parallel with the original network.\" As usual, there are several approaches to implement Conditional Computation: Sun et al. use several expert CNNs, each trained on a different task, and combine them into one large network. Zheng et al. use cascading, an idea which may be familiar to you from Viola-Jones face detection. Theodorakopoulos et al. add small layers that learn which filters to use per input sample, and then enforce that during inference (LKAM module). Ioannou et al. introduce Conditional Networks that \"can be thought of as: i) decision trees augmented with data transformation operators, or ii) CNNs, with block-diagonal sparse weight matrices, and explicit data routing functions\" Bolukbasi et al. \"learn a system to adaptively choose the components of a deep network to be evaluated for each example. By allowing examples correctly classified using early layers of the system to exit, we avoid the computational time associated with full evaluation of the network. We extend this to learn a network selection system that adaptively selects the network to be evaluated for each example.\" Conditional Computation is especially useful for real-time, latency-sensitive applications. In Distiller we currently have implemented a variant of Early Exit. References Emmanuel Bengio, Pierre-Luc Bacon, Joelle Pineau, Doina Precup. Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition , arXiv:1511.06297v2, 2016. Y. Sun, X. Wang, and X. Tang. Deep Convolutional Network Cascade for Facial Point Detection . In Proc. IEEE Conf. Computer Vision and Pattern Recognition (CVPR), 2014 X. Zheng, W. Ouyang, and X. Wang. Multi-Stage Contextual Deep Learning for Pedestrian Detection. In Proc. IEEE Intl Conf. on Computer Vision (ICCV), 2014. I. Theodorakopoulos, V. Pothos, D. Kastaniotis and N. Fragoulis. Parsimonious Inference on Convolutional Neural Networks: Learning and applying on-line kernel activation rules. Irida Labs S.A., January 2017 Tolga Bolukbasi, Joseph Wang, Ofer Dekel, Venkatesh Saligrama Adaptive Neural Networks for Efficient Inference . Proceedings of the 34th International Conference on Machine Learning, PMLR 70:527-536, 2017. Yani Ioannou, Duncan Robertson, Darko Zikic, Peter Kontschieder, Jamie Shotton, Matthew Brown, Antonio Criminisi . 
Decision Forests, Convolutional Networks and the Models in-Between , arXiv:1603.01250, 2016.","title":"Conditional Computation"},{"location":"conditional_computation.html#conditional-computation","text":"Conditional Computation refers to a class of algorithms in which each input sample uses a different part of the model, such that on average the compute, latency or power (depending on our objective) is reduced. To quote Bengio et al. \"Conditional computation refers to activating only some of the units in a network, in an input-dependent fashion. For example, if we think we\u2019re looking at a car, we only need to compute the activations of the vehicle detecting units, not of all features that a network could possibly compute. The immediate effect of activating fewer units is that propagating information through the network will be faster, both at training as well as at test time. However, one needs to be able to decide in an intelligent fashion which units to turn on and off, depending on the input data. This is typically achieved with some form of gating structure, learned in parallel with the original network.\" As usual, there are several approaches to implement Conditional Computation: Sun et al. use several expert CNNs, each trained on a different task, and combine them into one large network. Zheng et al. use cascading, an idea which may be familiar to you from Viola-Jones face detection. Theodorakopoulos et al. add small layers that learn which filters to use per input sample, and then enforce that during inference (LKAM module). Ioannou et al. introduce Conditional Networks that \"can be thought of as: i) decision trees augmented with data transformation operators, or ii) CNNs, with block-diagonal sparse weight matrices, and explicit data routing functions\" Bolukbasi et al. \"learn a system to adaptively choose the components of a deep network to be evaluated for each example. By allowing examples correctly classified using early layers of the system to exit, we avoid the computational time associated with full evaluation of the network. We extend this to learn a network selection system that adaptively selects the network to be evaluated for each example.\" Conditional Computation is especially useful for real-time, latency-sensitive applications. In Distiller we currently have implemented a variant of Early Exit.","title":"Conditional Computation"},{"location":"conditional_computation.html#references","text":"Emmanuel Bengio, Pierre-Luc Bacon, Joelle Pineau, Doina Precup. Conditional Deep Learning for Energy-Efficient and Enhanced Pattern Recognition , arXiv:1511.06297v2, 2016. Y. Sun, X. Wang, and X. Tang. Deep Convolutional Network Cascade for Facial Point Detection . In Proc. IEEE Conf. Computer Vision and Pattern Recognition (CVPR), 2014 X. Zheng, W. Ouyang, and X. Wang. Multi-Stage Contextual Deep Learning for Pedestrian Detection. In Proc. IEEE Intl Conf. on Computer Vision (ICCV), 2014. I. Theodorakopoulos, V. Pothos, D. Kastaniotis and N. Fragoulis. Parsimonious Inference on Convolutional Neural Networks: Learning and applying on-line kernel activation rules. Irida Labs S.A., January 2017 Tolga Bolukbasi, Joseph Wang, Ofer Dekel, Venkatesh Saligrama Adaptive Neural Networks for Efficient Inference . Proceedings of the 34th International Conference on Machine Learning, PMLR 70:527-536, 2017. Yani Ioannou, Duncan Robertson, Darko Zikic, Peter Kontschieder, Jamie Shotton, Matthew Brown, Antonio Criminisi . 
Decision Forests, Convolutional Networks and the Models in-Between , arXiv:1603.01250, 2016.","title":"References"},{"location":"design.html","text":"Distiller design Distiller is designed to be easily integrated into your own PyTorch research applications. It is easiest to understand this integration by examining the code of the sample application for compressing image classification models ( compress_classifier.py ). The application borrows its main flow code from torchvision's ImageNet classification training sample application (https://github.com/pytorch/examples/tree/master/imagenet). We tried to keep it similar, in order to make it familiar and easy to understand. Integrating compression is very simple: just add invocations of the appropriate compression_scheduler callbacks, for each stage in the training. The training skeleton looks like the pseudo code below. The boilerplate PyTorch classification training is speckled with invocations of CompressionScheduler. For each epoch: compression_scheduler.on_epoch_begin(epoch) train() validate() save_checkpoint() compression_scheduler.on_epoch_end(epoch) train(): For each training step: compression_scheduler.on_minibatch_begin(epoch) output = model(input_var) loss = criterion(output, target_var) compression_scheduler.before_backward_pass(epoch) loss.backward() optimizer.step() compression_scheduler.on_minibatch_end(epoch) These callbacks can be seen in the diagram below, as the arrow pointing from the Training Loop and into Distiller's Scheduler , which invokes the correct algorithm. The application also uses Distiller services to collect statistics in Summaries and logs files, which can be queried at a later time, from Jupyter notebooks or TensorBoard. Sparsification and fine-tuning The application sets up a model as normally done in PyTorch. It then instantiates a Scheduler and configures it: Scheduler configuration is defined in a YAML file The configuration specifies Policies. Each Policy is tied to a specific algorithm which controls some aspect of the training. Some types of algorithms control the actual sparsification of the model. Such types are \"pruner\" and \"regularizer\". Some algorithms control some parameter of the training process, such as the learning-rate decay scheduler ( lr_scheduler ). The parameters of each algorithm are also specified in the configuration. In addition to specifying the algorithm, each Policy specifies scheduling parameters which control when the algorithm is executed: start epoch, end epoch and frequency. The Scheduler exposes callbacks for relevant training stages: epoch start/end, mini-batch start/end and pre-backward pass. Each scheduler callback activates the policies according to the schedule that was defined. These callbacks are placed in the training loop. Quantization A quantized model is obtained by replacing existing operations with quantized versions. The quantized versions can be either complete replacements, or wrappers. A wrapper will use the existing modules internally and add quantization and de-quantization operations before/after as necessary. In Distiller we will provide a set of quantized versions of common operations which will enable implementation of different quantization methods. The user can write a quantized model from scratch, using the quantized operations provided. We also provide a mechanism which takes an existing model and automatically replaces required operations with quantized versions. This mechanism is exposed by the Quantizer class. 
Quantizer should be sub-classed for each quantization method. Model Transformation The high-level flow is as follows: Define a mapping from the module types to be replaced (e.g. Conv2D, Linear, etc.) to a function which generates the replacement module. The mapping is defined in the replacement_factory attribute of the Quantizer class. Iterate over the modules defined in the model. For each module, if its type is in the mapping, call the replacement generation function. We pass the existing module to this function to allow wrapping it. Replace the existing module with the module returned by the function. It is important to note that the name of the module does not change, as that could break the forward function of the parent module. Different quantization methods may, obviously, use different quantized operations. In addition, different methods may employ different \"strategies\" of replacing / wrapping existing modules. For instance, some methods replace ReLU with another activation function, while others keep it. Hence, for each quantization method, a different mapping will likely be defined. Each sub-class of Quantizer should populate the replacement_factory dictionary attribute with the appropriate mapping. To execute the model transformation, call the prepare_model function of the Quantizer instance. Flexible Bit-Widths Each instance of Quantizer is parameterized by the number of bits to be used for quantization of different tensor types. The default ones are activations and weights. These are the bits_activations , bits_weights and bits_bias parameters in Quantizer 's constructor. Sub-classes may define bit-widths for other tensor types as needed. We also want to be able to override the default number of bits mentioned in the bullet above for certain layers. These could be very specific layers. However, many models are composed of building blocks (\"container\" modules, such as Sequential) which contain several modules, and it is likely we'll want to override settings for entire blocks, or for a certain module across different blocks. When such building blocks are used, the names of the internal modules usually follow some pattern. So, for this purpose, Quantizer also accepts a mapping of regular expressions to number of bits. This allows the user to override specific layers using their exact name, or a group of layers via a regular expression. This mapping is passed via the overrides parameter in the constructor. The overrides mapping is required to be an instance of collections.OrderedDict (as opposed to just a simple Python dict ). This is done in order to enable handling of overlapping name patterns. So, for example, one could define certain override parameters for a group of layers, e.g. 'conv*', but also define different parameters for specific layers in that group, e.g. 'conv1'. The patterns are evaluated eagerly - the first match wins. Therefore, the more specific patterns must come before the broad patterns. Weights Quantization The Quantizer class also provides an API to quantize the weights of all layers at once. To use it, the param_quantization_fn attribute needs to point to a function that accepts a tensor and the number of bits. During model transformation, the Quantizer class will build a list of all model parameters that need to be quantized along with their bit-width. Then, the quantize_params function can be called, which will iterate over all parameters and quantize them using param_quantization_fn . 
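To illustrate this API, here is a hedged sketch of a parameter-quantization function; the clamp-and-round logic is an example written for this document, not Distiller's implementation:

import torch

def linear_quantize_param(param, num_bits):
    # Illustrative symmetric linear rounding of a weight tensor to num_bits.
    scale = (2 ** (num_bits - 1) - 1) / param.abs().max()
    return torch.round(param * scale) / scale

# A Quantizer sub-class would point its param_quantization_fn attribute at a
# function like this one; calling quantize_params() then applies it to every
# parameter that was registered for quantization, with its configured bit-width.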
Quantization-Aware Training The Quantizer class supports quantization-aware training, that is - training with quantization in the loop. This requires handling of a couple of flows / scenarios: Maintaining a full precision copy of the weights, as described here . This is enabled by setting train_with_fp_copy=True in the Quantizer constructor. At model transformation, in each module that has parameters that should be quantized, a new torch.nn.Parameter is added, which will maintain the required full precision copy of the parameters. Note that this is done in-place - a new module is not created. We preferred not to sub-class the existing PyTorch modules for this purpose. In order to do this in-place, and also to guarantee proper back-propagation through the weights quantization function, we employ the following \"hack\": The existing torch.nn.Parameter , e.g. weights , is replaced by a torch.nn.Parameter named float_weight . To maintain the existing functionality of the module, we then register a buffer in the module with the original name - weights . During training, float_weight will be passed to param_quantization_fn and the result will be stored in weights . In addition, some quantization methods may introduce additional learned parameters to the model. For example, in the PACT method, activations are clipped to a value \\alpha , which is a learned parameter per-layer. To support these two cases, the Quantizer class also accepts an instance of a torch.optim.Optimizer (normally this would be an instance of one of its sub-classes). The quantizer will take care of modifying the optimizer according to the changes made to the parameters. Optimizing New Parameters In cases where new parameters are required by the scheme, it is likely that they'll need to be optimized separately from the main model parameters. In that case, the sub-class for the specific method should override Quantizer._get_updated_optimizer_params_groups() , and return the proper groups plus any desired hyper-parameter overrides. Examples The base Quantizer class is implemented in distiller/quantization/quantizer.py . For a simple sub-class implementing symmetric linear quantization, see SymmetricLinearQuantizer in distiller/quantization/range_linear.py . In distiller/quantization/clipped_linear.py there are examples of lower-precision methods which use training with quantization. Specifically, see PACTQuantizer for an example of overriding Quantizer._get_updated_optimizer_params_groups() .","title":"Design"},{"location":"design.html#distiller-design","text":"Distiller is designed to be easily integrated into your own PyTorch research applications. It is easiest to understand this integration by examining the code of the sample application for compressing image classification models ( compress_classifier.py ). The application borrows its main flow code from torchvision's ImageNet classification training sample application (https://github.com/pytorch/examples/tree/master/imagenet). We tried to keep it similar, in order to make it familiar and easy to understand. Integrating compression is very simple: just add invocations of the appropriate compression_scheduler callbacks, for each stage in the training. The training skeleton looks like the pseudo code below. The boilerplate PyTorch classification training is speckled with invocations of CompressionScheduler. 
For each epoch: compression_scheduler.on_epoch_begin(epoch) train() validate() save_checkpoint() compression_scheduler.on_epoch_end(epoch) train(): For each training step: compression_scheduler.on_minibatch_begin(epoch) output = model(input_var) loss = criterion(output, target_var) compression_scheduler.before_backward_pass(epoch) loss.backward() optimizer.step() compression_scheduler.on_minibatch_end(epoch) These callbacks can be seen in the diagram below, as the arrow pointing from the Training Loop and into Distiller's Scheduler , which invokes the correct algorithm. The application also uses Distiller services to collect statistics in Summaries and logs files, which can be queried at a later time, from Jupyter notebooks or TensorBoard.","title":"Distiller design"},{"location":"design.html#sparsification-and-fine-tuning","text":"The application sets up a model as normally done in PyTorch. It then instantiates a Scheduler and configures it: Scheduler configuration is defined in a YAML file The configuration specifies Policies. Each Policy is tied to a specific algorithm which controls some aspect of the training. Some types of algorithms control the actual sparsification of the model. Such types are \"pruner\" and \"regularizer\". Some algorithms control some parameter of the training process, such as the learning-rate decay scheduler ( lr_scheduler ). The parameters of each algorithm are also specified in the configuration. In addition to specifying the algorithm, each Policy specifies scheduling parameters which control when the algorithm is executed: start epoch, end epoch and frequency. The Scheduler exposes callbacks for relevant training stages: epoch start/end, mini-batch start/end and pre-backward pass. Each scheduler callback activates the policies according to the schedule that was defined. These callbacks are placed in the training loop.","title":"Sparsification and fine-tuning"},{"location":"design.html#quantization","text":"A quantized model is obtained by replacing existing operations with quantized versions. The quantized versions can be either complete replacements, or wrappers. A wrapper will use the existing modules internally and add quantization and de-quantization operations before/after as necessary. In Distiller we will provide a set of quantized versions of common operations which will enable implementation of different quantization methods. The user can write a quantized model from scratch, using the quantized operations provided. We also provide a mechanism which takes an existing model and automatically replaces required operations with quantized versions. This mechanism is exposed by the Quantizer class. Quantizer should be sub-classed for each quantization method.","title":"Quantization"},{"location":"design.html#model-transformation","text":"The high-level flow is as follows: Define a mapping from the module types to be replaced (e.g. Conv2D, Linear, etc.) to a function which generates the replacement module. The mapping is defined in the replacement_factory attribute of the Quantizer class. Iterate over the modules defined in the model. For each module, if its type is in the mapping, call the replacement generation function. We pass the existing module to this function to allow wrapping it. Replace the existing module with the module returned by the function. It is important to note that the name of the module does not change, as that could break the forward function of the parent module. 
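As a sketch of this flow, the mapping might look roughly as follows; QuantWrapper and conv2d_replace_fn are hypothetical names invented for this example, not Distiller classes:

import torch.nn as nn

class QuantWrapper(nn.Module):
    # Hypothetical wrapper: would quantize the input, then run the wrapped module.
    def __init__(self, wrapped, num_bits):
        super().__init__()
        self.wrapped = wrapped
        self.num_bits = num_bits

    def forward(self, x):
        # A real implementation would quantize x here before the call.
        return self.wrapped(x)

def conv2d_replace_fn(module, num_bits=8):
    # Receives the existing module so it can be wrapped rather than rebuilt.
    return QuantWrapper(module, num_bits)

# Inside a Quantizer sub-class, the mapping would be registered roughly as:
# self.replacement_factory[nn.Conv2d] = conv2d_replace_fn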
Different quantization methods may, obviously, use different quantized operations. In addition, different methods may employ different \"strategies\" of replacing / wrapping existing modules. For instance, some methods replace ReLU with another activation function, while others keep it. Hence, for each quantization method, a different mapping will likely be defined. Each sub-class of Quantizer should populate the replacement_factory dictionary attribute with the appropriate mapping. To execute the model transformation, call the prepare_model function of the Quantizer instance.","title":"Model Transformation"},{"location":"design.html#flexible-bit-widths","text":"Each instance of Quantizer is parameterized by the number of bits to be used for quantization of different tensor types. The default ones are activations and weights. These are the bits_activations , bits_weights and bits_bias parameters in Quantizer 's constructor. Sub-classes may define bit-widths for other tensor types as needed. We also want to be able to override the default number of bits mentioned in the bullet above for certain layers. These could be very specific layers. However, many models are composed of building blocks (\"container\" modules, such as Sequential) which contain several modules, and it is likely we'll want to override settings for entire blocks, or for a certain module across different blocks. When such building blocks are used, the names of the internal modules usually follow some pattern. So, for this purpose, Quantizer also accepts a mapping of regular expressions to number of bits. This allows the user to override specific layers using their exact name, or a group of layers via a regular expression. This mapping is passed via the overrides parameter in the constructor. The overrides mapping is required to be an instance of collections.OrderedDict (as opposed to just a simple Python dict ). This is done in order to enable handling of overlapping name patterns. So, for example, one could define certain override parameters for a group of layers, e.g. 'conv*', but also define different parameters for specific layers in that group, e.g. 'conv1'. The patterns are evaluated eagerly - the first match wins. Therefore, the more specific patterns must come before the broad patterns.","title":"Flexible Bit-Widths"},{"location":"design.html#weights-quantization","text":"The Quantizer class also provides an API to quantize the weights of all layers at once. To use it, the param_quantization_fn attribute needs to point to a function that accepts a tensor and the number of bits. During model transformation, the Quantizer class will build a list of all model parameters that need to be quantized along with their bit-width. Then, the quantize_params function can be called, which will iterate over all parameters and quantize them using param_quantization_fn .","title":"Weights Quantization"},{"location":"design.html#quantization-aware-training","text":"The Quantizer class supports quantization-aware training, that is - training with quantization in the loop. This requires handling of a couple of flows / scenarios: Maintaining a full precision copy of the weights, as described here . This is enabled by setting train_with_fp_copy=True in the Quantizer constructor. At model transformation, in each module that has parameters that should be quantized, a new torch.nn.Parameter is added, which will maintain the required full precision copy of the parameters. Note that this is done in-place - a new module is not created. 
We preferred not to sub-class the existing PyTorch modules for this purpose. In order to do this in-place, and also to guarantee proper back-propagation through the weights quantization function, we employ the following \"hack\": The existing torch.nn.Parameter , e.g. weights , is replaced by a torch.nn.Parameter named float_weight . To maintain the existing functionality of the module, we then register a buffer in the module with the original name - weights . During training, float_weight will be passed to param_quantization_fn and the result will be stored in weights . In addition, some quantization methods may introduce additional learned parameters to the model. For example, in the PACT method, activations are clipped to a value \\alpha , which is a learned parameter per-layer. To support these two cases, the Quantizer class also accepts an instance of a torch.optim.Optimizer (normally this would be an instance of one of its sub-classes). The quantizer will take care of modifying the optimizer according to the changes made to the parameters. Optimizing New Parameters In cases where new parameters are required by the scheme, it is likely that they'll need to be optimized separately from the main model parameters. In that case, the sub-class for the specific method should override Quantizer._get_updated_optimizer_params_groups() , and return the proper groups plus any desired hyper-parameter overrides.","title":"Quantization-Aware Training"},{"location":"design.html#examples","text":"The base Quantizer class is implemented in distiller/quantization/quantizer.py . For a simple sub-class implementing symmetric linear quantization, see SymmetricLinearQuantizer in distiller/quantization/range_linear.py . In distiller/quantization/clipped_linear.py there are examples of lower-precision methods which use training with quantization. Specifically, see PACTQuantizer for an example of overriding Quantizer._get_updated_optimizer_params_groups() .","title":"Examples"},{"location":"install.html","text":"Distiller Installation These instructions will help get Distiller up and running on your local machine. You may also want to refer to these resources: Dataset installation instructions. Jupyter installation instructions. Notes: - Distiller has only been tested on Ubuntu 16.04 LTS, and with Python 3.5. - If you are not using a GPU, you might need to make small adjustments to the code. Clone Distiller Clone the Distiller code repository from github: $ git clone https://github.com/NervanaSystems/distiller.git The rest of the documentation that follows assumes that you have cloned the repository to a directory called distiller . Create a Python virtual environment We recommend using a Python virtual environment , but that, of course, is up to you. There's nothing special about using Distiller in a virtual environment, but we provide some instructions for completeness. Before creating the virtual environment, make sure you are located in directory distiller . After creating the environment, you should see a directory called distiller/env . Using virtualenv If you don't have virtualenv installed, you can find the installation instructions here . To create the environment, execute: $ python3 -m virtualenv env This creates a subdirectory named env where the python virtual environment is stored, and configures the current shell to use it as the default python environment. 
Using venv If you prefer to use venv , then begin by installing it: $ sudo apt-get install python3-venv Then create the environment: $ python3 -m venv env As with virtualenv, this creates a directory called distiller/env . Activate the environment The environment activation and deactivation commands for venv and virtualenv are the same. !NOTE: Make sure to activate the environment before proceeding with the installation of the dependency packages: $ source env/bin/activate Install the package Finally, install the Distiller package and its dependencies using pip3 : $ cd distiller $ pip3 install -e . This installs Distiller in \"development mode\", meaning any changes made in the code are reflected in the environment without re-running the install command (so no need to re-install after pulling changes from the Git repository). PyTorch is included in the requirements.txt file, and will currently download PyTorch version 1.0.1 for CUDA 9.0. This is the setup we've used for testing Distiller.","title":"Installation"},{"location":"install.html#distiller-installation","text":"These instructions will help get Distiller up and running on your local machine. You may also want to refer to these resources: Dataset installation instructions. Jupyter installation instructions. Notes: - Distiller has only been tested on Ubuntu 16.04 LTS, and with Python 3.5. - If you are not using a GPU, you might need to make small adjustments to the code.","title":"Distiller Installation"},{"location":"install.html#clone-distiller","text":"Clone the Distiller code repository from github: $ git clone https://github.com/NervanaSystems/distiller.git The rest of the documentation that follows assumes that you have cloned the repository to a directory called distiller .","title":"Clone Distiller"},{"location":"install.html#create-a-python-virtual-environment","text":"We recommend using a Python virtual environment , but that, of course, is up to you. There's nothing special about using Distiller in a virtual environment, but we provide some instructions for completeness. Before creating the virtual environment, make sure you are located in directory distiller . After creating the environment, you should see a directory called distiller/env .","title":"Create a Python virtual environment"},{"location":"install.html#using-virtualenv","text":"If you don't have virtualenv installed, you can find the installation instructions here . To create the environment, execute: $ python3 -m virtualenv env This creates a subdirectory named env where the python virtual environment is stored, and configures the current shell to use it as the default python environment.","title":"Using virtualenv"},{"location":"install.html#using-venv","text":"If you prefer to use venv , then begin by installing it: $ sudo apt-get install python3-venv Then create the environment: $ python3 -m venv env As with virtualenv, this creates a directory called distiller/env .","title":"Using venv"},{"location":"install.html#activate-the-environment","text":"The environment activation and deactivation commands for venv and virtualenv are the same. !NOTE: Make sure to activate the environment before proceeding with the installation of the dependency packages: $ source env/bin/activate","title":"Activate the environment"},{"location":"install.html#install-the-package","text":"Finally, install the Distiller package and its dependencies using pip3 : $ cd distiller $ pip3 install -e . 
This installs Distiller in \"development mode\", meaning any changes made in the code are reflected in the environment without re-running the install command (so no need to re-install after pulling changes from the Git repository). PyTorch is included in the requirements.txt file, and will currently download PyTorch version 1.0.1 for CUDA 9.0. This is the setup we've used for testing Distiller.","title":"Install the package"},{"location":"jupyter.html","text":"Jupyter environment The Jupyter notebooks environment allows us to plan our compression session and load Distiller data summaries to study and analyze compression results. Each notebook has embedded instructions and explanations, so here we provide only a brief description of each notebook. Installation Jupyter and its dependencies are included as part of the main requirements.txt file, so there is no need for a dedicated installation step. However, to use the ipywidgets extension, you will need to enable it: $ jupyter nbextension enable --py widgetsnbextension --sys-prefix You may want to refer to the ipywidgets extension installation documentation . Another extension which requires special installation handling is Qgrid . Qgrid is a Jupyter notebook widget that adds interactive features, such as sorting, to Pandas DataFrames rendering. To enable Qgrid: $ jupyter nbextension enable --py --sys-prefix qgrid Launching the Jupyter server There are all kinds of options you can use when launching Jupyter. The example below tells the server to listen to connections from any IP address, and not to launch the browser window, but of course, you are free to launch Jupyter any way you want. Consult the user's guide for more details. $ jupyter-notebook --ip=0.0.0.0 --no-browser Using the Distiller notebooks The Distiller Jupyter notebooks are located in the distiller/jupyter directory. They are provided as tools that you can use to prepare your compression experiments and study their results. We welcome new ideas and implementations of Jupyter notebooks. Roughly, the notebooks can be divided into three categories. Theory jupyter/L1-regularization.ipynb : Experience hands-on how L1 and L2 regularization affect the solution of a toy loss-minimization problem, to get a better grasp on the interaction between regularization and sparsity. jupyter/alexnet_insights.ipynb : This notebook reviews and compares a couple of pruning sessions on Alexnet. We compare distributions, performance, statistics and show some visualizations of the weights tensors. Preparation for compression jupyter/model_summary.ipynb : Begin by getting familiar with your model. Examine the sizes and properties of layers and connections. Study which layers are compute-bound, and which are bandwidth-bound, and decide how to prune or regularize the model. jupyter/sensitivity_analysis.ipynb : If you performed pruning sensitivity analysis on your model, this notebook can help you load the results and graphically study how the layers behave. jupyter/interactive_lr_scheduler.ipynb : The learning rate decay policy affects pruning results, perhaps as much as it affects training results. Graph a few LR-decay policies to see how they behave. jupyter/agp_schedule.ipynb : If you are using the Automated Gradual Pruner, this notebook can help you tune the schedule. Reviewing experiment results jupyter/compare_executions.ipynb : This is a simple notebook to help you graphically compare the results of executions of several experiments. 
jupyter/compression_insights.ipynb : This notebook is packed with code, tables and graphs to help us understand the results of a compression session. Distiller provides summaries , which are Pandas dataframes, which contain statistical information about your model. We chose to use Pandas dataframes because they can be sliced, queried, summarized and graphed with a few lines of code.","title":"Jupyter Notebooks"},{"location":"jupyter.html#jupyter-environment","text":"The Jupyter notebooks environment allows us to plan our compression session and load Distiller data summaries to study and analyze compression results. Each notebook has embedded instructions and explanations, so here we provide only a brief description of each notebook.","title":"Jupyter environment"},{"location":"jupyter.html#installation","text":"Jupyter and its dependencies are included as part of the main requirements.txt file, so there is no need for a dedicated installation step. However, to use the ipywidgets extension, you will need to enable it: $ jupyter nbextension enable --py widgetsnbextension --sys-prefix You may want to refer to the ipywidgets extension installation documentation . Another extension which requires special installation handling is Qgrid . Qgrid is a Jupyter notebook widget that adds interactive features, such as sorting, to Pandas DataFrames rendering. To enable Qgrid: $ jupyter nbextension enable --py --sys-prefix qgrid","title":"Installation"},{"location":"jupyter.html#launching-the-jupyter-server","text":"There are all kinds of options you can use when launching Jupyter. The example below tells the server to listen to connections from any IP address, and not to launch the browser window, but of course, you are free to launch Jupyter any way you want. Consult the user's guide for more details. $ jupyter-notebook --ip=0.0.0.0 --no-browser","title":"Launching the Jupyter server"},{"location":"jupyter.html#using-the-distiller-notebooks","text":"The Distiller Jupyter notebooks are located in the distiller/jupyter directory. They are provided as tools that you can use to prepare your compression experiments and study their results. We welcome new ideas and implementations of Jupyter notebooks. Roughly, the notebooks can be divided into three categories.","title":"Using the Distiller notebooks"},{"location":"jupyter.html#theory","text":"jupyter/L1-regularization.ipynb : Experience hands-on how L1 and L2 regularization affect the solution of a toy loss-minimization problem, to get a better grasp on the interaction between regularization and sparsity. jupyter/alexnet_insights.ipynb : This notebook reviews and compares a couple of pruning sessions on Alexnet. We compare distributions, performance, statistics and show some visualizations of the weights tensors.","title":"Theory"},{"location":"jupyter.html#preparation-for-compression","text":"jupyter/model_summary.ipynb : Begin by getting familiar with your model. Examine the sizes and properties of layers and connections. Study which layers are compute-bound, and which are bandwidth-bound, and decide how to prune or regularize the model. jupyter/sensitivity_analysis.ipynb : If you performed pruning sensitivity analysis on your model, this notebook can help you load the results and graphically study how the layers behave. jupyter/interactive_lr_scheduler.ipynb : The learning rate decay policy affects pruning results, perhaps as much as it affects training results. Graph a few LR-decay policies to see how they behave. 
jupyter/agp_schedule.ipynb : If you are using the Automated Gradual Pruner, this notebook can help you tune the schedule.","title":"Preparation for compression"},{"location":"jupyter.html#reviewing-experiment-results","text":"jupyter/compare_executions.ipynb : This is a simple notebook to help you graphically compare the results of executions of several experiments. jupyter/compression_insights.ipynb : This notebook is packed with code, tables and graphs to help us understand the results of a compression session. Distiller provides summaries , which are Pandas dataframes, which contain statistical information about your model. We chose to use Pandas dataframes because they can be sliced, queried, summarized and graphed with a few lines of code.","title":"Reviewing experiment results"},{"location":"knowledge_distillation.html","text":"Knowledge Distillation (For details on how to train a model with knowledge distillation in Distiller, see here ) Knowledge distillation is a model compression method in which a small model is trained to mimic a pre-trained, larger model (or ensemble of models). This training setting is sometimes referred to as \"teacher-student\", where the large model is the teacher and the small model is the student (we'll be using these terms interchangeably). The method was first proposed by Bucila et al., 2006 and generalized by Hinton et al., 2015 . The implementation in Distiller is based on the latter publication. Here we'll provide a summary of the method. For more information, the reader may refer to the paper (a video lecture with slides is also available). In distillation, knowledge is transferred from the teacher model to the student by minimizing a loss function in which the target is the distribution of class probabilities predicted by the teacher model. That is - the output of a softmax function on the teacher model's logits. However, in many cases, this probability distribution has the correct class at a very high probability, with all other class probabilities very close to 0. As such, it doesn't provide much information beyond the ground truth labels already provided in the dataset. To tackle this issue, Hinton et al., 2015 introduced the concept of \"softmax temperature\". The probability p_i of class i is calculated from the logits z as: p_i = \\frac{exp\\left(\\frac{z_i}{T}\\right)}{\\sum_{j} \\exp\\left(\\frac{z_j}{T}\\right)} where T is the temperature parameter. When T=1 we get the standard softmax function. As T grows, the probability distribution generated by the softmax function becomes softer, providing more information as to which classes the teacher found more similar to the predicted class. Hinton calls this the \"dark knowledge\" embedded in the teacher model, and it is this dark knowledge that we are transferring to the student model in the distillation process. When computing the loss function vs. the teacher's soft targets, we use the same value of T to compute the softmax on the student's logits. We call this loss the \"distillation loss\". Hinton et al., 2015 found that it is also beneficial to train the distilled model to produce the correct labels (based on the ground truth) in addition to the teacher's soft-labels. Hence, we also calculate the \"standard\" loss between the student's predicted class probabilities and the ground-truth labels (also called \"hard labels/targets\"). We dub this loss the \"student loss\". When calculating the class probabilities for the student loss we use T = 1 . 
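A short PyTorch sketch of these two loss terms, ahead of the formal definition below (this is an illustration with made-up function names, not Distiller's distillation code; alpha and beta are the weighting coefficients of the formula that follows):

import torch.nn.functional as F

def distillation_losses(student_logits, teacher_logits, labels, T, alpha, beta):
    # Student loss: standard cross-entropy vs. the hard labels, with T = 1.
    student_loss = F.cross_entropy(student_logits, labels)
    # Distillation loss: soft cross-entropy vs. the teacher's softened targets.
    soft_targets = F.softmax(teacher_logits / T, dim=1)
    log_probs = F.log_softmax(student_logits / T, dim=1)
    distill_loss = -(soft_targets * log_probs).sum(dim=1).mean()
    return alpha * student_loss + beta * distill_loss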
The overall loss function, incorporating both distillation and student losses, is calculated as: \\mathcal{L}(x;W) = \\alpha * \\mathcal{H}(y, \\sigma(z_s; T=1)) + \\beta * \\mathcal{H}(\\sigma(z_t; T=\\tau), \\sigma(z_s; T=\\tau)) where x is the input, W are the student model parameters, y is the ground truth label, \\mathcal{H} is the cross-entropy loss function, \\sigma is the softmax function parameterized by the temperature T , and \\alpha and \\beta are coefficients. z_s and z_t are the logits of the student and teacher respectively. New Hyper-Parameters In general \\tau , \\alpha and \\beta are hyper-parameters. In their experiments, Hinton et al., 2015 use temperature values ranging from 1 to 20. They note that empirically, when the student model is very small compared to the teacher model, lower temperatures work better. This makes sense if we consider that as we raise the temperature, the resulting soft-labels distribution becomes richer in information, and a very small model might not be able to capture all of this information. However, there's no clear way to predict up front what kind of capacity for information the student model will have. With regards to \\alpha and \\beta , Hinton et al., 2015 use a weighted average between the distillation loss and the student loss. That is, \\beta = 1 - \\alpha . They note that in general, they obtained the best results when setting \\alpha to be much smaller than \\beta (although in one of their experiments they use \\alpha = \\beta = 0.5 ). Other works which utilize knowledge distillation don't use a weighted average. Some set \\alpha = 1 while leaving \\beta tunable, while others don't set any constraints. Combining with Other Model Compression Techniques In the \"basic\" scenario, the smaller (student) model is a pre-defined architecture which just has a smaller number of parameters compared to the teacher model. For example, we could train ResNet-18 by distilling knowledge from ResNet-34. But, a model with smaller capacity can also be obtained by other model compression techniques - sparsification and/or quantization. So, for example, we could train a 4-bit ResNet-18 model with some method using quantization-aware training, and use a distillation loss function as described above. In that case, the teacher model can even be an FP32 ResNet-18 model. The same goes for pruning and regularization. Tann et al., 2017 , Mishra and Marr, 2018 and Polino et al., 2018 are some works that combine knowledge distillation with quantization . Theis et al., 2018 and Ashok et al., 2018 combine distillation with pruning . References Cristian Bucila, Rich Caruana, and Alexandru Niculescu-Mizil . Model Compression. KDD, 2006 Geoffrey Hinton, Oriol Vinyals and Jeff Dean . Distilling the Knowledge in a Neural Network. arxiv:1503.02531 Hokchhay Tann, Soheil Hashemi, Iris Bahar and Sherief Reda . Hardware-Software Codesign of Accurate, Multiplier-free Deep Neural Networks. DAC, 2017 Asit Mishra and Debbie Marr . Apprentice: Using Knowledge Distillation Techniques To Improve Low-Precision Network Accuracy. ICLR, 2018 Antonio Polino, Razvan Pascanu and Dan Alistarh . Model compression via distillation and quantization. ICLR, 2018 Anubhav Ashok, Nicholas Rhinehart, Fares Beainy and Kris M. Kitani . N2N learning: Network to Network Compression via Policy Gradient Reinforcement Learning. ICLR, 2018 Lucas Theis, Iryna Korshunova, Alykhan Tejani and Ferenc Husz\u00e1r . Faster gaze prediction with dense networks and Fisher pruning. 
arxiv:1801.05787","title":"Knowledge Distillation"},{"location":"knowledge_distillation.html#knowledge-distillation","text":"(For details on how to train a model with knowledge distillation in Distiller, see here ) Knowledge distillation is a model compression method in which a small model is trained to mimic a pre-trained, larger model (or ensemble of models). This training setting is sometimes referred to as \"teacher-student\", where the large model is the teacher and the small model is the student (we'll be using these terms interchangeably). The method was first proposed by Bucila et al., 2006 and generalized by Hinton et al., 2015 . The implementation in Distiller is based on the latter publication. Here we'll provide a summary of the method. For more information, the reader may refer to the paper (a video lecture with slides is also available). In distillation, knowledge is transferred from the teacher model to the student by minimizing a loss function in which the target is the distribution of class probabilities predicted by the teacher model. That is - the output of a softmax function on the teacher model's logits. However, in many cases, this probability distribution has the correct class at a very high probability, with all other class probabilities very close to 0. As such, it doesn't provide much information beyond the ground truth labels already provided in the dataset. To tackle this issue, Hinton et al., 2015 introduced the concept of \"softmax temperature\". The probability p_i of class i is calculated from the logits z as: p_i = \\frac{exp\\left(\\frac{z_i}{T}\\right)}{\\sum_{j} \\exp\\left(\\frac{z_j}{T}\\right)} where T is the temperature parameter. When T=1 we get the standard softmax function. As T grows, the probability distribution generated by the softmax function becomes softer, providing more information as to which classes the teacher found more similar to the predicted class. Hinton calls this the \"dark knowledge\" embedded in the teacher model, and it is this dark knowledge that we are transferring to the student model in the distillation process. When computing the loss function vs. the teacher's soft targets, we use the same value of T to compute the softmax on the student's logits. We call this loss the \"distillation loss\". Hinton et al., 2015 found that it is also beneficial to train the distilled model to produce the correct labels (based on the ground truth) in addition to the teacher's soft-labels. Hence, we also calculate the \"standard\" loss between the student's predicted class probabilities and the ground-truth labels (also called \"hard labels/targets\"). We dub this loss the \"student loss\". When calculating the class probabilities for the student loss we use T = 1 . The overall loss function, incorporating both distillation and student losses, is calculated as: \\mathcal{L}(x;W) = \\alpha * \\mathcal{H}(y, \\sigma(z_s; T=1)) + \\beta * \\mathcal{H}(\\sigma(z_t; T=\\tau), \\sigma(z_s; T=\\tau)) where x is the input, W are the student model parameters, y is the ground truth label, \\mathcal{H} is the cross-entropy loss function, \\sigma is the softmax function parameterized by the temperature T , and \\alpha and \\beta are coefficients. z_s and z_t are the logits of the student and teacher respectively.","title":"Knowledge Distillation"},{"location":"knowledge_distillation.html#new-hyper-parameters","text":"In general \\tau , \\alpha and \\beta are hyper-parameters. 
In their experiments, Hinton et al., 2015 use temperature values ranging from 1 to 20. They note that empirically, when the student model is very small compared to the teacher model, lower temperatures work better. This makes sense if we consider that as we raise the temperature, the resulting soft-labels distribution becomes richer in information, and a very small model might not be able to capture all of this information. However, there's no clear way to predict up front what kind of capacity for information the student model will have. With regards to \\alpha and \\beta , Hinton et al., 2015 use a weighted average between the distillation loss and the student loss. That is, \\beta = 1 - \\alpha . They note that in general, they obtained the best results when setting \\alpha to be much smaller than \\beta (although in one of their experiments they use \\alpha = \\beta = 0.5 ). Other works which utilize knowledge distillation don't use a weighted average. Some set \\alpha = 1 while leaving \\beta tunable, while others don't set any constraints.","title":"New Hyper-Parameters"},{"location":"knowledge_distillation.html#references","text":"Cristian Bucila, Rich Caruana, and Alexandru Niculescu-Mizil . Model Compression. KDD, 2006 Geoffrey Hinton, Oriol Vinyals and Jeff Dean . Distilling the Knowledge in a Neural Network. arxiv:1503.02531 Hokchhay Tann, Soheil Hashemi, Iris Bahar and Sherief Reda . Hardware-Software Codesign of Accurate, Multiplier-free Deep Neural Networks. DAC, 2017 Asit Mishra and Debbie Marr . Apprentice: Using Knowledge Distillation Techniques To Improve Low-Precision Network Accuracy. ICLR, 2018 Antonio Polino, Razvan Pascanu and Dan Alistarh . Model compression via distillation and quantization. ICLR, 2018 Anubhav Ashok, Nicholas Rhinehart, Fares Beainy and Kris M. Kitani . N2N learning: Network to Network Compression via Policy Gradient Reinforcement Learning. ICLR, 2018 Lucas Theis, Iryna Korshunova, Alykhan Tejani and Ferenc Husz\u00e1r . Faster gaze prediction with dense networks and Fisher pruning. arxiv:1801.05787","title":"References"},{"location":"model_zoo.html","text":"Distiller Model Zoo How to contribute models to the Model Zoo We encourage you to contribute new models to the Model Zoo. We welcome implementations of published papers or of your own work. To assure that models and algorithms shared with others are high-quality, please commit your models with the following: Command-line arguments Log files PyTorch model Contents The Distiller model zoo is not a \"traditional\" model-zoo, because it does not necessarily contain best-in-class compressed models. Instead, the model-zoo contains a number of deep learning models that have been compressed using Distiller following some well-known research papers. These are meant to serve as examples of how Distiller can be used. Each model contains a Distiller schedule detailing how the model was compressed, a PyTorch checkpoint, text logs and TensorBoard logs. Paper Dataset Network Method & Granularity Schedule Features Learning both Weights and Connections for Efficient Neural Networks ImageNet Alexnet Element-wise pruning Iterative; Manual Magnitude thresholding based on a sensitivity quantifier. 
Element-wise sparsity sensitivity analysis To prune, or not to prune: exploring the efficacy of pruning for model compression ImageNet MobileNet Element-wise pruning Automated gradual; Iterative Magnitude thresholding based on target level Learning Structured Sparsity in Deep Neural Networks CIFAR10 ResNet20 Group regularization 1. Train with group-lasso 2. Remove zero groups and fine-tune Group Lasso regularization. Groups: kernels (2D), channels, filters (3D), layers (4D), vectors (rows, cols) Pruning Filters for Efficient ConvNets CIFAR10 ResNet56 Filter ranking; guided by sensitivity analysis 1. Rank filters 2. Remove filters and channels 3. Fine-tune One-shot ranking and pruning of filters; with network thinning Learning both Weights and Connections for Efficient Neural Networks This schedule is an example of \"Iterative Pruning\" for Alexnet/ImageNet, as described in chapter 3 of Song Han's PhD dissertation: Efficient Methods and Hardware for Deep Learning and in his paper Learning both Weights and Connections for Efficient Neural Networks . The Distiller schedule uses SensitivityPruner which is similar to MagnitudeParameterPruner, but instead of specifying \"raw\" thresholds, it uses a \"sensitivity parameter\". Song Han's paper says that \"the pruning threshold is chosen as a quality parameter multiplied by the standard deviation of a layer's weights,\" and this is not explained much further. In Distiller, the \"quality parameter\" is referred to as \"sensitivity\" and is based on the values learned from performing sensitivity analysis. Using a parameter that is related to the standard deviation is very helpful: under the assumption that the weights tensors are distributed normally, the standard deviation acts as a threshold normalizer. Note that Distiller's implementation deviates slightly from the algorithm Song Han describes in his PhD dissertation, in that the threshold value is set only once. In his PhD dissertation, Song Han describes a threshold that grows at each iteration. This requires n+1 hyper-parameters (n being the number of pruning iterations we use): the threshold and the threshold increase (delta) at each pruning iteration. Distiller's implementation takes advantage of the fact that as pruning progresses, more weights are pulled toward zero, and therefore the threshold \"traps\" more weights. Thus, we can use fewer hyper-parameters and achieve the same results. Distiller schedule: distiller/examples/sensitivity-pruning/alexnet.schedule_sensitivity.yaml Checkpoint file: alexnet.checkpoint.89.pth.tar Results Our reference is TorchVision's pretrained Alexnet model, which has a Top1 accuracy of 56.55 and Top5=79.09. We prune away 88.44% of the parameters and achieve Top1=56.61 and Top5=79.45. Song Han prunes 89% of the parameters, which is slightly better than our results. 
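For reference, the thresholding rule described above boils down to something like the following sketch (the helper name is ours; this is an illustration, not Distiller's SensitivityPruner implementation):

```python
import torch

def sensitivity_mask(weights: torch.Tensor, sensitivity: float) -> torch.Tensor:
    # threshold = sensitivity * std(weights): under the normal-distribution
    # assumption, the standard deviation acts as a per-layer threshold normalizer.
    threshold = sensitivity * weights.std().item()
    # Keep (1.0) only the elements whose magnitude exceeds the threshold.
    return (weights.abs() > threshold).to(weights.dtype)

# Hypothetical usage for a single layer: zero out the pruned elements.
# layer.weight.data.mul_(sensitivity_mask(layer.weight.data, sensitivity=0.6))
```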
Parameters: +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean |----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0 | features.module.0.weight | (64, 3, 11, 11) | 23232 | 13411 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 42.27359 | 0.14391 | -0.00002 | 0.08805 | | 1 | features.module.3.weight | (192, 64, 5, 5) | 307200 | 115560 | 0.00000 | 0.00000 | 0.00000 | 1.91243 | 0.00000 | 62.38281 | 0.04703 | -0.00250 | 0.02289 | | 2 | features.module.6.weight | (384, 192, 3, 3) | 663552 | 256565 | 0.00000 | 0.00000 | 0.00000 | 6.18490 | 0.00000 | 61.33445 | 0.03354 | -0.00184 | 0.01803 | | 3 | features.module.8.weight | (256, 384, 3, 3) | 884736 | 315065 | 0.00000 | 0.00000 | 0.00000 | 6.96411 | 0.00000 | 64.38881 | 0.02646 | -0.00168 | 0.01422 | | 4 | features.module.10.weight | (256, 256, 3, 3) | 589824 | 186938 | 0.00000 | 0.00000 | 0.00000 | 15.49225 | 0.00000 | 68.30614 | 0.02714 | -0.00246 | 0.01409 | | 5 | classifier.1.weight | (4096, 9216) | 37748736 | 3398881 | 0.00000 | 0.21973 | 0.00000 | 0.21973 | 0.00000 | 90.99604 | 0.00589 | -0.00020 | 0.00168 | | 6 | classifier.4.weight | (4096, 4096) | 16777216 | 1782769 | 0.21973 | 3.46680 | 0.00000 | 3.46680 | 0.00000 | 89.37387 | 0.00849 | -0.00066 | 0.00263 | | 7 | classifier.6.weight | (1000, 4096) | 4096000 | 994738 | 3.36914 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 75.71440 | 0.01718 | 0.00030 | 0.00778 | | 8 | Total sparsity: | - | 61090496 | 7063928 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 88.43694 | 0.00000 | 0.00000 | 0.00000 | +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ 2018-04-04 21:30:52,499 - Total sparsity: 88.44 2018-04-04 21:30:52,499 - --- validate (epoch=89)----------- 2018-04-04 21:30:52,499 - 128116 samples (256 per mini-batch) 2018-04-04 21:31:35,357 - ==> Top1: 51.838 Top5: 74.817 Loss: 2.150 2018-04-04 21:31:39,251 - --- test --------------------- 2018-04-04 21:31:39,252 - 50000 samples (256 per mini-batch) 2018-04-04 21:32:01,274 - ==> Top1: 56.606 Top5: 79.446 Loss: 1.893 To prune, or not to prune: exploring the efficacy of pruning for model compression In their paper Zhu and Gupta, \"compare the accuracy of large, but pruned models (large-sparse) and their smaller, but dense (small-dense) counterparts with identical memory footprint.\" They also \"propose a new gradual pruning technique that is simple and straightforward to apply across a variety of models/datasets with minimal tuning.\" This pruning schedule is implemented by distiller.AutomatedGradualPruner, which increases the sparsity level (expressed as a percentage of zero-valued elements) gradually over several pruning steps. Distiller's implementation only prunes elements once in an epoch (the model is fine-tuned in between pruning events), which is a small deviation from Zhu and Gupta's paper. The research paper specifies the schedule in terms of mini-batches, while our implementation specifies the schedule in terms of epochs. 
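For reference, Zhu and Gupta's gradual schedule raises the sparsity from its initial level to the final target along a cubic curve; below is a sketch of the target computation, expressed in epochs to match Distiller's implementation (the function and variable names are ours):

```python
def agp_target_sparsity(epoch, start_epoch, end_epoch,
                        initial_sparsity, final_sparsity):
    # Cubic ramp from "To prune, or not to prune": sparsity grows quickly at
    # first, then more slowly as fewer unimportant weights remain to remove.
    span = end_epoch - start_epoch
    progress = min(max(epoch - start_epoch, 0), span) / span
    return final_sparsity + (initial_sparsity - final_sparsity) * (1.0 - progress) ** 3
```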
We feel that using epochs performs well, and is more \"stable\", since the number of mini-batches will change, if you change the batch size. ImageNet files: Distiller schedule: distiller/examples/agp-pruning/mobilenet.imagenet.schedule_agp.yaml Checkpoint file: checkpoint.pth.tar ResNet18 files: Distiller schedule: distiller/examples/agp-pruning/resnet18.schedule_agp.yaml Checkpoint file: checkpoint.pth.tar Results As our baseline we used a pretrained PyTorch MobileNet model (width=1) which has Top1=68.848 and Top5=88.740. In their paper, Zhu and Gupta prune 50% of the elements of MobileNet (width=1) with a 1.1% drop in accuracy. We pruned about 51.6% of the elements, with virtually no change in the accuracies (Top1: 68.808 and Top5: 88.656). We didn't try to prune more than this, but we do note that the baseline accuracy that we used is almost 2% lower than the accuracy published in the paper. +----+--------------------------+--------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean | |----+--------------------------+--------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0 | module.model.0.0.weight | (32, 3, 3, 3) | 864 | 864 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.14466 | 0.00103 | 0.06508 | | 1 | module.model.1.0.weight | (32, 1, 3, 3) | 288 | 288 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.32146 | 0.01020 | 0.12932 | | 2 | module.model.1.3.weight | (64, 32, 1, 1) | 2048 | 2048 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.11942 | 0.00024 | 0.03627 | | 3 | module.model.2.0.weight | (64, 1, 3, 3) | 576 | 576 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.15809 | 0.00543 | 0.11513 | | 4 | module.model.2.3.weight | (128, 64, 1, 1) | 8192 | 8192 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.08442 | -0.00031 | 0.04182 | | 5 | module.model.3.0.weight | (128, 1, 3, 3) | 1152 | 1152 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.16780 | 0.00125 | 0.10545 | | 6 | module.model.3.3.weight | (128, 128, 1, 1) | 16384 | 16384 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.07126 | -0.00197 | 0.04123 | | 7 | module.model.4.0.weight | (128, 1, 3, 3) | 1152 | 1152 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.10182 | 0.00171 | 0.08719 | | 8 | module.model.4.3.weight | (256, 128, 1, 1) | 32768 | 13108 | 0.00000 | 0.00000 | 10.15625 | 59.99756 | 12.50000 | 59.99756 | 0.05543 | -0.00002 | 0.02760 | | 9 | module.model.5.0.weight | (256, 1, 3, 3) | 2304 | 2304 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.12516 | -0.00288 | 0.08058 | | 10 | module.model.5.3.weight | (256, 256, 1, 1) | 65536 | 26215 | 0.00000 | 0.00000 | 12.50000 | 59.99908 | 23.82812 | 59.99908 | 0.04453 | 0.00002 | 0.02271 | | 11 | module.model.6.0.weight | (256, 1, 3, 3) | 2304 | 2304 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.08024 | 0.00252 | 0.06377 | | 12 | module.model.6.3.weight | (512, 256, 1, 1) | 131072 | 52429 | 0.00000 | 0.00000 | 23.82812 | 59.99985 | 14.25781 | 59.99985 | 0.03561 | -0.00057 | 0.01779 | | 13 | module.model.7.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 
0.00000 | 0.00000 | 0.11008 | -0.00018 | 0.06829 | | 14 | module.model.7.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 14.25781 | 59.99985 | 21.28906 | 59.99985 | 0.02944 | -0.00060 | 0.01515 | | 15 | module.model.8.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.08258 | 0.00370 | 0.04905 | | 16 | module.model.8.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 21.28906 | 59.99985 | 28.51562 | 59.99985 | 0.02865 | -0.00046 | 0.01465 | | 17 | module.model.9.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.07578 | 0.00468 | 0.04201 | | 18 | module.model.9.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 28.51562 | 59.99985 | 23.43750 | 59.99985 | 0.02939 | -0.00044 | 0.01511 | | 19 | module.model.10.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.07091 | 0.00014 | 0.04306 | | 20 | module.model.10.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 24.60938 | 59.99985 | 20.89844 | 59.99985 | 0.03095 | -0.00059 | 0.01672 | | 21 | module.model.11.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.05729 | -0.00518 | 0.04267 | | 22 | module.model.11.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 20.89844 | 59.99985 | 17.57812 | 59.99985 | 0.03229 | -0.00044 | 0.01797 | | 23 | module.model.12.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.04981 | -0.00136 | 0.03967 | | 24 | module.model.12.3.weight | (1024, 512, 1, 1) | 524288 | 209716 | 0.00000 | 0.00000 | 16.01562 | 59.99985 | 44.23828 | 59.99985 | 0.02514 | -0.00106 | 0.01278 | | 25 | module.model.13.0.weight | (1024, 1, 3, 3) | 9216 | 9216 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.02396 | -0.00949 | 0.01549 | | 26 | module.model.13.3.weight | (1024, 1024, 1, 1) | 1048576 | 419431 | 0.00000 | 0.00000 | 44.72656 | 59.99994 | 1.46484 | 59.99994 | 0.01801 | -0.00017 | 0.00931 | | 27 | module.fc.weight | (1000, 1024) | 1024000 | 409600 | 1.46484 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 60.00000 | 0.05078 | 0.00271 | 0.02734 | | 28 | Total sparsity: | - | 4209088 | 1726917 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 58.97171 | 0.00000 | 0.00000 | 0.00000 | +----+--------------------------+--------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ Total sparsity: 58.97 --- validate (epoch=199)----------- 128116 samples (256 per mini-batch) ==> Top1: 65.337 Top5: 84.984 Loss: 1.494 --- test --------------------- 50000 samples (256 per mini-batch) ==> Top1: 68.810 Top5: 88.626 Loss: 1.282 Learning Structured Sparsity in Deep Neural Networks This research paper from the University of Pittsburgh, \"proposes a Structured Sparsity Learning (SSL) method to regularize the structures (i.e., filters, channels, filter shapes, and layer depth) of DNNs. SSL can: (1) learn a compact structure from a bigger DNN to reduce computation cost; (2) obtain a hardware-friendly structured sparsity of DNN to efficiently accelerate the DNN\u2019s evaluation.\" Note that this paper does not use pruning, but instead uses group regularization during the training to force weights towards zero, as a group. 
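As a rough sketch of what such group regularization computes - a Group Lasso penalty over filter groups; this is an illustration, not Distiller's GroupLassoRegularizer code:

```python
import torch

def group_lasso_penalty(conv_weights: torch.Tensor, strength: float) -> torch.Tensor:
    # conv_weights: a 4-D convolution weights tensor (filters, channels, k, k).
    # Group Lasso sums the l2-norms of whole groups (here: entire filters),
    # pushing complete groups toward exact zero rather than individual elements.
    filter_norms = conv_weights.view(conv_weights.size(0), -1).norm(p=2, dim=1)
    return strength * filter_norms.sum()

# During training the penalty is added to the data loss, e.g.:
# loss = criterion(output, target) + group_lasso_penalty(conv.weight, strength=0.0002)
```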
We used a schedule which thresholds the regularized elements at a magnitude equal to the regularization strength. At the end of the regularization phase, we save the final sparsity masks generated by the regularization, and exit. Then we load this regularized model and remove the layers corresponding to the zeroed weight tensors (all of a layer's elements have a zero value). Baseline training We started by training the baseline ResNet20-Cifar dense network since we didn't have a pre-trained model. Distiller schedule: distiller/examples/ssl/resnet20_cifar_baseline_training.yaml Checkpoint files: distiller/examples/ssl/checkpoints/ $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --lr=0.3 --epochs=180 --compress=../cifar10/resnet20/baseline_training.yaml -j=1 --deterministic Regularization Then we started training from scratch again, but this time we used Group Lasso regularization on entire layers: Distiller schedule: distiller/examples/ssl/ssl_4D-removal_4L_training.yaml $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --lr=0.4 --epochs=180 --compress=../ssl/ssl_4D-removal_training.yaml -j=1 --deterministic The diagram below shows the training of Resnet20/CIFAR10 using Group Lasso regularization on entire layers (in blue) vs. training the Resnet20/CIFAR10 baseline (in red). You may notice several interesting things: 1. The LR-decay policy is the same, but the two sessions start with different initial LR values. 2. The data-loss of the regularized training follows the same shape as the un-regularized training (baseline), and eventually the two seem to merge. 3. We see similar behavior in the validation Top1 and Top5 accuracy results, but the regularized training eventually performs better. 4. In the top right corner we see the behavior of the regularization loss ( Reg Loss ), which actually increases for some time, until the data-loss has a sharp drop (after ~16K mini-batches), at which point the regularization loss also starts dropping. This regularization yields 5 layers with zeroed weight tensors. We load this model, remove the 5 layers, and start fine-tuning the weights. This process of layer removal is specific to ResNet for CIFAR, which we altered by adding code to skip over layers during the forward path. When you export to ONNX, the removed layers do not participate in the forward path, so they are not instantiated. We managed to remove 5 of the 16 3x3 convolution layers which dominate the computation time. It's not bad, but we probably could have done better. Fine-tuning During the fine-tuning process, because the removed layers do not participate in the forward path, they do not appear in the backward path and receive no gradients: therefore they are completely disconnected from the network. We copy the checkpoint file of the regularized model to checkpoint_trained_4D_regularized_5Lremoved.pth.tar . Distiller schedule: distiller/examples/ssl/ssl_4D-removal_finetuning.yaml $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --lr=0.1 --epochs=250 --resume=../cifar10/resnet20/checkpoint_trained_4D_regularized_5Lremoved.pth.tar --compress=../ssl/ssl_4D-removal_finetuning.yaml -j=1 --deterministic Results Our baseline results for ResNet20 Cifar are: Top1=91.450 and Top5=99.750 We used Distiller's GroupLassoRegularizer to remove 5 layers from Resnet20 (CIFAR10) with no degradation of the accuracies. 
The regularized model exhibits really poor classification abilities: $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --resume=../cifar10/resnet20/checkpoint_trained_4D_regularized_5Lremoved.pth.tar --evaluate => loading checkpoint ../cifar10/resnet20/checkpoint_trained_4D_regularized_5Lremoved.pth.tar best top@1: 90.620 Loaded compression schedule from checkpoint (epoch 179) Removing layer: module.layer1.0.conv1 [layer=0 block=0 conv=0] Removing layer: module.layer1.0.conv2 [layer=0 block=0 conv=1] Removing layer: module.layer1.1.conv1 [layer=0 block=1 conv=0] Removing layer: module.layer1.1.conv2 [layer=0 block=1 conv=1] Removing layer: module.layer2.2.conv2 [layer=1 block=2 conv=1] Files already downloaded and verified Files already downloaded and verified Dataset sizes: training=45000 validation=5000 test=10000 --- test --------------------- 10000 samples (256 per mini-batch) ==> Top1: 22.290 Top5: 68.940 Loss: 5.172 However, after fine-tuning, we recovered most of the accuracy loss, but not quite all of it: Top1=91.020 and Top5=99.670 We didn't spend time trying to wrestle with this network, and therefore didn't achieve SSL's published results (which showed that they managed to remove 6 layers and at the same time increase accuracies). Pruning Filters for Efficient ConvNets Quoting the authors directly: We present an acceleration method for CNNs, where we prune filters from CNNs that are identified as having a small effect on the output accuracy. By removing whole filters in the network together with their connecting feature maps, the computation costs are reduced significantly. In contrast to pruning weights, this approach does not result in sparse connectivity patterns. Hence, it does not need the support of sparse convolution libraries and can work with existing efficient BLAS libraries for dense matrix multiplications. The implementation of the research by Hao et al. required us to add filter-pruning sensitivity analysis and support for \"network thinning\". After performing filter-pruning sensitivity analysis to assess which layers are more sensitive to the pruning of filters, we execute distiller.L1RankedStructureParameterPruner once in order to rank the filters of each layer by their L1-norm values, and then we prune to the schedule-prescribed sparsity level. Distiller schedule: distiller/examples/pruning_filters_for_efficient_convnets/resnet56_cifar_filter_rank.yaml Checkpoint files: checkpoint_finetuned.pth.tar The excerpt from the schedule, displayed below, shows how we declare the L1RankedStructureParameterPruner. This class currently ranks filters only, but because in the future this class may support ranking of various structures, you need to specify for each parameter both the target sparsity level and the structure type ('3D' is filter-wise pruning). In the policy, we specify that we want to invoke this pruner once, at epoch 180. Because we are starting from a network which was trained for 180 epochs (see Baseline training below), the filter ranking is performed right at the outset of this schedule. 
policies: - pruner: instance_name: filter_pruner epochs: [180] Following the pruning, we want to \"physically\" remove the pruned filters from the network, which involves reconfiguring the Convolutional layers and the parameter tensors. When we remove filters from Convolution layer n we need to perform several changes to the network: 1. Shrink layer n 's weights tensor, leaving only the \"important\" filters. 2. Configure layer n 's .out_channels member to its new, smaller, value. 3. If a BN layer follows layer n , then it also needs to be reconfigured and its scale and shift parameter vectors need to be shrunk. 4. If a Convolution layer follows the BN layer, then it will have fewer input channels, which requires reconfiguration and shrinking of its weights. All of this is performed by distiller.ResnetCifarFilterRemover which is also scheduled at epoch 180. We call this process \"network thinning\". extensions: net_thinner: class: 'FilterRemover' thinning_func_str: remove_filters arch: 'resnet56_cifar' dataset: 'cifar10' Network thinning requires us to understand the layer connectivity and data-dependency of the DNN, and we are working on a robust method to perform this. On networks with topologies similar to ResNet (residuals) and GoogLeNet (inception), which have several inputs and outputs to/from Convolution layers, there are extra details to consider. Our current implementation is specific to certain layers in ResNet and is a bit fragile. We will continue to improve and generalize this. Baseline training We started by training the baseline ResNet56-Cifar dense network (180 epochs) since we didn't have a pre-trained model. Distiller schedule: distiller/examples/pruning_filters_for_efficient_convnets/resnet56_cifar_baseline_training.yaml Checkpoint files: checkpoint.resnet56_cifar_baseline.pth.tar Results We trained a ResNet56-Cifar10 network and achieved accuracy results which are on par with published results: Top1: 92.970 and Top5: 99.740. We used Hao et al.'s algorithm to remove 37.3% of the original convolution MACs, while maintaining virtually the same accuracy as the baseline: Top1: 92.830 and Top5: 99.760","title":"Model Zoo"},{"location":"model_zoo.html#distiller-model-zoo","text":"","title":"Distiller Model Zoo"},{"location":"model_zoo.html#how-to-contribute-models-to-the-model-zoo","text":"We encourage you to contribute new models to the Model Zoo. We welcome implementations of published papers or of your own work. To assure that models and algorithms shared with others are high-quality, please commit your models with the following: Command-line arguments Log files PyTorch model","title":"How to contribute models to the Model Zoo"},{"location":"model_zoo.html#contents","text":"The Distiller model zoo is not a \"traditional\" model-zoo, because it does not necessarily contain best-in-class compressed models. Instead, the model-zoo contains a number of deep learning models that have been compressed using Distiller following some well-known research papers. These are meant to serve as examples of how Distiller can be used. Each model contains a Distiller schedule detailing how the model was compressed, a PyTorch checkpoint, text logs and TensorBoard logs. Paper Dataset Network Method & Granularity Schedule Features Learning both Weights and Connections for Efficient Neural Networks ImageNet Alexnet Element-wise pruning Iterative; Manual Magnitude thresholding based on a sensitivity quantifier. 
Element-wise sparsity sensitivity analysis To prune, or not to prune: exploring the efficacy of pruning for model compression ImageNet MobileNet Element-wise pruning Automated gradual; Iterative Magnitude thresholding based on target level Learning Structured Sparsity in Deep Neural Networks CIFAR10 ResNet20 Group regularization 1. Train with group-lasso 2. Remove zero groups and fine-tune Group Lasso regularization. Groups: kernels (2D), channels, filters (3D), layers (4D), vectors (rows, cols) Pruning Filters for Efficient ConvNets CIFAR10 ResNet56 Filter ranking; guided by sensitivity analysis 1. Rank filters 2. Remove filters and channels 3. Fine-tune One-shot ranking and pruning of filters; with network thinning","title":"Contents"},{"location":"model_zoo.html#learning-both-weights-and-connections-for-efficient-neural-networks","text":"This schedule is an example of \"Iterative Pruning\" for Alexnet/ImageNet, as described in chapter 3 of Song Han's PhD dissertation: Efficient Methods and Hardware for Deep Learning and in his paper Learning both Weights and Connections for Efficient Neural Networks . The Distiller schedule uses SensitivityPruner which is similar to MagnitudeParameterPruner, but instead of specifying \"raw\" thresholds, it uses a \"sensitivity parameter\". Song Han's paper says that \"the pruning threshold is chosen as a quality parameter multiplied by the standard deviation of a layer's weights,\" and this is not explained much further. In Distiller, the \"quality parameter\" is referred to as \"sensitivity\" and is based on the values learned from performing sensitivity analysis. Using a parameter that is related to the standard deviation is very helpful: under the assumption that the weights tensors are distributed normally, the standard deviation acts as a threshold normalizer. Note that Distiller's implementation deviates slightly from the algorithm Song Han describes in his PhD dissertation, in that the threshold value is set only once. In his PhD dissertation, Song Han describes a threshold that grows at each iteration. This requires n+1 hyper-parameters (n being the number of pruning iterations we use): the threshold and the threshold increase (delta) at each pruning iteration. Distiller's implementation takes advantage of the fact that as pruning progresses, more weights are pulled toward zero, and therefore the threshold \"traps\" more weights. Thus, we can use fewer hyper-parameters and achieve the same results. Distiller schedule: distiller/examples/sensitivity-pruning/alexnet.schedule_sensitivity.yaml Checkpoint file: alexnet.checkpoint.89.pth.tar","title":"Learning both Weights and Connections for Efficient Neural Networks"},{"location":"model_zoo.html#results","text":"Our reference is TorchVision's pretrained Alexnet model, which has a Top1 accuracy of 56.55 and Top5=79.09. We prune away 88.44% of the parameters and achieve Top1=56.61 and Top5=79.45. Song Han prunes 89% of the parameters, which is slightly better than our results. 
Parameters: +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean |----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0 | features.module.0.weight | (64, 3, 11, 11) | 23232 | 13411 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 42.27359 | 0.14391 | -0.00002 | 0.08805 | | 1 | features.module.3.weight | (192, 64, 5, 5) | 307200 | 115560 | 0.00000 | 0.00000 | 0.00000 | 1.91243 | 0.00000 | 62.38281 | 0.04703 | -0.00250 | 0.02289 | | 2 | features.module.6.weight | (384, 192, 3, 3) | 663552 | 256565 | 0.00000 | 0.00000 | 0.00000 | 6.18490 | 0.00000 | 61.33445 | 0.03354 | -0.00184 | 0.01803 | | 3 | features.module.8.weight | (256, 384, 3, 3) | 884736 | 315065 | 0.00000 | 0.00000 | 0.00000 | 6.96411 | 0.00000 | 64.38881 | 0.02646 | -0.00168 | 0.01422 | | 4 | features.module.10.weight | (256, 256, 3, 3) | 589824 | 186938 | 0.00000 | 0.00000 | 0.00000 | 15.49225 | 0.00000 | 68.30614 | 0.02714 | -0.00246 | 0.01409 | | 5 | classifier.1.weight | (4096, 9216) | 37748736 | 3398881 | 0.00000 | 0.21973 | 0.00000 | 0.21973 | 0.00000 | 90.99604 | 0.00589 | -0.00020 | 0.00168 | | 6 | classifier.4.weight | (4096, 4096) | 16777216 | 1782769 | 0.21973 | 3.46680 | 0.00000 | 3.46680 | 0.00000 | 89.37387 | 0.00849 | -0.00066 | 0.00263 | | 7 | classifier.6.weight | (1000, 4096) | 4096000 | 994738 | 3.36914 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 75.71440 | 0.01718 | 0.00030 | 0.00778 | | 8 | Total sparsity: | - | 61090496 | 7063928 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 88.43694 | 0.00000 | 0.00000 | 0.00000 | +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ 2018-04-04 21:30:52,499 - Total sparsity: 88.44 2018-04-04 21:30:52,499 - --- validate (epoch=89)----------- 2018-04-04 21:30:52,499 - 128116 samples (256 per mini-batch) 2018-04-04 21:31:35,357 - ==> Top1: 51.838 Top5: 74.817 Loss: 2.150 2018-04-04 21:31:39,251 - --- test --------------------- 2018-04-04 21:31:39,252 - 50000 samples (256 per mini-batch) 2018-04-04 21:32:01,274 - ==> Top1: 56.606 Top5: 79.446 Loss: 1.893","title":"Results"},{"location":"model_zoo.html#to-prune-or-not-to-prune-exploring-the-efficacy-of-pruning-for-model-compression","text":"In their paper Zhu and Gupta, \"compare the accuracy of large, but pruned models (large-sparse) and their smaller, but dense (small-dense) counterparts with identical memory footprint.\" They also \"propose a new gradual pruning technique that is simple and straightforward to apply across a variety of models/datasets with minimal tuning.\" This pruning schedule is implemented by distiller.AutomatedGradualPruner, which increases the sparsity level (expressed as a percentage of zero-valued elements) gradually over several pruning steps. Distiller's implementation only prunes elements once in an epoch (the model is fine-tuned in between pruning events), which is a small deviation from Zhu and Gupta's paper. The research paper specifies the schedule in terms of mini-batches, while our implementation specifies the schedule in terms of epochs. 
We feel that using epochs performs well, and is more \"stable\", since the number of mini-batches will change, if you change the batch size. ImageNet files: Distiller schedule: distiller/examples/agp-pruning/mobilenet.imagenet.schedule_agp.yaml Checkpoint file: checkpoint.pth.tar ResNet18 files: Distiller schedule: distiller/examples/agp-pruning/resnet18.schedule_agp.yaml Checkpoint file: checkpoint.pth.tar","title":"To prune, or not to prune: exploring the efficacy of pruning for model compression"},{"location":"model_zoo.html#results_1","text":"As our baseline we used a pretrained PyTorch MobileNet model (width=1) which has Top1=68.848 and Top5=88.740. In their paper, Zhu and Gupta prune 50% of the elements of MobileNet (width=1) with a 1.1% drop in accuracy. We pruned about 51.6% of the elements, with virtually no change in the accuracies (Top1: 68.808 and Top5: 88.656). We didn't try to prune more than this, but we do note that the baseline accuracy that we used is almost 2% lower than the accuracy published in the paper. +----+--------------------------+--------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean | |----+--------------------------+--------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0 | module.model.0.0.weight | (32, 3, 3, 3) | 864 | 864 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.14466 | 0.00103 | 0.06508 | | 1 | module.model.1.0.weight | (32, 1, 3, 3) | 288 | 288 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.32146 | 0.01020 | 0.12932 | | 2 | module.model.1.3.weight | (64, 32, 1, 1) | 2048 | 2048 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.11942 | 0.00024 | 0.03627 | | 3 | module.model.2.0.weight | (64, 1, 3, 3) | 576 | 576 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.15809 | 0.00543 | 0.11513 | | 4 | module.model.2.3.weight | (128, 64, 1, 1) | 8192 | 8192 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.08442 | -0.00031 | 0.04182 | | 5 | module.model.3.0.weight | (128, 1, 3, 3) | 1152 | 1152 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.16780 | 0.00125 | 0.10545 | | 6 | module.model.3.3.weight | (128, 128, 1, 1) | 16384 | 16384 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.07126 | -0.00197 | 0.04123 | | 7 | module.model.4.0.weight | (128, 1, 3, 3) | 1152 | 1152 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.10182 | 0.00171 | 0.08719 | | 8 | module.model.4.3.weight | (256, 128, 1, 1) | 32768 | 13108 | 0.00000 | 0.00000 | 10.15625 | 59.99756 | 12.50000 | 59.99756 | 0.05543 | -0.00002 | 0.02760 | | 9 | module.model.5.0.weight | (256, 1, 3, 3) | 2304 | 2304 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.12516 | -0.00288 | 0.08058 | | 10 | module.model.5.3.weight | (256, 256, 1, 1) | 65536 | 26215 | 0.00000 | 0.00000 | 12.50000 | 59.99908 | 23.82812 | 59.99908 | 0.04453 | 0.00002 | 0.02271 | | 11 | module.model.6.0.weight | (256, 1, 3, 3) | 2304 | 2304 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.08024 | 0.00252 | 0.06377 | | 12 | module.model.6.3.weight | (512, 256, 1, 1) | 131072 | 52429 | 0.00000 | 0.00000 | 23.82812 | 59.99985 | 14.25781 | 59.99985 | 
0.03561 | -0.00057 | 0.01779 | | 13 | module.model.7.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.11008 | -0.00018 | 0.06829 | | 14 | module.model.7.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 14.25781 | 59.99985 | 21.28906 | 59.99985 | 0.02944 | -0.00060 | 0.01515 | | 15 | module.model.8.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.08258 | 0.00370 | 0.04905 | | 16 | module.model.8.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 21.28906 | 59.99985 | 28.51562 | 59.99985 | 0.02865 | -0.00046 | 0.01465 | | 17 | module.model.9.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.07578 | 0.00468 | 0.04201 | | 18 | module.model.9.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 28.51562 | 59.99985 | 23.43750 | 59.99985 | 0.02939 | -0.00044 | 0.01511 | | 19 | module.model.10.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.07091 | 0.00014 | 0.04306 | | 20 | module.model.10.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 24.60938 | 59.99985 | 20.89844 | 59.99985 | 0.03095 | -0.00059 | 0.01672 | | 21 | module.model.11.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.05729 | -0.00518 | 0.04267 | | 22 | module.model.11.3.weight | (512, 512, 1, 1) | 262144 | 104858 | 0.00000 | 0.00000 | 20.89844 | 59.99985 | 17.57812 | 59.99985 | 0.03229 | -0.00044 | 0.01797 | | 23 | module.model.12.0.weight | (512, 1, 3, 3) | 4608 | 4608 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.04981 | -0.00136 | 0.03967 | | 24 | module.model.12.3.weight | (1024, 512, 1, 1) | 524288 | 209716 | 0.00000 | 0.00000 | 16.01562 | 59.99985 | 44.23828 | 59.99985 | 0.02514 | -0.00106 | 0.01278 | | 25 | module.model.13.0.weight | (1024, 1, 3, 3) | 9216 | 9216 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.02396 | -0.00949 | 0.01549 | | 26 | module.model.13.3.weight | (1024, 1024, 1, 1) | 1048576 | 419431 | 0.00000 | 0.00000 | 44.72656 | 59.99994 | 1.46484 | 59.99994 | 0.01801 | -0.00017 | 0.00931 | | 27 | module.fc.weight | (1000, 1024) | 1024000 | 409600 | 1.46484 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 60.00000 | 0.05078 | 0.00271 | 0.02734 | | 28 | Total sparsity: | - | 4209088 | 1726917 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 58.97171 | 0.00000 | 0.00000 | 0.00000 | +----+--------------------------+--------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ Total sparsity: 58.97 --- validate (epoch=199)----------- 128116 samples (256 per mini-batch) ==> Top1: 65.337 Top5: 84.984 Loss: 1.494 --- test --------------------- 50000 samples (256 per mini-batch) ==> Top1: 68.810 Top5: 88.626 Loss: 1.282","title":"Results"},{"location":"model_zoo.html#learning-structured-sparsity-in-deep-neural-networks","text":"This research paper from the University of Pittsburgh, \"proposes a Structured Sparsity Learning (SSL) method to regularize the structures (i.e., filters, channels, filter shapes, and layer depth) of DNNs. 
SSL can: (1) learn a compact structure from a bigger DNN to reduce computation cost; (2) obtain a hardware-friendly structured sparsity of DNN to efficiently accelerate the DNN\u2019s evaluation.\" Note that this paper does not use pruning, but instead uses group regularization during the training to force weights towards zero, as a group. We used a schedule which thresholds the regularized elements at a magnitude equal to the regularization strength. At the end of the regularization phase, we save the final sparsity masks generated by the regularization, and exit. Then we load this regularized model and remove the layers corresponding to the zeroed weight tensors (all of a layer's elements have a zero value).","title":"Learning Structured Sparsity in Deep Neural Networks"},{"location":"model_zoo.html#baseline-training","text":"We started by training the baseline ResNet20-Cifar dense network since we didn't have a pre-trained model. Distiller schedule: distiller/examples/ssl/resnet20_cifar_baseline_training.yaml Checkpoint files: distiller/examples/ssl/checkpoints/ $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --lr=0.3 --epochs=180 --compress=../cifar10/resnet20/baseline_training.yaml -j=1 --deterministic","title":"Baseline training"},{"location":"model_zoo.html#regularization","text":"Then we started training from scratch again, but this time we used Group Lasso regularization on entire layers: Distiller schedule: distiller/examples/ssl/ssl_4D-removal_4L_training.yaml $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --lr=0.4 --epochs=180 --compress=../ssl/ssl_4D-removal_training.yaml -j=1 --deterministic The diagram below shows the training of Resnet20/CIFAR10 using Group Lasso regularization on entire layers (in blue) vs. training the Resnet20/CIFAR10 baseline (in red). You may notice several interesting things: 1. The LR-decay policy is the same, but the two sessions start with different initial LR values. 2. The data-loss of the regularized training follows the same shape as the un-regularized training (baseline), and eventually the two seem to merge. 3. We see similar behavior in the validation Top1 and Top5 accuracy results, but the regularized training eventually performs better. 4. In the top right corner we see the behavior of the regularization loss ( Reg Loss ), which actually increases for some time, until the data-loss has a sharp drop (after ~16K mini-batches), at which point the regularization loss also starts dropping. This regularization yields 5 layers with zeroed weight tensors. We load this model, remove the 5 layers, and start fine-tuning the weights. This process of layer removal is specific to ResNet for CIFAR, which we altered by adding code to skip over layers during the forward path. When you export to ONNX, the removed layers do not participate in the forward path, so they are not instantiated. We managed to remove 5 of the 16 3x3 convolution layers which dominate the computation time. It's not bad, but we probably could have done better.","title":"Regularization"},{"location":"model_zoo.html#fine-tuning","text":"During the fine-tuning process, because the removed layers do not participate in the forward path, they do not appear in the backward path and receive no gradients: therefore they are completely disconnected from the network. We copy the checkpoint file of the regularized model to checkpoint_trained_4D_regularized_5Lremoved.pth.tar . 
Distiller schedule: distiller/examples/ssl/ssl_4D-removal_finetuning.yaml $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --lr=0.1 --epochs=250 --resume=../cifar10/resnet20/checkpoint_trained_4D_regularized_5Lremoved.pth.tar --compress=../ssl/ssl_4D-removal_finetuning.yaml -j=1 --deterministic","title":"Fine-tuning"},{"location":"model_zoo.html#results_2","text":"Our baseline results for ResNet20 Cifar are: Top1=91.450 and Top5=99.750 We used Distiller's GroupLassoRegularizer to remove 5 layers from Resnet20 (CIFAR10) with no degradation of the accuracies. The regularized model exhibits really poor classification abilities: $ time python3 compress_classifier.py --arch resnet20_cifar ../data.cifar10 -p=50 --resume=../cifar10/resnet20/checkpoint_trained_4D_regularized_5Lremoved.pth.tar --evaluate => loading checkpoint ../cifar10/resnet20/checkpoint_trained_4D_regularized_5Lremoved.pth.tar best top@1: 90.620 Loaded compression schedule from checkpoint (epoch 179) Removing layer: module.layer1.0.conv1 [layer=0 block=0 conv=0] Removing layer: module.layer1.0.conv2 [layer=0 block=0 conv=1] Removing layer: module.layer1.1.conv1 [layer=0 block=1 conv=0] Removing layer: module.layer1.1.conv2 [layer=0 block=1 conv=1] Removing layer: module.layer2.2.conv2 [layer=1 block=2 conv=1] Files already downloaded and verified Files already downloaded and verified Dataset sizes: training=45000 validation=5000 test=10000 --- test --------------------- 10000 samples (256 per mini-batch) ==> Top1: 22.290 Top5: 68.940 Loss: 5.172 However, after fine-tuning, we recovered most of the accuracy loss, but not quite all of it: Top1=91.020 and Top5=99.670 We didn't spend time trying to wrestle with this network, and therefore didn't achieve SSL's published results (which showed that they managed to remove 6 layers and at the same time increase accuracies).","title":"Results"},{"location":"model_zoo.html#pruning-filters-for-efficient-convnets","text":"Quoting the authors directly: We present an acceleration method for CNNs, where we prune filters from CNNs that are identified as having a small effect on the output accuracy. By removing whole filters in the network together with their connecting feature maps, the computation costs are reduced significantly. In contrast to pruning weights, this approach does not result in sparse connectivity patterns. Hence, it does not need the support of sparse convolution libraries and can work with existing efficient BLAS libraries for dense matrix multiplications. The implementation of the research by Hao et al. required us to add filter-pruning sensitivity analysis and support for \"network thinning\". After performing filter-pruning sensitivity analysis to assess which layers are more sensitive to the pruning of filters, we execute distiller.L1RankedStructureParameterPruner once in order to rank the filters of each layer by their L1-norm values, and then we prune to the schedule-prescribed sparsity level. Distiller schedule: distiller/examples/pruning_filters_for_efficient_convnets/resnet56_cifar_filter_rank.yaml Checkpoint files: checkpoint_finetuned.pth.tar The excerpt from the schedule, displayed below, shows how we declare the L1RankedStructureParameterPruner. This class currently ranks filters only, but because in the future this class may support ranking of various structures, you need to specify for each parameter both the target sparsity level and the structure type ('3D' is filter-wise pruning). 
pruners: filter_pruner: class: 'L1RankedStructureParameterPruner' reg_regims: 'module.layer1.0.conv1.weight': [0.6, '3D'] 'module.layer1.1.conv1.weight': [0.6, '3D'] 'module.layer1.2.conv1.weight': [0.6, '3D'] 'module.layer1.3.conv1.weight': [0.6, '3D'] In the policy, we specify that we want to invoke this pruner once, at epoch 180. Because we are starting from a network which was trained for 180 epochs (see Baseline training below), the filter ranking is performed right at the outset of this schedule. policies: - pruner: instance_name: filter_pruner epochs: [180] Following the pruning, we want to \"physically\" remove the pruned filters from the network, which involves reconfiguring the Convolutional layers and the parameter tensors. When we remove filters from Convolution layer n we need to perform several changes to the network: 1. Shrink layer n 's weights tensor, leaving only the \"important\" filters. 2. Configure layer n 's .out_channels member to its new, smaller, value. 3. If a BN layer follows layer n , then it also needs to be reconfigured and its scale and shift parameter vectors need to be shrunk. 4. If a Convolution layer follows the BN layer, then it will have fewer input channels, which requires reconfiguration and shrinking of its weights. All of this is performed by distiller.ResnetCifarFilterRemover which is also scheduled at epoch 180. We call this process \"network thinning\". extensions: net_thinner: class: 'FilterRemover' thinning_func_str: remove_filters arch: 'resnet56_cifar' dataset: 'cifar10' Network thinning requires us to understand the layer connectivity and data-dependency of the DNN, and we are working on a robust method to perform this. On networks with topologies similar to ResNet (residuals) and GoogLeNet (inception), which have several inputs and outputs to/from Convolution layers, there are extra details to consider. Our current implementation is specific to certain layers in ResNet and is a bit fragile. We will continue to improve and generalize this.","title":"Pruning Filters for Efficient ConvNets"},{"location":"model_zoo.html#baseline-training_1","text":"We started by training the baseline ResNet56-Cifar dense network (180 epochs) since we didn't have a pre-trained model. Distiller schedule: distiller/examples/pruning_filters_for_efficient_convnets/resnet56_cifar_baseline_training.yaml Checkpoint files: checkpoint.resnet56_cifar_baseline.pth.tar","title":"Baseline training"},{"location":"model_zoo.html#results_3","text":"We trained a ResNet56-Cifar10 network and achieved accuracy results which are on par with published results: Top1: 92.970 and Top5: 99.740. We used Hao et al.'s algorithm to remove 37.3% of the original convolution MACs, while maintaining virtually the same accuracy as the baseline: Top1: 92.830 and Top5: 99.760","title":"Results"},{"location":"pruning.html","text":"Pruning A common methodology for inducing sparsity in weights and activations is called pruning . Pruning is the application of a binary criterion to decide which weights to prune: weights which match the pruning criterion are assigned a value of zero. Pruned elements are \"trimmed\" from the model: we zero their values and also make sure they don't take part in the back-propagation process. We can prune weights, biases, and activations. Biases are few and their contribution to a layer's output is relatively large, so there is little incentive to prune them. 
We usually see sparse activations following a ReLU layer, because ReLU quenches negative activations to exact zero (\\(ReLU(x) = max(0,x)\\)). Sparsity in weights is less common, as weights tend to be very small, but are often not exact zeros. Let's define sparsity Sparsity is a measure of how many elements in a tensor are exact zeros, relative to the tensor size. A tensor is considered sparse if \"most\" of its elements are zero. How much is \"most\" is not strictly defined, but when you see a sparse tensor you know it ;-) The \\(l_0\\)-\"norm\" function measures how many non-zero elements are in a tensor x : \\[\\lVert x \\rVert_0\\;=\\;|x_1|^0 + |x_2|^0 + ... + |x_n|^0 \\] In other words, an element contributes either a value of 1 or 0 to \\(l_0\\). Anything but an exact zero contributes a value of 1 - that's pretty cool. Sometimes it helps to think about density, the number of non-zero elements (NNZ), which is sparsity's complement: \\[ density = 1 - sparsity \\] You can use distiller.sparsity and distiller.density to query a PyTorch tensor's sparsity and density. What is weights pruning? Weights pruning, or model pruning, is a set of methods to increase the sparsity (amount of zero-valued elements in a tensor) of a network's weights. In general, the term 'parameters' refers to both weights and bias tensors of a model. Biases are rarely, if ever, pruned because there are very few bias elements compared to weights elements, and it is just not worth the trouble. Pruning requires a criterion for choosing which elements to prune - this is called the pruning criterion . The most common pruning criterion is the absolute value of each element: the element's absolute value is compared to some threshold value, and if it is below the threshold the element is set to zero (i.e. pruned). This is implemented by the distiller.MagnitudeParameterPruner class. The idea behind this method is that weights with small \\(l_1\\)-norms (absolute value) contribute little to the final result (low saliency), so they are less important and can be removed. A related idea motivating pruning is that models are over-parametrized and contain redundant logic and features. Therefore, some of these redundancies can be removed by setting their weights to zero. And yet another way to think of pruning is to phrase it as a search for a set of weights with as many zeros as possible, which still produces acceptable inference accuracies compared to the dense-model (non-pruned model). Another way to look at it is to imagine that because of the very high-dimensionality of the parameter space, the immediate space around the dense-model's solution likely contains some sparse solutions, and we want to find these sparse solutions. Pruning schedule The most straightforward way to prune is to take a trained model and prune it once; this is also called one-shot pruning . In Learning both Weights and Connections for Efficient Neural Networks Song Han et al. show that this is surprisingly effective, but also leaves a lot of potential sparsity untapped. The surprise is what they call the \"free lunch\" effect: \"reducing 2x the connections without losing accuracy even without retraining.\" However, they also note that when employing a pruning-followed-by-retraining regimen, they can achieve much better results (higher sparsity at no accuracy loss). This is called iterative pruning , and the retraining that follows pruning is often referred to as fine-tuning . 
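Schematically, iterative magnitude pruning looks like the sketch below (a toy illustration on a bare tensor; in a real session the pruner operates on the model's parameters and fine-tuning happens between the pruning steps):

```python
import torch

def magnitude_prune(weights: torch.Tensor, sparsity: float) -> torch.Tensor:
    # One pruning step: zero out the smallest-magnitude fraction of the weights.
    k = int(sparsity * weights.numel())
    if k == 0:
        return weights
    threshold = weights.abs().flatten().kthvalue(k).values
    return weights * (weights.abs() > threshold).to(weights.dtype)

# Iterative pruning: prune a little, let the model recover, prune some more.
w = torch.randn(256, 128)
for level in (0.3, 0.5, 0.7):  # rising sparsity targets
    w = magnitude_prune(w, level)
    # ... fine-tune here so the remaining weights adjust before the next step ...
```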
How the pruning criterion changes between iterations, how many iterations we perform and how often, and which tensors are pruned - this is collectively called the pruning schedule . We can think of iterative pruning as repeatedly learning which weights are important, removing the least important ones based on some importance criteria, and then retraining the model to let it \"recover\" from the pruning by adjusting the remaining weights. At each iteration, we prune more weights. The decision of when to stop pruning is also expressed in the schedule, and it depends on the pruning algorithm. For example, if we are trying to achieve a specific sparsity level, then we stop when the pruning achieves that level. And if we are pruning weights structures in order to reduce the required compute budget, then we stop the pruning when this compute reduction is achieved. Distiller supports expressing the pruning schedule as a YAML file (which is then executed by an instance of a PruningScheduler). Pruning granularity Pruning individual weight elements is called element-wise pruning , and it is also sometimes referred to as fine-grained pruning. Coarse-grained pruning - also referred to as structured pruning , group pruning , or block pruning - is pruning entire groups of elements which have some significance. Groups come in various shapes and sizes, but an easy-to-visualize example of group pruning is filter-pruning, in which entire filters are removed. Sensitivity analysis The hard part about inducing sparsity via pruning is determining what threshold, or sparsity level, to use for each layer's tensors. Sensitivity analysis is a method that tries to help us rank the tensors by their sensitivity to pruning. The idea is to set the pruning level (percentage) of a specific layer, and then to prune once, run an evaluation on the test dataset and record the accuracy score. We do this for all of the parameterized layers, and for each layer we examine several sparsity levels. This should teach us about the \"sensitivity\" of each of the layers to pruning. The evaluated model should be trained to maximum accuracy before running the analysis, because we aim to understand the behavior of the trained model's performance in relation to pruning of a specific weights tensor. Much as we can prune structures, we can also perform sensitivity analysis on structures. Distiller implements element-wise pruning sensitivity analysis using the \\(l_1\\)-norm of individual elements, and filter-wise pruning sensitivity analysis using the mean \\(l_1\\)-norm of filters. The authors of Pruning Filters for Efficient ConvNets describe how they do sensitivity analysis: \"To understand the sensitivity of each layer, we prune each layer independently and evaluate the resulting pruned network\u2019s accuracy on the validation set. Figure 2(b) shows that layers that maintain their accuracy as filters are pruned away correspond to layers with larger slopes in Figure 2(a). On the contrary, layers with relatively flat slopes are more sensitive to pruning. We empirically determine the number of filters to prune for each layer based on their sensitivity to pruning. For deep networks such as VGG-16 or ResNets, we observe that layers in the same stage (with the same feature map size) have a similar sensitivity to pruning. To avoid introducing layer-wise meta-parameters, we use the same pruning ratio for all layers in the same stage. 
For layers that are sensitive to pruning, we prune a smaller percentage of these layers or completely skip pruning them.\" The diagram below shows the results of running an element-wise sensitivity analysis on Alexnet, using Distiller's perform_sensitivity_analysis utility function. As reported by Song Han, and exhibited in the diagram, in Alexnet the feature-detecting layers (convolution layers) are more sensitive to pruning, and their sensitivity drops the deeper they are. The fully-connected layers are much less sensitive, which is great, because that's where most of the parameters are. References Song Han, Jeff Pool, John Tran, William J. Dally . Learning both Weights and Connections for Efficient Neural Networks , arXiv:1607.04381v2, 2015. Hao Li, Asim Kadav, Igor Durdanovic, Hanan Samet, Hans Peter Graf . Pruning Filters for Efficient ConvNets , arXiv:1608.08710v3, 2017.","title":"Pruning"},{"location":"pruning.html#pruning","text":"A common methodology for inducing sparsity in weights and activations is called pruning . Pruning is the application of a binary criterion to decide which weights to prune: weights which match the pruning criterion are assigned a value of zero. Pruned elements are \"trimmed\" from the model: we zero their values and also make sure they don't take part in the back-propagation process. We can prune weights, biases, and activations. Biases are few and their contribution to a layer's output is relatively large, so there is little incentive to prune them. We usually see sparse activations following a ReLU layer, because ReLU quenches negative activations to exact zero (\\(ReLU(x) = max(0,x)\\)). Sparsity in weights is less common, as weights tend to be very small, but are often not exact zeros.","title":"Pruning"},{"location":"pruning.html#lets-define-sparsity","text":"Sparsity is a measure of how many elements in a tensor are exact zeros, relative to the tensor size. A tensor is considered sparse if \"most\" of its elements are zero. How much is \"most\" is not strictly defined, but when you see a sparse tensor you know it ;-) The \\(l_0\\)-\"norm\" function measures how many zero-elements are in a tensor x : \\[\\lVert x \\rVert_0\\;=\\;|x_1|^0 + |x_2|^0 + ... + |x_n|^0 \\] In other words, an element contributes either a value of 1 or 0 to \\(l_0\\). Anything but an exact zero contributes a value of 1 - that's pretty cool. Sometimes it helps to think about density, the number of non-zero elements (NNZ) and sparsity's complement: \\[ density = 1 - sparsity \\] You can use distiller.sparsity and distiller.density to query a PyTorch tensor's sparsity and density.","title":"Let's define sparsity"},{"location":"pruning.html#what-is-weights-pruning","text":"Weights pruning, or model pruning, is a set of methods to increase the sparsity (amount of zero-valued elements in a tensor) of a network's weights. In general, the term 'parameters' refers to both weights and bias tensors of a model. Biases are rarely, if ever, pruned because there are very few bias elements compared to weights elements, and it is just not worth the trouble. Pruning requires a criterion for choosing which elements to prune - this is called the pruning criterion . The most common pruning criterion is the absolute value of each element: the element's absolute value is compared to some threshold value, and if it is below the threshold the element is set to zero (i.e. pruned). This is implemented by the distiller.MagnitudeParameterPruner class.
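As a hedged illustration of this magnitude criterion (the interface below is made up for the example and is not distiller.MagnitudeParameterPruner's actual API):

import torch

def magnitude_prune(param, threshold):
    # Zero every element whose absolute value falls below the threshold,
    # and keep the mask so pruned weights can be held at zero during fine-tuning.
    mask = (param.abs() > threshold).float()
    param.data.mul_(mask)
    return mask

conv_weight = torch.randn(64, 3, 3, 3)   # a toy 4-D weight tensor
mask = magnitude_prune(conv_weight, threshold=0.1)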
The idea behind this method is that weights with small \\(l_1\\)-norms (absolute value) contribute little to the final result (low saliency), so they are less important and can be removed. A related idea motivating pruning is that models are over-parametrized and contain redundant logic and features. Therefore, some of these redundancies can be removed by setting their weights to zero. Yet another way to think of pruning is to phrase it as a search for a set of weights with as many zeros as possible, which still produces acceptable inference accuracies compared to the dense (non-pruned) model. Another way to look at it is to imagine that, because of the very high dimensionality of the parameter space, the immediate space around the dense model's solution likely contains some sparse solutions, and we want to find these sparse solutions.","title":"What is weights pruning?"},{"location":"pruning.html#pruning-schedule","text":"The most straightforward way to prune is to take a trained model and prune it once; this is also called one-shot pruning . In Learning both Weights and Connections for Efficient Neural Networks , Song Han et al. show that this is surprisingly effective, but also leaves a lot of potential sparsity untapped. The surprise is what they call the \"free lunch\" effect: \"reducing 2x the connections without losing accuracy even without retraining.\" However, they also note that when employing a pruning-followed-by-retraining regimen, they can achieve much better results (higher sparsity at no accuracy loss). This is called iterative pruning , and the retraining that follows pruning is often referred to as fine-tuning . How the pruning criterion changes between iterations, how many iterations we perform and how often, and which tensors are pruned - this is collectively called the pruning schedule . We can think of iterative pruning as repeatedly learning which weights are important, removing the least important ones based on some importance criterion, and then retraining the model to let it \"recover\" from the pruning by adjusting the remaining weights. At each iteration, we prune more weights. The decision of when to stop pruning is also expressed in the schedule, and it depends on the pruning algorithm. For example, if we are trying to achieve a specific sparsity level, then we stop when the pruning achieves that level. And if we are pruning weight structures in order to reduce the required compute budget, then we stop the pruning when this compute reduction is achieved. Distiller supports expressing the pruning schedule as a YAML file (which is then executed by an instance of a PruningScheduler).","title":"Pruning schedule"},{"location":"pruning.html#pruning-granularity","text":"Pruning individual weight elements is called element-wise pruning , and it is also sometimes referred to as fine-grained pruning. Coarse-grained pruning - also referred to as structured pruning , group pruning , or block pruning - is pruning entire groups of elements which have some significance. Groups come in various shapes and sizes, but an easy-to-visualize example of group pruning is filter pruning, in which entire filters are removed.","title":"Pruning granularity"},{"location":"pruning.html#sensitivity-analysis","text":"The hard part about inducing sparsity via pruning is determining what threshold, or sparsity level, to use for each layer's tensors. Sensitivity analysis is a method that tries to help us rank the tensors by their sensitivity to pruning. The idea is to set the pruning level (percentage) of a specific layer, and then to prune once, run an evaluation on the test dataset and record the accuracy score. We do this for all of the parameterized layers, and for each layer we examine several sparsity levels.
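In code, one round of this analysis might look like the following sketch (model, test_accuracy and prune_level are hypothetical stand-ins, not Distiller's perform_sensitivity_analysis API):

import copy

def sensitivity_analysis(model, param_names, levels, test_accuracy):
    # Prune a fresh copy of the model once per (tensor, sparsity level) pair
    # and record the resulting test accuracy.
    results = {}
    for name in param_names:
        for level in levels:
            pruned = copy.deepcopy(model)
            prune_level(pruned, name, level)   # hypothetical one-shot pruning helper
            results[(name, level)] = test_accuracy(pruned)
    return results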
This should teach us about the \"sensitivity\" of each of the layers to pruning. The evaluated model should be trained to maximum accuracy before running the analysis, because we aim to understand the behavior of the trained model's performance in relation to pruning of a specific weights tensor. Much as we can prune structures, we can also perform sensitivity analysis on structures. Distiller implements element-wise pruning sensitivity analysis using the \\(l_1\\)-norm of individual elements; and filter-wise pruning sensitivity analysis using the mean \\(l_1\\)-norm of filters. The authors of Pruning Filters for Efficient ConvNets describe how they do sensitivity analysis: \"To understand the sensitivity of each layer, we prune each layer independently and evaluate the resulting pruned network\u2019s accuracy on the validation set. Figure 2(b) shows that layers that maintain their accuracy as filters are pruned away correspond to layers with larger slopes in Figure 2(a). On the contrary, layers with relatively flat slopes are more sensitive to pruning. We empirically determine the number of filters to prune for each layer based on their sensitivity to pruning. For deep networks such as VGG-16 or ResNets, we observe that layers in the same stage (with the same feature map size) have a similar sensitivity to pruning. To avoid introducing layer-wise meta-parameters, we use the same pruning ratio for all layers in the same stage. For layers that are sensitive to pruning, we prune a smaller percentage of these layers or completely skip pruning them.\" The diagram below shows the results of running an element-wise sensitivity analysis on Alexnet, using Distiller's perform_sensitivity_analysis utility function. As reported by Song Han, and exhibited in the diagram, in Alexnet the feature-detecting layers (convolution layers) are more sensitive to pruning, and their sensitivity drops the deeper they are. The fully-connected layers are much less sensitive, which is great, because that's where most of the parameters are.","title":"Sensitivity analysis"},{"location":"pruning.html#references","text":"Song Han, Jeff Pool, John Tran, William J. Dally . Learning both Weights and Connections for Efficient Neural Networks , arXiv:1607.04381v2, 2015. Hao Li, Asim Kadav, Igor Durdanovic, Hanan Samet, Hans Peter Graf . Pruning Filters for Efficient ConvNets , arXiv:1608.08710v3, 2017.","title":"References"},{"location":"quantization.html","text":"Quantization Quantization refers to the process of reducing the number of bits that represent a number. In the context of deep learning, the predominant numerical format used for research and for deployment has so far been 32-bit floating point, or FP32. However, the desire for reduced bandwidth and compute requirements of deep learning models has driven research into using lower-precision numerical formats. It has been extensively demonstrated that weights and activations can be represented using 8-bit integers (or INT8) without incurring significant loss in accuracy. The use of even lower bit-widths, such as 4/2/1-bits, is an active field of research that has also shown great progress. Note that this discussion is on quantization only in the context of more efficient inference.
Using lower-precision numerics for more efficient training is currently out of scope. Motivation: Overall Efficiency The more obvious benefit from quantization is significantly reduced bandwidth and storage . For instance, using INT8 for weights and activations consumes 4x less overall bandwidth compared to FP32. Additionally, integer compute is faster than floating point compute. It is also much more area and energy efficient : INT8 Operation Energy Saving vs FP32 Area Saving vs FP32 Add 30x 116x Multiply 18.5x 27x ( Dally, 2015 ) Note that very aggressive quantization can yield even more efficiency. If weights are binary (-1, 1) or ternary (-1, 0, 1 using 2-bits), then convolution and fully-connected layers can be computed with additions and subtractions only, removing multiplications completely. If activations are binary as well, then additions can also be removed, in favor of bitwise operations ( Rastegari et al., 2016 ). Integer vs. FP32 There are two main attributes when discussing a numerical format. The first is dynamic range , which refers to the range of representable numbers. The second one is how many values can be represented within the dynamic range, which in turn determines the precision / resolution of the format (the distance between two numbers). For all integer formats, the dynamic range is [-2^{n-1} .. 2^{n-1}-1] , where n is the number of bits. So for INT8 the range is [-128 .. 127] , and for INT4 it is [-8 .. 7] (we're limiting ourselves to signed integers for now). The number of representable values is 2^n . Contrast that with FP32, where the dynamic range is \\pm 3.4\\ x\\ 10^{38} , and approximately 4.2\\ x\\ 10^9 values can be represented. We can immediately see that FP32 is much more versatile , in that it is able to represent a wide range of distributions accurately. This is a nice property for deep learning models, where the distributions of weights and activations are usually very different (at least in dynamic range). In addition, the dynamic range can differ between layers in the model. In order to be able to represent these different distributions with an integer format, a scale factor is used to map the dynamic range of the tensor to the integer format range. But we are still left with a significantly lower number of representable values, that is - much lower resolution. Note that this scale factor is, in most cases, a floating-point number. Hence, even when using integer numerics, some floating-point computations remain. Courbariaux et al., 2014 scale using only shifts, eliminating the floating point operation. In GEMMLWOP , the FP32 scale factor is approximated using an integer or fixed-point multiplication followed by a shift operation. In many cases the effect of this approximation on accuracy is negligible. Avoiding Overflows Convolution and fully connected layers involve the storing of intermediate results in accumulators. Due to the limited dynamic range of integer formats, if we were to use the same bit-width for the weights and activations, and for the accumulators, we would likely overflow very quickly. Therefore, accumulators are usually implemented with higher bit-widths. The result of multiplying two n -bit integers is, at most, a 2n -bit number. In convolution layers, such multiplications are accumulated c\\cdot k^2 times, where c is the number of input channels and k is the kernel width (assuming a square kernel). Hence, to avoid overflowing, the accumulator should be 2n + M -bits wide, where M is at least log_2(c\\cdot k^2) .
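A quick worked example of this accumulator-width bound (the layer dimensions are illustrative):

import math

n, c, k = 8, 128, 3                    # INT8 operands, 128 input channels, 3x3 kernel
M = math.ceil(math.log2(c * k * k))    # log2(1152) is about 10.2, so M = 11
print(2 * n + M)                       # 27 bits - a 32-bit accumulator is safe here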
In many cases 32-bit accumulators are used; however, for INT4 and lower it might be possible to use fewer than 32 bits, depending on the expected use cases and layer widths. \"Conservative\" Quantization: INT8 In many cases, taking a model trained for FP32 and directly quantizing it to INT8, without any re-training, can result in a relatively low loss of accuracy (which may or may not be acceptable, depending on the use case). Some fine-tuning can further improve the accuracy ( Gysel et al., 2018 ). As mentioned above, a scale factor is used to adapt the dynamic range of the tensor at hand to that of the integer format. This scale factor needs to be calculated per-layer per-tensor. The simplest way is to map the min/max values of the float tensor to the min/max of the integer format. For weights and biases this is easy, as they are set once training is complete. For activations, the min/max float values can be obtained \"online\" during inference, or \"offline\". Offline means gathering activation statistics before deploying the model, either during training or by running a few \"calibration\" batches on the trained FP32 model. Based on these gathered statistics, the scale factors are calculated and are fixed once the model is deployed. This method has the risk of encountering values outside the previously observed ranges at runtime. These values will be clipped, which might lead to accuracy degradation. Online means calculating the min/max values for each tensor dynamically during runtime. In this method clipping cannot occur; however, the added computation resources required to calculate the min/max values at runtime might be prohibitive. It is important to note, however, that the full float range of an activations tensor usually includes elements which are statistically outliers. These values can be discarded by using a narrower min/max range, effectively allowing some clipping to occur in favor of increasing the resolution provided to the part of the distribution containing most of the information. A method which can yield nice results is to simply use an average of the observed min/max values instead of the actual values. Alternatively, statistical measures can be used to intelligently select where to clip the original range in order to preserve as much information as possible ( Migacz, 2017 ). Going further, Banner et al., 2018 have proposed a method for analytically computing the clipping value under certain conditions. Another possible optimization point is scale-factor scope . The most common way is to use a single scale-factor per-layer, but it is also possible to calculate a scale-factor per-channel. This can be beneficial if the weight distributions vary greatly between channels. When used to directly quantize a model without re-training, as described so far, this method is commonly referred to as post-training quantization . However, recent publications have shown that there are cases where post-training quantization to INT8 doesn't preserve accuracy ( Benoit et al., 2018 , Krishnamoorthi, 2018 ). Namely, smaller models such as MobileNet seem to not respond as well to post-training quantization, presumably due to their smaller representational capacity. In such cases, quantization-aware training is used. \"Aggressive\" Quantization: INT4 and Lower Naively quantizing an FP32 model to INT4 and lower usually incurs significant accuracy degradation. Many works have tried to mitigate this effect.
They usually employ one or more of the following concepts in order to improve model accuracy: Training / Re-Training : For INT4 and lower, training is required in order to obtain reasonable accuracy. The training loop is modified to take quantization into account. See details in the next section . Zhou S et al., 2016 have shown that bootstrapping the quantized model with trained FP32 weights leads to higher accuracy, as opposed to training from scratch. Other methods require a trained FP32 model, either as a starting point ( Zhou A et al., 2017 ), or as a teacher network in a knowledge distillation training setup (see here ). Replacing the activation function : The most common activation function in vision models is ReLU, which is unbounded. That is - its dynamic range is not limited for positive inputs. This is very problematic for INT4 and below due to the very limited range and resolution. Therefore, most methods replace ReLU with another function which is bounded. In some cases a clipping function with hard-coded values is used ( Zhou S et al., 2016 , Mishra et al., 2018 ). Another method learns the clipping value per layer, with better results ( Choi et al., 2018 ). Once the clipping value is set, the scale factor used for quantization is also set, and no further calibration steps are required (as opposed to the INT8 methods described above). Modifying network structure : Mishra et al., 2018 try to compensate for the loss of information due to quantization by using wider layers (more channels). Lin et al., 2017 proposed a binary quantization method in which a single FP32 convolution is replaced with multiple binary convolutions, each scaled to represent a different \"base\", covering a larger dynamic range overall. First and last layer : Many methods do not quantize the first and last layer of the model. It has been observed by Han et al., 2015 that the first convolutional layer is more sensitive to weights pruning, and some quantization works cite the same reason and show it empirically ( Zhou S et al., 2016 , Choi et al., 2018 ). Some works also note that these layers usually constitute a very small portion of the overall computation within the model, further reducing the motivation to quantize them ( Rastegari et al., 2016 ). Most methods keep the first and last layers at FP32. However, Choi et al., 2018 showed that \"conservative\" quantization of these layers, e.g. to INT8, does not reduce accuracy. Iterative quantization : Most methods quantize the entire model at once. Zhou A et al., 2017 employ an iterative method, which starts with a trained FP32 baseline, and quantizes only a portion of the model at a time, followed by several epochs of re-training to recover the accuracy loss from quantization. Mixed Weights and Activations Precision : It has been observed that activations are more sensitive to quantization than weights ( Zhou S et al., 2016 ). Hence it is not uncommon to see experiments with activations quantized to a higher precision compared to weights. Some works have focused solely on quantizing weights, keeping the activations at FP32 ( Li et al., 2016 , Zhu et al., 2016 ). Quantization-Aware Training As mentioned above, in order to minimize the loss of accuracy from \"aggressive\" quantization, many methods that target INT4 and lower (and in some cases INT8 as well) involve training the model in a way that considers the quantization. This means training with quantization of weights and activations \"baked\" into the training procedure.
The training graph usually looks like this: A full precision copy of the weights is maintained throughout the training process (\"weights_fp\" in the diagram). Its purpose is to accumulate the small changes from the gradients without loss of precision (Note that the quantization of the weights is an integral part of the training graph, meaning that we back-propagate through it as well). Once the model is trained, only the quantized weights are used for inference. In the diagram we show \"layer N\" as the conv + batch-norm + activation combination, but the same applies to fully-connected layers, element-wise operations, etc. During training, the operations within \"layer N\" can still run in full precision, with the \"quantize\" operations in the boundaries ensuring discrete-valued weights and activations. This is sometimes called \"simulated quantization\". Straight-Through Estimator An important question in this context is how to back-propagate through the quantization functions. These functions are discrete-valued, hence their derivative is 0 almost everywhere. So, using their gradients as-is would severely hinder the learning process. An approximation commonly used to overcome this issue is the \"straight-through estimator\" (STE) ( Hinton et al., 2012 , Bengio, 2013 ), which simply passes the gradient through these functions as-is. References William Dally . High-Performance Hardware for Machine Learning. Tutorial, NIPS, 2015 Mohammad Rastegari, Vicente Ordone, Joseph Redmon and Ali Farhadi . XNOR-Net: ImageNet Classification Using Binary Convolutional Neural Networks. ECCV, 2016 Matthieu Courbariaux, Yoshua Bengio and Jean-Pierre David . Training deep neural networks with low precision multiplications. arxiv:1412.7024 Philipp Gysel, Jon Pimentel, Mohammad Motamedi and Soheil Ghiasi . Ristretto: A Framework for Empirical Study of Resource-Efficient Inference in Convolutional Neural Networks. IEEE Transactions on Neural Networks and Learning Systems, 2018 Szymon Migacz . 8-bit Inference with TensorRT. GTC San Jose, 2017 Shuchang Zhou, Zekun Ni, Xinyu Zhou, He Wen, Yuxin Wu and Yuheng Zou . DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients. arxiv:1606.06160 Aojun Zhou, Anbang Yao, Yiwen Guo, Lin Xu and Yurong Chen . Incremental Network Quantization: Towards Lossless CNNs with Low-precision Weights. ICLR, 2017 Asit Mishra, Eriko Nurvitadhi, Jeffrey J Cook and Debbie Marr . WRPN: Wide Reduced-Precision Networks. ICLR, 2018 Jungwook Choi, Zhuo Wang, Swagath Venkataramani, Pierce I-Jen Chuang, Vijayalakshmi Srinivasan and Kailash Gopalakrishnan . PACT: Parameterized Clipping Activation for Quantized Neural Networks. arxiv:1805.06085 Xiaofan Lin, Cong Zhao and Wei Pan . Towards Accurate Binary Convolutional Neural Network. NIPS, 2017 Song Han, Jeff Pool, John Tran and William Dally . Learning both Weights and Connections for Efficient Neural Network. NIPS, 2015 Fengfu Li, Bo Zhang and Bin Liu . Ternary Weight Networks. arxiv:1605.04711 Chenzhuo Zhu, Song Han, Huizi Mao and William J. Dally . Trained Ternary Quantization. arxiv:1612.01064 Yoshua Bengio, Nicholas Leonard and Aaron Courville . Estimating or Propagating Gradients Through Stochastic Neurons for Conditional Computation. arxiv:1308.3432 Geoffrey Hinton, Nitish Srivastava, Kevin Swersky, Tijmen Tieleman and Abdelrahman Mohamed . Neural Networks for Machine Learning. 
Coursera, video lectures, 2012 Benoit Jacob, Skirmantas Kligys, Bo Chen, Menglong Zhu, Matthew Tang, Andrew Howard, Hartwig Adam and Dmitry Kalenichenko . Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference. ECCV, 2018 Raghuraman Krishnamoorthi . Quantizing deep convolutional networks for efficient inference: A whitepaper arxiv:1806.08342 Ron Banner, Yury Nahshan, Elad Hoffer and Daniel Soudry . ACIQ: Analytical Clipping for Integer Quantization of neural networks arxiv:1810.05723","title":"Quantization"},{"location":"quantization.html#quantization","text":"Quantization refers to the process of reducing the number of bits that represent a number. In the context of deep learning, the predominant numerical format used for research and for deployment has so far been 32-bit floating point, or FP32. However, the desire for reduced bandwidth and compute requirements of deep learning models has driven research into using lower-precision numerical formats. It has been extensively demonstrated that weights and activations can be represented using 8-bit integers (or INT8) without incurring significant loss in accuracy. The use of even lower bit-widths, such as 4/2/1-bits, is an active field of research that has also shown great progress. Note that this discussion is on quantization only in the context of more efficient inference. Using lower-precision numerics for more efficient training is currently out of scope.","title":"Quantization"},{"location":"quantization.html#motivation-overall-efficiency","text":"The more obvious benefit from quantization is significantly reduced bandwidth and storage . For instance, using INT8 for weights and activations consumes 4x less overall bandwidth compared to FP32. Additionally, integer compute is faster than floating point compute. It is also much more area and energy efficient : INT8 Operation Energy Saving vs FP32 Area Saving vs FP32 Add 30x 116x Multiply 18.5x 27x ( Dally, 2015 ) Note that very aggressive quantization can yield even more efficiency. If weights are binary (-1, 1) or ternary (-1, 0, 1 using 2-bits), then convolution and fully-connected layers can be computed with additions and subtractions only, removing multiplications completely. If activations are binary as well, then additions can also be removed, in favor of bitwise operations ( Rastegari et al., 2016 ).","title":"Motivation: Overall Efficiency"},{"location":"quantization.html#integer-vs-fp32","text":"There are two main attributes when discussing a numerical format. The first is dynamic range , which refers to the range of representable numbers. The second one is how many values can be represented within the dynamic range, which in turn determines the precision / resolution of the format (the distance between two numbers). For all integer formats, the dynamic range is [-2^{n-1} .. 2^{n-1}-1] , where n is the number of bits. So for INT8 the range is [-128 .. 127] , and for INT4 it is [-8 .. 7] (we're limiting ourselves to signed integers for now). The number of representable values is 2^n . Contrast that with FP32, where the dynamic range is \\pm 3.4\\ x\\ 10^{38} , and approximately 4.2\\ x\\ 10^9 values can be represented. We can immediately see that FP32 is much more versatile , in that it is able to represent a wide range of distributions accurately. This is a nice property for deep learning models, where the distributions of weights and activations are usually very different (at least in dynamic range).
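The integer-range arithmetic above is easy to make concrete (a couple of illustrative lines):

def signed_int_range(n):
    # Dynamic range of a signed n-bit integer format: [-2^(n-1) .. 2^(n-1)-1]
    return -(2 ** (n - 1)), 2 ** (n - 1) - 1

print(signed_int_range(8))   # (-128, 127) - INT8
print(signed_int_range(4))   # (-8, 7)     - INT4
print(2 ** 8)                # 256 representable INT8 values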
In addition, the dynamic range can differ between layers in the model. In order to be able to represent these different distributions with an integer format, a scale factor is used to map the dynamic range of the tensor to the integer format range. But we are still left with a significantly lower number of representable values, that is - much lower resolution. Note that this scale factor is, in most cases, a floating-point number. Hence, even when using integer numerics, some floating-point computations remain. Courbariaux et al., 2014 scale using only shifts, eliminating the floating point operation. In GEMMLWOP , the FP32 scale factor is approximated using an integer or fixed-point multiplication followed by a shift operation. In many cases the effect of this approximation on accuracy is negligible.","title":"Integer vs. FP32"},{"location":"quantization.html#avoiding-overflows","text":"Convolution and fully connected layers involve the storing of intermediate results in accumulators. Due to the limited dynamic range of integer formats, if we were to use the same bit-width for the weights and activations, and for the accumulators, we would likely overflow very quickly. Therefore, accumulators are usually implemented with higher bit-widths. The result of multiplying two n -bit integers is, at most, a 2n -bit number. In convolution layers, such multiplications are accumulated c\\cdot k^2 times, where c is the number of input channels and k is the kernel width (assuming a square kernel). Hence, to avoid overflowing, the accumulator should be 2n + M -bits wide, where M is at least log_2(c\\cdot k^2) . In many cases 32-bit accumulators are used; however, for INT4 and lower it might be possible to use fewer than 32 bits, depending on the expected use cases and layer widths.","title":"Avoiding Overflows"},{"location":"quantization.html#conservative-quantization-int8","text":"In many cases, taking a model trained for FP32 and directly quantizing it to INT8, without any re-training, can result in a relatively low loss of accuracy (which may or may not be acceptable, depending on the use case). Some fine-tuning can further improve the accuracy ( Gysel et al., 2018 ). As mentioned above, a scale factor is used to adapt the dynamic range of the tensor at hand to that of the integer format. This scale factor needs to be calculated per-layer per-tensor. The simplest way is to map the min/max values of the float tensor to the min/max of the integer format. For weights and biases this is easy, as they are set once training is complete. For activations, the min/max float values can be obtained \"online\" during inference, or \"offline\". Offline means gathering activation statistics before deploying the model, either during training or by running a few \"calibration\" batches on the trained FP32 model. Based on these gathered statistics, the scale factors are calculated and are fixed once the model is deployed. This method has the risk of encountering values outside the previously observed ranges at runtime. These values will be clipped, which might lead to accuracy degradation. Online means calculating the min/max values for each tensor dynamically during runtime. In this method clipping cannot occur; however, the added computation resources required to calculate the min/max values at runtime might be prohibitive. It is important to note, however, that the full float range of an activations tensor usually includes elements which are statistically outliers.
These values can be discarded by using a narrower min/max range, effectively allowing some clipping to occur in favor of increasing the resolution provided to the part of the distribution containing most of the information. A method which can yield nice results is to simply use an average of the observed min/max values instead of the actual values. Alternatively, statistical measures can be used to intelligently select where to clip the original range in order to preserve as much information as possible ( Migacz, 2017 ). Going further, Banner et al., 2018 have proposed a method for analytically computing the clipping value under certain conditions. Another possible optimization point is scale-factor scope . The most common way is to use a single scale-factor per-layer, but it is also possible to calculate a scale-factor per-channel. This can be beneficial if the weight distributions vary greatly between channels. When used to directly quantize a model without re-training, as described so far, this method is commonly referred to as post-training quantization . However, recent publications have shown that there are cases where post-training quantization to INT8 doesn't preserve accuracy ( Benoit et al., 2018 , Krishnamoorthi, 2018 ). Namely, smaller models such as MobileNet seem to not respond as well to post-training quantization, presumably due to their smaller representational capacity. In such cases, quantization-aware training is used.","title":"\"Conservative\" Quantization: INT8"},{"location":"quantization.html#aggressive-quantization-int4-and-lower","text":"Naively quantizing an FP32 model to INT4 and lower usually incurs significant accuracy degradation. Many works have tried to mitigate this effect. They usually employ one or more of the following concepts in order to improve model accuracy: Training / Re-Training : For INT4 and lower, training is required in order to obtain reasonable accuracy. The training loop is modified to take quantization into account. See details in the next section . Zhou S et al., 2016 have shown that bootstrapping the quantized model with trained FP32 weights leads to higher accuracy, as opposed to training from scratch. Other methods require a trained FP32 model, either as a starting point ( Zhou A et al., 2017 ), or as a teacher network in a knowledge distillation training setup (see here ). Replacing the activation function : The most common activation function in vision models is ReLU, which is unbounded. That is - its dynamic range is not limited for positive inputs. This is very problematic for INT4 and below due to the very limited range and resolution. Therefore, most methods replace ReLU with another function which is bounded. In some cases a clipping function with hard-coded values is used ( Zhou S et al., 2016 , Mishra et al., 2018 ). Another method learns the clipping value per layer, with better results ( Choi et al., 2018 ). Once the clipping value is set, the scale factor used for quantization is also set, and no further calibration steps are required (as opposed to the INT8 methods described above). Modifying network structure : Mishra et al., 2018 try to compensate for the loss of information due to quantization by using wider layers (more channels). Lin et al., 2017 proposed a binary quantization method in which a single FP32 convolution is replaced with multiple binary convolutions, each scaled to represent a different \"base\", covering a larger dynamic range overall.
First and last layer : Many methods do not quantize the first and last layer of the model. It has been observed by Han et al., 2015 that the first convolutional layer is more sensitive to weights pruning, and some quantization works cite the same reason and show it empirically ( Zhou S et al., 2016 , Choi et al., 2018 ). Some works also note that these layers usually constitute a very small portion of the overall computation within the model, further reducing the motivation to quantize them ( Rastegari et al., 2016 ). Most methods keep the first and last layers at FP32. However, Choi et al., 2018 showed that \"conservative\" quantization of these layers, e.g. to INT8, does not reduce accuracy. Iterative quantization : Most methods quantize the entire model at once. Zhou A et al., 2017 employ an iterative method, which starts with a trained FP32 baseline, and quantizes only a portion of the model at a time, followed by several epochs of re-training to recover the accuracy loss from quantization. Mixed Weights and Activations Precision : It has been observed that activations are more sensitive to quantization than weights ( Zhou S et al., 2016 ). Hence it is not uncommon to see experiments with activations quantized to a higher precision compared to weights. Some works have focused solely on quantizing weights, keeping the activations at FP32 ( Li et al., 2016 , Zhu et al., 2016 ).","title":"\"Aggressive\" Quantization: INT4 and Lower"},{"location":"quantization.html#quantization-aware-training","text":"As mentioned above, in order to minimize the loss of accuracy from \"aggressive\" quantization, many methods that target INT4 and lower (and in some cases INT8 as well) involve training the model in a way that considers the quantization. This means training with quantization of weights and activations \"baked\" into the training procedure. The training graph usually looks like this: A full precision copy of the weights is maintained throughout the training process (\"weights_fp\" in the diagram). Its purpose is to accumulate the small changes from the gradients without loss of precision (Note that the quantization of the weights is an integral part of the training graph, meaning that we back-propagate through it as well). Once the model is trained, only the quantized weights are used for inference. In the diagram we show \"layer N\" as the conv + batch-norm + activation combination, but the same applies to fully-connected layers, element-wise operations, etc. During training, the operations within \"layer N\" can still run in full precision, with the \"quantize\" operations in the boundaries ensuring discrete-valued weights and activations. This is sometimes called \"simulated quantization\".","title":"Quantization-Aware Training"},{"location":"quantization.html#straight-through-estimator","text":"An important question in this context is how to back-propagate through the quantization functions. These functions are discrete-valued, hence their derivative is 0 almost everywhere. So, using their gradients as-is would severely hinder the learning process. An approximation commonly used to overcome this issue is the \"straight-through estimator\" (STE) ( Hinton et al., 2012 , Bengio, 2013 ), which simply passes the gradient through these functions as-is.","title":"Straight-Through Estimator"},{"location":"quantization.html#references","text":"William Dally . High-Performance Hardware for Machine Learning. Tutorial, NIPS, 2015 Mohammad Rastegari, Vicente Ordone, Joseph Redmon and Ali Farhadi .
XNOR-Net: ImageNet Classification Using Binary Convolutional Neural Networks. ECCV, 2016 Matthieu Courbariaux, Yoshua Bengio and Jean-Pierre David . Training deep neural networks with low precision multiplications. arxiv:1412.7024 Philipp Gysel, Jon Pimentel, Mohammad Motamedi and Soheil Ghiasi . Ristretto: A Framework for Empirical Study of Resource-Efficient Inference in Convolutional Neural Networks. IEEE Transactions on Neural Networks and Learning Systems, 2018 Szymon Migacz . 8-bit Inference with TensorRT. GTC San Jose, 2017 Shuchang Zhou, Zekun Ni, Xinyu Zhou, He Wen, Yuxin Wu and Yuheng Zou . DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients. arxiv:1606.06160 Aojun Zhou, Anbang Yao, Yiwen Guo, Lin Xu and Yurong Chen . Incremental Network Quantization: Towards Lossless CNNs with Low-precision Weights. ICLR, 2017 Asit Mishra, Eriko Nurvitadhi, Jeffrey J Cook and Debbie Marr . WRPN: Wide Reduced-Precision Networks. ICLR, 2018 Jungwook Choi, Zhuo Wang, Swagath Venkataramani, Pierce I-Jen Chuang, Vijayalakshmi Srinivasan and Kailash Gopalakrishnan . PACT: Parameterized Clipping Activation for Quantized Neural Networks. arxiv:1805.06085 Xiaofan Lin, Cong Zhao and Wei Pan . Towards Accurate Binary Convolutional Neural Network. NIPS, 2017 Song Han, Jeff Pool, John Tran and William Dally . Learning both Weights and Connections for Efficient Neural Network. NIPS, 2015 Fengfu Li, Bo Zhang and Bin Liu . Ternary Weight Networks. arxiv:1605.04711 Chenzhuo Zhu, Song Han, Huizi Mao and William J. Dally . Trained Ternary Quantization. arxiv:1612.01064 Yoshua Bengio, Nicholas Leonard and Aaron Courville . Estimating or Propagating Gradients Through Stochastic Neurons for Conditional Computation. arxiv:1308.3432 Geoffrey Hinton, Nitish Srivastava, Kevin Swersky, Tijmen Tieleman and Abdelrahman Mohamed . Neural Networks for Machine Learning. Coursera, video lectures, 2012 Benoit Jacob, Skirmantas Kligys, Bo Chen, Menglong Zhu, Matthew Tang, Andrew Howard, Hartwig Adam and Dmitry Kalenichenko . Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference. ECCV, 2018 Raghuraman Krishnamoorthi . Quantizing deep convolutional networks for efficient inference: A whitepaper arxiv:1806.08342 Ron Banner, Yury Nahshan, Elad Hoffer and Daniel Soudry . ACIQ: Analytical Clipping for Integer Quantization of neural networks arxiv:1810.05723","title":"References"},{"location":"regularization.html","text":"Regularization In their book Deep Learning Ian Goodfellow et al. define regularization as \"any modification we make to a learning algorithm that is intended to reduce its generalization error, but not its training error.\" PyTorch's optimizers use \\(l_2\\) parameter regularization to limit the capacity of models (i.e. reduce the variance). In general, we can write this as: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R R(W) \\] And specifically, \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R \\lVert W \\rVert_2^2 \\] Where W is the collection of all weight elements in the network (i.e. this is model.parameters()), \\(loss(W;x;y)\\) is the total training loss, and \\(loss_D(W)\\) is the data loss (i.e. the error of the objective function, also called the loss function, or criterion in the Distiller sample image classifier compression application). optimizer = optim.SGD(model.parameters(), lr = 0.01, momentum=0.9, weight_decay=0.0001) criterion = nn.CrossEntropyLoss() ... 
for input, target in dataset: optimizer.zero_grad() output = model(input) loss = criterion(output, target) loss.backward() optimizer.step() \\(\\lambda_R\\) is a scalar called the regularization strength , and it balances the data error and the regularization error. In PyTorch, this is the weight_decay argument. \\(\\lVert W \\rVert_2^2\\) is the square of the \\(l_2\\)-norm of W, and as such it is a magnitude , or sizing, of the weights tensor. \\[ \\lVert W \\rVert_2^2 = \\sum_{l=1}^{L} \\sum_{i=1}^{n} |w_{l,i}|^2 \\;\\;where \\;n = torch.numel(w_l) \\] \\(L\\) is the number of layers in the network, and the notation above uses 1-based numbering to simplify the presentation. The qualitative differences between the \\(l_2\\)-norm and the squared \\(l_2\\)-norm are explained in Deep Learning . Sparsity and Regularization We mention regularization because there is an interesting interaction between regularization and some DNN sparsity-inducing methods. In Dense-Sparse-Dense (DSD) , Song Han et al. use pruning as a regularizer to improve a model's accuracy: \"Sparsity is a powerful form of regularization. Our intuition is that, once the network arrives at a local minimum given the sparsity constraint, relaxing the constraint gives the network more freedom to escape the saddle point and arrive at a higher-accuracy local minimum.\" Regularization can also be used to induce sparsity. To induce element-wise sparsity we can use the \\(l_1\\)-norm, \\(\\lVert W \\rVert_1\\). \\[ \\lVert W \\rVert_1 = l_1(W) = \\sum_{i=1}^{|W|} |w_i| \\] \\(l_2\\)-norm regularization reduces overfitting and improves a model's accuracy by shrinking large parameters, but it does not force these parameters to absolute zero. \\(l_1\\)-norm regularization sets some of the parameter elements to zero, therefore limiting the model's capacity while making the model simpler. This is sometimes referred to as feature selection and gives us another interpretation of pruning. One of Distiller's Jupyter notebooks explains how the \\(l_1\\)-norm regularizer induces sparsity, and how it interacts with \\(l_2\\)-norm regularization. If we configure weight_decay to zero and use \\(l_1\\)-norm regularization, then we have: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R \\lVert W \\rVert_1 \\] If we use both regularizers, we have: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_{R_2} \\lVert W \\rVert_2^2 + \\lambda_{R_1} \\lVert W \\rVert_1 \\] Class distiller.L1Regularizer implements \\(l_1\\)-norm regularization, and of course, you can also schedule regularization. l1_regularizer = distiller.L1Regularizer(model.parameters()) ... loss = criterion(output, target) + lambda_R * l1_regularizer() Group Regularization In Group Regularization, we penalize entire groups of parameter elements, instead of individual elements. Therefore, entire groups are either sparsified (i.e. all of the group elements have a value of zero) or not. The group structures have to be pre-defined. To the data loss, and the element-wise regularization (if any), we can add a group-wise regularization penalty. We represent all of the parameter groups in layer \\(l\\) as \\( W_l^{(G)} \\), and we add the penalty of all groups for all layers. It gets a bit messy, but not overly complicated: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R R(W) + \\lambda_g \\sum_{l=1}^{L} R_g(W_l^{(G)}) \\] Let's denote all of the weight elements in group \\(g\\) as \\(w^{(g)}\\).
\\[ R_g(w^{(g)}) = \\sum_{g=1}^{G} \\lVert w^{(g)} \\rVert_g = \\sum_{g=1}^{G} \\sum_{i=1}^{|w^{(g)}|} {(w_i^{(g)})}^2 \\] where \\(w^{(g)} \\in w^{(l)} \\) and \\( |w^{(g)}| \\) is the number of elements in \\( w^{(g)} \\). \\( \\lambda_g \\sum_{l=1}^{L} R_g(W_l^{(G)}) \\) is called the Group Lasso regularizer. Much as in \\(l_1\\)-norm regularization we sum the magnitudes of all tensor elements, in Group Lasso we sum the magnitudes of element structures (i.e. groups). Group Regularization is also called Block Regularization, Structured Regularization, or coarse-grained sparsity (remember that element-wise sparsity is sometimes referred to as fine-grained sparsity). Group sparsity exhibits regularity (i.e. its shape is regular), and therefore it can be beneficial to improve inference speed. Huizi-et-al-2017 provides an overview of some of the different groups: kernel, channel, filter, layers. Fiber structures such as matrix columns and rows, as well as various shaped structures (block sparsity), and even intra kernel strided sparsity can also be used. distiller.GroupLassoRegularizer currently implements most of these groups, and you can easily add new groups. References Ian Goodfellow and Yoshua Bengio and Aaron Courville . Deep Learning , arXiv:1607.04381v2, 2017. Song Han, Jeff Pool, Sharan Narang, Huizi Mao, Enhao Gong, Shijian Tang, Erich Elsen, Peter Vajda, Manohar Paluri, John Tran, Bryan Catanzaro, William J. Dally . DSD: Dense-Sparse-Dense Training for Deep Neural Networks , arXiv:1607.04381v2, 2017. Huizi Mao, Song Han, Jeff Pool, Wenshuo Li, Xingyu Liu, Yu Wang, William J. Dally . Exploring the Regularity of Sparse Structure in Convolutional Neural Networks , arXiv:1705.08922v3, 2017. Sajid Anwar, Kyuyeon Hwang, and Wonyong Sung . Structured pruning of deep convolutional neural networks , arXiv:1512.08571, 2015","title":"Regularization"},{"location":"regularization.html#regularization","text":"In their book Deep Learning Ian Goodfellow et al. define regularization as \"any modification we make to a learning algorithm that is intended to reduce its generalization error, but not its training error.\" PyTorch's optimizers use \\(l_2\\) parameter regularization to limit the capacity of models (i.e. reduce the variance). In general, we can write this as: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R R(W) \\] And specifically, \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R \\lVert W \\rVert_2^2 \\] Where W is the collection of all weight elements in the network (i.e. this is model.parameters()), \\(loss(W;x;y)\\) is the total training loss, and \\(loss_D(W)\\) is the data loss (i.e. the error of the objective function, also called the loss function, or criterion in the Distiller sample image classifier compression application). optimizer = optim.SGD(model.parameters(), lr = 0.01, momentum=0.9, weight_decay=0.0001) criterion = nn.CrossEntropyLoss() ... for input, target in dataset: optimizer.zero_grad() output = model(input) loss = criterion(output, target) loss.backward() optimizer.step() \\(\\lambda_R\\) is a scalar called the regularization strength , and it balances the data error and the regularization error. In PyTorch, this is the weight_decay argument. \\(\\lVert W \\rVert_2^2\\) is the square of the \\(l_2\\)-norm of W, and as such it is a magnitude , or sizing, of the weights tensor. 
\\[ \\lVert W \\rVert_2^2 = \\sum_{l=1}^{L} \\sum_{i=1}^{n} |w_{l,i}|^2 \\;\\;where \\;n = torch.numel(w_l) \\] \\(L\\) is the number of layers in the network; and the notation about used 1-based numbering to simplify the notation. The qualitative differences between the \\(l_2\\)-norm, and the squared \\(l_2\\)-norm is explained in Deep Learning .","title":"Regularization"},{"location":"regularization.html#sparsity-and-regularization","text":"We mention regularization because there is an interesting interaction between regularization and some DNN sparsity-inducing methods. In Dense-Sparse-Dense (DSD) , Song Han et al. use pruning as a regularizer to improve a model's accuracy: \"Sparsity is a powerful form of regularization. Our intuition is that, once the network arrives at a local minimum given the sparsity constraint, relaxing the constraint gives the network more freedom to escape the saddle point and arrive at a higher-accuracy local minimum.\" Regularization can also be used to induce sparsity. To induce element-wise sparsity we can use the \\(l_1\\)-norm, \\(\\lVert W \\rVert_1\\). \\[ \\lVert W \\rVert_1 = l_1(W) = \\sum_{i=1}^{|W|} |w_i| \\] \\(l_2\\)-norm regularization reduces overfitting and improves a model's accuracy by shrinking large parameters, but it does not force these parameters to absolute zero. \\(l_1\\)-norm regularization sets some of the parameter elements to zero, therefore limiting the model's capacity while making the model simpler. This is sometimes referred to as feature selection and gives us another interpretation of pruning. One of Distiller's Jupyter notebooks explains how the \\(l_1\\)-norm regularizer induces sparsity, and how it interacts with \\(l_2\\)-norm regularization. If we configure weight_decay to zero and use \\(l_1\\)-norm regularization, then we have: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R \\lVert W \\rVert_1 \\] If we use both regularizers, we have: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_{R_2} \\lVert W \\rVert_2^2 + \\lambda_{R_1} \\lVert W \\rVert_1 \\] Class distiller.L1Regularizer implements \\(l_1\\)-norm regularization, and of course, you can also schedule regularization. l1_regularizer = distiller.s(model.parameters()) ... loss = criterion(output, target) + lambda * l1_regularizer()","title":"Sparsity and Regularization"},{"location":"regularization.html#group-regularization","text":"In Group Regularization, we penalize entire groups of parameter elements, instead of individual elements. Therefore, entire groups are either sparsified (i.e. all of the group elements have a value of zero) or not. The group structures have to be pre-defined. To the data loss, and the element-wise regularization (if any), we can add group-wise regularization penalty. We represent all of the parameter groups in layer \\(l\\) as \\( W_l^{(G)} \\), and we add the penalty of all groups for all layers. It gets a bit messy, but not overly complicated: \\[ loss(W;x;y) = loss_D(W;x;y) + \\lambda_R R(W) + \\lambda_g \\sum_{l=1}^{L} R_g(W_l^{(G)}) \\] Let's denote all of the weight elements in group \\(g\\) as \\(w^{(g)}\\). \\[ R_g(w^{(g)}) = \\sum_{g=1}^{G} \\lVert w^{(g)} \\rVert_g = \\sum_{g=1}^{G} \\sum_{i=1}^{|w^{(g)}|} {(w_i^{(g)})}^2 \\] where \\(w^{(g)} \\in w^{(l)} \\) and \\( |w^{(g)}| \\) is the number of elements in \\( w^{(g)} \\). \\( \\lambda_g \\sum_{l=1}^{L} R_g(W_l^{(G)}) \\) is called the Group Lasso regularizer. 
Much as in \\(l_1\\)-norm regularization we sum the magnitudes of all tensor elements, in Group Lasso we sum the magnitudes of element structures (i.e. groups). Group Regularization is also called Block Regularization, Structured Regularization, or coarse-grained sparsity (remember that element-wise sparsity is sometimes referred to as fine-grained sparsity). Group sparsity exhibits regularity (i.e. its shape is regular), and therefore it can be beneficial to improve inference speed. Huizi-et-al-2017 provides an overview of some of the different groups: kernel, channel, filter, layers. Fiber structures such as matrix columns and rows, as well as various shaped structures (block sparsity), and even intra kernel strided sparsity can also be used. distiller.GroupLassoRegularizer currently implements most of these groups, and you can easily add new groups.","title":"Group Regularization"},{"location":"regularization.html#references","text":"Ian Goodfellow and Yoshua Bengio and Aaron Courville . Deep Learning , arXiv:1607.04381v2, 2017. Song Han, Jeff Pool, Sharan Narang, Huizi Mao, Enhao Gong, Shijian Tang, Erich Elsen, Peter Vajda, Manohar Paluri, John Tran, Bryan Catanzaro, William J. Dally . DSD: Dense-Sparse-Dense Training for Deep Neural Networks , arXiv:1607.04381v2, 2017. Huizi Mao, Song Han, Jeff Pool, Wenshuo Li, Xingyu Liu, Yu Wang, William J. Dally . Exploring the Regularity of Sparse Structure in Convolutional Neural Networks , arXiv:1705.08922v3, 2017. Sajid Anwar, Kyuyeon Hwang, and Wonyong Sung . Structured pruning of deep convolutional neural networks , arXiv:1512.08571, 2015","title":"References"},{"location":"schedule.html","text":"Compression scheduler In iterative pruning, we create some kind of pruning regimen that specifies how to prune, and what to prune at every stage of the pruning and training stages. This motivated the design of CompressionScheduler : it needed to be part of the training loop, and to be able to make and implement pruning, regularization and quantization decisions. We wanted to be able to change the particulars of the compression schedule, w/o touching the code, and settled on using YAML as a container for this specification. We found that when we make many experiments on the same code base, it is easier to maintain all of these experiments if we decouple the differences from the code-base. Therefore, we added to the scheduler support for learning-rate decay scheduling because, again, we wanted the freedom to change the LR-decay policy without changing code. High level overview Let's briefly discuss the main mechanisms and abstractions: A schedule specification is composed of a list of sections defining instances of Pruners, Regularizers, Quantizers, LR-scheduler and Policies. Pruners, Regularizers and Quantizers are very similar: They implement either a Pruning/Regularization/Quantization algorithm, respectively. An LR-scheduler specifies the LR-decay algorithm. These define the what part of the schedule. The Policies define the when part of the schedule: at which epoch to start applying the Pruner/Regularizer/Quantizer/LR-decay, the epoch to end, and how often to invoke the policy (frequency of application). A policy also defines the instance of Pruner/Regularizer/Quantizer/LR-decay it is managing. The CompressionScheduler is configured from a YAML file or from a dictionary, but you can also manually create Policies, Pruners, Regularizers and Quantizers from code. 
Syntax through example We'll use alexnet.schedule_agp.yaml to explain some of the YAML syntax for configuring Sensitivity Pruning of Alexnet. version: 1 pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 lr_schedulers: pruning_lr: class: ExponentialLR gamma: 0.9 policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2 - lr_scheduler: instance_name: pruning_lr starting_epoch: 24 ending_epoch: 200 frequency: 1 There is only one version of the YAML syntax, and the version number is not verified at the moment. However, to be future-proof it is probably better to let the YAML parser know that you are using version-1 syntax, in case there is ever a version 2. version: 1 In the pruners section, we define the instances of pruners we want the scheduler to instantiate and use. We define a single pruner instance, named my_pruner , of algorithm SensitivityPruner . We will refer to this instance in the Policies section. Then we list the sensitivity multipliers, \\(s\\), of each of the weight tensors. You may list as many Pruners as you want in this section, as long as each has a unique name. You can use several types of pruners in one schedule. pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 Next, we want to specify the learning-rate decay scheduling in the lr_schedulers section. We assign a name to this instance: pruning_lr . As in the pruners section, you may use any name, as long as all LR-schedulers have a unique name. At the moment, only one instance of LR-scheduler is allowed. The LR-scheduler must be a subclass of PyTorch's _LRScheduler . You can use any of the schedulers defined in torch.optim.lr_scheduler (see here ). In addition, we've implemented some additional schedulers in Distiller (see here ). The keyword arguments (kwargs) are passed directly to the LR-scheduler's constructor, so that as new LR-schedulers are added to torch.optim.lr_scheduler , they can be used without changing the application code. lr_schedulers: pruning_lr: class: ExponentialLR gamma: 0.9 Finally, we define the policies section, which specifies the actual scheduling. A Policy manages an instance of a Pruner , Regularizer , Quantizer , or LRScheduler , by naming the instance. In the example below, a PruningPolicy uses the pruner instance named my_pruner : it activates it at a frequency of 2 epochs (i.e. every other epoch), starting at epoch 0, and ending at epoch 38. policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2 - lr_scheduler: instance_name: pruning_lr starting_epoch: 24 ending_epoch: 200 frequency: 1 This is iterative pruning : Train Connectivity Prune Connections Retrain Weights Goto 2 It is described in Learning both Weights and Connections for Efficient Neural Networks : \"Our method prunes redundant connections using a three-step method. First, we train the network to learn which connections are important. Next, we prune the unimportant connections.
Finally, we retrain the network to fine tune the weights of the remaining connections...After an initial training phase, we remove all connections whose weight is lower than a threshold. This pruning converts a dense, fully-connected layer to a sparse layer. This first phase learns the topology of the networks \u2014 learning which connections are important and removing the unimportant connections. We then retrain the sparse network so the remaining connections can compensate for the connections that have been removed. The phases of pruning and retraining may be repeated iteratively to further reduce network complexity.\" Regularization You can also define and schedule regularization. L1 regularization Format (this is an informal specification, not a valid ABNF specification): regularizers: <REGULARIZER_NAME_STR>: class: L1Regularizer reg_regims: <PYTORCH_PARAM_NAME_STR>: <STRENGTH_FLOAT> ... <PYTORCH_PARAM_NAME_STR>: <STRENGTH_FLOAT> threshold_criteria: [Mean_Abs | Max] For example: version: 1 regularizers: my_L1_reg: class: L1Regularizer reg_regims: 'module.layer3.1.conv1.weight': 0.000002 'module.layer3.1.conv2.weight': 0.000002 'module.layer3.1.conv3.weight': 0.000002 'module.layer3.2.conv1.weight': 0.000002 threshold_criteria: Mean_Abs policies: - regularizer: instance_name: my_L1_reg starting_epoch: 0 ending_epoch: 60 frequency: 1 Group regularization Format (informal specification): regularizers: <REGULARIZER_NAME_STR>: class: GroupLassoRegularizer reg_regims: <PYTORCH_PARAM_NAME_STR>: [<STRENGTH_FLOAT>, <'2D' | '3D' | '4D' | 'Channels' | 'Cols' | 'Rows'>] <PYTORCH_PARAM_NAME_STR>: [<STRENGTH_FLOAT>, <'2D' | '3D' | '4D' | 'Channels' | 'Cols' | 'Rows'>] threshold_criteria: [Mean_Abs | Max] For example: version: 1 regularizers: my_filter_regularizer: class: GroupLassoRegularizer reg_regims: 'module.layer3.1.conv1.weight': [0.00005, '3D'] 'module.layer3.1.conv2.weight': [0.00005, '3D'] 'module.layer3.1.conv3.weight': [0.00005, '3D'] 'module.layer3.2.conv1.weight': [0.00005, '3D'] threshold_criteria: Mean_Abs policies: - regularizer: instance_name: my_filter_regularizer starting_epoch: 0 ending_epoch: 60 frequency: 1 Mixing it up You can mix pruning and regularization. version: 1 pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 regularizers: 2d_groups_regularizer: class: GroupLassoRegularizer reg_regims: 'features.module.0.weight': [0.000012, '2D'] 'features.module.3.weight': [0.000012, '2D'] 'features.module.6.weight': [0.000012, '2D'] 'features.module.8.weight': [0.000012, '2D'] 'features.module.10.weight': [0.000012, '2D'] lr_schedulers: # Learning rate decay scheduler pruning_lr: class: ExponentialLR gamma: 0.9 policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2 - regularizer: instance_name: '2d_groups_regularizer' starting_epoch: 0 ending_epoch: 38 frequency: 1 - lr_scheduler: instance_name: pruning_lr starting_epoch: 24 ending_epoch: 200 frequency: 1 Quantization-Aware Training As with pruners and regularizers, specifying a quantizer in the scheduler YAML follows the constructor arguments of the Quantizer class (see details here ). Note that only a single quantizer instance may be defined per YAML. 
Let's see an example: quantizers: dorefa_quantizer: class: DorefaQuantizer bits_activations: 8 bits_weights: 4 overrides: conv1: bits_weights: null bits_activations: null relu1: bits_weights: null bits_activations: null final_relu: bits_weights: null bits_activations: null fc: bits_weights: null bits_activations: null The specific quantization method we're instantiating here is DorefaQuantizer . Then we define the default bit-widths for activations and weights, in this case 8 and 4 bits, respectively. Then, we define the overrides mapping. In the example above, we choose not to quantize the first and last layer of the model. In the case of DorefaQuantizer , the weights are quantized as part of the convolution / FC layers, but the activations are quantized in separate layers, which replace the ReLU layers in the original model (remember - even though we replaced the ReLU modules with our own quantization modules, the names of the modules aren't changed). So, in all, we need to reference the first layer with parameters conv1 , the first activation layer relu1 , the last activation layer final_relu and the last layer with parameters fc . Specifying null means \"do not quantize\". Note that for quantizers, we reference names of modules, not names of parameters as we do for pruners and regularizers. Defining overrides for groups of layers using regular expressions Suppose we have a sub-module in our model named block1 , which contains multiple convolution layers which we would like to quantize to, say, 2 bits. The convolution layers are named conv1 , conv2 and so on. In that case we would define the following: overrides: 'block1\\.conv*': bits_weights: 2 bits_activations: null RegEx Note : Remember that the dot ( . ) is a meta-character (i.e. a reserved character) in regular expressions. So, to match the actual dot characters which separate sub-modules in PyTorch module names, we need to escape it: \\. Overlapping patterns are also possible, which allows defining an override for a group of layers while also \"singling-out\" specific layers for different overrides. For example, let's take the last example and configure a different override for block1.conv1 : overrides: 'block1\\.conv1': bits_weights: 4 bits_activations: null 'block1\\.conv*': bits_weights: 2 bits_activations: null Important Note : The patterns are evaluated eagerly - first match wins. So, to properly quantize a model using \"broad\" patterns and more \"specific\" patterns as just shown, make sure the specific pattern is listed before the broad one. The QuantizationPolicy , which controls the quantization procedure during training, is actually quite simplistic. All it does is call the prepare_model() function of the Quantizer when it's initialized, followed by the first call to quantize_params() . Then, at the end of each epoch, after the float copy of the weights has been updated, it calls the quantize_params() function again. policies: - quantizer: instance_name: dorefa_quantizer starting_epoch: 0 ending_epoch: 200 frequency: 1 Important Note : As mentioned here , since the quantizer modifies the model's parameters (assuming training with quantization in the loop is used), the call to prepare_model() must be performed before the optimizer is created. Therefore, currently, the starting epoch for a quantization policy must be 0, otherwise the quantization process will not work as expected. 
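To summarize the mechanics just described, the policy can be sketched in a few lines. The class below is an illustrative pseudo-implementation of that behavior, not Distiller's actual QuantizationPolicy class:

class SketchQuantizationPolicy:
    # Illustrative pseudo-implementation of the quantization policy
    # behavior described above (not Distiller's actual class).
    def __init__(self, quantizer):
        self.quantizer = quantizer
        self.quantizer.prepare_model()    # transform the model once, up front
        self.quantizer.quantize_params()  # first quantization of the weights

    def on_epoch_end(self, epoch):
        # The optimizer updated the float copy of the weights during the epoch;
        # re-quantize so the quantized weights track the updated float copy.
        self.quantizer.quantize_params()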
If one wishes to do a \"warm-startup\" (or \"boot-strapping\"), training for a few epochs with full precision and only then starting to quantize, the only way to do this right now is to execute a separate run to generate the boot-strapped weights, and then execute a second run which resumes from the checkpoint with the boot-strapped weights. Post-Training Quantization Post-training quantization differs from the other techniques described here. Since it is not executed during training, it does not require any Policies or a Scheduler. Currently, the only method implemented for post-training quantization is range-based linear quantization . Quantizing a model using this method requires adding 2 lines of code: quantizer = distiller.quantization.PostTrainLinearQuantizer(model, <quantizer arguments>) quantizer.prepare_model() # Execute evaluation on model as usual See the documentation for PostTrainLinearQuantizer in range_linear.py for details on the available arguments. In addition to directly instantiating the quantizer with arguments, it can also be configured from a YAML file. The syntax for the YAML file is exactly the same as seen in the quantization-aware training section above. Not surprisingly, the class defined must be PostTrainLinearQuantizer , and any other components or policies defined in the YAML file are ignored. We'll see how to create the quantizer in this manner below. If more configurability is needed, a helper function can be used that will add a set of command-line arguments to configure the quantizer: parser = argparse.ArgumentParser() distiller.quantization.add_post_train_quant_args(parser) args = parser.parse_args() These are the available command line arguments: Arguments controlling quantization at evaluation time (\"post-training quantization\"): --quantize-eval, --qe Apply linear quantization to model before evaluation. Applicable only if --evaluate is also set --qe-calibration PORTION_OF_TEST_SET Run the model in evaluation mode on the specified portion of the test dataset and collect statistics. Ignores all other 'qe--*' arguments --qe-mode QE_MODE, --qem QE_MODE Linear quantization mode. Choices: sym | asym_s | asym_u --qe-bits-acts NUM_BITS, --qeba NUM_BITS Number of bits for quantization of activations --qe-bits-wts NUM_BITS, --qebw NUM_BITS Number of bits for quantization of weights --qe-bits-accum NUM_BITS Number of bits for quantization of the accumulator --qe-clip-acts, --qeca Enable clipping of activations using min/max values averaging over batch --qe-no-clip-layers LAYER_NAME [LAYER_NAME ...], --qencl LAYER_NAME [LAYER_NAME ...] List of layer names for which not to clip activations. Applicable only if --qe-clip-acts is also set --qe-per-channel, --qepc Enable per-channel quantization of weights (per output channel) --qe-stats-file PATH Path to YAML file with calibration stats. If not given, dynamic quantization will be run (Note that not all layer types are supported for dynamic quantization) --qe-config-file PATH Path to YAML file containing configuration for PostTrainLinearQuantizer (if present, all other --qe* arguments are ignored) (Note that --quantize-eval and --qe-calibration are mutually exclusive.) 
When using these command line arguments, the quantizer can be invoked as follows: if args.quantize_eval: if args.qe_config_file: quantizer = distiller.config_component_from_file_by_class(model, args.qe_config_file, 'PostTrainLinearQuantizer') else: quantizer = quantization.PostTrainLinearQuantizer(model, args.qe_bits_acts, args.qe_bits_wts, args.qe_bits_accum, None, args.qe_mode, args.qe_clip_acts, args.qe_no_clip_layers, args.qe_per_channel, args.qe_stats_file) quantizer.prepare_model() # Execute evaluation on model as usual Note that the command-line arguments don't expose the overrides parameter of the quantizer, which allows fine-grained control over how each layer is quantized. To utilize this functionality, configure with a YAML file. To see integration of these command line arguments in use, see the image classification example . For example invocations of post-training quantization see here . Collecting Statistics for Quantization To generate statistics that can be used for static quantization of activations, do the following (shown here assuming the command line argument --qe-calibration shown above is used, which specifies the portion of the test set to use for statistics generation): if args.qe_calibration: distiller.utils.assign_layer_fq_names(model) msglogger.info(\"Generating quantization calibration stats based on {0} users\".format(args.qe_calibration)) collector = distiller.data_loggers.QuantCalibrationStatsCollector(model) with collector_context(collector): # Here call your model evaluation function, making sure to execute only # the portion of the dataset specified by the qe_calibration argument yaml_path = 'some/dir/quantization_stats.yaml' collector.save(yaml_path) The generated YAML stats file can then be provided using the --qe-stats-file argument. An example of a generated stats file can be found here . Pruning Fine-Control Sometimes the default pruning process doesn't satisfy our needs and we require finer control over the pruning process (e.g. over masking, gradient handling, and weight updates). Below we will explain the math and nuances of fine-control configuration. Setting up the problem We represent the weights of a DNN as the set \\theta=\\left\\{\\theta_{l} : 0 \\leq l \\leq L\\right\\} where \\theta_{l} represents the parameters tensor (weights and biases) of layer l in a network having L layers. Usually we do not prune biases because of their small size and relative importance. Therefore, we will consider only the network weights (also known as network connections): W=\\left\\{W_{l} : 0 \\leq l \\leq L\\right\\} We wish to optimize some objective (e.g. minimize the energy required to execute a network in inference mode) under some performance constraint (e.g. accuracy), and we do this by maximizing the sparsity of the network weights (sometimes under some chosen sparsity-pattern constraint). We formalize pruning as a 3-step action: Generating a mask - in which we define a sparsity-inducing function per layer, P_l , such that M_{l}=P_{l}\\left(W_{l}\\right) . M_{l} is a binary matrix which is used to mask W_{l} . P_l is implemented by subclasses of distiller.pruner . Masking the weights using the Hadamard product: \\widehat{W}_{l}=M_{l} \\circ W_{l} Updating the weights (performed by the optimizer). By default, we compute the data-loss using the masked weights, and calculate the gradient of this loss with respect to the masked weights. 
We update the weights by making a small adjustment to the masked weights : W_{l} \\leftarrow \\widehat{W}_{l}-\\alpha \\frac{\\partial Loss(\\widehat{W}_{l})}{\\partial \\widehat{W}_{l}} We show below how to change this default behavior. We also provide a more exact description of the weights update when using PyTorch's SGD optimizer. The pruning regimen follows a pruning-rate schedule which, analogously to learning-rate annealing, changes the pruning rate according to a configurable strategy over time. The schedule allows us to configure new masks either once at the beginning of epochs (most common), or at the beginning of mini-batches (for finer control). In the former, the masks are calculated and assigned to \\{M_{l}\\} once, at the beginning of epochs (the specific epochs are determined by the schedule). The pseudo-code below shows the typical training-loop with CompressionScheduler callbacks in bold font, and the three pruning actions described above in burgundy. Figure 1: Pruning algorithm pseudo-code We can perform masking by adding the masking operation to the network graph. We call this in-graph masking , as depicted in the bottom of Figure 2. In the forward-pass we apply element-wise multiplication of the weights W_{l} and the mask M_{l} to obtain the masked weights \\widehat{W}_{l} , which we apply to the Convolution operation. In the backward-pass we mask \\frac{\\partial L}{\\partial \\widehat{W}} to obtain \\frac{\\partial L}{\\partial W} with which we update W_{l} . Figure 2: Forward and backward weight masking In Distiller we perform out-of-graph masking , in which we directly set the value of \\widehat{W}_{l} by applying a mask on W_{l} . In the backward-pass we make sure that the weights are updated by the proper gradients. In the common pruning use-case we want the optimizer to update only the unmasked weights, but we can configure this behavior using the fine-control arguments, as explained below. Fine-Control For finer control over the behavior of the pruning process, Distiller provides a set of PruningPolicy arguments in the args field, as in the sample below. pruners: random_filter_pruner: class: BernoulliFilterPruner desired_sparsity: 0.1 group_type: Filters weights: [module.conv1.weight] policies: - pruner: instance_name: random_filter_pruner args: mini_batch_pruning_frequency: 16 discard_masks_at_minibatch_end: True use_double_copies: True mask_on_forward_only: True mask_gradients: True starting_epoch: 15 ending_epoch: 180 frequency: 1 Controls mini_batch_pruning_frequency (default: 0): controls pruning scheduling at the mini-batch granularity. Every mini_batch_pruning_frequency training steps (i.e. mini-batches) we configure a new mask. In between mask updates, we mask mini-batches with the current mask. discard_masks_at_minibatch_end (default: False): discards the pruning mask at the end of the mini-batch. In the example YAML above, a new mask is computed once every 16 mini-batches, applied in one forward-pass, and then discarded. In the next 15 mini-batches the mask is null, so we do not mask. mask_gradients (default: False): mask the weight gradients after performing the backward-pass, and before invoking the optimizer. One way to mask the gradients in PyTorch is to register a backward hook on the weight tensors we want to mask, and alter the gradients there. We do this by setting mask_gradients: True , as in the sample YAML above. 
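A minimal sketch of this mechanism in plain PyTorch (for illustration only; this is not Distiller's actual implementation):

import torch

# Sketch: zero the gradients of pruned weights via a backward hook.
weight = torch.nn.Parameter(torch.randn(4, 4))
mask = (torch.rand(4, 4) > 0.5).float()  # a fixed binary mask, for illustration

# The hook runs when the gradient w.r.t. 'weight' is computed;
# the tensor it returns replaces the original gradient.
weight.register_hook(lambda grad: grad * mask)

loss = ((weight * mask) ** 2).sum()  # forward-pass with masked weights
loss.backward()
print(weight.grad)  # gradient entries of pruned weights are zero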
This is sufficient if our weights optimization uses plain-vanilla SGD, because the update maintains the sparsity of the weights: \\widehat{W}_{l} is sparse by definition, and the gradients are sparse because we mask them. W_{l} \\leftarrow \\widehat{W}_{l}-\\alpha \\frac{\\partial Loss(\\widehat{W}_{l})}{\\partial \\widehat{W}_{l}} But this is not always the case. For example, PyTorch\u2019s SGD optimizer with weight-decay ( \\lambda ), momentum ( \\rho ) and learning-rate ( \\alpha ) has the optimization logic listed below: 1. \\Delta p=\\frac{\\partial Loss\\left(\\widehat{W}_{l}^{i}\\right)}{\\partial \\widehat{W}_{l}^{i}}+\\lambda \\widehat{W}_{l}^{i} 2. v_{i}=\\begin{cases} \\Delta p & \\text{if } i=0 \\cr \\rho v_{i-1}+(1-dampening)\\Delta p & \\text{if } i>0 \\end{cases} 3. W_{l}^{i+1} = \\widehat{W}_{l}^{i}-\\alpha v_{i} Let\u2019s look at the weight optimization update at some arbitrary step (i.e. mini-batch) k . We want to show that masking the weights and gradients ( W_{l}^{i=k} and \\frac{\\partial Loss\\left(\\widehat{W}_{l}^{i=k}\\right)}{\\partial \\widehat{W}_{l}^{i=k}} ) is not sufficient to guarantee that W_{l}^{i=k+1} is sparse. This is easy to do: the momentum buffer v_i is not necessarily sparse, because it accumulates gradient history from steps before the current mask was applied, and therefore W_{l}^{i+1} is not necessarily sparse. Masking the weights in the forward-pass, and gradients in the backward-pass, is not sufficient to maintain the sparsity of the weights! This is an important insight, and it means that na\u00efve in-graph masking is also not sufficient to guarantee sparsity of the updated weights. use_double_copies (default: False): If you want to compute the gradients using the masked weights and also to update the unmasked weights (instead of updating the masked weights, as is done by default), set use_double_copies = True . This changes step (3) to: 3. W_{l}^{i+1} = W_{l}^{i}-\\alpha \\Delta p mask_on_forward_only (default: False): when set to False the weights will also be masked after the Optimizer is done updating the weights, to remove any updates contributed by the masked gradients. If we want to guarantee the sparsity of the updated weights, we must explicitly mask the weights after step (3) above: 4. {W}_{l}^{i+1} \\leftarrow M_{l}^{i} \\circ {W}_{l}^{i+1} This argument defaults to False , but you can skip step (4) by setting mask_on_forward_only = True . Finally, note that mask_gradients: True and mask_on_forward_only: False are two alternative mechanisms for removing the updates contributed by the masked gradients; if you are masking in the backward-pass, choose one of them, but do not use both. Knowledge Distillation Knowledge distillation (see here ) is also implemented as a Policy , which should be added to the scheduler. However, with the current implementation, it cannot be defined within the YAML file like the rest of the policies described above. 
To make the integration of this method into applications a bit easier, a helper function can be used that will add a set of command-line arguments related to knowledge distillation: import argparse import distiller parser = argparse.ArgumentParser() distiller.knowledge_distillation.add_distillation_args(parser) (The add_distillation_args function accepts some optional arguments, see its implementation at distiller/knowledge_distillation.py for details) These are the command line arguments exposed by this function: Knowledge Distillation Training Arguments: --kd-teacher ARCH Model architecture for teacher model --kd-pretrained Use pre-trained model for teacher --kd-resume PATH Path to checkpoint from which to load teacher weights --kd-temperature TEMP, --kd-temp TEMP Knowledge distillation softmax temperature --kd-distill-wt WEIGHT, --kd-dw WEIGHT Weight for distillation loss (student vs. teacher soft targets) --kd-student-wt WEIGHT, --kd-sw WEIGHT Weight for student vs. labels loss --kd-teacher-wt WEIGHT, --kd-tw WEIGHT Weight for teacher vs. labels loss --kd-start-epoch EPOCH_NUM Epoch from which to enable distillation Once arguments have been parsed, some initialization code is required, similar to the following: # Assuming: # \"args\" variable holds command line arguments # \"model\" variable holds the model we're going to train, that is - the student model # \"compression_scheduler\" variable holds a CompressionScheduler instance args.kd_policy = None if args.kd_teacher: # Create teacher model - replace this with your model creation code teacher = create_model(args.kd_pretrained, args.dataset, args.kd_teacher, device_ids=args.gpus) if args.kd_resume: teacher, _, _ = apputils.load_checkpoint(teacher, chkpt_file=args.kd_resume) # Create policy and add to scheduler dlw = distiller.DistillationLossWeights(args.kd_distill_wt, args.kd_student_wt, args.kd_teacher_wt) args.kd_policy = distiller.KnowledgeDistillationPolicy(model, teacher, args.kd_temp, dlw) compression_scheduler.add_policy(args.kd_policy, starting_epoch=args.kd_start_epoch, ending_epoch=args.epochs, frequency=1) Finally, during the training loop, we need to perform forward propagation through the teacher model as well. The KnowledgeDistillationPolicy class keeps a reference to both the student and teacher models, and exposes a forward function that performs forward propagation on both of them. Since this is not one of the standard policy callbacks, we need to call this function manually from our training loop, as follows: if args.kd_policy is None: # Revert to a \"normal\" forward-prop call if no knowledge distillation policy is present output = model(input_var) else: output = args.kd_policy.forward(input_var) To see this integration in action, take a look at the image classification sample at examples/classifier_compression/compress_classifier.py .","title":"Compression Scheduling"},{"location":"schedule.html#compression-scheduler","text":"In iterative pruning, we create some kind of pruning regimen that specifies how to prune, and what to prune, at every stage of the training process. This motivated the design of CompressionScheduler : it needed to be part of the training loop, and to be able to make and implement pruning, regularization and quantization decisions. We wanted to be able to change the particulars of the compression schedule without touching the code, and settled on using YAML as a container for this specification. 
We found that when we run many experiments on the same code base, it is easier to maintain all of these experiments if we decouple their differences from the code-base. Therefore, we added support for learning-rate decay scheduling to the scheduler because, again, we wanted the freedom to change the LR-decay policy without changing code.","title":"Compression scheduler"},{"location":"schedule.html#high-level-overview","text":"Let's briefly discuss the main mechanisms and abstractions: A schedule specification is composed of a list of sections defining instances of Pruners, Regularizers, Quantizers, LR-scheduler and Policies. Pruners, Regularizers and Quantizers are very similar: they each implement a Pruning, Regularization or Quantization algorithm, respectively. An LR-scheduler specifies the LR-decay algorithm. These define the what part of the schedule. The Policies define the when part of the schedule: at which epoch to start applying the Pruner/Regularizer/Quantizer/LR-decay, the epoch to end, and how often to invoke the policy (frequency of application). A policy also defines the instance of Pruner/Regularizer/Quantizer/LR-decay it is managing. The CompressionScheduler is configured from a YAML file or from a dictionary, but you can also manually create Policies, Pruners, Regularizers and Quantizers from code.","title":"High level overview"},{"location":"schedule.html#syntax-through-example","text":"We'll use alexnet.schedule_agp.yaml to explain some of the YAML syntax for configuring Sensitivity Pruning of Alexnet. version: 1 pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 lr_schedulers: pruning_lr: class: ExponentialLR gamma: 0.9 policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2 - lr_scheduler: instance_name: pruning_lr starting_epoch: 24 ending_epoch: 200 frequency: 1 There is only one version of the YAML syntax, and the version number is not verified at the moment. However, to be future-proof it is probably better to let the YAML parser know that you are using version-1 syntax, in case there is ever a version 2. version: 1 In the pruners section, we define the instances of pruners we want the scheduler to instantiate and use. We define a single pruner instance, named my_pruner , of algorithm SensitivityPruner . We will refer to this instance in the Policies section. Then we list the sensitivity multipliers, \\(s\\), of each of the weight tensors. You may list as many Pruners as you want in this section, as long as each has a unique name. You can use several types of pruners in one schedule. pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 Next, we want to specify the learning-rate decay scheduling in the lr_schedulers section. We assign a name to this instance: pruning_lr . As in the pruners section, you may use any name, as long as all LR-schedulers have a unique name. At the moment, only one instance of LR-scheduler is allowed. The LR-scheduler must be a subclass of PyTorch's _LRScheduler . 
You can use any of the schedulers defined in torch.optim.lr_scheduler (see here ). In addition, we've implemented some additional schedulers in Distiller (see here ). The keyword arguments (kwargs) are passed directly to the LR-scheduler's constructor, so that as new LR-schedulers are added to torch.optim.lr_scheduler , they can be used without changing the application code. lr_schedulers: pruning_lr: class: ExponentialLR gamma: 0.9 Finally, we define the policies section which defines the actual scheduling. A Policy manages an instance of a Pruner , Regularizer , Quantizer , or LRScheduler , by naming the instance. In the example below, a PruningPolicy uses the pruner instance named my_pruner : it activates it at a frequency of 2 epochs (i.e. every other epoch), starting at epoch 0, and ending at epoch 38. policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2 - lr_scheduler: instance_name: pruning_lr starting_epoch: 24 ending_epoch: 200 frequency: 1 This is iterative pruning : Train Connectivity Prune Connections Retrain Weights Goto 2 It is described in Learning both Weights and Connections for Efficient Neural Networks : \"Our method prunes redundant connections using a three-step method. First, we train the network to learn which connections are important. Next, we prune the unimportant connections. Finally, we retrain the network to fine tune the weights of the remaining connections...After an initial training phase, we remove all connections whose weight is lower than a threshold. This pruning converts a dense, fully-connected layer to a sparse layer. This first phase learns the topology of the networks \u2014 learning which connections are important and removing the unimportant connections. We then retrain the sparse network so the remaining connections can compensate for the connections that have been removed. The phases of pruning and retraining may be repeated iteratively to further reduce network complexity.\"","title":"Syntax through example"},{"location":"schedule.html#regularization","text":"You can also define and schedule regularization.","title":"Regularization"},{"location":"schedule.html#l1-regularization","text":"Format (this is an informal specification, not a valid ABNF specification): regularizers: <REGULARIZER_NAME_STR>: class: L1Regularizer reg_regims: <PYTORCH_PARAM_NAME_STR>: <STRENGTH_FLOAT> ... 
<PYTORCH_PARAM_NAME_STR>: <STRENGTH_FLOAT> threshold_criteria: [Mean_Abs | Max] For example: version: 1 regularizers: my_L1_reg: class: L1Regularizer reg_regims: 'module.layer3.1.conv1.weight': 0.000002 'module.layer3.1.conv2.weight': 0.000002 'module.layer3.1.conv3.weight': 0.000002 'module.layer3.2.conv1.weight': 0.000002 threshold_criteria: Mean_Abs policies: - regularizer: instance_name: my_L1_reg starting_epoch: 0 ending_epoch: 60 frequency: 1","title":"L1 regularization"},{"location":"schedule.html#group-regularization","text":"Format (informal specification): regularizers: <REGULARIZER_NAME_STR>: class: GroupLassoRegularizer reg_regims: <PYTORCH_PARAM_NAME_STR>: [<STRENGTH_FLOAT>, <'2D' | '3D' | '4D' | 'Channels' | 'Cols' | 'Rows'>] <PYTORCH_PARAM_NAME_STR>: [<STRENGTH_FLOAT>, <'2D' | '3D' | '4D' | 'Channels' | 'Cols' | 'Rows'>] threshold_criteria: [Mean_Abs | Max] For example: version: 1 regularizers: my_filter_regularizer: class: GroupLassoRegularizer reg_regims: 'module.layer3.1.conv1.weight': [0.00005, '3D'] 'module.layer3.1.conv2.weight': [0.00005, '3D'] 'module.layer3.1.conv3.weight': [0.00005, '3D'] 'module.layer3.2.conv1.weight': [0.00005, '3D'] threshold_criteria: Mean_Abs policies: - regularizer: instance_name: my_filter_regularizer starting_epoch: 0 ending_epoch: 60 frequency: 1","title":"Group regularization"},{"location":"schedule.html#mixing-it-up","text":"You can mix pruning and regularization. version: 1 pruners: my_pruner: class: 'SensitivityPruner' sensitivities: 'features.module.0.weight': 0.25 'features.module.3.weight': 0.35 'features.module.6.weight': 0.40 'features.module.8.weight': 0.45 'features.module.10.weight': 0.55 'classifier.1.weight': 0.875 'classifier.4.weight': 0.875 'classifier.6.weight': 0.625 regularizers: 2d_groups_regularizer: class: GroupLassoRegularizer reg_regims: 'features.module.0.weight': [0.000012, '2D'] 'features.module.3.weight': [0.000012, '2D'] 'features.module.6.weight': [0.000012, '2D'] 'features.module.8.weight': [0.000012, '2D'] 'features.module.10.weight': [0.000012, '2D'] lr_schedulers: # Learning rate decay scheduler pruning_lr: class: ExponentialLR gamma: 0.9 policies: - pruner: instance_name : 'my_pruner' starting_epoch: 0 ending_epoch: 38 frequency: 2 - regularizer: instance_name: '2d_groups_regularizer' starting_epoch: 0 ending_epoch: 38 frequency: 1 - lr_scheduler: instance_name: pruning_lr starting_epoch: 24 ending_epoch: 200 frequency: 1","title":"Mixing it up"},{"location":"schedule.html#quantization-aware-training","text":"As with pruners and regularizers, specifying a quantizer in the scheduler YAML follows the constructor arguments of the Quantizer class (see details here ). Note that only a single quantizer instance may be defined per YAML. Let's see an example: quantizers: dorefa_quantizer: class: DorefaQuantizer bits_activations: 8 bits_weights: 4 overrides: conv1: bits_weights: null bits_activations: null relu1: bits_weights: null bits_activations: null final_relu: bits_weights: null bits_activations: null fc: bits_weights: null bits_activations: null The specific quantization method we're instantiating here is DorefaQuantizer . Then we define the default bit-widths for activations and weights, in this case 8 and 4 bits, respectively. Then, we define the overrides mapping. In the example above, we choose not to quantize the first and last layer of the model. 
In the case of DorefaQuantizer , the weights are quantized as part of the convolution / FC layers, but the activations are quantized in separate layers, which replace the ReLU layers in the original model (remember - even though we replaced the ReLU modules with our own quantization modules, the names of the modules aren't changed). So, in all, we need to reference the first layer with parameters conv1 , the first activation layer relu1 , the last activation layer final_relu and the last layer with parameters fc . Specifying null means \"do not quantize\". Note that for quantizers, we reference names of modules, not names of parameters as we do for pruners and regularizers.","title":"Quantization-Aware Training"},{"location":"schedule.html#defining-overrides-for-groups-of-layers-using-regular-expressions","text":"Suppose we have a sub-module in our model named block1 , which contains multiple convolution layers which we would like to quantize to, say, 2 bits. The convolution layers are named conv1 , conv2 and so on. In that case we would define the following: overrides: 'block1\\.conv*': bits_weights: 2 bits_activations: null RegEx Note : Remember that the dot ( . ) is a meta-character (i.e. a reserved character) in regular expressions. So, to match the actual dot characters which separate sub-modules in PyTorch module names, we need to escape it: \\. Overlapping patterns are also possible, which allows defining an override for a group of layers while also \"singling-out\" specific layers for different overrides. For example, let's take the last example and configure a different override for block1.conv1 : overrides: 'block1\\.conv1': bits_weights: 4 bits_activations: null 'block1\\.conv*': bits_weights: 2 bits_activations: null Important Note : The patterns are evaluated eagerly - first match wins. So, to properly quantize a model using \"broad\" patterns and more \"specific\" patterns as just shown, make sure the specific pattern is listed before the broad one. The QuantizationPolicy , which controls the quantization procedure during training, is actually quite simplistic. All it does is call the prepare_model() function of the Quantizer when it's initialized, followed by the first call to quantize_params() . Then, at the end of each epoch, after the float copy of the weights has been updated, it calls the quantize_params() function again. policies: - quantizer: instance_name: dorefa_quantizer starting_epoch: 0 ending_epoch: 200 frequency: 1 Important Note : As mentioned here , since the quantizer modifies the model's parameters (assuming training with quantization in the loop is used), the call to prepare_model() must be performed before the optimizer is created. Therefore, currently, the starting epoch for a quantization policy must be 0, otherwise the quantization process will not work as expected. If one wishes to do a \"warm-startup\" (or \"boot-strapping\"), training for a few epochs with full precision and only then starting to quantize, the only way to do this right now is to execute a separate run to generate the boot-strapped weights, and then execute a second run which resumes from the checkpoint with the boot-strapped weights.","title":"Defining overrides for groups of layers using regular expressions"},{"location":"schedule.html#post-training-quantization","text":"Post-training quantization differs from the other techniques described here. Since it is not executed during training, it does not require any Policies or a Scheduler. 
Currently, the only method implemented for post-training quantization is range-based linear quantization . Quantizing a model using this method requires adding 2 lines of code: quantizer = distiller.quantization.PostTrainLinearQuantizer(model, <quantizer arguments>) quantizer.prepare_model() # Execute evaluation on model as usual See the documentation for PostTrainLinearQuantizer in range_linear.py for details on the available arguments. In addition to directly instantiating the quantizer with arguments, it can also be configured from a YAML file. The syntax for the YAML file is exactly the same as seen in the quantization-aware training section above. Not surprisingly, the class defined must be PostTrainLinearQuantizer , and any other components or policies defined in the YAML file are ignored. We'll see how to create the quantizer in this manner below. If more configurability is needed, a helper function can be used that will add a set of command-line arguments to configure the quantizer: parser = argparse.ArgumentParser() distiller.quantization.add_post_train_quant_args(parser) args = parser.parse_args() These are the available command line arguments: Arguments controlling quantization at evaluation time (\"post-training quantization\"): --quantize-eval, --qe Apply linear quantization to model before evaluation. Applicable only if --evaluate is also set --qe-calibration PORTION_OF_TEST_SET Run the model in evaluation mode on the specified portion of the test dataset and collect statistics. Ignores all other 'qe--*' arguments --qe-mode QE_MODE, --qem QE_MODE Linear quantization mode. Choices: sym | asym_s | asym_u --qe-bits-acts NUM_BITS, --qeba NUM_BITS Number of bits for quantization of activations --qe-bits-wts NUM_BITS, --qebw NUM_BITS Number of bits for quantization of weights --qe-bits-accum NUM_BITS Number of bits for quantization of the accumulator --qe-clip-acts, --qeca Enable clipping of activations using min/max values averaging over batch --qe-no-clip-layers LAYER_NAME [LAYER_NAME ...], --qencl LAYER_NAME [LAYER_NAME ...] List of layer names for which not to clip activations. Applicable only if --qe-clip-acts is also set --qe-per-channel, --qepc Enable per-channel quantization of weights (per output channel) --qe-stats-file PATH Path to YAML file with calibration stats. If not given, dynamic quantization will be run (Note that not all layer types are supported for dynamic quantization) --qe-config-file PATH Path to YAML file containing configuration for PostTrainLinearQuantizer (if present, all other --qe* arguments are ignored) (Note that --quantize-eval and --qe-calibration are mutually exclusive.) When using these command line arguments, the quantizer can be invoked as follows: if args.quantize_eval: if args.qe_config_file: quantizer = distiller.config_component_from_file_by_class(model, args.qe_config_file, 'PostTrainLinearQuantizer') else: quantizer = quantization.PostTrainLinearQuantizer(model, args.qe_bits_acts, args.qe_bits_wts, args.qe_bits_accum, None, args.qe_mode, args.qe_clip_acts, args.qe_no_clip_layers, args.qe_per_channel, args.qe_stats_file) quantizer.prepare_model() # Execute evaluation on model as usual Note that the command-line arguments don't expose the overrides parameter of the quantizer, which allows fine-grained control over how each layer is quantized. To utilize this functionality, configure with a YAML file. To see integration of these command line arguments in use, see the image classification example . 
For example invocations of post-training quantization see here .","title":"Post-Training Quantization"},{"location":"schedule.html#collecting-statistics-for-quantization","text":"To generate statistics that can be used for static quantization of activations, do the following (shown here assuming the command line argument --qe-calibration shown above is used, which specifies the portion of the test set to use for statistics generation): if args.qe_calibration: distiller.utils.assign_layer_fq_names(model) msglogger.info(\"Generating quantization calibration stats based on {0} users\".format(args.qe_calibration)) collector = distiller.data_loggers.QuantCalibrationStatsCollector(model) with collector_context(collector): # Here call your model evaluation function, making sure to execute only # the portion of the dataset specified by the qe_calibration argument yaml_path = 'some/dir/quantization_stats.yaml' collector.save(yaml_path) The generated YAML stats file can then be provided using the --qe-stats-file argument. An example of a generated stats file can be found here .","title":"Collecting Statistics for Quantization"},{"location":"schedule.html#pruning-fine-control","text":"Sometimes the default pruning process doesn't satisfy our needs and we require finer control over the pruning process (e.g. over masking, gradient handling, and weight updates). Below we will explain the math and nuances of fine-control configuration.","title":"Pruning Fine-Control"},{"location":"schedule.html#setting-up-the-problem","text":"We represent the weights of a DNN as the set \\theta=\\left\\{\\theta_{l} : 0 \\leq l \\leq L\\right\\} where \\theta_{l} represents the parameters tensor (weights and biases) of layer l in a network having L layers. Usually we do not prune biases because of their small size and relative importance. Therefore, we will consider only the network weights (also known as network connections): W=\\left\\{W_{l} : 0 \\leq l \\leq L\\right\\} We wish to optimize some objective (e.g. minimize the energy required to execute a network in inference mode) under some performance constraint (e.g. accuracy), and we do this by maximizing the sparsity of the network weights (sometimes under some chosen sparsity-pattern constraint). We formalize pruning as a 3-step action: Generating a mask - in which we define a sparsity-inducing function per layer, P_l , such that M_{l}=P_{l}\\left(W_{l}\\right) . M_{l} is a binary matrix which is used to mask W_{l} . P_l is implemented by subclasses of distiller.pruner . Masking the weights using the Hadamard product: \\widehat{W}_{l}=M_{l} \\circ W_{l} Updating the weights (performed by the optimizer). By default, we compute the data-loss using the masked weights, and calculate the gradient of this loss with respect to the masked weights. We update the weights by making a small adjustment to the masked weights : W_{l} \\leftarrow \\widehat{W}_{l}-\\alpha \\frac{\\partial Loss(\\widehat{W}_{l})}{\\partial \\widehat{W}_{l}} We show below how to change this default behavior. We also provide a more exact description of the weights update when using PyTorch's SGD optimizer. The pruning regimen follows a pruning-rate schedule which, analogously to learning-rate annealing, changes the pruning rate according to a configurable strategy over time. The schedule allows us to configure new masks either once at the beginning of epochs (most common), or at the beginning of mini-batches (for finer control). 
In the former, the masks are calculated and assigned to \\{M_{l}\\} once, at the beginning of epochs (the specific epochs are determined by the schedule). The pseudo-code below shows the typical training-loop with CompressionScheduler callbacks in bold font, and the three pruning actions described above in burgundy. Figure 1: Pruning algorithm pseudo-code We can perform masking by adding the masking operation to the network graph. We call this in-graph masking , as depicted in the bottom of Figure 2. In the forward-pass we apply element-wise multiplication of the weights W_{l} and the mask M_{l} to obtain the masked weights \\widehat{W}_{l} , which we apply to the Convolution operation. In the backward-pass we mask \\frac{\\partial L}{\\partial \\widehat{W}} to obtain \\frac{\\partial L}{\\partial W} with which we update W_{l} . Figure 2: Forward and backward weight masking In Distiller we perform out-of-graph masking , in which we directly set the value of \\widehat{W}_{l} by applying a mask on W_{l} . In the backward-pass we make sure that the weights are updated by the proper gradients. In the common pruning use-case we want the optimizer to update only the unmasked weights, but we can configure this behavior using the fine-control arguments, as explained below.","title":"Setting up the problem"},{"location":"schedule.html#fine-control","text":"For finer control over the behavior of the pruning process, Distiller provides a set of PruningPolicy arguments in the args field, as in the sample below. pruners: random_filter_pruner: class: BernoulliFilterPruner desired_sparsity: 0.1 group_type: Filters weights: [module.conv1.weight] policies: - pruner: instance_name: random_filter_pruner args: mini_batch_pruning_frequency: 16 discard_masks_at_minibatch_end: True use_double_copies: True mask_on_forward_only: True mask_gradients: True starting_epoch: 15 ending_epoch: 180 frequency: 1","title":"Fine-Control"},{"location":"schedule.html#controls","text":"mini_batch_pruning_frequency (default: 0): controls pruning scheduling at the mini-batch granularity. Every mini_batch_pruning_frequency training steps (i.e. mini-batches) we configure a new mask. In between mask updates, we mask mini-batches with the current mask. discard_masks_at_minibatch_end (default: False): discards the pruning mask at the end of the mini-batch. In the example YAML above, a new mask is computed once every 16 mini-batches, applied in one forward-pass, and then discarded. In the next 15 mini-batches the mask is null, so we do not mask. mask_gradients (default: False): mask the weight gradients after performing the backward-pass, and before invoking the optimizer. One way to mask the gradients in PyTorch is to register a backward hook on the weight tensors we want to mask, and alter the gradients there. We do this by setting mask_gradients: True , as in the sample YAML above. This is sufficient if our weights optimization uses plain-vanilla SGD, because the update maintains the sparsity of the weights: \\widehat{W}_{l} is sparse by definition, and the gradients are sparse because we mask them. W_{l} \\leftarrow \\widehat{W}_{l}-\\alpha \\frac{\\partial Loss(\\widehat{W}_{l})}{\\partial \\widehat{W}_{l}} But this is not always the case. For example, PyTorch\u2019s SGD optimizer with weight-decay ( \\lambda ), momentum ( \\rho ) and learning-rate ( \\alpha ) has the optimization logic listed below: 1. \\Delta p=\\frac{\\partial Loss\\left(\\widehat{W}_{l}^{i}\\right)}{\\partial \\widehat{W}_{l}^{i}}+\\lambda \\widehat{W}_{l}^{i} 2. 
v_{i}=\\begin{cases} \\Delta p & \\text{if } i=0 \\cr \\rho v_{i-1}+(1-dampening)\\Delta p & \\text{if } i>0 \\end{cases} 3. W_{l}^{i+1} = \\widehat{W}_{l}^{i}-\\alpha v_{i} Let\u2019s look at the weight optimization update at some arbitrary step (i.e. mini-batch) k . We want to show that masking the weights and gradients ( W_{l}^{i=k} and \\frac{\\partial Loss\\left(\\widehat{W}_{l}^{i=k}\\right)}{\\partial \\widehat{W}_{l}^{i=k}} ) is not sufficient to guarantee that W_{l}^{i=k+1} is sparse. This is easy to do: the momentum buffer v_i is not necessarily sparse, because it accumulates gradient history from steps before the current mask was applied, and therefore W_{l}^{i+1} is not necessarily sparse. Masking the weights in the forward-pass, and gradients in the backward-pass, is not sufficient to maintain the sparsity of the weights! This is an important insight, and it means that na\u00efve in-graph masking is also not sufficient to guarantee sparsity of the updated weights. use_double_copies (default: False): If you want to compute the gradients using the masked weights and also to update the unmasked weights (instead of updating the masked weights, as is done by default), set use_double_copies = True . This changes step (3) to: 3. W_{l}^{i+1} = W_{l}^{i}-\\alpha \\Delta p mask_on_forward_only (default: False): when set to False the weights will also be masked after the Optimizer is done updating the weights, to remove any updates contributed by the masked gradients. If we want to guarantee the sparsity of the updated weights, we must explicitly mask the weights after step (3) above: 4. {W}_{l}^{i+1} \\leftarrow M_{l}^{i} \\circ {W}_{l}^{i+1} This argument defaults to False , but you can skip step (4) by setting mask_on_forward_only = True . Finally, note that mask_gradients: True and mask_on_forward_only: False are two alternative mechanisms for removing the updates contributed by the masked gradients; if you are masking in the backward-pass, choose one of them, but do not use both.","title":"Controls"},{"location":"schedule.html#knowledge-distillation","text":"Knowledge distillation (see here ) is also implemented as a Policy , which should be added to the scheduler. However, with the current implementation, it cannot be defined within the YAML file like the rest of the policies described above. To make the integration of this method into applications a bit easier, a helper function can be used that will add a set of command-line arguments related to knowledge distillation: import argparse import distiller parser = argparse.ArgumentParser() distiller.knowledge_distillation.add_distillation_args(parser) (The add_distillation_args function accepts some optional arguments, see its implementation at distiller/knowledge_distillation.py for details) These are the command line arguments exposed by this function: Knowledge Distillation Training Arguments: --kd-teacher ARCH Model architecture for teacher model --kd-pretrained Use pre-trained model for teacher --kd-resume PATH Path to checkpoint from which to load teacher weights --kd-temperature TEMP, --kd-temp TEMP Knowledge distillation softmax temperature --kd-distill-wt WEIGHT, --kd-dw WEIGHT Weight for distillation loss (student vs. teacher soft targets) --kd-student-wt WEIGHT, --kd-sw WEIGHT Weight for student vs. labels loss --kd-teacher-wt WEIGHT, --kd-tw WEIGHT Weight for teacher vs. 
labels loss --kd-start-epoch EPOCH_NUM Epoch from which to enable distillation Once arguments have been parsed, some initialization code is required, similar to the following: # Assuming: # \"args\" variable holds command line arguments # \"model\" variable holds the model we're going to train, that is - the student model # \"compression_scheduler\" variable holds a CompressionScheduler instance args.kd_policy = None if args.kd_teacher: # Create teacher model - replace this with your model creation code teacher = create_model(args.kd_pretrained, args.dataset, args.kd_teacher, device_ids=args.gpus) if args.kd_resume: teacher, _, _ = apputils.load_checkpoint(teacher, chkpt_file=args.kd_resume) # Create policy and add to scheduler dlw = distiller.DistillationLossWeights(args.kd_distill_wt, args.kd_student_wt, args.kd_teacher_wt) args.kd_policy = distiller.KnowledgeDistillationPolicy(model, teacher, args.kd_temp, dlw) compression_scheduler.add_policy(args.kd_policy, starting_epoch=args.kd_start_epoch, ending_epoch=args.epochs, frequency=1) Finally, during the training loop, we need to perform forward propagation through the teacher model as well. The KnowledgeDistillationPolicy class keeps a reference to both the student and teacher models, and exposes a forward function that performs forward propagation on both of them. Since this is not one of the standard policy callbacks, we need to call this function manually from our training loop, as follows: if args.kd_policy is None: # Revert to a \"normal\" forward-prop call if no knowledge distillation policy is present output = model(input_var) else: output = args.kd_policy.forward(input_var) To see this integration in action, take a look at the image classification sample at examples/classifier_compression/compress_classifier.py .","title":"Knowledge Distillation"},{"location":"tutorial-lang_model.html","text":"Using Distiller to prune a PyTorch language model Contents Introduction Setup Preparing the code Training-loop Creating compression baselines Compressing the language model What are we compressing? How are we compressing? When are we compressing? Until next time Introduction In this tutorial I'll show you how to compress a word-level language model using Distiller . Specifically, we use PyTorch\u2019s word-level language model sample code as the code-base of our example, weave in some Distiller code, and show how we compress the model using two different element-wise pruning algorithms. To make things manageable, I've divided the tutorial into two parts: in the first we will set up the sample application and prune using AGP . In the second part I'll show how I've added Baidu's RNN pruning algorithm and then use it to prune the same word-level language model. The completed code is available here . The results are displayed below, and the code is available here . Note that we can improve the results by training longer, since the loss curves are usually still decreasing at the end of epoch 40. However, for demonstration purposes we don\u2019t need to do this. 
Type Sparsity NNZ Validation Test Command line Small 0% 7,135,600 101.13 96.29 time python3 main.py --cuda --epochs 40 --tied --wd=1e-6 Medium 0% 28,390,700 88.17 84.21 time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied --wd=1e-6 Large 0% 85,917,000 87.49 83.85 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --wd=1e-6 Large 70% 25,487,550 90.67 85.96 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70.schedule_agp.yaml Large 70% 25,487,550 90.59 85.84 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70.schedule_agp.yaml --wd=1e-6 Large 70% 25,487,550 87.40 82.93 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70B.schedule_agp.yaml --wd=1e-6 Large 80.4% 16,847,550 89.31 83.64 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_80.schedule_agp.yaml --wd=1e-6 Large 90% 8,591,700 90.70 85.67 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_90.schedule_agp.yaml --wd=1e-6 Large 95% 4,295,850 98.42 92.79 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_95.schedule_agp.yaml --wd=1e-6 Table 1: AGP language model pruning results. NNZ stands for number of non-zero coefficients (embeddings are counted once, because they are tied). Figure 1: Perplexity vs model size (lower perplexity is better). The model is composed of an Encoder embedding, two LSTMs, and a Decoder embedding. The Encoder and Decoder embeddings (projections) are tied to improve perplexity results (per https://arxiv.org/pdf/1611.01462.pdf), so in the sparsity statistics we account for only one of the encoder/decoder embeddings. We used the WikiText2 dataset (twice as large as PTB). We compared three model sizes: small (7.1M; 14M), medium (28M; 50M), large (86M; 136M) \u2013 reported as (#parameters net/tied; #parameters gross). The results reported below use a preset seed (for reproducibility), and we expect results can be improved if we allow \u201ctrue\u201d pseudo-randomness. We limited our tests to 40 epochs, even though validation perplexity was still trending down. Essentially, this recreates the language model experiment in the AGP paper, and validates its conclusions: * \u201cWe see that sparse models are able to outperform dense models which have significantly more parameters.\u201d * The 80% sparse large model (which has 16.9M parameters and a perplexity of 83.64) is able to outperform the dense medium (which has 28.4M parameters and a perplexity of 84.21), a model which has 1.7 times more parameters. It also outperforms the dense large model, which exemplifies how pruning can act as a regularizer. * \u201cOur results show that pruning works very well not only on the dense LSTM weights and dense softmax layer but also the dense embedding matrix. This suggests that during the optimization procedure the neural network can find a good sparse embedding for the words in the vocabulary that works well together with the sparse connectivity structure of the LSTM weights and softmax layer.\u201d Setup We start by cloning PyTorch\u2019s example repository . 
I\u2019ve copied the language model code to distiller\u2019s examples/word_language_model directory, so I\u2019ll use that for the rest of the tutorial. Next, let\u2019s create and activate a virtual environment, as explained in Distiller's README file. Now we can turn our attention to main.py , which contains the training application. Preparing the code We begin by adding code to invoke Distiller in file main.py . This involves a bit of mechanics, because we did not pip install Distiller in our environment (we don\u2019t have a setup.py script for Distiller as of yet). To make Distiller library functions accessible from main.py , we modify sys.path to include the distiller root directory by taking the current directory and pointing two directories up. This is very specific to the location of this example code, and it will break if you\u2019ve placed the code elsewhere \u2013 so be aware. import os import sys script_dir = os.path.dirname(__file__) module_path = os.path.abspath(os.path.join(script_dir, '..', '..')) if module_path not in sys.path: sys.path.append(module_path) import distiller import apputils from distiller.data_loggers import TensorBoardLogger, PythonLogger Next, we augment the application arguments with two Distiller-specific arguments. The first, --summary , gives us the ability to do simple compression instrumentation (e.g. log sparsity statistics). The second argument, --compress , is how we tell the application where the compression scheduling file is located. We also add two arguments - momentum and weight-decay - for the SGD optimizer. As I explain later, I replaced the original code's optimizer with SGD, so we need these extra arguments. # Distiller-related arguments SUMMARY_CHOICES = ['sparsity', 'model', 'modules', 'png', 'percentile'] parser.add_argument('--summary', type=str, choices=SUMMARY_CHOICES, help='print a summary of the model, and exit - options: ' + ' | '.join(SUMMARY_CHOICES)) parser.add_argument('--compress', dest='compress', type=str, nargs='?', action='store', help='configuration file for pruning the model (default is to use hard-coded schedule)') parser.add_argument('--momentum', default=0., type=float, metavar='M', help='momentum') parser.add_argument('--weight-decay', '--wd', default=0., type=float, metavar='W', help='weight decay (default: 1e-4)') We add code to handle the --summary application argument. It can be as simple as forwarding to distiller.model_summary or more complex, as in the Distiller sample. if args.summary: distiller.model_summary(model, None, args.summary, 'wikitext2') exit(0) Similarly, we add code to handle the --compress argument, which creates a CompressionScheduler and configures it from a YAML schedule file: if args.compress: source = args.compress compression_scheduler = distiller.CompressionScheduler(model) distiller.config.fileConfig(model, None, compression_scheduler, args.compress, msglogger) We also create the optimizer, and the learning-rate decay policy scheduler. The original PyTorch example manually manages the optimization and LR decay process, but I think that having a standard optimizer and LR-decay schedule gives us the flexibility to experiment with these during the training process. Using an SGD optimizer configured with momentum=0 and weight_decay=0 , and a ReduceLROnPlateau LR-decay policy with patience=0 and factor=0.5 will give the same behavior as in the original PyTorch example. From there, we can experiment with the optimizer and LR-decay configuration. 
optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay) lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=0, verbose=True, factor=0.5) Next, we add code to set up the logging backends: a Python logger backend which reads its configuration from file and logs messages to the console and log file ( pylogger ); and a TensorBoard backend logger which logs statistics to a TensorBoard data file ( tflogger ). I configured the TensorBoard backend to log gradients because RNNs suffer from vanishing and exploding gradients, so we might want to take a look in case the training experiences a sudden failure. This code is not strictly required, but it is quite useful to be able to log the session progress, and to export logs to TensorBoard for realtime visualization of the training progress. # Distiller loggers msglogger = apputils.config_pylogger('logging.conf', None) tflogger = TensorBoardLogger(msglogger.logdir) tflogger.log_gradients = True pylogger = PythonLogger(msglogger) Training loop Now we scroll down all the way to the train() function. We'll change its signature to include the epoch , optimizer , and compression_scheduler . We'll soon see why we need these. def train(epoch, optimizer, compression_scheduler=None) Function train() is responsible for training the network in batches for one epoch, and in its epoch loop we want to perform compression. The CompressionScheduler invokes ScheduledTrainingPolicy instances per the scheduling specification that was programmed in the CompressionScheduler instance. There are four main SchedulingPolicy types: PruningPolicy , RegularizationPolicy , LRPolicy , and QuantizationPolicy . We'll be using PruningPolicy , which is triggered on_epoch_begin (to invoke the Pruners ), and on_minibatch_begin (to mask the weights). Later we will create a YAML scheduling file, and specify the schedule of AutomatedGradualPruner instances. Because we are writing a single application, which can be used with various Policies in the future (e.g. group-lasso regularization), we should add code to invoke all of the CompressionScheduler 's callbacks, not just the mandatory on_epoch_begin callback. We invoke on_minibatch_begin before running the forward-pass, before_backward_pass after computing the loss, and on_minibatch_end after completing the backward-pass. def train(epoch, optimizer, compression_scheduler=None): ... # The line below was fixed as per: https://github.com/pytorch/examples/issues/214 for batch, i in enumerate(range(0, train_data.size(0), args.bptt)): data, targets = get_batch(train_data, i) # Starting each batch, we detach the hidden state from how it was previously produced. # If we didn't, the model would try backpropagating all the way to the start of the dataset. hidden = repackage_hidden(hidden) if compression_scheduler: compression_scheduler.on_minibatch_begin(epoch, minibatch_id=batch, minibatches_per_epoch=steps_per_epoch) output, hidden = model(data, hidden) loss = criterion(output.view(-1, ntokens), targets) if compression_scheduler: compression_scheduler.before_backward_pass(epoch, minibatch_id=batch, minibatches_per_epoch=steps_per_epoch, loss=loss) optimizer.zero_grad() loss.backward() # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip) optimizer.step() total_loss += loss.item() if compression_scheduler: compression_scheduler.on_minibatch_end(epoch, minibatch_id=batch, minibatches_per_epoch=steps_per_epoch) The rest of the code could stay as in the original PyTorch sample, but I wanted to use an SGD optimizer, so I replaced: for p in model.parameters(): p.data.add_(-lr, p.grad.data) with: optimizer.step() The rest of the code in function train() logs to a text file and a TensorBoard backend. Again, such code is not mandatory, but a few lines give us a lot of visibility: we have training progress information saved to log, and we can monitor the training progress in realtime on TensorBoard. That's a lot for a few lines of code ;-) if batch % args.log_interval == 0 and batch > 0: cur_loss = total_loss / args.log_interval elapsed = time.time() - start_time lr = optimizer.param_groups[0]['lr'] msglogger.info( '| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.4f} | ms/batch {:5.2f} ' '| loss {:5.2f} | ppl {:8.2f}'.format( epoch, batch, len(train_data) // args.bptt, lr, elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss))) total_loss = 0 start_time = time.time() stats = ('Performance/Training/', OrderedDict([ ('Loss', cur_loss), ('Perplexity', math.exp(cur_loss)), ('LR', lr), ('Batch Time', elapsed * 1000)]) ) steps_completed = batch + 1 distiller.log_training_progress(stats, model.named_parameters(), epoch, steps_completed, steps_per_epoch, args.log_interval, [tflogger]) Finally we get to the outer training-loop which loops on args.epochs . We add the two final CompressionScheduler callbacks: on_epoch_begin , at the start of the loop, and on_epoch_end after running evaluate on the model and updating the learning-rate. try: for epoch in range(0, args.epochs): epoch_start_time = time.time() if compression_scheduler: compression_scheduler.on_epoch_begin(epoch) train(epoch, optimizer, compression_scheduler) val_loss = evaluate(val_data) lr_scheduler.step(val_loss) if compression_scheduler: compression_scheduler.on_epoch_end(epoch) And that's it! The language model sample is ready for compression. Creating compression baselines In To prune, or not to prune: exploring the efficacy of pruning for model compression , Zhu and Gupta \"compare the accuracy of large, but pruned models (large-sparse) and their smaller, but dense (small-dense) counterparts with identical memory footprint.\" They also \"propose a new gradual pruning technique that is simple and straightforward to apply across a variety of models/datasets with minimal tuning.\" This pruning schedule is implemented by distiller.AutomatedGradualPruner (AGP), which increases the sparsity level (expressed as a percentage of zero-valued elements) gradually over several pruning steps. Distiller's implementation only prunes elements once in an epoch (the model is fine-tuned in between pruning events), which is a small deviation from Zhu and Gupta's paper. The research paper specifies the schedule in terms of mini-batches, while our implementation specifies the schedule in terms of epochs. We feel that using epochs performs well, and is more \"stable\", since the number of mini-batches will change if you change the batch size. Before we start compressing stuff ;-), we need to create baselines so we have something to benchmark against. Let's prepare small, medium, and large baseline models, like Table 3 of To prune, or Not to Prune .
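Before we do, a short aside on the AGP schedule itself: the sparsity ramp described above is the cubic function from Zhu and Gupta's paper, and it is easy to state in code. The sketch below is illustrative only; the function name and arguments are mine, not Distiller's API.

def agp_sparsity(epoch, initial_sparsity, final_sparsity, starting_epoch, ending_epoch):
    # Cubic sparsity ramp from "To prune, or not to prune" (Zhu & Gupta):
    # sparsity rises quickly at first, then flattens out near the target.
    if epoch < starting_epoch:
        return 0.0
    if epoch >= ending_epoch:
        return final_sparsity
    progress = (epoch - starting_epoch) / (ending_epoch - starting_epoch)
    return final_sparsity + (initial_sparsity - final_sparsity) * (1.0 - progress) ** 3

# For the 70% schedule used later (5% -> 70% between epochs 2 and 20):
for epoch in (2, 5, 10, 15, 20):
    print(epoch, round(agp_sparsity(epoch, 0.05, 0.70, 2, 20), 3))

Most of the pruning therefore happens early in the schedule, which leaves the later epochs for fine-tuning close to the final sparsity.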
These will provide baseline perplexity results that we'll compare the compressed models against. I chose to use tied input/output embeddings, and constrained the training to 40 epochs. The table below shows the model sizes, where we are interested in the tied version (biases are ignored due to their small size and because we don't prune them). Size Number of Weights (untied) Number of Weights (tied) Small 13,951,200 7,295,600 Medium 50,021,400 28,390,700 Large 135,834,000 85,917,000 I started experimenting with the optimizer set up as in the PyTorch example, but I added some L2 regularization when I noticed that the training was overfitting. The two right columns show the validation and test perplexity results (lower is better) of each model, trained with no L2 regularization, and with L2 regularization of 1e-5 and 1e-6. In all three model sizes using the smaller L2 regularization (1e-6) gave the best results. BTW, I'm not showing experiments with even lower regularization here, because they did not help. Type Command line Validation Test Small time python3 main.py --cuda --epochs 40 --tied 105.23 99.53 Small time python3 main.py --cuda --epochs 40 --tied --wd=1e-6 101.13 96.29 Small time python3 main.py --cuda --epochs 40 --tied --wd=1e-5 109.49 103.53 Medium time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied 90.93 86.20 Medium time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied --wd=1e-6 88.17 84.21 Medium time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied --wd=1e-5 97.75 93.06 Large time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied 88.23 84.21 Large time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --wd=1e-6 87.49 83.85 Large time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --wd=1e-5 99.22 94.28 Compressing the language model OK, so now let's recreate the results of the language model experiment from section 4.2 of the paper. We're using PyTorch's sample, so the language model we implement is not exactly like the one in the AGP paper (and uses a different dataset), but it's close enough, so if everything goes well, we should see similar compression results. What are we compressing?
To gain insight into the model parameters, we can use the command-line to produce a weights-sparsity table: $ python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --summary=sparsity Parameters: +---------+------------------+---------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean | |---------+------------------+---------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0.00000 | encoder.weight | (33278, 1500) | 49917000 | 49916999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.05773 | -0.00000 | 0.05000 | | 1.00000 | rnn.weight_ih_l0 | (6000, 1500) | 9000000 | 9000000 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.01491 | 0.00001 | 0.01291 | | 2.00000 | rnn.weight_hh_l0 | (6000, 1500) | 9000000 | 8999999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00001 | 0.01491 | 0.00000 | 0.01291 | | 3.00000 | rnn.weight_ih_l1 | (6000, 1500) | 9000000 | 8999999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00001 | 0.01490 | -0.00000 | 0.01291 | | 4.00000 | rnn.weight_hh_l1 | (6000, 1500) | 9000000 | 9000000 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.01491 | -0.00000 | 0.01291 | | 5.00000 | decoder.weight | (33278, 1500) | 49917000 | 49916999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.05773 | -0.00000 | 0.05000 | | 6.00000 | Total sparsity: | - | 135834000 | 135833996 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | +---------+------------------+---------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ Total sparsity: 0.00 So what's going on here? encoder.weight and decoder.weight are the input and output embeddings, respectively. Remember that in the configuration I chose for the three model sizes, these embeddings are tied, which means that we only have one copy of the parameters, which is shared between the encoder and decoder. We also have two pairs of RNN (LSTM really) parameters. There is a pair because the model uses the command-line argument args.nlayers to decide how many instances of RNN (or LSTM or GRU) cells to use, and it defaults to 2. The recurrent cells are LSTM cells, because this is the default of args.model , which is used in the initialization of RNNModel . Let's look at the parameters of the first RNN: rnn.weight_ih_l0 and rnn.weight_hh_l0 : what are these? Recall the LSTM equations that PyTorch implements. In the equations, there are 8 instances of vector-matrix multiplication (when batch=1). These can be combined into a single matrix-matrix multiplication (GEMM), but PyTorch groups these into two GEMM operations: one GEMM multiplies the inputs ( rnn.weight_ih_l0 ), and the other multiplies the hidden-state ( rnn.weight_hh_l0 ). How are we compressing? Let's turn to the configurations of the Large language model compression schedule to 70%, 80%, 90% and 95% sparsity. Using AGP it is easy to configure the pruning schedule to produce an exact sparsity of the compressed model. I'll use the 70% schedule to show a concrete example. The YAML file has two sections: pruners and policies .
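Before we look at those two sections, here is a quick sanity check of the LSTM parameter shapes discussed above. This is a minimal standalone sketch, not part of the sample code:

import torch

# Same configuration as the Large model: emsize=1500, nhid=1500, nlayers=2.
rnn = torch.nn.LSTM(input_size=1500, hidden_size=1500, num_layers=2)
for name, param in rnn.named_parameters():
    print(name, tuple(param.shape))
# weight_ih_l0 and weight_hh_l0 both print (6000, 1500): the matrices of the
# four LSTM gates, each with 1500 rows, stacked into a single tensor.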
Section pruners defines instances of ParameterPruner - in our case we define three instances of AutomatedGradualPruner : for the weights of the first RNN ( l0_rnn_pruner ), the second RNN ( l1_rnn_pruner ) and the embedding layer ( embedding_pruner ). These names are arbitrary, and serve as name-handles that bind Policies to Pruners - so you can use whatever names you want. Each AutomatedGradualPruner is configured with an initial_sparsity and final_sparsity . For example, the l0_rnn_pruner below is configured to prune 5% of the weights as soon as it starts working, and finish when 70% of the weights have been pruned. The weights parameter tells the Pruner which weight tensors to prune. pruners: l0_rnn_pruner: class: AutomatedGradualPruner initial_sparsity : 0.05 final_sparsity: 0.70 weights: [rnn.weight_ih_l0, rnn.weight_hh_l0] l1_rnn_pruner: class: AutomatedGradualPruner initial_sparsity : 0.05 final_sparsity: 0.70 weights: [rnn.weight_ih_l1, rnn.weight_hh_l1] embedding_pruner: class: AutomatedGradualPruner initial_sparsity : 0.05 final_sparsity: 0.70 weights: [encoder.weight] When are we compressing? If the pruners section defines \"what-to-do\", the policies section defines \"when-to-do\". This part is harder, because we define the pruning schedule, which requires us to try a few different schedules until we understand which schedule works best. Below we define three PruningPolicy instances. The first two instances start operating at epoch 2 ( starting_epoch ), end at epoch 20 ( ending_epoch ), and operate once every epoch ( frequency ; as I explained above, Distiller's Pruning scheduling operates only at on_epoch_begin ). In between pruning operations, the pruned model is fine-tuned. policies: - pruner: instance_name : l0_rnn_pruner starting_epoch: 2 ending_epoch: 20 frequency: 1 - pruner: instance_name : l1_rnn_pruner starting_epoch: 2 ending_epoch: 20 frequency: 1 - pruner: instance_name : embedding_pruner starting_epoch: 3 ending_epoch: 21 frequency: 1 We invoke the compression as follows: $ time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70.schedule_agp.yaml Table 1 above shows that we can make a negligible improvement when adding L2 regularization. I did some experimenting with the sparsity distribution between the layers and with the scheduling frequency, and noticed that the embedding layers are much less sensitive to pruning than the RNN cells. I didn't notice any difference between the RNN cells, but I also didn't invest in this exploration. A new 70% sparsity schedule prunes the RNNs only to 50% sparsity, but prunes the embedding to 85% sparsity, and achieves almost a 3-point improvement in the test perplexity results. We provide similar pruning schedules for the other compression rates. Until next time This concludes the first part of the tutorial on pruning a PyTorch language model. In the next installment, I'll explain how we added an implementation of Baidu Research's Exploring Sparsity in Recurrent Neural Networks paper, and applied it to this language model. Geek On.","title":"Pruning a Language Model"},{"location":"tutorial-lang_model.html#using-distiller-to-prune-a-pytorch-language-model","text":"","title":"Using Distiller to prune a PyTorch language model"},{"location":"tutorial-lang_model.html#contents","text":"Introduction Setup Preparing the code Training-loop Creating compression baselines Compressing the language model What are we compressing? How are we compressing?
When are we compressing? Until next time","title":"Contents"},{"location":"tutorial-lang_model.html#introduction","text":"In this tutorial I'll show you how to compress a word-level language model using Distiller . Specifically, we use PyTorch\u2019s word-level language model sample code as the code-base of our example, weave in some Distiller code, and show how we compress the model using two different element-wise pruning algorithms. To make things manageable, I've divided the tutorial into two parts: in the first we will set up the sample application and prune using AGP . In the second part I'll show how I've added Baidu's RNN pruning algorithm and then use it to prune the same word-level language model. The completed code is available here . The results are displayed below and the code is available here . Note that we can improve the results by training longer, since the loss curves are usually still decreasing at the end of epoch 40. However, for demonstration purposes we don\u2019t need to do this. Type Sparsity NNZ Validation Test Command line Small 0% 7,135,600 101.13 96.29 time python3 main.py --cuda --epochs 40 --tied --wd=1e-6 Medium 0% 28,390,700 88.17 84.21 time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied --wd=1e-6 Large 0% 85,917,000 87.49 83.85 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --wd=1e-6 Large 70% 25,487,550 90.67 85.96 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70.schedule_agp.yaml Large 70% 25,487,550 90.59 85.84 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70.schedule_agp.yaml --wd=1e-6 Large 70% 25,487,550 87.40 82.93 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70B.schedule_agp.yaml --wd=1e-6 Large 80.4% 16,847,550 89.31 83.64 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_80.schedule_agp.yaml --wd=1e-6 Large 90% 8,591,700 90.70 85.67 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_90.schedule_agp.yaml --wd=1e-6 Large 95% 4,295,850 98.42 92.79 time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_95.schedule_agp.yaml --wd=1e-6 Table 1: AGP language model pruning results. NNZ stands for number of non-zero coefficients (embeddings are counted once, because they are tied). Figure 1: Perplexity vs. model size (lower perplexity is better). The model is composed of an Encoder embedding, two LSTMs, and a Decoder embedding. The Encoder and Decoder embeddings (projections) are tied to improve perplexity results (per https://arxiv.org/pdf/1611.01462.pdf), so in the sparsity statistics we account for only one of the encoder/decoder embeddings. We used the WikiText2 dataset (twice as large as PTB). We compared three model sizes: small (7.1M; 14M), medium (28M; 50M), large (86M; 136M) \u2013 reported as (#parameters net/tied; #parameters gross). The results reported below use a preset seed (for reproducibility), and we expect results can be improved if we allow \u201ctrue\u201d pseudo-randomness.
We limited our tests to 40 epochs, even though validation perplexity was still trending down. Essentially, this recreates the language model experiment in the AGP paper, and validates its conclusions: * \u201cWe see that sparse models are able to outperform dense models which have significantly more parameters.\u201d * The 80% sparse large model (which has 16.9M parameters and a perplexity of 83.64) is able to outperform the dense medium model (which has 28.4M parameters and a perplexity of 84.21), a model which has 1.7 times more parameters. It also outperforms the dense large model, which exemplifies how pruning can act as a regularizer. * \u201cOur results show that pruning works very well not only on the dense LSTM weights and dense softmax layer but also the dense embedding matrix. This suggests that during the optimization procedure the neural network can find a good sparse embedding for the words in the vocabulary that works well together with the sparse connectivity structure of the LSTM weights and softmax layer.\u201d","title":"Introduction"},{"location":"tutorial-lang_model.html#setup","text":"We start by cloning PyTorch\u2019s example repository . I\u2019ve copied the language model code to distiller\u2019s examples/word_language_model directory, so I\u2019ll use that for the rest of the tutorial. Next, let\u2019s create and activate a virtual environment, as explained in Distiller's README file. Now we can turn our attention to main.py , which contains the training application.","title":"Setup"},{"location":"tutorial-lang_model.html#preparing-the-code","text":"We begin by adding code to invoke Distiller in file main.py . This involves a bit of mechanics, because we did not pip install Distiller in our environment (we don\u2019t have a setup.py script for Distiller as of yet). To make Distiller library functions accessible from main.py , we modify sys.path to include the distiller root directory by taking the current directory and pointing two directories up. This is very specific to the location of this example code, and it will break if you\u2019ve placed the code elsewhere \u2013 so be aware. import os import sys script_dir = os.path.dirname(__file__) module_path = os.path.abspath(os.path.join(script_dir, '..', '..')) if module_path not in sys.path: sys.path.append(module_path) import distiller import apputils from distiller.data_loggers import TensorBoardLogger, PythonLogger Next, we augment the application arguments with two Distiller-specific arguments. The first, --summary , gives us the ability to do simple compression instrumentation (e.g. log sparsity statistics). The second argument, --compress , is how we tell the application where the compression scheduling file is located. We also add two arguments - momentum and weight-decay - for the SGD optimizer. As I explain later, I replaced the original code's optimizer with SGD, so we need these extra arguments.
# Distiller-related arguments SUMMARY_CHOICES = ['sparsity', 'model', 'modules', 'png', 'percentile'] parser.add_argument('--summary', type=str, choices=SUMMARY_CHOICES, help='print a summary of the model, and exit - options: ' + ' | '.join(SUMMARY_CHOICES)) parser.add_argument('--compress', dest='compress', type=str, nargs='?', action='store', help='configuration file for pruning the model (default is to use hard-coded schedule)') parser.add_argument('--momentum', default=0., type=float, metavar='M', help='momentum') parser.add_argument('--weight-decay', '--wd', default=0., type=float, metavar='W', help='weight decay (default: 1e-4)') We add code to handle the --summary application argument. It can be as simple as forwarding to distiller.model_summary or more complex, as in the Distiller sample. if args.summary: distiller.model_summary(model, None, args.summary, 'wikitext2') exit(0) Similarly, we add code to handle the --compress argument, which creates a CompressionScheduler and configures it from a YAML schedule file: if args.compress: source = args.compress compression_scheduler = distiller.CompressionScheduler(model) distiller.config.fileConfig(model, None, compression_scheduler, args.compress, msglogger) We also create the optimizer, and the learning-rate decay policy scheduler. The original PyTorch example manually manages the optimization and LR decay process, but I think that having a standard optimizer and LR-decay schedule gives us the flexibility to experiment with these during the training process. Using an SGD optimizer configured with momentum=0 and weight_decay=0 , and a ReduceLROnPlateau LR-decay policy with patience=0 and factor=0.5 will give the same behavior as in the original PyTorch example. From there, we can experiment with the optimizer and LR-decay configuration. optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay) lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=0, verbose=True, factor=0.5) Next, we add code to set up the logging backends: a Python logger backend which reads its configuration from file and logs messages to the console and log file ( pylogger ); and a TensorBoard backend logger which logs statistics to a TensorBoard data file ( tflogger ). I configured the TensorBoard backend to log gradients because RNNs suffer from vanishing and exploding gradients, so we might want to take a look in case the training experiences a sudden failure. This code is not strictly required, but it is quite useful to be able to log the session progress, and to export logs to TensorBoard for realtime visualization of the training progress. # Distiller loggers msglogger = apputils.config_pylogger('logging.conf', None) tflogger = TensorBoardLogger(msglogger.logdir) tflogger.log_gradients = True pylogger = PythonLogger(msglogger)","title":"Preparing the code"},{"location":"tutorial-lang_model.html#training-loop","text":"Now we scroll down all the way to the train() function. We'll change its signature to include the epoch , optimizer , and compression_scheduler . We'll soon see why we need these. def train(epoch, optimizer, compression_scheduler=None) Function train() is responsible for training the network in batches for one epoch, and in its epoch loop we want to perform compression. The CompressionScheduler invokes ScheduledTrainingPolicy instances per the scheduling specification that was programmed in the CompressionScheduler instance.
There are four main SchedulingPolicy types: PruningPolicy , RegularizationPolicy , LRPolicy , and QuantizationPolicy . We'll be using PruningPolicy , which is triggered on_epoch_begin (to invoke the Pruners ), and on_minibatch_begin (to mask the weights). Later we will create a YAML scheduling file, and specify the schedule of AutomatedGradualPruner instances. Because we are writing a single application, which can be used with various Policies in the future (e.g. group-lasso regularization), we should add code to invoke all of the CompressionScheduler 's callbacks, not just the mandatory on_epoch_begin callback. We invoke on_minibatch_begin before running the forward-pass, before_backward_pass after computing the loss, and on_minibatch_end after completing the backward-pass. def train(epoch, optimizer, compression_scheduler=None): ... # The line below was fixed as per: https://github.com/pytorch/examples/issues/214 for batch, i in enumerate(range(0, train_data.size(0), args.bptt)): data, targets = get_batch(train_data, i) # Starting each batch, we detach the hidden state from how it was previously produced. # If we didn't, the model would try backpropagating all the way to the start of the dataset. hidden = repackage_hidden(hidden) if compression_scheduler: compression_scheduler.on_minibatch_begin(epoch, minibatch_id=batch, minibatches_per_epoch=steps_per_epoch) output, hidden = model(data, hidden) loss = criterion(output.view(-1, ntokens), targets) if compression_scheduler: compression_scheduler.before_backward_pass(epoch, minibatch_id=batch, minibatches_per_epoch=steps_per_epoch, loss=loss) optimizer.zero_grad() loss.backward() # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs. torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip) optimizer.step() total_loss += loss.item() if compression_scheduler: compression_scheduler.on_minibatch_end(epoch, minibatch_id=batch, minibatches_per_epoch=steps_per_epoch) The rest of the code could stay as in the original PyTorch sample, but I wanted to use an SGD optimizer, so I replaced: for p in model.parameters(): p.data.add_(-lr, p.grad.data) with: optimizer.step() The rest of the code in function train() logs to a text file and a TensorBoard backend. Again, such code is not mandatory, but a few lines give us a lot of visibility: we have training progress information saved to log, and we can monitor the training progress in realtime on TensorBoard. That's a lot for a few lines of code ;-) if batch % args.log_interval == 0 and batch > 0: cur_loss = total_loss / args.log_interval elapsed = time.time() - start_time lr = optimizer.param_groups[0]['lr'] msglogger.info( '| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.4f} | ms/batch {:5.2f} ' '| loss {:5.2f} | ppl {:8.2f}'.format( epoch, batch, len(train_data) // args.bptt, lr, elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss))) total_loss = 0 start_time = time.time() stats = ('Performance/Training/', OrderedDict([ ('Loss', cur_loss), ('Perplexity', math.exp(cur_loss)), ('LR', lr), ('Batch Time', elapsed * 1000)]) ) steps_completed = batch + 1 distiller.log_training_progress(stats, model.named_parameters(), epoch, steps_completed, steps_per_epoch, args.log_interval, [tflogger]) Finally we get to the outer training-loop which loops on args.epochs . We add the two final CompressionScheduler callbacks: on_epoch_begin , at the start of the loop, and on_epoch_end after running evaluate on the model and updating the learning-rate.
try: for epoch in range(0, args.epochs): epoch_start_time = time.time() if compression_scheduler: compression_scheduler.on_epoch_begin(epoch) train(epoch, optimizer, compression_scheduler) val_loss = evaluate(val_data) lr_scheduler.step(val_loss) if compression_scheduler: compression_scheduler.on_epoch_end(epoch) And that's it! The language model sample is ready for compression.","title":"Training loop"},{"location":"tutorial-lang_model.html#creating-compression-baselines","text":"In To prune, or not to prune: exploring the efficacy of pruning for model compression , Zhu and Gupta \"compare the accuracy of large, but pruned models (large-sparse) and their smaller, but dense (small-dense) counterparts with identical memory footprint.\" They also \"propose a new gradual pruning technique that is simple and straightforward to apply across a variety of models/datasets with minimal tuning.\" This pruning schedule is implemented by distiller.AutomatedGradualPruner (AGP), which increases the sparsity level (expressed as a percentage of zero-valued elements) gradually over several pruning steps. Distiller's implementation only prunes elements once in an epoch (the model is fine-tuned in between pruning events), which is a small deviation from Zhu and Gupta's paper. The research paper specifies the schedule in terms of mini-batches, while our implementation specifies the schedule in terms of epochs. We feel that using epochs performs well, and is more \"stable\", since the number of mini-batches will change if you change the batch size. Before we start compressing stuff ;-), we need to create baselines so we have something to benchmark against. Let's prepare small, medium, and large baseline models, like Table 3 of To prune, or Not to Prune . These will provide baseline perplexity results that we'll compare the compressed models against. I chose to use tied input/output embeddings, and constrained the training to 40 epochs. The table below shows the model sizes, where we are interested in the tied version (biases are ignored due to their small size and because we don't prune them). Size Number of Weights (untied) Number of Weights (tied) Small 13,951,200 7,295,600 Medium 50,021,400 28,390,700 Large 135,834,000 85,917,000 I started experimenting with the optimizer set up as in the PyTorch example, but I added some L2 regularization when I noticed that the training was overfitting. The two right columns show the validation and test perplexity results (lower is better) of each model, trained with no L2 regularization, and with L2 regularization of 1e-5 and 1e-6. In all three model sizes using the smaller L2 regularization (1e-6) gave the best results. BTW, I'm not showing experiments with even lower regularization here, because they did not help.
Type Command line Validation Test Small time python3 main.py --cuda --epochs 40 --tied 105.23 99.53 Small time python3 main.py --cuda --epochs 40 --tied --wd=1e-6 101.13 96.29 Small time python3 main.py --cuda --epochs 40 --tied --wd=1e-5 109.49 103.53 Medium time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied 90.93 86.20 Medium time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied --wd=1e-6 88.17 84.21 Medium time python3 main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied --wd=1e-5 97.75 93.06 Large time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied 88.23 84.21 Large time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --wd=1e-6 87.49 83.85 Large time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --wd=1e-5 99.22 94.28","title":"Creating compression baselines"},{"location":"tutorial-lang_model.html#compressing-the-language-model","text":"OK, so now let's recreate the results of the language model experiment from section 4.2 of the paper. We're using PyTorch's sample, so the language model we implement is not exactly like the one in the AGP paper (and uses a different dataset), but it's close enough, so if everything goes well, we should see similar compression results.","title":"Compressing the language model"},{"location":"tutorial-lang_model.html#what-are-we-compressing","text":"To gain insight into the model parameters, we can use the command-line to produce a weights-sparsity table: $ python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --summary=sparsity Parameters: +---------+------------------+---------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean | |---------+------------------+---------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0.00000 | encoder.weight | (33278, 1500) | 49917000 | 49916999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.05773 | -0.00000 | 0.05000 | | 1.00000 | rnn.weight_ih_l0 | (6000, 1500) | 9000000 | 9000000 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.01491 | 0.00001 | 0.01291 | | 2.00000 | rnn.weight_hh_l0 | (6000, 1500) | 9000000 | 8999999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00001 | 0.01491 | 0.00000 | 0.01291 | | 3.00000 | rnn.weight_ih_l1 | (6000, 1500) | 9000000 | 8999999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00001 | 0.01490 | -0.00000 | 0.01291 | | 4.00000 | rnn.weight_hh_l1 | (6000, 1500) | 9000000 | 9000000 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.01491 | -0.00000 | 0.01291 | | 5.00000 | decoder.weight | (33278, 1500) | 49917000 | 49916999 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.05773 | -0.00000 | 0.05000 | | 6.00000 | Total sparsity: | - | 135834000 | 135833996 | 0.00000 | 0.00000 | 0 | 0.00000 | 0 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | +---------+------------------+---------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ Total sparsity: 0.00 So what's going on here? encoder.weight and decoder.weight are the input and output embeddings, respectively.
Remember that in the configuration I chose for the three model sizes, these embeddings are tied, which means that we only have one copy of the parameters, which is shared between the encoder and decoder. We also have two pairs of RNN (LSTM really) parameters. There is a pair because the model uses the command-line argument args.nlayers to decide how many instances of RNN (or LSTM or GRU) cells to use, and it defaults to 2. The recurrent cells are LSTM cells, because this is the default of args.model , which is used in the initialization of RNNModel . Let's look at the parameters of the first RNN: rnn.weight_ih_l0 and rnn.weight_hh_l0 : what are these? Recall the LSTM equations that PyTorch implements. In the equations, there are 8 instances of vector-matrix multiplication (when batch=1). These can be combined into a single matrix-matrix multiplication (GEMM), but PyTorch groups these into two GEMM operations: one GEMM multiplies the inputs ( rnn.weight_ih_l0 ), and the other multiplies the hidden-state ( rnn.weight_hh_l0 ).","title":"What are we compressing?"},{"location":"tutorial-lang_model.html#how-are-we-compressing","text":"Let's turn to the configurations of the Large language model compression schedule to 70%, 80%, 90% and 95% sparsity. Using AGP it is easy to configure the pruning schedule to produce an exact sparsity of the compressed model. I'll use the 70% schedule to show a concrete example. The YAML file has two sections: pruners and policies . Section pruners defines instances of ParameterPruner - in our case we define three instances of AutomatedGradualPruner : for the weights of the first RNN ( l0_rnn_pruner ), the second RNN ( l1_rnn_pruner ) and the embedding layer ( embedding_pruner ). These names are arbitrary, and serve as name-handles that bind Policies to Pruners - so you can use whatever names you want. Each AutomatedGradualPruner is configured with an initial_sparsity and final_sparsity . For example, the l0_rnn_pruner below is configured to prune 5% of the weights as soon as it starts working, and finish when 70% of the weights have been pruned. The weights parameter tells the Pruner which weight tensors to prune. pruners: l0_rnn_pruner: class: AutomatedGradualPruner initial_sparsity : 0.05 final_sparsity: 0.70 weights: [rnn.weight_ih_l0, rnn.weight_hh_l0] l1_rnn_pruner: class: AutomatedGradualPruner initial_sparsity : 0.05 final_sparsity: 0.70 weights: [rnn.weight_ih_l1, rnn.weight_hh_l1] embedding_pruner: class: AutomatedGradualPruner initial_sparsity : 0.05 final_sparsity: 0.70 weights: [encoder.weight]","title":"How are we compressing?"},{"location":"tutorial-lang_model.html#when-are-we-compressing","text":"If the pruners section defines \"what-to-do\", the policies section defines \"when-to-do\". This part is harder, because we define the pruning schedule, which requires us to try a few different schedules until we understand which schedule works best. Below we define three PruningPolicy instances. The first two instances start operating at epoch 2 ( starting_epoch ), end at epoch 20 ( ending_epoch ), and operate once every epoch ( frequency ; as I explained above, Distiller's Pruning scheduling operates only at on_epoch_begin ). In between pruning operations, the pruned model is fine-tuned.
policies: - pruner: instance_name : l0_rnn_pruner starting_epoch: 2 ending_epoch: 20 frequency: 1 - pruner: instance_name : l1_rnn_pruner starting_epoch: 2 ending_epoch: 20 frequency: 1 - pruner: instance_name : embedding_pruner starting_epoch: 3 ending_epoch: 21 frequency: 1 We invoke the compression as follows: $ time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --compress=../../examples/agp-pruning/word_lang_model.LARGE_70.schedule_agp.yaml Table 1 above shows that we can make a negligible improvement when adding L2 regularization. I did some experimenting with the sparsity distribution between the layers and with the scheduling frequency, and noticed that the embedding layers are much less sensitive to pruning than the RNN cells. I didn't notice any difference between the RNN cells, but I also didn't invest in this exploration. A new 70% sparsity schedule prunes the RNNs only to 50% sparsity, but prunes the embedding to 85% sparsity, and achieves almost a 3-point improvement in the test perplexity results. We provide similar pruning schedules for the other compression rates.","title":"When are we compressing?"},{"location":"tutorial-lang_model.html#until-next-time","text":"This concludes the first part of the tutorial on pruning a PyTorch language model. In the next installment, I'll explain how we added an implementation of Baidu Research's Exploring Sparsity in Recurrent Neural Networks paper, and applied it to this language model. Geek On.","title":"Until next time"},{"location":"tutorial-struct_pruning.html","text":"Pruning Filters & Channels Introduction Channel and filter pruning are examples of structured-pruning which create compressed models that do not require special hardware to execute. This latter fact makes this form of structured pruning particularly interesting and popular. In networks that have serial data dependencies, it is pretty straight-forward to understand and define how to prune channels and filters. However, in more complex models, with parallel-data dependencies (paths) - such as ResNets (skip connections) and GoogLeNet (Inception layers) \u2013 things become increasingly complex and require a deeper understanding of the data flow in the model, in order to define the pruning schedule. This post explains channel and filter pruning, the challenges, and how to define a Distiller pruning schedule for these structures. The details of the implementation are left for a separate post. Before we dive into pruning, let\u2019s level-set on the terminology, because different people (and even research papers) do not always agree on the nomenclature. This reflects my understanding of the nomenclature, and therefore these are the names used in Distiller. I\u2019ll restrict this discussion to Convolution layers in CNNs, to contain the scope of the topic I\u2019ll be covering, although Distiller supports pruning of other structures such as matrix columns and rows. PyTorch describes torch.nn.Conv2d as applying \u201ca 2D convolution over an input signal composed of several input planes.\u201d We call each of these input planes a feature-map (or FM, for short). Another name is input channel , as in the R/G/B channels of an image. Some people refer to feature-maps as activations (i.e. the activation of neurons), although I think strictly speaking activations are the output of an activation layer that was fed a group of feature-maps.
Because it is very common, and because the use of an activation is orthogonal to our discussion, I will use activations to refer to the output of a Convolution layer (i.e. 3D stack of feature-maps). In the PyTorch documentation Convolution outputs have shape (N, C out , H out , W out ) where N is a batch size, C out denotes a number of output channels, H out is a height of output planes in pixels, and W out is width in pixels. We won\u2019t be paying much attention to the batch-size since it\u2019s not important to our discussion, so without loss of generality we can set N=1. I\u2019m also assuming the most common Convolutions having groups==1 . Convolution weights are 4D: (F, C, K, K) where F is the number of filters, C is the number of channels, and K is the kernel size (we can assume the kernel height and width are equal for simplicity). A kernel is a 2D matrix (K, K) that is part of a 3D feature detector. This feature detector is called a filter and it is basically a stack of 2D kernels . Each kernel is convolved with a 2D input channel (i.e. feature-map) so if there are C in channels in the input, then there are C in kernels in a filter (C == C in ). Each filter is convolved with the entire input to create a single output channel (i.e. feature-map). If there are C out output channels, then there are C out filters (F == C out ). Filter Pruning Filter pruning and channel pruning are very similar, and I\u2019ll expand on that similarity later on \u2013 but for now let\u2019s focus on filter pruning. In filter pruning we use some criterion to determine which filters are important and which are not. Researchers came up with all sorts of pruning criteria: the L1-magnitude of the filters (citation), the entropy of the activations (citation), and the classification accuracy reduction (citation) are just some examples. Disregarding how we chose the filters to prune, let\u2019s imagine that in the diagram below, we chose to prune (remove) the green and orange filters (the circle with the \u201c*\u201d designates a Convolution operation). Since we have two fewer filters operating on the input, we must have two fewer output feature-maps. So when we prune filters, besides changing the physical size of the weight tensors, we also need to reconfigure the immediate Convolution layer (change its out_channels ) and the following Convolution layer (change its in_channels ). And finally, because the next layer\u2019s input is now smaller (has fewer channels), we should also shrink the next layer\u2019s weights tensors, by removing the channels corresponding to the filters we pruned. We say that there is a data-dependency between the two Convolution layers. I didn\u2019t make any mention of the activation function that usually follows Convolution, because these functions are parameter-less and are not sensitive to the shape of their input. There are some other dependencies that Distiller resolves (such as Optimizer parameters tightly-coupled to the weights) that I won\u2019t discuss here, because they are implementation details. The scheduler YAML syntax for this example is pasted below. We use L1-norm ranking of weight filters, and the pruning-rate is set by the AGP algorithm (Automatic Gradual Pruning). The Convolution layers are conveniently named conv1 and conv2 in this example.
pruners: example_pruner: class: L1RankedStructureParameterPruner_AGP initial_sparsity : 0.10 final_sparsity: 0.50 group_type: Filters weights: [module.conv1.weight] Now let\u2019s add a Batch Normalization layer between the two convolutions: The Batch Normalization layer is parameterized by a couple of tensors that contain information per input-channel (i.e. scale and shift). Because our Convolution produces fewer output FMs, and these are the input to the Batch Normalization layer, we also need to reconfigure the Batch Normalization layer. And we also need to physically shrink the Batch Normalization layer\u2019s scale and shift tensors, which are coefficients in the BN input transformation. Moreover, the scale and shift coefficients that we remove from the tensors, must correspond to the filters (or output feature-maps channels) that we removed from the Convolution weight tensors. This small nuance will prove to be a large pain, but we\u2019ll get to that in later examples. The presence of a Batch Normalization layer in the example above is transparent to us, and in fact, the YAML schedule does not change. Distiller detects the presence of Batch Normalization layers and adjusts their parameters automatically. Let\u2019s look at another example, with non-serial data-dependencies. Here, the output of conv1 is the input for conv2 and conv3 . This is an example of parallel data-dependency, since both conv2 and conv3 depend on conv1 . Note that the Distiller YAML schedule is unchanged from the previous two examples, since we are still only explicitly pruning the weight filters of conv1 . The weight channels of conv2 and conv3 are pruned implicitly by Distiller in a process called \u201cThinning\u201d (on which I will expand in a different post). Next, let\u2019s look at another example also involving three Convolutions, but this time we want to prune the filters of two convolutional layers, whose outputs are element-wise-summed and fed into a third Convolution. In this example conv3 is dependent on both conv1 and conv2 , and there are two implications to this dependency. The first, and more obvious implication, is that we need to prune the same number of filters from both conv1 and conv2 . Since we apply element-wise addition on the outputs of conv1 and conv2 , they must have the same shape - and they can only have the same shape if conv1 and conv2 prune the same number of filters. The second implication of this triangular data-dependency is that both conv1 and conv2 must prune the same filters! Let\u2019s imagine for a moment, that we ignore this second constraint. The diagram below illustrates the dilemma that arises: how should we prune the channels of the weights of conv3 ? Obviously, we can\u2019t. We must apply the second constraint \u2013 and that means that we now need to be proactive: we need to decide whether to prune conv1 and conv2 according to the filter-pruning choices of conv1 or of conv2 . The diagram below illustrates the pruning scheme after deciding to follow the pruning choices of conv1 . The YAML compression schedule syntax needs to be able to express the two dependencies (or constraints) discussed above. First we need to tell the Filter Pruner that there is a dependency of type Leader . This means that all of the tensors listed in the weights field are pruned together, to the same extent at each iteration, and that to prune the filters we will use the pruning decisions of the first tensor listed.
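Before looking at the Leader example just below, here is a toy sketch of the shape bookkeeping that filter pruning implies when conv1 feeds conv2. It is illustrative only, and is not Distiller's "Thinning" implementation:

import torch

# conv1 feeds conv2; suppose we decided to remove two of conv1's eight filters.
conv1 = torch.nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3)
conv2 = torch.nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3)

keep = [0, 1, 2, 3, 4, 5]                        # filters 6 and 7 are pruned
conv1.weight.data = conv1.weight.data[keep]      # (8,3,3,3) -> (6,3,3,3)
conv1.bias.data = conv1.bias.data[keep]
conv1.out_channels = len(keep)
# conv2 must lose the matching *input channels* of its weight tensor:
conv2.weight.data = conv2.weight.data[:, keep]   # (16,8,3,3) -> (16,6,3,3)
conv2.in_channels = len(keep)

x = torch.randn(1, 3, 32, 32)
print(conv2(conv1(x)).shape)                     # torch.Size([1, 16, 28, 28])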
In the example below module.conv1.weight and module.conv2.weight are pruned together according to the pruning choices for module.conv1.weight . pruners: example_pruner: class: L1RankedStructureParameterPruner_AGP initial_sparsity : 0.10 final_sparsity: 0.50 group_type: Filters group_dependency: Leader weights: [module.conv1.weight, module.conv2.weight] When we turn to filter-pruning ResNets we see some pretty long dependency chains because of the skip-connections. If you don\u2019t pay attention, you can easily under-specify (or mis-specify) dependency chains and Distiller will exit with an exception. The exception does not explain the specification error and this needs to be improved. Channel Pruning Channel pruning is very similar to Filter pruning with all the details of dependencies reversed. Look again at example #1, but this time imagine that we\u2019ve changed our schedule to prune the channels of module.conv2.weight . pruners: example_pruner: class: L1RankedStructureParameterPruner_AGP initial_sparsity : 0.10 final_sparsity: 0.50 group_type: Channels weights: [module.conv2.weight] As the diagram shows, conv1 is now dependent on conv2 and its weight filters will be implicitly pruned according to the channels removed from the weights of conv2 . Geek On.","title":"Pruning Filters and Channels"},{"location":"tutorial-struct_pruning.html#pruning-filters-channels","text":"","title":"Pruning Filters &amp; Channels"},{"location":"tutorial-struct_pruning.html#introduction","text":"Channel and filter pruning are examples of structured-pruning which create compressed models that do not require special hardware to execute. This latter fact makes this form of structured pruning particularly interesting and popular. In networks that have serial data dependencies, it is pretty straight-forward to understand and define how to prune channels and filters. However, in more complex models, with parallel-data dependencies (paths) - such as ResNets (skip connections) and GoogLeNet (Inception layers) \u2013 things become increasingly complex and require a deeper understanding of the data flow in the model, in order to define the pruning schedule. This post explains channel and filter pruning, the challenges, and how to define a Distiller pruning schedule for these structures. The details of the implementation are left for a separate post. Before we dive into pruning, let\u2019s level-set on the terminology, because different people (and even research papers) do not always agree on the nomenclature. This reflects my understanding of the nomenclature, and therefore these are the names used in Distiller. I\u2019ll restrict this discussion to Convolution layers in CNNs, to contain the scope of the topic I\u2019ll be covering, although Distiller supports pruning of other structures such as matrix columns and rows. PyTorch describes torch.nn.Conv2d as applying \u201ca 2D convolution over an input signal composed of several input planes.\u201d We call each of these input planes a feature-map (or FM, for short). Another name is input channel , as in the R/G/B channels of an image. Some people refer to feature-maps as activations (i.e. the activation of neurons), although I think strictly speaking activations are the output of an activation layer that was fed a group of feature-maps. Because it is very common, and because the use of an activation is orthogonal to our discussion, I will use activations to refer to the output of a Convolution layer (i.e. 3D stack of feature-maps).
In the PyTorch documentation Convolution outputs have shape (N, C out , H out , W out ) where N is a batch size, C out denotes a number of output channels, H out is a height of output planes in pixels, and W out is width in pixels. We won\u2019t be paying much attention to the batch-size since it\u2019s not important to our discussion, so without loss of generality we can set N=1. I\u2019m also assuming the most common Convolutions having groups==1 . Convolution weights are 4D: (F, C, K, K) where F is the number of filters, C is the number of channels, and K is the kernel size (we can assume the kernel height and width are equal for simplicity). A kernel is a 2D matrix (K, K) that is part of a 3D feature detector. This feature detector is called a filter and it is basically a stack of 2D kernels . Each kernel is convolved with a 2D input channel (i.e. feature-map) so if there are C in channels in the input, then there are C in kernels in a filter (C == C in ). Each filter is convolved with the entire input to create a single output channel (i.e. feature-map). If there are C out output channels, then there are C out filters (F == C out ).","title":"Introduction"},{"location":"tutorial-struct_pruning.html#filter-pruning","text":"Filter pruning and channel pruning are very similar, and I\u2019ll expand on that similarity later on \u2013 but for now let\u2019s focus on filter pruning. In filter pruning we use some criterion to determine which filters are important and which are not. Researchers came up with all sorts of pruning criteria: the L1-magnitude of the filters (citation), the entropy of the activations (citation), and the classification accuracy reduction (citation) are just some examples. Disregarding how we chose the filters to prune, let\u2019s imagine that in the diagram below, we chose to prune (remove) the green and orange filters (the circle with the \u201c*\u201d designates a Convolution operation). Since we have two fewer filters operating on the input, we must have two fewer output feature-maps. So when we prune filters, besides changing the physical size of the weight tensors, we also need to reconfigure the immediate Convolution layer (change its out_channels ) and the following Convolution layer (change its in_channels ). And finally, because the next layer\u2019s input is now smaller (has fewer channels), we should also shrink the next layer\u2019s weights tensors, by removing the channels corresponding to the filters we pruned. We say that there is a data-dependency between the two Convolution layers. I didn\u2019t make any mention of the activation function that usually follows Convolution, because these functions are parameter-less and are not sensitive to the shape of their input. There are some other dependencies that Distiller resolves (such as Optimizer parameters tightly-coupled to the weights) that I won\u2019t discuss here, because they are implementation details. The scheduler YAML syntax for this example is pasted below. We use L1-norm ranking of weight filters, and the pruning-rate is set by the AGP algorithm (Automatic Gradual Pruning). The Convolution layers are conveniently named conv1 and conv2 in this example.
pruners: example_pruner: class: L1RankedStructureParameterPruner_AGP initial_sparsity : 0.10 final_sparsity: 0.50 group_type: Filters weights: [module.conv1.weight] Now let\u2019s add a Batch Normalization layer between the two convolutions: The Batch Normalization layer is parameterized by a couple of tensors that contain information per input-channel (i.e. scale and shift). Because our Convolution produces fewer output FMs, and these are the input to the Batch Normalization layer, we also need to reconfigure the Batch Normalization layer. And we also need to physically shrink the Batch Normalization layer\u2019s scale and shift tensors, which are coefficients in the BN input transformation. Moreover, the scale and shift coefficients that we remove from the tensors, must correspond to the filters (or output feature-maps channels) that we removed from the Convolution weight tensors. This small nuance will prove to be a large pain, but we\u2019ll get to that in later examples. The presence of a Batch Normalization layer in the example above is transparent to us, and in fact, the YAML schedule does not change. Distiller detects the presence of Batch Normalization layers and adjusts their parameters automatically. Let\u2019s look at another example, with non-serial data-dependencies. Here, the output of conv1 is the input for conv2 and conv3 . This is an example of parallel data-dependency, since both conv2 and conv3 depend on conv1 . Note that the Distiller YAML schedule is unchanged from the previous two examples, since we are still only explicitly pruning the weight filters of conv1 . The weight channels of conv2 and conv3 are pruned implicitly by Distiller in a process called \u201cThinning\u201d (on which I will expand in a different post). Next, let\u2019s look at another example also involving three Convolutions, but this time we want to prune the filters of two convolutional layers, whose outputs are element-wise-summed and fed into a third Convolution. In this example conv3 is dependent on both conv1 and conv2 , and there are two implications to this dependency. The first, and more obvious implication, is that we need to prune the same number of filters from both conv1 and conv2 . Since we apply element-wise addition on the outputs of conv1 and conv2 , they must have the same shape - and they can only have the same shape if conv1 and conv2 prune the same number of filters. The second implication of this triangular data-dependency is that both conv1 and conv2 must prune the same filters! Let\u2019s imagine for a moment, that we ignore this second constraint. The diagram below illustrates the dilemma that arises: how should we prune the channels of the weights of conv3 ? Obviously, we can\u2019t. We must apply the second constraint \u2013 and that means that we now need to be proactive: we need to decide whether to prune conv1 and conv2 according to the filter-pruning choices of conv1 or of conv2 . The diagram below illustrates the pruning scheme after deciding to follow the pruning choices of conv1 . The YAML compression schedule syntax needs to be able to express the two dependencies (or constraints) discussed above. First we need to tell the Filter Pruner that there is a dependency of type Leader . This means that all of the tensors listed in the weights field are pruned together, to the same extent at each iteration, and that to prune the filters we will use the pruning decisions of the first tensor listed.
In the example below module.conv1.weight and module.conv2.weight are pruned together according to the pruning choices for module.conv1.weight . pruners: example_pruner: class: L1RankedStructureParameterPruner_AGP initial_sparsity : 0.10 final_sparsity: 0.50 group_type: Filters group_dependency: Leader weights: [module.conv1.weight, module.conv2.weight] When we turn to filter-pruning ResNets we see some pretty long dependency chains because of the skip-connections. If you don\u2019t pay attention, you can easily under-specify (or mis-specify) dependency chains and Distiller will exit with an exception. The exception does not explain the specification error and this needs to be improved.","title":"Filter Pruning"},{"location":"tutorial-struct_pruning.html#channel-pruning","text":"Channel pruning is very similar to Filter pruning with all the details of dependencies reversed. Look again at example #1, but this time imagine that we\u2019ve changed our schedule to prune the channels of module.conv2.weight . pruners: example_pruner: class: L1RankedStructureParameterPruner_AGP initial_sparsity : 0.10 final_sparsity: 0.50 group_type: Channels weights: [module.conv2.weight] As the diagram shows, conv1 is now dependent on conv2 and its weight filters will be implicitly pruned according to the channels removed from the weights of conv2 . Geek On.","title":"Channel Pruning"},{"location":"usage.html","text":"Using the sample application The Distiller repository contains a sample application, distiller/examples/classifier_compression/compress_classifier.py , and a set of scheduling files which demonstrate Distiller's features. Following is a brief discussion of how to use this application and the accompanying schedules. You might also want to refer to the following resources: An explanation of the scheduler file format. An in-depth discussion of how we used these schedule files to implement several state-of-the-art DNN compression research papers. The sample application supports various features for compression of image classification DNNs, and gives an example of how to integrate distiller in your own application. The code is documented and should be considered the best source of documentation, but we provide some elaboration here. This diagram shows where compress_classifier.py fits in the compression workflow, and how we integrate the Jupyter notebooks as part of our research work. 
Command line arguments To get help on the command line arguments, invoke: $ python3 compress_classifier.py --help For example: $ time python3 compress_classifier.py -a alexnet --lr 0.005 -p 50 ../../../data.imagenet -j 44 --epochs 90 --pretrained --compress=../sensitivity-pruning/alexnet.schedule_sensitivity.yaml Parameters: +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean | |----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0 | features.module.0.weight | (64, 3, 11, 11) | 23232 | 13411 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 42.27359 | 0.14391 | -0.00002 | 0.08805 | | 1 | features.module.3.weight | (192, 64, 5, 5) | 307200 | 115560 | 0.00000 | 0.00000 | 0.00000 | 1.91243 | 0.00000 | 62.38281 | 0.04703 | -0.00250 | 0.02289 | | 2 | features.module.6.weight | (384, 192, 3, 3) | 663552 | 256565 | 0.00000 | 0.00000 | 0.00000 | 6.18490 | 0.00000 | 61.33445 | 0.03354 | -0.00184 | 0.01803 | | 3 | features.module.8.weight | (256, 384, 3, 3) | 884736 | 315065 | 0.00000 | 0.00000 | 0.00000 | 6.96411 | 0.00000 | 64.38881 | 0.02646 | -0.00168 | 0.01422 | | 4 | features.module.10.weight | (256, 256, 3, 3) | 589824 | 186938 | 0.00000 | 0.00000 | 0.00000 | 15.49225 | 0.00000 | 68.30614 | 0.02714 | -0.00246 | 0.01409 | | 5 | classifier.1.weight | (4096, 9216) | 37748736 | 3398881 | 0.00000 | 0.21973 | 0.00000 | 0.21973 | 0.00000 | 90.99604 | 0.00589 | -0.00020 | 0.00168 | | 6 | classifier.4.weight | (4096, 4096) | 16777216 | 1782769 | 0.21973 | 3.46680 | 0.00000 | 3.46680 | 0.00000 | 89.37387 | 0.00849 | -0.00066 | 0.00263 | | 7 | classifier.6.weight | (1000, 4096) | 4096000 | 994738 | 3.36914 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 75.71440 | 0.01718 | 0.00030 | 0.00778 | | 8 | Total sparsity: | - | 61090496 | 7063928 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 88.43694 | 0.00000 | 0.00000 | 0.00000 | +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ 2018-04-04 21:30:52,499 - Total sparsity: 88.44 2018-04-04 21:30:52,499 - --- validate (epoch=89)----------- 2018-04-04 21:30:52,499 - 128116 samples (256 per mini-batch) 2018-04-04 21:31:04,646 - Epoch: [89][ 50/ 500] Loss 2.175988 Top1 51.289063 Top5 74.023438 2018-04-04 21:31:06,427 - Epoch: [89][ 100/ 500] Loss 2.171564 Top1 51.175781 Top5 74.308594 2018-04-04 21:31:11,432 - Epoch: [89][ 150/ 500] Loss 2.159347 Top1 51.546875 Top5 74.473958 2018-04-04 21:31:14,364 - Epoch: [89][ 200/ 500] Loss 2.156857 Top1 51.585938 Top5 74.568359 2018-04-04 21:31:18,381 - Epoch: [89][ 250/ 500] Loss 2.152790 Top1 51.707813 Top5 74.681250 2018-04-04 21:31:22,195 - Epoch: [89][ 300/ 500] Loss 2.149962 Top1 51.791667 Top5 74.755208 2018-04-04 21:31:25,508 - Epoch: [89][ 350/ 500] Loss 2.150936 Top1 51.827009 Top5 74.767857 2018-04-04 21:31:29,538 - Epoch: [89][ 400/ 500] Loss 2.150853 Top1 51.781250 Top5 74.763672 2018-04-04 21:31:32,842 - Epoch: [89][ 450/ 500] Loss 2.150156 Top1 51.828125 Top5 74.821181 2018-04-04 21:31:35,338 - Epoch: [89][ 500/ 500] Loss 2.150417 Top1 51.833594 Top5 74.817187 
2018-04-04 21:31:35,357 - ==> Top1: 51.838 Top5: 74.817 Loss: 2.150 2018-04-04 21:31:35,364 - Saving checkpoint 2018-04-04 21:31:39,251 - --- test --------------------- 2018-04-04 21:31:39,252 - 50000 samples (256 per mini-batch) 2018-04-04 21:31:51,512 - Test: [ 50/ 195] Loss 1.487607 Top1 63.273438 Top5 85.695312 2018-04-04 21:31:55,015 - Test: [ 100/ 195] Loss 1.638043 Top1 60.636719 Top5 83.664062 2018-04-04 21:31:58,732 - Test: [ 150/ 195] Loss 1.833214 Top1 57.619792 Top5 80.447917 2018-04-04 21:32:01,274 - ==> Top1: 56.606 Top5: 79.446 Loss: 1.893 Let's look at the command line again: $ time python3 compress_classifier.py -a alexnet --lr 0.005 -p 50 ../../../data.imagenet -j 44 --epochs 90 --pretrained --compress=../sensitivity-pruning/alexnet.schedule_sensitivity.yaml In this example, we prune a TorchVision pre-trained AlexNet network, using the following configuration: Learning-rate of 0.005 Print progress every 50 mini-batches. Use 44 worker threads to load data (make sure to use something suitable for your machine). Run for 90 epochs. Torchvision's pre-trained models did not store the epoch metadata, so pruning starts at epoch 0. When you train and prune your own networks, the last training epoch is saved as metadata with the model. Therefore, when you load such models, the first epoch is not 0, but it is the last training epoch. The pruning schedule is provided in alexnet.schedule_sensitivity.yaml Log files are written to directory logs . Examples Distiller comes with several example schedules which can be used together with compress_classifier.py . These example schedule (YAML) files contain the command line used to invoke the schedule (so that you can easily recreate the results in your environment), together with the results of the pruning or regularization. The results usually contain a table showing the sparsity of each of the model parameters, together with the validation and test top1, top5 and loss scores. For more details on the example schedules, you can refer to the coverage of the Model Zoo . examples/agp-pruning : Automated Gradual Pruning (AGP) on MobileNet and ResNet18 (ImageNet dataset) examples/hybrid : AlexNet AGP with 2D (kernel) regularization (ImageNet dataset) AlexNet sensitivity pruning with 2D regularization examples/network_slimming : ResNet20 Network Slimming (this is work-in-progress) examples/pruning_filters_for_efficient_convnets : ResNet56 baseline training (CIFAR10 dataset) ResNet56 filter removal using filter ranking examples/sensitivity_analysis : Element-wise pruning sensitivity-analysis: AlexNet (ImageNet) MobileNet (ImageNet) ResNet18 (ImageNet) ResNet20 (CIFAR10) ResNet34 (ImageNet) Filter-wise pruning sensitivity-analysis: ResNet20 (CIFAR10) ResNet56 (CIFAR10) examples/sensitivity-pruning : AlexNet sensitivity pruning with Iterative Pruning AlexNet sensitivity pruning with One-Shot Pruning examples/ssl : ResNet20 baseline training (CIFAR10 dataset) Structured Sparsity Learning (SSL) with layer removal on ResNet20 SSL with channel removal on ResNet20 examples/quantization : AlexNet w. Batch-Norm (base FP32 + DoReFa) Pre-activation ResNet20 on CIFAR10 (base FP32 + DoReFa) Pre-activation ResNet18 on ImageNet (base FP32 + DoReFa) Experiment reproducibility Experiment reproducibility is sometimes important. Pete Warden recently expounded on this in his blog . 
PyTorch's support for deterministic execution requires us to use only one thread for loading data (otherwise the multi-threaded execution of the data loaders can create random ordering and change the results), and to set the seed of the CPU and GPU PRNGs. Using the --deterministic command-line flag and setting j=1 will produce reproducible results (for the same PyTorch version). Performing pruning sensitivity analysis Distiller supports element-wise and filter-wise pruning sensitivity analysis. In both cases, L1-norm is used to rank which elements or filters to prune. For example, when running filter-pruning sensitivity analysis, the L1-norms of the filters of each layer's weights tensor are calculated, and the bottom x% are set to zero. The analysis process is quite long, because currently we use the entire test dataset to assess the accuracy performance at each pruning level of each weights tensor. Using a small dataset for this would save much time and we plan to assess whether this provides sufficiently accurate results. Results are output as a CSV file ( sensitivity.csv ) and PNG file ( sensitivity.png ). The implementation is in distiller/sensitivity.py and it contains further details about the process and the format of the CSV file. The example below performs element-wise pruning sensitivity analysis on ResNet20 for CIFAR10: $ python3 compress_classifier.py -a resnet20_cifar ../../../data.cifar10/ -j=1 --resume=../cifar10/resnet20/checkpoint_trained_dense.pth.tar --sense=element The sense command-line argument can be set to either element or filter , depending on the type of analysis you want done. There is also a Jupyter notebook with example invocations, outputs and explanations. Post-Training Quantization The following example quantizes ResNet18 for ImageNet: $ python3 compress_classifier.py -a resnet18 ../../../data.imagenet --pretrained --quantize-eval --evaluate See here for more details on how to invoke post-training quantization from the command line. A checkpoint with the quantized model will be dumped in the run directory. It will contain the quantized model parameters (the data type will still be FP32, but the values will be integers). The calculated quantization parameters (scale and zero-point) are also stored in each quantized layer. For more examples of post-training quantization see here . Summaries You can use the sample compression application to generate model summary reports, such as the attributes and compute summary report (see screen capture below). You can log sparsity statistics (written to console and CSV file), performance, optimizer and model information, and also create a PNG image of the DNN. Creating a PNG image is an experimental feature (it relies on features which are not available on PyTorch 3.1 and that we hope will be available in PyTorch's next release), so to use it you will need to compile the PyTorch master branch, and hope for the best ;-). 
$ python3 compress_classifier.py --resume=../ssl/checkpoints/checkpoint_trained_ch_regularized_dense.pth.tar -a=resnet20_cifar ../../../data.cifar10 --summary=compute Generates: +----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------+ | | Name | Type | Attrs | IFM | IFM volume | OFM | OFM volume | Weights volume | MACs | |----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------| | 0 | module.conv1 | Conv2d | k=(3, 3) | (1, 3, 32, 32) | 3072 | (1, 16, 32, 32) | 16384 | 432 | 442368 | | 1 | module.layer1.0.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 2 | module.layer1.0.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 3 | module.layer1.1.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 4 | module.layer1.1.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 5 | module.layer1.2.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 6 | module.layer1.2.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 7 | module.layer2.0.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 32, 16, 16) | 8192 | 4608 | 1179648 | | 8 | module.layer2.0.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 9 | module.layer2.0.downsample.0 | Conv2d | k=(1, 1) | (1, 16, 32, 32) | 16384 | (1, 32, 16, 16) | 8192 | 512 | 131072 | | 10 | module.layer2.1.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 11 | module.layer2.1.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 12 | module.layer2.2.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 13 | module.layer2.2.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 14 | module.layer3.0.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 64, 8, 8) | 4096 | 18432 | 1179648 | | 15 | module.layer3.0.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 16 | module.layer3.0.downsample.0 | Conv2d | k=(1, 1) | (1, 32, 16, 16) | 8192 | (1, 64, 8, 8) | 4096 | 2048 | 131072 | | 17 | module.layer3.1.conv1 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 18 | module.layer3.1.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 19 | module.layer3.2.conv1 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 20 | module.layer3.2.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 21 | module.fc | Linear | | (1, 64) | 64 | (1, 10) | 10 | 640 | 640 | +----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------+ Total MACs: 40,813,184 Using TensorBoard Google's TensorBoard is an excellent tool for visualizing the progress of DNN training. 
Distiller's logger supports writing performance indicators and parameter statistics in a file format that can be read by TensorBoard (Distiller uses TensorFlow's APIs in order to do this, which is why Distiller requires the installation of TensorFlow). To view the graphs, invoke the TensorBoard server. For example: $ tensorboard --logdir=logs Distiller's setup (requirements.txt) installs TensorFlow for CPU. If you want a different installation, please follow the TensorFlow installation instructions . Collecting activations statistics In CNNs with ReLU layers, ReLU activations (feature-maps) also exhibit a nice level of sparsity (50-60% sparsity is typical). You can collect activation statistics using the --act-stats command-line flag. For example: $ python3 compress_classifier.py -a=resnet56_cifar -p=50 ../../../data.cifar10 --resume=checkpoint.resnet56_cifar_baseline.pth.tar --act-stats=test -e The test parameter indicates that, in this example, we want to collect activation statistics during the test phase. Note that we also used the -e command-line argument to indicate that we want to run a test phase. The other two legal parameter values are train and valid , which collect activation statistics during the training and validation phases, respectively. Collectors and their collaterals An instance of a subclass of ActivationStatsCollector can be used to collect activation statistics. Currently, ActivationStatsCollector has two types of subclasses: SummaryActivationStatsCollector and RecordsActivationStatsCollector . Instances of SummaryActivationStatsCollector compute the mean of some statistic of the activation. It is rather light-weight and quicker than collecting a record per activation. The statistic function is configured in the constructor. In the sample compression application, compress_classifier.py , we create a dictionary of collectors. For example: SummaryActivationStatsCollector(model, \"sparsity\", lambda t: 100 * distiller.utils.sparsity(t)) The lambda expression is invoked per activation encountered during forward passes, and the value it returns (in this case, the sparsity of the activation tensors, multiplied by 100) is stored in module.sparsity ( \"sparsity\" is this collector's name). To access the statistics, you can invoke collector.value() , or you can access each module's data directly. Another type of collector is RecordsActivationStatsCollector which computes a hard-coded set of activations statistics and collects a record per activation . For obvious reasons, this is slower than instances of SummaryActivationStatsCollector . ActivationStatsCollector defaults to collecting activations statistics only on the output activations of ReLU layers, but we can choose any layer type we want. In the example below we collect statistics from outputs of torch.nn.Conv2d layers. RecordsActivationStatsCollector(model, classes=[torch.nn.Conv2d]) Collectors can write their data to Excel workbooks (which are named using the collector's name) by invoking collector.to_xlsx(path_to_workbook) . In compress_classifier.py we currently create four different collectors which you can selectively disable. You can also add other statistics collectors and use a different function to compute your new statistic. 
collectors = missingdict({ \"sparsity\": SummaryActivationStatsCollector(model, \"sparsity\", lambda t: 100 * distiller.utils.sparsity(t)), \"l1_channels\": SummaryActivationStatsCollector(model, \"l1_channels\", distiller.utils.activation_channels_l1), \"apoz_channels\": SummaryActivationStatsCollector(model, \"apoz_channels\", distiller.utils.activation_channels_apoz), \"records\": RecordsActivationStatsCollector(model, classes=[torch.nn.Conv2d])}) By default, these Collectors write their data to files in the active log directory. You can use a utility function, distiller.log_activation_statsitics , to log the data of an ActivationStatsCollector instance to one of the backend-loggers. For example, the code below logs the \"sparsity\" collector to a TensorBoard log file. distiller.log_activation_statsitics(epoch, \"train\", loggers=[tflogger], collector=collectors[\"sparsity\"]) Caveats Distiller collects activations statistics using PyTorch's forward-hooks mechanism. Collectors iteratively register the modules' forward-hooks, and collectors are called during the forward traversal and get exposed to activation data. Registering for forward callbacks is performed like this: module.register_forward_hook This makes apparent two limitations of this mechanism: We can only register on PyTorch modules. This means that we can't register on the forward hook of functionals such as torch.nn.functional.relu and torch.nn.functional.max_pool2d . Therefore, you may need to replace functionals with their module alternatives. For example: class MadeUpNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 6, 5) def forward(self, x): x = F.relu(self.conv1(x)) return x Can be changed to: class MadeUpNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.relu(self.conv1(x)) return x We can only use a module instance once in our models. If we use the same module several times, then we can't determine which node in the graph has invoked the callback, because the PyTorch callback signature def hook(module, input, output) doesn't provide enough contextual information. 
TorchVision's ResNet is an example of a model that uses the same instance of nn.ReLU multiple times: class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) # <================ out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) # <================ return out In Distiller we changed ResNet to use multiple instances of nn.ReLU, and each instance is used only once: class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu1 = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.relu2 = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu1(out) # <================ out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu2(out) # <================ return out Using the Jupyter notebooks The Jupyter notebooks contain many examples of how to use the statistics summaries generated by Distiller. They are explained in a separate page. Generating this documentation Install mkdocs and the required packages by executing: $ pip3 install -r doc-requirements.txt To build the project documentation run: $ cd distiller/docs-src $ mkdocs build --clean This will create a folder named 'site' which contains the documentation website. Open distiller/docs/site/index.html to view the documentation home page.","title":"Usage"},{"location":"usage.html#using-the-sample-application","text":"The Distiller repository contains a sample application, distiller/examples/classifier_compression/compress_classifier.py , and a set of scheduling files which demonstrate Distiller's features. Following is a brief discussion of how to use this application and the accompanying schedules. You might also want to refer to the following resources: An explanation of the scheduler file format. An in-depth discussion of how we used these schedule files to implement several state-of-the-art DNN compression research papers. The sample application supports various features for compression of image classification DNNs, and gives an example of how to integrate distiller in your own application. The code is documented and should be considered the best source of documentation, but we provide some elaboration here. 
This diagram shows where compress_classifier.py fits in the compression workflow, and how we integrate the Jupyter notebooks as part of our research work.","title":"Using the sample application"},{"location":"usage.html#command-line-arguments","text":"To get help on the command line arguments, invoke: $ python3 compress_classifier.py --help For example: $ time python3 compress_classifier.py -a alexnet --lr 0.005 -p 50 ../../../data.imagenet -j 44 --epochs 90 --pretrained --compress=../sensitivity-pruning/alexnet.schedule_sensitivity.yaml Parameters: +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ | | Name | Shape | NNZ (dense) | NNZ (sparse) | Cols (%) | Rows (%) | Ch (%) | 2D (%) | 3D (%) | Fine (%) | Std | Mean | Abs-Mean | |----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------| | 0 | features.module.0.weight | (64, 3, 11, 11) | 23232 | 13411 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 42.27359 | 0.14391 | -0.00002 | 0.08805 | | 1 | features.module.3.weight | (192, 64, 5, 5) | 307200 | 115560 | 0.00000 | 0.00000 | 0.00000 | 1.91243 | 0.00000 | 62.38281 | 0.04703 | -0.00250 | 0.02289 | | 2 | features.module.6.weight | (384, 192, 3, 3) | 663552 | 256565 | 0.00000 | 0.00000 | 0.00000 | 6.18490 | 0.00000 | 61.33445 | 0.03354 | -0.00184 | 0.01803 | | 3 | features.module.8.weight | (256, 384, 3, 3) | 884736 | 315065 | 0.00000 | 0.00000 | 0.00000 | 6.96411 | 0.00000 | 64.38881 | 0.02646 | -0.00168 | 0.01422 | | 4 | features.module.10.weight | (256, 256, 3, 3) | 589824 | 186938 | 0.00000 | 0.00000 | 0.00000 | 15.49225 | 0.00000 | 68.30614 | 0.02714 | -0.00246 | 0.01409 | | 5 | classifier.1.weight | (4096, 9216) | 37748736 | 3398881 | 0.00000 | 0.21973 | 0.00000 | 0.21973 | 0.00000 | 90.99604 | 0.00589 | -0.00020 | 0.00168 | | 6 | classifier.4.weight | (4096, 4096) | 16777216 | 1782769 | 0.21973 | 3.46680 | 0.00000 | 3.46680 | 0.00000 | 89.37387 | 0.00849 | -0.00066 | 0.00263 | | 7 | classifier.6.weight | (1000, 4096) | 4096000 | 994738 | 3.36914 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 75.71440 | 0.01718 | 0.00030 | 0.00778 | | 8 | Total sparsity: | - | 61090496 | 7063928 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 0.00000 | 88.43694 | 0.00000 | 0.00000 | 0.00000 | +----+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+ 2018-04-04 21:30:52,499 - Total sparsity: 88.44 2018-04-04 21:30:52,499 - --- validate (epoch=89)----------- 2018-04-04 21:30:52,499 - 128116 samples (256 per mini-batch) 2018-04-04 21:31:04,646 - Epoch: [89][ 50/ 500] Loss 2.175988 Top1 51.289063 Top5 74.023438 2018-04-04 21:31:06,427 - Epoch: [89][ 100/ 500] Loss 2.171564 Top1 51.175781 Top5 74.308594 2018-04-04 21:31:11,432 - Epoch: [89][ 150/ 500] Loss 2.159347 Top1 51.546875 Top5 74.473958 2018-04-04 21:31:14,364 - Epoch: [89][ 200/ 500] Loss 2.156857 Top1 51.585938 Top5 74.568359 2018-04-04 21:31:18,381 - Epoch: [89][ 250/ 500] Loss 2.152790 Top1 51.707813 Top5 74.681250 2018-04-04 21:31:22,195 - Epoch: [89][ 300/ 500] Loss 2.149962 Top1 51.791667 Top5 74.755208 2018-04-04 21:31:25,508 - Epoch: [89][ 350/ 500] Loss 2.150936 Top1 51.827009 Top5 74.767857 2018-04-04 21:31:29,538 - Epoch: [89][ 400/ 500] 
Loss 2.150853 Top1 51.781250 Top5 74.763672 2018-04-04 21:31:32,842 - Epoch: [89][ 450/ 500] Loss 2.150156 Top1 51.828125 Top5 74.821181 2018-04-04 21:31:35,338 - Epoch: [89][ 500/ 500] Loss 2.150417 Top1 51.833594 Top5 74.817187 2018-04-04 21:31:35,357 - ==> Top1: 51.838 Top5: 74.817 Loss: 2.150 2018-04-04 21:31:35,364 - Saving checkpoint 2018-04-04 21:31:39,251 - --- test --------------------- 2018-04-04 21:31:39,252 - 50000 samples (256 per mini-batch) 2018-04-04 21:31:51,512 - Test: [ 50/ 195] Loss 1.487607 Top1 63.273438 Top5 85.695312 2018-04-04 21:31:55,015 - Test: [ 100/ 195] Loss 1.638043 Top1 60.636719 Top5 83.664062 2018-04-04 21:31:58,732 - Test: [ 150/ 195] Loss 1.833214 Top1 57.619792 Top5 80.447917 2018-04-04 21:32:01,274 - ==> Top1: 56.606 Top5: 79.446 Loss: 1.893 Let's look at the command line again: $ time python3 compress_classifier.py -a alexnet --lr 0.005 -p 50 ../../../data.imagenet -j 44 --epochs 90 --pretrained --compress=../sensitivity-pruning/alexnet.schedule_sensitivity.yaml In this example, we prune a TorchVision pre-trained AlexNet network, using the following configuration: Learning-rate of 0.005 Print progress every 50 mini-batches. Use 44 worker threads to load data (make sure to use something suitable for your machine). Run for 90 epochs. Torchvision's pre-trained models did not store the epoch metadata, so pruning starts at epoch 0. When you train and prune your own networks, the last training epoch is saved as metadata with the model. Therefore, when you load such models, the first epoch is not 0, but it is the last training epoch. The pruning schedule is provided in alexnet.schedule_sensitivity.yaml Log files are written to directory logs .","title":"Command line arguments"},{"location":"usage.html#examples","text":"Distiller comes with several example schedules which can be used together with compress_classifier.py . These example schedule (YAML) files contain the command line used to invoke the schedule (so that you can easily recreate the results in your environment), together with the results of the pruning or regularization. The results usually contain a table showing the sparsity of each of the model parameters, together with the validation and test top1, top5 and loss scores. For more details on the example schedules, you can refer to the coverage of the Model Zoo . examples/agp-pruning : Automated Gradual Pruning (AGP) on MobileNet and ResNet18 (ImageNet dataset) examples/hybrid : AlexNet AGP with 2D (kernel) regularization (ImageNet dataset) AlexNet sensitivity pruning with 2D regularization examples/network_slimming : ResNet20 Network Slimming (this is work-in-progress) examples/pruning_filters_for_efficient_convnets : ResNet56 baseline training (CIFAR10 dataset) ResNet56 filter removal using filter ranking examples/sensitivity_analysis : Element-wise pruning sensitivity-analysis: AlexNet (ImageNet) MobileNet (ImageNet) ResNet18 (ImageNet) ResNet20 (CIFAR10) ResNet34 (ImageNet) Filter-wise pruning sensitivity-analysis: ResNet20 (CIFAR10) ResNet56 (CIFAR10) examples/sensitivity-pruning : AlexNet sensitivity pruning with Iterative Pruning AlexNet sensitivity pruning with One-Shot Pruning examples/ssl : ResNet20 baseline training (CIFAR10 dataset) Structured Sparsity Learning (SSL) with layer removal on ResNet20 SSL with channel removal on ResNet20 examples/quantization : AlexNet w. 
Batch-Norm (base FP32 + DoReFa) Pre-activation ResNet20 on CIFAR10 (base FP32 + DoReFa) Pre-activation ResNet18 on ImageNet (base FP32 + DoReFa)","title":"Examples"},{"location":"usage.html#experiment-reproducibility","text":"Experiment reproducibility is sometimes important. Pete Warden recently expounded on this in his blog . PyTorch's support for deterministic execution requires us to use only one thread for loading data (otherwise the multi-threaded execution of the data loaders can create random ordering and change the results), and to set the seed of the CPU and GPU PRNGs. Using the --deterministic command-line flag and setting j=1 will produce reproducible results (for the same PyTorch version).","title":"Experiment reproducibility"},{"location":"usage.html#performing-pruning-sensitivity-analysis","text":"Distiller supports element-wise and filter-wise pruning sensitivity analysis. In both cases, L1-norm is used to rank which elements or filters to prune. For example, when running filter-pruning sensitivity analysis, the L1-norms of the filters of each layer's weights tensor are calculated, and the bottom x% are set to zero. The analysis process is quite long, because currently we use the entire test dataset to assess the accuracy performance at each pruning level of each weights tensor. Using a small dataset for this would save much time and we plan to assess whether this provides sufficiently accurate results. Results are output as a CSV file ( sensitivity.csv ) and PNG file ( sensitivity.png ). The implementation is in distiller/sensitivity.py and it contains further details about the process and the format of the CSV file. The example below performs element-wise pruning sensitivity analysis on ResNet20 for CIFAR10: $ python3 compress_classifier.py -a resnet20_cifar ../../../data.cifar10/ -j=1 --resume=../cifar10/resnet20/checkpoint_trained_dense.pth.tar --sense=element The sense command-line argument can be set to either element or filter , depending on the type of analysis you want done. There is also a Jupyter notebook with example invocations, outputs and explanations.","title":"Performing pruning sensitivity analysis"},{"location":"usage.html#post-training-quantization","text":"The following example quantizes ResNet18 for ImageNet: $ python3 compress_classifier.py -a resnet18 ../../../data.imagenet --pretrained --quantize-eval --evaluate See here for more details on how to invoke post-training quantization from the command line. A checkpoint with the quantized model will be dumped in the run directory. It will contain the quantized model parameters (the data type will still be FP32, but the values will be integers). The calculated quantization parameters (scale and zero-point) are also stored in each quantized layer. For more examples of post-training quantization see here .","title":"Post-Training Quantization"},{"location":"usage.html#summaries","text":"You can use the sample compression application to generate model summary reports, such as the attributes and compute summary report (see screen capture below). You can log sparsity statistics (written to console and CSV file), performance, optimizer and model information, and also create a PNG image of the DNN. Creating a PNG image is an experimental feature (it relies on features which are not available on PyTorch 3.1 and that we hope will be available in PyTorch's next release), so to use it you will need to compile the PyTorch master branch, and hope for the best ;-). 
$ python3 compress_classifier.py --resume=../ssl/checkpoints/checkpoint_trained_ch_regularized_dense.pth.tar -a=resnet20_cifar ../../../data.cifar10 --summary=compute Generates: +----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------+ | | Name | Type | Attrs | IFM | IFM volume | OFM | OFM volume | Weights volume | MACs | |----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------| | 0 | module.conv1 | Conv2d | k=(3, 3) | (1, 3, 32, 32) | 3072 | (1, 16, 32, 32) | 16384 | 432 | 442368 | | 1 | module.layer1.0.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 2 | module.layer1.0.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 3 | module.layer1.1.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 4 | module.layer1.1.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 5 | module.layer1.2.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 6 | module.layer1.2.conv2 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 16, 32, 32) | 16384 | 2304 | 2359296 | | 7 | module.layer2.0.conv1 | Conv2d | k=(3, 3) | (1, 16, 32, 32) | 16384 | (1, 32, 16, 16) | 8192 | 4608 | 1179648 | | 8 | module.layer2.0.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 9 | module.layer2.0.downsample.0 | Conv2d | k=(1, 1) | (1, 16, 32, 32) | 16384 | (1, 32, 16, 16) | 8192 | 512 | 131072 | | 10 | module.layer2.1.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 11 | module.layer2.1.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 12 | module.layer2.2.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 13 | module.layer2.2.conv2 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 32, 16, 16) | 8192 | 9216 | 2359296 | | 14 | module.layer3.0.conv1 | Conv2d | k=(3, 3) | (1, 32, 16, 16) | 8192 | (1, 64, 8, 8) | 4096 | 18432 | 1179648 | | 15 | module.layer3.0.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 16 | module.layer3.0.downsample.0 | Conv2d | k=(1, 1) | (1, 32, 16, 16) | 8192 | (1, 64, 8, 8) | 4096 | 2048 | 131072 | | 17 | module.layer3.1.conv1 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 18 | module.layer3.1.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 19 | module.layer3.2.conv1 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 20 | module.layer3.2.conv2 | Conv2d | k=(3, 3) | (1, 64, 8, 8) | 4096 | (1, 64, 8, 8) | 4096 | 36864 | 2359296 | | 21 | module.fc | Linear | | (1, 64) | 64 | (1, 10) | 10 | 640 | 640 | +----+------------------------------+--------+----------+-----------------+--------------+-----------------+--------------+------------------+---------+ Total MACs: 40,813,184","title":"Summaries"},{"location":"usage.html#using-tensorboard","text":"Google's TensorBoard is an excellent tool for visualizing the progress of DNN training. 
Distiller's logger supports writing performance indicators and parameter statistics in a file format that can be read by TensorBoard (Distiller uses TensorFlow's APIs in order to do this, which is why Distiller requires the installation of TensorFlow). To view the graphs, invoke the TensorBoard server. For example: $ tensorboard --logdir=logs Distiller's setup (requirements.txt) installs TensorFlow for CPU. If you want a different installation, please follow the TensorFlow installation instructions .","title":"Using TensorBoard"},{"location":"usage.html#collecting-activations-statistics","text":"In CNNs with ReLU layers, ReLU activations (feature-maps) also exhibit a nice level of sparsity (50-60% sparsity is typical). You can collect activation statistics using the --act-stats command-line flag. For example: $ python3 compress_classifier.py -a=resnet56_cifar -p=50 ../../../data.cifar10 --resume=checkpoint.resnet56_cifar_baseline.pth.tar --act-stats=test -e The test parameter indicates that, in this example, we want to collect activation statistics during the test phase. Note that we also used the -e command-line argument to indicate that we want to run a test phase. The other two legal parameter values are train and valid , which collect activation statistics during the training and validation phases, respectively.","title":"Collecting activations statistics"},{"location":"usage.html#collectors-and-their-collaterals","text":"An instance of a subclass of ActivationStatsCollector can be used to collect activation statistics. Currently, ActivationStatsCollector has two types of subclasses: SummaryActivationStatsCollector and RecordsActivationStatsCollector . Instances of SummaryActivationStatsCollector compute the mean of some statistic of the activation. It is rather light-weight and quicker than collecting a record per activation. The statistic function is configured in the constructor. In the sample compression application, compress_classifier.py , we create a dictionary of collectors. For example: SummaryActivationStatsCollector(model, \"sparsity\", lambda t: 100 * distiller.utils.sparsity(t)) The lambda expression is invoked per activation encountered during forward passes, and the value it returns (in this case, the sparsity of the activation tensors, multiplied by 100) is stored in module.sparsity ( \"sparsity\" is this collector's name). To access the statistics, you can invoke collector.value() , or you can access each module's data directly. Another type of collector is RecordsActivationStatsCollector which computes a hard-coded set of activations statistics and collects a record per activation . For obvious reasons, this is slower than instances of SummaryActivationStatsCollector . ActivationStatsCollector defaults to collecting activations statistics only on the output activations of ReLU layers, but we can choose any layer type we want. In the example below we collect statistics from outputs of torch.nn.Conv2d layers. RecordsActivationStatsCollector(model, classes=[torch.nn.Conv2d]) Collectors can write their data to Excel workbooks (which are named using the collector's name) by invoking collector.to_xlsx(path_to_workbook) . In compress_classifier.py we currently create four different collectors which you can selectively disable. You can also add other statistics collectors and use a different function to compute your new statistic. 
collectors = missingdict({ \"sparsity\": SummaryActivationStatsCollector(model, \"sparsity\", lambda t: 100 * distiller.utils.sparsity(t)), \"l1_channels\": SummaryActivationStatsCollector(model, \"l1_channels\", distiller.utils.activation_channels_l1), \"apoz_channels\": SummaryActivationStatsCollector(model, \"apoz_channels\", distiller.utils.activation_channels_apoz), \"records\": RecordsActivationStatsCollector(model, classes=[torch.nn.Conv2d])}) By default, these Collectors write their data to files in the active log directory. You can use a utility function, distiller.log_activation_statsitics , to log the data of an ActivationStatsCollector instance to one of the backend-loggers. For example, the code below logs the \"sparsity\" collector to a TensorBoard log file. distiller.log_activation_statsitics(epoch, \"train\", loggers=[tflogger], collector=collectors[\"sparsity\"])","title":"Collectors and their collaterals"},{"location":"usage.html#caveats","text":"Distiller collects activations statistics using PyTorch's forward-hooks mechanism. Collectors iteratively register the modules' forward-hooks, and collectors are called during the forward traversal and get exposed to activation data. Registering for forward callbacks is performed like this: module.register_forward_hook This makes apparent two limitations of this mechanism: We can only register on PyTorch modules. This means that we can't register on the forward hook of functionals such as torch.nn.functional.relu and torch.nn.functional.max_pool2d . Therefore, you may need to replace functionals with their module alternatives. For example: class MadeUpNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 6, 5) def forward(self, x): x = F.relu(self.conv1(x)) return x Can be changed to: class MadeUpNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.relu(self.conv1(x)) return x We can only use a module instance once in our models. If we use the same module several times, then we can't determine which node in the graph has invoked the callback, because the PyTorch callback signature def hook(module, input, output) doesn't provide enough contextual information. 
TorchVision's ResNet is an example of a model that uses the same instance of nn.ReLU multiple times: class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) # <================ out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) # <================ return out In Distiller we changed ResNet to use multiple instances of nn.ReLU, and each instance is used only once: class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu1 = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.relu2 = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu1(out) # <================ out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu2(out) # <================ return out","title":"Caveats"},{"location":"usage.html#using-the-jupyter-notebooks","text":"The Jupyter notebooks contain many examples of how to use the statistics summaries generated by Distiller. They are explained in a separate page.","title":"Using the Jupyter notebooks"},{"location":"usage.html#generating-this-documentation","text":"Install mkdocs and the required packages by executing: $ pip3 install -r doc-requirements.txt To build the project documentation run: $ cd distiller/docs-src $ mkdocs build --clean This will create a folder named 'site' which contains the documentation website. Open distiller/docs/site/index.html to view the documentation home page.","title":"Generating this documentation"}]}
\ No newline at end of file
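The usage docs above describe how Distiller's activation Collectors ride on PyTorch's forward-hook mechanism. As a minimal sketch of that mechanism (illustrative only: the `sparsity` helper and toy model below are ours, not Distiller's `ActivationStatsCollector` code), this is roughly what registering hooks and recording activation sparsity looks like:

```python
import torch
import torch.nn as nn

def sparsity(t):
    # Percentage of zero-valued elements in tensor t
    return 100.0 * float((t == 0).sum()) / t.numel()

stats = {}

def make_hook(name):
    # The hook fires on every forward pass of the module it is attached to
    def hook(module, inputs, output):
        stats.setdefault(name, []).append(sparsity(output))
    return hook

model = nn.Sequential(nn.Conv2d(3, 6, 5), nn.ReLU())
handles = [module.register_forward_hook(make_hook(name))
           for name, module in model.named_modules()
           if isinstance(module, nn.ReLU)]   # mirror the ReLU-only default

model(torch.randn(1, 3, 32, 32))
print(stats)       # e.g. {'1': [52.3]}
for h in handles:
    h.remove()     # unregister once collection is done
```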
diff --git a/docs/sitemap.xml b/docs/sitemap.xml
index b7671b3..f1a5d67 100644
--- a/docs/sitemap.xml
+++ b/docs/sitemap.xml
@@ -2,87 +2,87 @@
 <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
     <url>
      <loc>None</loc>
-     <lastmod>2019-04-01</lastmod>
+     <lastmod>2019-04-08</lastmod>
      <changefreq>daily</changefreq>
     </url>
 </urlset>
\ No newline at end of file
diff --git a/docs/sitemap.xml.gz b/docs/sitemap.xml.gz
index 76473256f5b59cff3fc59273f8c3c57e332c4ac3..508c0f98a760b1fd759946de366d9e251ac67fa3 100644
GIT binary patch
literal 205
zcmV;;05bm{iwFoWKdW2<|8r?{Wo=<_E_iKh0PWSW4#FT12H@SNAnXNd99l!^=IEpk
zKq$5(EmRJzzP;2|<2wk;9X}Vo@p%0hdUV$wjI$Na3ByQR<r-t#3ZGAhtiZL{@SE>J
z2NZ`bRJaRa+*3MD69E(SAV(n1w-i(tFF@CjGE!o(LGjEVEFI##Q{w2X=5+2pp@cBF
zMr>JLW^9+SB0jI#I-)vhTYd5RDjI3}C#OHjqR=mszyu~RfeB3Dzk}}**SB~9n5Fr*
H?gsz>kbqr~

literal 205
zcmV;;05bm{iwFomC!$;e|8r?{Wo=<_E_iKh0PWSW4#FT12H?F<LD<Vt<6sS?o1>FH
z0HN5Dv`_)9zP;2|<2wk;9X}Vo@mT#BdURLLYilZ85Q>p9(l*+(6+WL1`3~1SV>j1(
z6;K>DzQSDy;~rp|CgOEagBU%rt_3f<c=51?KuL*thT@q&SUSXdEBVnH#bE9}q3~g_
z4d2kB%;`3#MSNbfbwqZ;wCdv2l{Z57PX<59qQI9)U;-1Ezyv1n-@$i@>zls-bBvHA
H?gsz>eN|vz

diff --git a/docs/tutorial-lang_model.html b/docs/tutorial-lang_model.html
index 1966666..583aaf6 100644
--- a/docs/tutorial-lang_model.html
+++ b/docs/tutorial-lang_model.html
@@ -317,8 +317,8 @@ Note that we can improve the results by training longer, since the loss curves a
 <p>We compared three model sizes: small (7.1M; 14M), medium (28M; 50M), large: (86M; 136M) – reported as (#parameters net/tied; #parameters gross).
 The results reported below use a preset seed (for reproducibility), and we expect results can be improved if we allow “true” pseudo-randomness.  We limited our tests to 40 epochs, even though validation perplexity was still trending down.</p>
 <p>Essentially, this recreates the language model experiment in the AGP paper, and validates its conclusions:
-<em> “We see that sparse models are able to outperform dense models which have significantly more parameters.”
-</em> The 80% sparse large model (which has 16.9M parameters and a perplexity of 83.64) is able to outperform the dense medium (which has 28.4M parameters and a perplexity of 84.21), a model which has 1.7 times more parameters.  It also outperform the dense large model, which exemplifies how pruning can act as a regularizer.
+* “We see that sparse models are able to outperform dense models which have significantly more parameters.”
+* The 80% sparse large model (which has 16.9M parameters and a perplexity of 83.64) is able to outperform the dense medium (which has 28.4M parameters and a perplexity of 84.21), a model which has 1.7 times more parameters.  It also outperforms the dense large model, which exemplifies how pruning can act as a regularizer.
 * “Our results show that pruning works very well not only on the dense LSTM weights and dense softmax layer but also the dense embedding matrix. This suggests that during the optimization procedure the neural network can find a good sparse embedding for the words in the vocabulary that works well together with the sparse connectivity structure of the LSTM weights and softmax layer.”</p>
 <h2 id="setup">Setup</h2>
 <p>We start by cloning Pytorch’s example <a href="https://github.com/pytorch/examples/tree/master">repository</a>. I’ve copied the language model code to distiller’s examples/word_language_model directory, so I’ll use that for the rest of the tutorial.
diff --git a/examples/classifier_compression/compress_classifier.py b/examples/classifier_compression/compress_classifier.py
index e7b318c..ee8d0d5 100755
--- a/examples/classifier_compression/compress_classifier.py
+++ b/examples/classifier_compression/compress_classifier.py
@@ -39,6 +39,7 @@ train():
         loss = criterion(output, target)
         compression_scheduler.before_backward_pass(epoch)
         loss.backward()
+        compression_scheduler.before_parameter_optimization(epoch)
         optimizer.step()
         compression_scheduler.on_minibatch_end(epoch)
 
@@ -386,6 +387,8 @@ def train(train_loader, model, criterion, optimizer, epoch,
         # Compute the gradient and do SGD step
         optimizer.zero_grad()
         loss.backward()
+        if compression_scheduler:
+            compression_scheduler.before_parameter_optimization(epoch, train_step, steps_per_epoch, optimizer)
         optimizer.step()
         if compression_scheduler:
             compression_scheduler.on_minibatch_end(epoch, train_step, steps_per_epoch, optimizer)
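The diff above slots the new `before_parameter_optimization` callback between `loss.backward()` and `optimizer.step()`. To see why that position matters for gradient masking, here is a stand-alone sketch (toy values; this shows the general PyTorch tensor-hook mechanism, not Distiller's implementation) of zeroing a parameter's gradient after it is computed and before the optimizer consumes it:

```python
import torch

weight = torch.nn.Parameter(torch.randn(4, 3))
mask = (torch.rand_like(weight) > 0.5).float()  # stand-in pruning mask

# Tensor.register_hook fires when weight.grad is computed during backward(),
# i.e. after the backward pass and before optimizer.step()
weight.register_hook(lambda grad: grad * mask)

loss = (weight * 2.0).sum()
loss.backward()
print(weight.grad)  # gradient entries are zeroed wherever mask == 0
```

Note that zeroing gradients keeps plain SGD from updating masked weights, but optimizers that apply weight decay or accumulated momentum can still move them, which is one reason the weight masks themselves are applied as well.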
diff --git a/examples/drop_filter/plain20_cifar_dropfilter_training.yaml b/examples/drop_filter/plain20_cifar_dropfilter_training.yaml
new file mode 100755
index 0000000..47386b1
--- /dev/null
+++ b/examples/drop_filter/plain20_cifar_dropfilter_training.yaml
@@ -0,0 +1,71 @@
+# This script performs DropFilter - a regularization method similar to Dropout, which drops entire convolutional
+# filters rather than individual neurons.
+# However, unlike the original intent of DropFilter - to act as a regularizer and reduce the generalization error
+# of the network, here we employ higher rates of filter-dropping (rates are increased over time by following an AGP
+# schedule) in order to make the network more robust to filter-pruning.  We test this robustness using sensitivity
+# analysis.
+#
+# References:
+# [1] DropFilter: Dropout for Convolutions
+#     Zhengsu Chen Jianwei Niu Qi Tian
+#     https://arxiv.org/abs/1810.09849
+# [2] DropFilter: A Novel Regularization Method for Learning Convolutional Neural Networks
+#     Hengyue Pan, Hui Jiang, Xin Niu, Yong Dou
+#     https://arxiv.org/abs/1811.06783
+#
+#
+#
+# time python3 compress_classifier.py --arch=plain20_cifar ../../../data.cifar --lr=0.3 --epochs=220 --compress=plain20_cifar_dropfilter_training.yaml -p=50 --gpus=0 --masks-sparsity --vs=0
+#
+# --- validate (epoch=219)-----------
+# 10000 samples (256 per mini-batch)
+# ==> Top1: 89.410    Top5: 99.550    Loss: 0.454
+#
+# ==> Best [Top1: 89.610   Top5: 99.560   Sparsity:0.00   Params: 268336 on epoch: 139]
+# Saving checkpoint to: logs/2019.03.24-133353/checkpoint.pth.tar
+# --- test ---------------------
+# 10000 samples (256 per mini-batch)
+# ==> Top1: 89.410    Top5: 99.550    Loss: 0.422
+#
+# real    37m16.853s
+# user    131m1.775s
+# sys     15m12.706s
+
+lr_schedulers:
+  training_lr:
+    class: StepLR
+    step_size: 45
+    gamma: 0.20
+
+pruners:
+  random_filter_pruner:
+    class: BernoulliFilterPruner_AGP
+    initial_sparsity: 0.05
+    final_sparsity: 0.50
+    group_type: Filters
+    weights: [module.conv1.weight,
+              module.layer1.0.conv1.weight, module.layer1.1.conv1.weight, module.layer1.2.conv1.weight,
+              module.layer1.0.conv2.weight, module.layer1.1.conv2.weight, module.layer1.2.conv2.weight,
+              module.layer2.0.conv1.weight, module.layer2.1.conv1.weight, module.layer2.2.conv1.weight,
+              module.layer2.0.conv2.weight, module.layer2.1.conv2.weight, module.layer2.2.conv2.weight,
+              module.layer3.0.conv1.weight, module.layer3.1.conv1.weight, module.layer3.2.conv1.weight,
+              module.layer3.0.conv2.weight, module.layer3.1.conv2.weight, module.layer3.2.conv2.weight]
+
+policies:
+  - lr_scheduler:
+      instance_name: training_lr
+    starting_epoch: 30
+    ending_epoch: 200
+    frequency: 1
+
+  - pruner:
+      instance_name: random_filter_pruner
+      args:
+        mini_batch_pruning_frequency: 16
+        discard_masks_at_minibatch_end: True
+        use_double_copies: True
+        mask_on_forward_only: True
+        mask_gradients: True
+    starting_epoch: 15
+    ending_epoch: 220
+    frequency: 1
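For reference, the pruner above ramps sparsity from 0.05 to 0.50 following the AGP schedule of Zhu & Gupta (2017), which interpolates cubically between the two endpoints. A small sketch of the curve (the function name and step bookkeeping here are ours, not Distiller's API):

```python
def agp_sparsity(s_initial, s_final, step, begin_step, end_step):
    # Automated Gradual Pruning: sparsity rises quickly at first and
    # flattens out as it approaches s_final
    progress = (step - begin_step) / float(end_step - begin_step)
    progress = min(max(progress, 0.0), 1.0)
    return s_final + (s_initial - s_final) * (1.0 - progress) ** 3

# Halfway through a 100-step schedule the target is already ~0.44:
print(agp_sparsity(0.05, 0.50, step=50, begin_step=0, end_step=100))
```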
diff --git a/examples/drop_filter/plain20_cifar_dropfilter_training_regularization.yaml b/examples/drop_filter/plain20_cifar_dropfilter_training_regularization.yaml
new file mode 100755
index 0000000..a763db1
--- /dev/null
+++ b/examples/drop_filter/plain20_cifar_dropfilter_training_regularization.yaml
@@ -0,0 +1,71 @@
+# This script performs DropFilter - a regularization method similar to Dropout, which drops entire convolutional
+# filters rather than individual neurons.
+#
+# The sample below increases the test Top1 of Plain20 by 0.3% (from 90.55 to 90.85) when averaged across 3 trials.
+#
+# References:
+# [1] DropFilter: Dropout for Convolutions
+#     Zhengsu Chen, Jianwei Niu, Qi Tian
+#     https://arxiv.org/abs/1810.09849
+# [2] DropFilter: A Novel Regularization Method for Learning Convolutional Neural Networks
+#     Hengyue Pan, Hui Jiang, Xin Niu, Yong Dou
+#     https://arxiv.org/abs/1811.06783
+#
+# time python3 compress_classifier.py --arch=plain20_cifar ../../../data.cifar --lr=0.3 --epochs=180 --compress=../drop_filter/plain20_cifar_dropfilter_training_regularization.yaml -p=50 --gpus=0 --masks-sparsity --vs=0
+#
+# --- validate (epoch=179)-----------
+# 10000 samples (256 per mini-batch)
+# ==> Top1: 90.760    Top5: 99.650    Loss: 0.362
+#
+# ==> Best [Top1: 90.880   Top5: 99.650   Sparsity:0.00   Params: 268336 on epoch: 170]
+# Saving checkpoint to: logs/2019.03.23-185744/checkpoint.pth.tar
+# --- test ---------------------
+# 10000 samples (256 per mini-batch)
+# ==> Top1: 90.760    Top5: 99.650    Loss: 0.359
+#
+#
+# Log file for this run: /home/cvds_lab/nzmora/pytorch_workspace/distiller/examples/classifier_compression/logs/2019.03.23-185744/2019.03.23-185744.log
+#
+# real    30m59.049s
+# user    109m16.449s
+# sys     12m9.995s
+
+lr_schedulers:
+  training_lr:
+    class: StepLR
+    step_size: 45
+    gamma: 0.20
+
+pruners:
+  random_filter_pruner:
+    class: BernoulliFilterPruner
+    desired_sparsity: 0.1
+    group_type: Filters
+    weights: [module.conv1.weight,
+              module.layer1.0.conv1.weight, module.layer1.1.conv1.weight, module.layer1.2.conv1.weight,
+              module.layer1.0.conv2.weight, module.layer1.1.conv2.weight, module.layer1.2.conv2.weight,
+              module.layer2.0.conv1.weight, module.layer2.1.conv1.weight, module.layer2.2.conv1.weight,
+              module.layer2.0.conv2.weight, module.layer2.1.conv2.weight, module.layer2.2.conv2.weight,
+              module.layer3.0.conv1.weight, module.layer3.1.conv1.weight, module.layer3.2.conv1.weight,
+              module.layer3.0.conv2.weight, module.layer3.1.conv2.weight, module.layer3.2.conv2.weight]
+
+policies:
+  - lr_scheduler:
+      instance_name: training_lr
+    starting_epoch: 30
+    ending_epoch: 200
+    frequency: 1
+
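+  # Same fine-control arguments as in plain20_cifar_dropfilter_training.yaml, except that
+  # use_double_copies is left disabled (commented out) in this regularization schedule.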
+  - pruner:
+      instance_name: random_filter_pruner
+      args:
+        mini_batch_pruning_frequency: 16
+        discard_masks_at_minibatch_end: True
+        # use_double_copies: True
+        mask_on_forward_only: True
+        mask_gradients: True
+    starting_epoch: 15
+    ending_epoch: 180
+    frequency: 1
diff --git a/examples/drop_filter/resnet20_cifar_randomlevel_training.yaml b/examples/drop_filter/resnet20_cifar_randomlevel_training.yaml
new file mode 100755
index 0000000..81a8dfc
--- /dev/null
+++ b/examples/drop_filter/resnet20_cifar_randomlevel_training.yaml
@@ -0,0 +1,87 @@
+# Random drop-filter, where we randomly choose the percentage of filters to prune (the "level"), and then use
+# L1-norm ranking to select which filters to drop.
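+#
+# An illustrative Python sketch of the idea (not Distiller's implementation; the function name
+# random_level_filter_mask is hypothetical):
+#
+#   import random, torch
+#   def random_level_filter_mask(weight, sparsity_range=(0.1, 0.2)):
+#       level = random.uniform(*sparsity_range)             # randomly chosen pruning level
+#       n_prune = int(level * weight.size(0))
+#       l1_per_filter = weight.abs().sum(dim=(1, 2, 3))     # L1-norm of each filter
+#       prune_idx = torch.argsort(l1_per_filter)[:n_prune]  # weakest filters
+#       mask = torch.ones_like(weight)
+#       mask[prune_idx] = 0.0
+#       return mask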
+#
+#
+# time python3 compress_classifier.py --arch=resnet20_cifar ../../../data.cifar --lr=0.3 --epochs=180 --batch=256 --compress=../drop_filter/resnet20_cifar_randomlevel_training.yaml --vs=0 -p=50 --gpus=0
+#
+# Parameters:
+# +----+-------------------------------------+----------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+
+# |    | Name                                | Shape          |   NNZ (dense) |   NNZ (sparse) |   Cols (%) |   Rows (%) |   Ch (%) |   2D (%) |   3D (%) |   Fine (%) |     Std |     Mean |   Abs-Mean |
+# |----+-------------------------------------+----------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------|
+# |  0 | module.conv1.weight                 | (16, 3, 3, 3)  |           432 |            432 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.40816 | -0.00610 |    0.26546 |
+# |  1 | module.layer1.0.conv1.weight        | (16, 16, 3, 3) |          2304 |           2304 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.15262 | -0.00699 |    0.10400 |
+# |  2 | module.layer1.0.conv2.weight        | (16, 16, 3, 3) |          2304 |           2304 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.15914 | -0.01044 |    0.11828 |
+# |  3 | module.layer1.1.conv1.weight        | (16, 16, 3, 3) |          2304 |           2304 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.13560 | -0.00450 |    0.09826 |
+# |  4 | module.layer1.1.conv2.weight        | (16, 16, 3, 3) |          2304 |           2304 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.13313 | -0.00876 |    0.10116 |
+# |  5 | module.layer1.2.conv1.weight        | (16, 16, 3, 3) |          2304 |           2304 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.17824 | -0.00447 |    0.13122 |
+# |  6 | module.layer1.2.conv2.weight        | (16, 16, 3, 3) |          2304 |           2304 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.14746 | -0.00306 |    0.11315 |
+# |  7 | module.layer2.0.conv1.weight        | (32, 16, 3, 3) |          4608 |           4608 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.13769 | -0.01100 |    0.10709 |
+# |  8 | module.layer2.0.conv2.weight        | (32, 32, 3, 3) |          9216 |           9216 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.11786 | -0.00354 |    0.09118 |
+# |  9 | module.layer2.0.downsample.0.weight | (32, 16, 1, 1) |           512 |            512 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.25319 |  0.00040 |    0.19276 |
+# | 10 | module.layer2.1.conv1.weight        | (32, 32, 3, 3) |          9216 |           9216 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.10268 | -0.00995 |    0.07987 |
+# | 11 | module.layer2.1.conv2.weight        | (32, 32, 3, 3) |          9216 |           9216 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.09043 | -0.00485 |    0.07108 |
+# | 12 | module.layer2.2.conv1.weight        | (32, 32, 3, 3) |          9216 |           9216 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.09899 | -0.01216 |    0.07835 |
+# | 13 | module.layer2.2.conv2.weight        | (32, 32, 3, 3) |          9216 |           9216 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.08196 | -0.00298 |    0.06411 |
+# | 14 | module.layer3.0.conv1.weight        | (64, 32, 3, 3) |         18432 |          18432 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.10177 | -0.00970 |    0.08108 |
+# | 15 | module.layer3.0.conv2.weight        | (64, 64, 3, 3) |         36864 |          36864 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.09456 | -0.00433 |    0.07474 |
+# | 16 | module.layer3.0.downsample.0.weight | (64, 32, 1, 1) |          2048 |           2048 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.14403 | -0.01702 |    0.11294 |
+# | 17 | module.layer3.1.conv1.weight        | (64, 64, 3, 3) |         36864 |          36864 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.09133 | -0.00750 |    0.07241 |
+# | 18 | module.layer3.1.conv2.weight        | (64, 64, 3, 3) |         36864 |          36864 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.07808 | -0.00790 |    0.06185 |
+# | 19 | module.layer3.2.conv1.weight        | (64, 64, 3, 3) |         36864 |          36864 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.07204 | -0.00507 |    0.05624 |
+# | 20 | module.layer3.2.conv2.weight        | (64, 64, 3, 3) |         36864 |          36864 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.04531 | -0.00329 |    0.03415 |
+# | 21 | module.fc.weight                    | (10, 64)       |           640 |            640 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.52613 | -0.00002 |    0.40702 |
+# | 22 | Total sparsity:                     | -              |        270896 |         270896 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.00000 |  0.00000 |    0.00000 |
+# +----+-------------------------------------+----------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+
+# Total sparsity: 0.00
+#
+# --- validate (epoch=179)-----------
+# 10000 samples (256 per mini-batch)
+# ==> Top1: 91.660    Top5: 99.690    Loss: 0.374
+#
+# ==> Best [Top1: 91.800   Top5: 99.690   Sparsity:0.00   Params: 270896 on epoch: 171]
+# Saving checkpoint to: logs/2019.03.24-162956/checkpoint.pth.tar
+# --- test ---------------------
+# 10000 samples (256 per mini-batch)
+# ==> Top1: 91.660    Top5: 99.690    Loss: 0.366
+#
+#
+# Log file for this run: /home/cvds_lab/nzmora/pytorch_workspace/distiller/examples/classifier_compression/logs/2019.03.24-162956/2019.03.24-162956.log
+#
+# real    31m57.174s
+# user    98m48.700s
+# sys     12m8.557s
+
+
+lr_schedulers:
+  training_lr:
+    class: StepLR
+    step_size: 45
+    gamma: 0.10
+
+pruners:
+  random_filter_pruner:
+    class: RandomLevelStructureParameterPruner
+    sparsity_range: [0.1, 0.2]
+    group_type: Filters
+    weights: [module.layer1.0.conv1.weight, module.layer1.1.conv1.weight, module.layer1.2.conv1.weight,
+              module.layer2.0.conv1.weight, module.layer2.1.conv1.weight, module.layer2.2.conv1.weight,
+              module.layer3.0.conv1.weight, module.layer3.1.conv1.weight, module.layer3.2.conv1.weight]
+
+policies:
+  - lr_scheduler:
+      instance_name: training_lr
+    starting_epoch: 30
+    ending_epoch: 200
+    frequency: 1
+
+  - pruner:
+      instance_name: random_filter_pruner
+      args:
+        mini_batch_pruning_frequency: 16
+        discard_masks_at_minibatch_end: True
+        mask_on_forward_only: True
+        use_double_copies: True
+        mask_gradients: True
+    starting_epoch: 0
+    ending_epoch: 300
+    frequency: 1
-- 
GitLab