diff --git a/examples/automated_deep_compression/ADC.py b/examples/automated_deep_compression/ADC.py
index cca53e17d4fa0d984e8bb5d1c1027282ba5b7b9c..2d358d0a656972ee80d419b566d9a7f3a902d499 100755
--- a/examples/automated_deep_compression/ADC.py
+++ b/examples/automated_deep_compression/ADC.py
@@ -30,49 +30,110 @@ def create_model_masks(model):
 
 
 USE_COACH = True
-
-
-def do_adc(model, dataset, arch, data_loader, validate_fn, save_checkpoint_fn):
-    np.random.seed()
-
-    if USE_COACH:
-        task_parameters = TaskParameters(framework_type="tensorflow",
-                                         experiment_path="./experiments/test")
-        extra_params = {'save_checkpoint_secs': None,
-                        'render': True}
-        task_parameters.__dict__.update(extra_params)
-
-        graph_manager.env_params.additional_simulator_parameters = {
-            'model': model,
-            'dataset': dataset,
-            'arch': arch,
-            'data_loader': data_loader,
-            'validate_fn': validate_fn,
-            'save_checkpoint_fn': save_checkpoint_fn
-        }
-        graph_manager.create_graph(task_parameters)
-        graph_manager.improve()
-        return
-
+PERFORM_THINNING = True
+
+def coach_adc(model, dataset, arch, data_loader, validate_fn, save_checkpoint_fn):
+    task_parameters = TaskParameters(framework_type="tensorflow",
+                                     experiment_path="./experiments/test")
+    extra_params = {'save_checkpoint_secs': None,
+                    'render': True}
+    task_parameters.__dict__.update(extra_params)
+
+    graph_manager.env_params.additional_simulator_parameters = {
+        'model': model,
+        'dataset': dataset,
+        'arch': arch,
+        'data_loader': data_loader,
+        'validate_fn': validate_fn,
+        'save_checkpoint_fn': save_checkpoint_fn,
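+        # Per-layer action bounds; CNNEnvironment uses these to construct its gym.spaces.Box action_space.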
+        'action_range': (0.15, 0.97)
+    }
+    graph_manager.create_graph(task_parameters)
+    graph_manager.improve()
+
+
+def random_adc(model, dataset, arch, data_loader, validate_fn, save_checkpoint_fn):
     """Random ADC agent"""
-    env = CNNEnvironment(model, dataset, arch, data_loader, validate_fn, save_checkpoint_fn)
+    action_range = (0.0, 1.0)
+    env = CNNEnvironment(model, dataset, arch, data_loader,
+                         validate_fn, save_checkpoint_fn, action_range)
 
-    for ep in range(10):
+    best = [-1000, None]
+    env.action_space = RandomADCActionSpace(action_range[0], action_range[1])
+    for ep in range(100):
         observation = env.reset()
+        action_config = []
         for t in range(100):
-            env.render(0, 0)
+            #env.render(0, 0)
             msglogger.info("[episode={}:{}] observation = {}".format(ep, t, observation))
             # take a random action
             action = env.action_space.sample()
+            action_config.append(action)
             observation, reward, done, info = env.step(action)
+            if reward > best[0]:
+                best[0] = reward
+                best[1] = list(action_config)  # copy: later steps in this episode would otherwise mutate the saved config
+                msglogger.info("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
+                msglogger.info("New solution found: episode={} reward={} config={}".format(ep, reward, action_config))
             if done:
                 msglogger.info("Episode finished after {} timesteps".format(t+1))
                 break
 
 
+def do_adc(model, dataset, arch, data_loader, validate_fn, save_checkpoint_fn):
+    np.random.seed()
+
+    if USE_COACH:
+        return coach_adc(model, dataset, arch, data_loader, validate_fn, save_checkpoint_fn)
+    return random_adc(model, dataset, arch, data_loader, validate_fn, save_checkpoint_fn)
+
+
 class RandomADCActionSpace(object):
+    def __init__(self, low, high):
+        self.low = low
+        self.high = high
+
+    def sample(self):
+        return random.uniform(self.low, self.high)
+
+
+class PredictableADCActionSpace(object):
+    def __init__(self, low, high):
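+        # Each assignment below overwrites the previous one; earlier lines are kept
+        # as a record of configurations tried (with their measured Top1/Top5/reward).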
+        #self.actions = (0.51, 0.26, 0.23, 0.09, 0.24, 0.36, 0.90, 0.97, 0.98, 0.98, 0.98, 0.98, 0)
+        #self.actions = (0.51, 0.26, 0.23, 0.09, 0.24, 0.36, 0.0, 0.0, 0.50, 0.50, 0.50, 0.50, 0)
+        #self.actions = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.50, 0.65, 0.60, 0.00, 0.00, 0)
+        self.actions = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00, 0.0, 0.0, 0.8, 0.00, 0)   # Top1 90.100000    Top5 99.420000    reward -0.113175
+        self.actions = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00, 0.0, 0.0, 0.7, 0.00, 0)  # Top1 90.540000    Top5 99.360000    reward -0.124923
+        self.actions = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00, 0.0, 0.0, 0.6, 0.00, 0)  # Top1 90.600000    Top5 99.340000    reward -0.128869
+
+        self.actions = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00, 0.0, 0.0, 0.8, 0.8, 0)   # Top1 87.600000    Top5 98.980000    reward -0.198718
+        self.actions = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00, 0.0, 0.0, 0.8, 0.8, 0.65)  # Top1 74.720000    Top5 97.700000    reward -0.447991
+        self.actions = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00, 0.0, 0.8, 0.8, 0.8, 0.65) # Top1 39.540000    Top5 95.420000    reward -0.886748
+
+        #self.actions = [0] * 13                                                           # Top1 90.480000    Top5 99.400000    reward -0.117374
+        self.step = 0
+        self.episode = 0
+        self.l1 = 0
+        self.l2 = 0
+        self.update_action_vector()
+
+    def update_action_vector(self):
+        self.actions = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00, 0.0, 0, 0.8, 0.05, 0)  # Top1 89.640000    Top5 99.520000    reward -0.093653
+        self.actions = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00, 0.0, 0, 0.8, 0.05, self.episode * 0.05)
+        self.actions = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00, 0.0, 0, self.l1 * 0.05, self.l2 * 0.05, 0)
+
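+    # sample() replays self.actions one entry (one layer) per call.  After a full
+    # pass it advances (l1, l2) over a 20x20 grid in 0.05 increments, sweeping the
+    # pruning levels of the two layers set in update_action_vector(), and exits
+    # once the grid has been covered.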
     def sample(self):
-        return random.uniform(0, 1)
+        action = self.actions[self.step]
+        self.step = (self.step + 1) % len(self.actions)
+        if self.step == 0:
+            self.l1 = (self.l1 + 1) % 20
+            if self.l1 == 0:
+                self.l2 = (self.l2 + 1) % 20
+            if self.l2 == 19:
+                print("Done - exiting")
+                exit()
+            self.update_action_vector()
+        return action
 
 
 def collect_conv_details(model, dataset):
@@ -86,6 +147,7 @@ def collect_conv_details(model, dataset):
     g = SummaryGraph(model.cuda(), dummy_input.cuda())
     conv_layers = OrderedDict()
     total_macs = 0
+    total_nnz = 0
     for id, (name, m) in enumerate(model.named_modules()):
         if isinstance(m, torch.nn.Conv2d):
             conv = SimpleNamespace()
@@ -97,7 +159,12 @@ def collect_conv_details(model, dataset):
             conv_op = g.find_op(normalize_module_name(name))
             assert conv_op is not None
 
+            total_nnz += conv_op['attrs']['weights_vol']
             conv.macs = conv_op['attrs']['MACs']
+            conv_pname = name + ".weight"
+            conv_p = distiller.model_find_param(model, conv_pname)
+            conv.macs *= distiller.density_ch(conv_p)
+
             total_macs += conv.macs
             conv.ofm_h = g.param_shape(conv_op['outputs'][0])[2]
             conv.ofm_w = g.param_shape(conv_op['outputs'][0])[3]
@@ -108,18 +175,18 @@ def collect_conv_details(model, dataset):
             conv.id = id
             conv_layers[len(conv_layers)] = conv
 
-    return conv_layers, total_macs
+    return conv_layers, total_macs, total_nnz
 
 
 class CNNEnvironment(gym.Env):
     metadata = {'render.modes': ['human']}
+    # STATE_EMBEDDING_LEN = len(Observation._fields) + 12
     STATE_EMBEDDING_LEN = len(Observation._fields)
 
-    def __init__(self, model, dataset, arch, data_loader, validate_fn, save_checkpoint_fn):
+    def __init__(self, model, dataset, arch, data_loader, validate_fn, save_checkpoint_fn, action_range):
         self.pylogger = distiller.data_loggers.PythonLogger(msglogger)
         self.tflogger = distiller.data_loggers.TensorBoardLogger(msglogger.logdir)
 
-        self.action_space = RandomADCActionSpace()
         self.dataset = dataset
         self.arch = arch
         self.data_loader = data_loader
@@ -127,34 +194,33 @@ class CNNEnvironment(gym.Env):
         self.save_checkpoint_fn = save_checkpoint_fn
         self.orig_model = model
 
-        self.conv_layers, self.dense_model_macs = collect_conv_details(model, dataset)
+        self.max_reward = -1000
+
+        self.conv_layers, self.dense_model_macs, self.dense_model_size = collect_conv_details(model, dataset)
         self.reset(init_only=True)
         msglogger.info("Model %s has %d Convolution layers", arch, len(self.conv_layers))
         msglogger.info("\tTotal MACs: %s" % distiller.pretty_int(self.dense_model_macs))
 
         self.debug_stats = {'episode': 0}
-
+        self.action_low = action_range[0]
+        self.action_high = action_range[1]
         # Gym
         # spaces documentation: https://gym.openai.com/docs/
-        self.action_space = spaces.Box(0, 1, shape=(1,))
+        self.action_space = spaces.Box(self.action_low, self.action_high, shape=(1,))
+        self.action_space.default_action = self.action_low
         self.observation_space = spaces.Box(0, float("inf"), shape=(self.STATE_EMBEDDING_LEN,))
 
     def reset(self, init_only=False):
         """Reset the environment.
         This is invoked by the Agent.
         """
-        msglogger.info("Resetting the environment")
-        self.current_layer_id = 0
+        msglogger.info("Resetting the environment (init_only={})".format(init_only))
+        self.current_layer_id = -1
         self.prev_action = 0
         self.model = copy.deepcopy(self.orig_model)
         self.zeros_mask_dict = create_model_masks(self.model)
         self._remaining_macs = self.dense_model_macs
         self._removed_macs = 0
-
-        # self.unprocessed_layers = []
-        # for conv in self.conv_layers:
-        #     self.unprocessed_layers.append(conv)
-        # self.processed_layers = []
         if init_only:
             return
 
@@ -168,11 +234,13 @@ class CNNEnvironment(gym.Env):
         return len(self.conv_layers)
 
     def current_layer(self):
+        return self.get_layer(self.current_layer_id)
+
+    def get_layer(self, idx):
         try:
-            return self.conv_layers[self.current_layer_id]
+            return self.conv_layers[idx]
         except KeyError:
             return None
     def episode_is_done(self):
         return self.current_layer_id == self.num_layers()
 
@@ -210,13 +278,78 @@ class CNNEnvironment(gym.Env):
         msglogger.info("Environment: current_layer_id=%d" % self.current_layer_id)
         distiller.log_weights_sparsity(self.model, -1, loggers=[self.pylogger])
 
+    def get_action(self, a):
+        #desired_reduction = 0.5e8
+        desired_reduction = 2.3e8
+        #desired_reduction = 1.5e8
+        #if self.current_layer_id == 0:
+        #    reduced = 0
+        reduced = self._removed_macs
+        rest = self._remaining_macs
+
+        duty = desired_reduction - (reduced + rest)
+        flops = self.get_macs(self.current_layer())
+        msglogger.info("action ********** a={}  duty={} desired_reduction={} reduced={}  rest={}  flops={}".format(a, duty, desired_reduction, reduced, rest, flops))
+
+        if duty > 0:
+            #duty = 0.9*desired_reduction - (reduced + rest)
+            msglogger.info("action ********** duty/flops={}".format(duty / flops))
+            msglogger.info("action ********** 1 - duty/flops={}".format(1 - duty / flops))
+            #a = max(1-self.action_low, min(a, 1 - duty/flops))
+
+            ##
+            ##  Consider using max=0 for R = error * macs
+            ##           using max= self.action_low for FLOP-limited?  Add noise so it doesn't get stuck in one place?
+            ##
+            #a = max(self.action_low, min(a, 1 - duty/flops))
+            a = max(0, min(a, 1 - duty/flops))
+        return a
+
+    def save_checkpoint(self, is_best=False):
+        # Save the learned-model checkpoint
+        scheduler = distiller.CompressionScheduler(self.model)
+        masks = {param_name: masker.mask for param_name, masker in self.zeros_mask_dict.items()}
+        scheduler.load_state_dict(state={'masks_dict': masks})
+        if is_best:
+            name = "BEST_adc_episode_{}".format(self.debug_stats['episode'])
+        else:
+            name = "adc_episode_{}".format(self.debug_stats['episode'])
+        self.save_checkpoint_fn(epoch=self.debug_stats['episode'], model=self.model, scheduler=scheduler, name=name)
+
     def step(self, action):
         """Take a step, given an action.
-        This is invoked by the Agent.
+
+        The action represents the fraction of the layer to leave intact (the
+        complement of the desired sparsity); it is inverted to a pruning
+        fraction below.
+        This function is invoked by the Agent.
         """
+        msglogger.info("env.step - current_layer_id={} action={}".format(self.current_layer_id, action))
+        assert action == 0 or (self.action_low - 0.001 <= action <= self.action_high + 0.001)
+        #action = self.get_action(action)
+        msglogger.info("action ********** (leave) {}".format(action))
+        action = 1 - action
         layer_macs = self.get_macs(self.current_layer())
-        if action > 0:
-            actual_action = self.__remove_channels(self.current_layer_id, action)
+        if action > 0 and self.current_layer_id > -1:
+            actual_action = self.__remove_channels(self.current_layer_id, action, prune_what="filters")
         else:
             actual_action = 0
         layer_macs_after_action = self.get_macs(self.current_layer())
@@ -226,28 +359,88 @@ class CNNEnvironment(gym.Env):
         next_layer_macs = self.get_macs(self.current_layer())
         self._removed_macs += (layer_macs - layer_macs_after_action)
         self._remaining_macs -= next_layer_macs
+        self.prev_action = actual_action
+
+        stats = ('Performance/Validation/',
+                 {'action': action})
+        distiller.log_training_progress(stats, None, self.debug_stats['episode'], steps_completed=self.current_layer_id,
+                                        total_steps=self.num_layers(),
+                                        log_freq=1, loggers=[self.tflogger])
 
-        #self.prev_action = actual_action
         if self.episode_is_done():
             observation = self.get_final_obs()
-            reward = self.compute_reward()
+            reward, top1 = self.compute_reward()
             # Save the learned-model checkpoint
-            scheduler = distiller.CompressionScheduler(self.model)
-            scheduler.load_state_dict(state={'masks_dict': self.zeros_mask_dict})
-            self.save_checkpoint_fn(epoch=self.debug_stats['episode'], model=self.model, scheduler=scheduler)
+            # Checkpointing now happens below, via save_checkpoint(), and only when a new best reward is found.
             self.debug_stats['episode'] += 1
+
+            if reward > self.max_reward:
+                self.max_reward = reward
+                self.save_checkpoint(is_best=True)
+                msglogger.info("Best reward={}  episode={}  top1={}".format(reward, self.debug_stats['episode'], top1))
+
         else:
             observation = self._get_obs(next_layer_macs)
             if True:
                 reward = 0
             else:
-                reward = self.compute_reward()
+                reward, _ = self.compute_reward()
 
-        self.prev_action = actual_action
         info = {}
         return observation, reward, self.episode_is_done(), info
 
-    def _get_obs(self, macs):
+    def _get_obs1(self, macs):
+        """Produce a state embedding (i.e. an observation)"""
+
+        layer = self.current_layer()
+        conv_module = distiller.model_find_module(self.model, layer.name)
+
+        obs = np.array([layer.t, conv_module.out_channels, conv_module.in_channels,
+                        layer.ifm_h, layer.ifm_w, layer.stride[0], layer.k,
+                        macs/self.dense_model_macs,
+                        self.removed_macs(), self.remaining_macs(), 1-self.prev_action])
+
+        assert len(obs) == self.STATE_EMBEDDING_LEN
+        assert (macs/self.dense_model_macs + self.removed_macs() + self.remaining_macs()) <= 1
+        #msglogger.info("obs={}".format(Observation._make(obs)))
+        msglogger.info("obs={}".format(obs))
+        return obs
+
+    def _get_obs2(self, macs):
         """Produce a state embedding (i.e. an observation)"""
 
         layer = self.current_layer()
@@ -255,23 +448,90 @@ class CNNEnvironment(gym.Env):
 
         obs = np.array([layer.t, conv_module.out_channels, conv_module.in_channels,
                         layer.ifm_h, layer.ifm_w, layer.stride[0], layer.k,
-                        macs/self.dense_model_macs, self.removed_macs(), self.remaining_macs(), self.prev_action])
+                        macs/self.dense_model_macs,
+                        self.removed_macs(), self.remaining_macs(), 1-self.prev_action])
 
+        id = np.zeros(13)
+        id[layer.t] = 1
+        obs = np.array([conv_module.out_channels, conv_module.in_channels,
+                        layer.ifm_h, layer.ifm_w, layer.stride[0], layer.k,
+                        macs/self.dense_model_macs,
+                        self.removed_macs(), self.remaining_macs(), 1-self.prev_action])
+
+        obs = np.concatenate([id, obs])
         assert len(obs) == self.STATE_EMBEDDING_LEN
         assert (macs/self.dense_model_macs + self.removed_macs() + self.remaining_macs()) <= 1
+        #msglogger.info("obs={}".format(Observation._make(obs)))
+        msglogger.info("obs={}".format(obs))
+        return obs
+
+    def _get_obs3(self, macs):
+        """Produce a state embedding (i.e. an observation)"""
+
+        layer = self.current_layer()
+        conv_module = distiller.model_find_module(self.model, layer.name)
+
+        obs = np.array([layer.t, conv_module.out_channels, conv_module.in_channels,
+                        layer.ifm_h, layer.ifm_w, layer.stride[0], layer.k,
+                        macs/self.dense_model_macs,
+                        self.removed_macs(), self.remaining_macs(), 1-self.prev_action])
+
+        id = np.zeros(13)
+        id[layer.t] = 1
+        # NORMALIZE THE FEATURES!!
+        obs = np.array([conv_module.out_channels/512, conv_module.in_channels/512,
+                        layer.ifm_h/32, layer.ifm_w/32, layer.stride[0]/2, layer.k/3,
+                        macs/self.dense_model_macs,
+                        self.removed_macs(), self.remaining_macs(), 1-self.prev_action])
+
+        obs = np.concatenate([id, obs])
+        assert len(obs) == self.STATE_EMBEDDING_LEN
+        assert (macs/self.dense_model_macs + self.removed_macs() + self.remaining_macs()) <= 1
+        #msglogger.info("obs={}".format(Observation._make(obs)))
+        msglogger.info("obs={}".format(obs))
+        return obs
+
+    def _get_obs4(self, macs):
+        """Produce a state embedding (i.e. an observation)"""
+
+        layer = self.current_layer()
+        conv_module = distiller.model_find_module(self.model, layer.name)
+
+        # NORMALIZE THE FEATURES!!
+        obs = np.array([layer.t, conv_module.out_channels / 512, conv_module.in_channels / 512,
+                        layer.ifm_h / 32, layer.ifm_w / 32, layer.stride[0] / 2, layer.k / 3,
+                        macs / self.dense_model_macs,
+                        self.removed_macs(), self.remaining_macs(), 1 - self.prev_action])
+
+        assert len(obs) == self.STATE_EMBEDDING_LEN
+        assert (macs / self.dense_model_macs + self.removed_macs() + self.remaining_macs()) <= 1
         msglogger.info("obs={}".format(Observation._make(obs)))
         return obs
 
+    def _get_obs(self, macs):
+        #return self._get_obs3(macs)
+        return self._get_obs4(macs)
+
     def get_final_obs(self):
         """Return the final stae embedding (observation)
         The final state is reached after we traverse all of the Convolution layers.
         """
-        obs = np.array([-1, 0, 0,
-                         0, 0, 0, 0,
-                         0, self.removed_macs(), 0, self.prev_action])
+        if True:
+            obs = np.array([-1, 0, 0,
+                             0, 0, 0, 0,
+                             0, self.removed_macs(), 0, 1 - self.prev_action])
+        else:
+            id = np.zeros(13)
+            obs = np.array([ 0, 0,
+                             0, 0, 0, 0,
+                             0, self.removed_macs(), 0, 1 - self.prev_action])
+            obs = np.concatenate([id, obs])
+
         assert len(obs) == self.STATE_EMBEDDING_LEN
         return obs
 
     def get_macs(self, layer):
         """Return the number of MACs required to compute <layer>'s Convolution"""
         if layer is None:
@@ -279,7 +539,14 @@ class CNNEnvironment(gym.Env):
 
         conv_module = distiller.model_find_module(self.model, layer.name)
         # MACs = volume(OFM) * (#IFM * K^2)
-        return (conv_module.out_channels * layer.ofm_h * layer.ofm_w) * (conv_module.in_channels * layer.k**2)
+        dense_macs = (conv_module.out_channels * layer.ofm_h * layer.ofm_w) * (conv_module.in_channels * layer.k**2)
+        if PERFORM_THINNING:
+            return dense_macs
+
+        # If we didn't physically remove structures, we need to use the structural sparsity to compute MACs
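+        # e.g., if a quarter of the input channels are zeroed (channel density 0.75), the effective MAC count is 0.75 * dense_macs.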
+        conv_pname = layer.name + ".weight"
+        conv_p = distiller.model_find_param(self.model, conv_pname)
+        return dense_macs * distiller.density_ch(conv_p)
 
     def __remove_channels(self, idx, fraction_to_prune, prune_what="channels"):
         """Physically remove channels and corresponding filters from the model"""
@@ -298,17 +565,18 @@ class CNNEnvironment(gym.Env):
         conv_pname = layer.name + ".weight"
         conv_p = distiller.model_find_param(self.model, conv_pname)
 
-        msglogger.info("ADC: removing %.1f%% channels from %s" % (fraction_to_prune*100, conv_pname))
+        msglogger.info("ADC: removing %.1f%% %s from %s" % (fraction_to_prune*100, prune_what, conv_pname))
 
         if prune_what == "channels":
             calculate_sparsity = distiller.sparsity_ch
             reg_regims = {conv_pname: [fraction_to_prune, "Channels"]}
             remove_structures = distiller.remove_channels
-        else:
+        elif prune_what == "filters":
             calculate_sparsity = distiller.sparsity_3D
             reg_regims = {conv_pname: [fraction_to_prune, "3D"]}
             remove_structures = distiller.remove_filters
-
+        else:
+            raise ValueError("unsupported structure {}".format(prune_what))
         # Create a channel-ranking pruner
         pruner = distiller.pruning.L1RankedStructureParameterPruner("adc_pruner", reg_regims)
         pruner.set_param_mask(conv_p, conv_pname, self.zeros_mask_dict, meta=None)
@@ -320,17 +588,28 @@ class CNNEnvironment(gym.Env):
 
         # Use the mask to prune
         self.zeros_mask_dict[conv_pname].apply_mask(conv_p)
+
+        if PERFORM_THINNING:
+            remove_structures(self.model, self.zeros_mask_dict, self.arch, self.dataset, optimizer=None)
         actual_sparsity = calculate_sparsity(conv_p)
-        remove_structures(self.model, self.zeros_mask_dict, self.arch, self.dataset, optimizer=None)
         return actual_sparsity
 
     def compute_reward(self):
         """The ADC paper defines reward = -Error"""
         distiller.log_weights_sparsity(self.model, -1, loggers=[self.pylogger])
+        compression = distiller.model_numel(self.model, param_dims=[4]) / self.dense_model_size
+        _, total_macs, total_nnz = collect_conv_details(self.model, self.dataset)
+        msglogger.info("Total parameters left: %.2f%%" % (compression*100))
+        msglogger.info("Total compute left: %.2f%%" % (total_macs/self.dense_model_macs*100))
 
         top1, top5, vloss = self.validate_fn(model=self.model, epoch=self.debug_stats['episode'])
-        _, total_macs = collect_conv_details(self.model, self.dataset)
-        reward = -1 * vloss * math.log(total_macs)
+        #reward = -1 * (1 - top1/100)
+        #reward = -1 * (1-top1/100) * math.log(total_macs/self.dense_model_macs)
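+        # Reward shaping: scale the top1 error by log(MACs), so the reward rises
+        # toward 0 both as accuracy improves and as compute shrinks.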
+        reward = -1 * (1-top1/100) * math.log(total_macs)
+        #reward = -1 * (1-top1/100) + math.log(total_macs/self.dense_model_macs)
+        #reward = 4*top1/100 - math.log(total_macs)
+        #reward = reward * total_macs/213201664
+        #reward = reward - 5 * total_macs/213201664
         #reward = -1 * vloss * math.sqrt(math.log(total_macs))
         #reward = top1 / math.log(total_macs)
         #alpha = 0.9
@@ -350,8 +629,9 @@ class CNNEnvironment(gym.Env):
                               ('Top5', top5),
                               ('reward', reward),
                               ('total_macs', int(total_macs)),
-                              ('log(total_macs)', math.log(total_macs))]))
+                              ('log(total_macs)', math.log(total_macs)),
+                              ('log(total_macs/self.dense_model_macs)', math.log(total_macs/self.dense_model_macs)),
+                              ('total_nnz', int(total_nnz))]))
         distiller.log_training_progress(stats, None, self.debug_stats['episode'], steps_completed=0, total_steps=1,
                                         log_freq=1, loggers=[self.tflogger, self.pylogger])
-
-        return reward
+        return reward, top1
diff --git a/examples/automated_deep_compression/adc_sensitivity_analysis.ipynb b/examples/automated_deep_compression/adc_sensitivity_analysis.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..b73ddd27e1f0ddb353be29f0b9d9d8eba6610c8c
--- /dev/null
+++ b/examples/automated_deep_compression/adc_sensitivity_analysis.ipynb
@@ -0,0 +1,414 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Sensitivity Analysis\n",
+    "\n",
+    "\n",
+    "Some pruning algorthims tune their hyperparameters based on the results of pruning sensitivity analysis.  Distiller support L1-norm element-wise pruning sensitivity analysis, and filter-wise pruning sensitivity analysis based on the mean L1-norm ranking of filters.\n",
+    "\n",
+    "## Table of Contents\n",
+    "\n",
+    "1. [Load a pruning sensitivity analysis file](#Load-a-pruning-sensitivity-analysis-file)\n",
+    "2. [Examine parameters sensitivities](#Examine-parameters-sensitivities)<br>\n",
+    "    2.1. [Plot layer sensitivities at a selected sparsity level](#Plot-layer-sensitivities-at-a-selected-sparsity-level)<br>\n",
+    "    2.2. [Compare layer sensitivities](#Compare-layer-sensitivities)\n",
+    "3. [Filter pruning sensitivity analysis](#Filter-pruning-sensitivity-analysis)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Load a pruning sensitivity analysis file\n",
+    "\n",
+    "You prepare a sensitivity analysis file by invoking ```distiller.perform_sensitivity_analysis()```.  Checkout the documentation of ```distiller.perform_sensitivity_analysis()``` for more information.<br>\n",
+    "Alternatively, you can use the sample ```compress_classifier.py``` application to perform sensitivity analysis on one of the supported models.  In the example below, we invoke sensitivity analysis on a pretrained Resnet18 from torchvision, using the ImageNet test dataset for evaluation. \n",
+    "\n",
+    "```\n",
+    "$ python3 compress_classifier.py -a resnet18 ../../../data.imagenet -j 12 --pretrained --sense=element\n",
+    "```\n",
+    "\n",
+    "The outputs of performing pruning sensitivity analysis on several different networks is available at ```../examples/sensitivity-analysis``` "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pandas as pd\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "import ipywidgets as widgets\n",
+    "from ipywidgets import interactive, interact, Layout\n",
+    "\n",
+    "df = pd.read_csv('../automated_deep_compression/vgg16_cifar_sensitivity_filters.csv')\n",
+    "df['sparsity'] = round(df['sparsity'], 2)\n",
+    "df_filter = df"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The code below converts the sensitivities dataframe to a sensitivities dictionary. <br> \n",
+    "Using this dictionary makes it easier for us when we want to plot sensitivities."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "from collections import OrderedDict\n",
+    "\n",
+    "def get_param_names(df):\n",
+    "    return list(set(df['parameter']))\n",
+    "\n",
+    "def get_sensitivity_levels(df):\n",
+    "    return list(set(df['sparsity']))\n",
+    "\n",
+    "def df2sensitivities(df):\n",
+    "    param_names = get_param_names(df)\n",
+    "    sparsities = get_sensitivity_levels(df)\n",
+    "\n",
+    "    sensitivities = {}\n",
+    "    for param_name in param_names:\n",
+    "        sensitivities[param_name] = OrderedDict()\n",
+    "        param_stats = df[(df.parameter == param_name)]\n",
+    "        \n",
+    "        for row in range(len(param_stats.index)):\n",
+    "            s = param_stats.iloc[[row]].sparsity\n",
+    "            top1 = param_stats.iloc[[row]].top1\n",
+    "            top5 = param_stats.iloc[[row]].top5\n",
+    "            sensitivities[param_name][float(s)] = (float(top1), float(top5))\n",
+    "    return sensitivities "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Examine parameters sensitivities\n",
+    "\n",
+    "After loading the sensitivity analysis CSV file into a Pandas dataframe, we can examine it.\n",
+    "\n",
+    "### Plot layer sensitivities at a selected sparsity level\n",
+    "Use the dropdown to choose the sparsity level, and select whether you choose to view the top1 accuracies or top5.<br>\n",
+    "Under the plot we display the numerical values of the accuracies, in case you want to have a closer look at the details."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "def view2(level, acc):\n",
+    "    filtered = df[df.sparsity == level]\n",
+    "    s = filtered.style.apply(highlight_min_max)\n",
+    "    \n",
+    "    param_names = filtered['parameter']\n",
+    "    \n",
+    "    # Plot the sensitivities\n",
+    "    x = range(filtered[acc].shape[0])\n",
+    "    y = filtered[acc].values.tolist()\n",
+    "    fig = plt.figure(figsize=(20,10))\n",
+    "    plt.plot(x, y, label=param_names, marker=\"o\", markersize=10, markerfacecolor=\"C1\")\n",
+    "    plt.ylabel(str(acc))\n",
+    "    plt.xlabel('parameter')\n",
+    "    plt.xticks(rotation='vertical')\n",
+    "    plt.xticks(x, param_names)\n",
+    "    plt.title('Pruning Sensitivity per layer %d' % level)    \n",
+    "    #return s\n",
+    "\n",
+    "def highlight_min_max(s):\n",
+    "    \"\"\"Highlight the max and min values in the series\"\"\"\n",
+    "    if s.name not in ['top1', 'top5']:\n",
+    "        return ['' for v in s] \n",
+    "    \n",
+    "    is_max = s == s.max()\n",
+    "    maxes = ['background-color: green' if v else '' for v in is_max]\n",
+    "    is_min = s == s.min()\n",
+    "    mins = ['background-color: red' if v else '' for v in is_min]    \n",
+    "    return [h1 if len(h1)>len(h2) else h2 for (h1,h2) in zip(maxes, mins)]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "sparsities = np.sort(get_sensitivity_levels(df))\n",
+    "acc_radio = widgets.RadioButtons(options=['top1', 'top5'], value='top1', description='Accuracy:')\n",
+    "levels_dropdown = widgets.Dropdown(description='Sparsity:', options=sparsities)\n",
+    "interact(view2, level=levels_dropdown, acc=acc_radio);"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Sometimes we want to look at the sensitivies of a specific weights tensor:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def view_sparsity(param_name):\n",
+    "    display(df[df['parameter']==param_name])\n",
+    "\n",
+    "param_names = sorted(df['parameter'].unique().tolist())\n",
+    "param_dropdown = widgets.Dropdown(description='Parameter:', options=param_names)\n",
+    "interact(view_sparsity, param_name=param_dropdown);"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Compare layer sensitivities\n",
+    "\n",
+    "Plot the pruning sensitivities of selected layers.\n",
+    "<br>Select multiple parameters using SHIFT and CTRL."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Relative import of code from distiller, w/o installing the package\n",
+    "import os\n",
+    "import sys\n",
+    "module_path = os.path.abspath(os.path.join('..', '..'))\n",
+    "if module_path not in sys.path:\n",
+    "    sys.path.append(module_path)\n",
+    "\n",
+    "import pandas as pd\n",
+    "import distiller\n",
+    "import models\n",
+    "import torch\n",
+    "import apputils\n",
+    "\n",
+    "def create_macs_table(model):\n",
+    "    dummy_input = torch.randn(1, 3, 32, 32)\n",
+    "    g = apputils.SummaryGraph(model.cuda(), dummy_input.cuda())\n",
+    "    macs_tbl = {}\n",
+    "    for id, (name, m) in enumerate(model.named_modules()):\n",
+    "        if isinstance(m, torch.nn.Conv2d):\n",
+    "            conv_op = g.find_op(distiller.normalize_module_name(name))\n",
+    "            macs_tbl[name +\".weight\"] = conv_op['attrs']['MACs']\n",
+    "    return macs_tbl\n",
+    "\n",
+    "import math\n",
+    "\n",
+    "def compute_log_macs(dense_macs, sparsity_in, sparsity_out, top1_acc):\n",
+    "    #print(dense_macs, sparsity_in, sparsity_out, top1_acc)\n",
+    "    sparse_macs = dense_macs * ((1-sparsity_in) * (1-sparsity_out))\n",
+    "    return -1 * math.log(sparse_macs)\n",
+    "\n",
+    "def compute_reward(dense_macs, sparsity_in, sparsity_out, top1_acc):\n",
+    "    #print(dense_macs, sparsity_in, sparsity_out, top1_acc)\n",
+    "    sparse_macs = dense_macs * ((1-sparsity_in) * (1-sparsity_out))\n",
+    "    #print(dense_macs, sparsity_in)\n",
+    "    #print(math.log(sparse_macs))\n",
+    "    reward = -1 * (1-top1_acc/100) * math.log(sparse_macs)\n",
+    "    return reward"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model = models.create_model(False, \"cifar10\", \"vgg16_cifar\")\n",
+    "macs_tbl = create_macs_table(model)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "# assign a different color to each parameter (otherwise, colors change on us as we make different selections)\n",
+    "param_names = df['parameter'].unique().tolist()\n",
+    "color_idx = np.linspace(0, 1, len(param_names))\n",
+    "colors = {}  \n",
+    "for i, pname in zip(color_idx, param_names):\n",
+    "    colors[pname] = color= plt.get_cmap('tab20')(i)\n",
+    "plt.rcParams.update({'font.size': 18})\n",
+    "\n",
+    "def view(weights='', acc=0):\n",
+    "    sensitivities= None\n",
+    "    if weights[0]=='All':\n",
+    "        sensitivities = df2sensitivities(df)\n",
+    "    else:\n",
+    "        mask = False\n",
+    "        mask = [(df.parameter == pname) for pname in weights]\n",
+    "        mask = np.logical_or.reduce(mask)\n",
+    "        sensitivities = df2sensitivities(df[mask])\n",
+    "\n",
+    "    # Plot the sensitivities\n",
+    "    fig, ax1 = plt.subplots(figsize=(20,10))\n",
+    "    for param_name, sensitivity in sensitivities.items():\n",
+    "        sense = [values[acc] for sparsity, values in sensitivity.items()]\n",
+    "        sparsities = [sparsity for sparsity, values in sensitivity.items()]\n",
+    "        ax1.plot(sparsities, sense, label=param_name, marker=\"o\", markersize=10, color=colors[param_name])\n",
+    "        \n",
+    "        ax2 = ax1.twinx()\n",
+    "        y2 = [compute_reward(macs_tbl[param_name], sparsities[i], sparsities[i], sense[i]) for i in range(len(sense)) ]\n",
+    "        ax2.plot(sparsities, y2, label=param_names, marker=\"o\", markersize=10, color=colors[param_name], markerfacecolor=\"red\")\n",
+    "        ax2.set_ylabel(\"Reward\")\n",
+    "  \n",
+    "\n",
+    "    ax1.set_ylabel('top1')\n",
+    "    ax1.set_xlabel('sparsity')\n",
+    "    ax1.set_title('Pruning Sensitivity')\n",
+    "    ax1.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), fancybox=True, shadow=True, ncol=3)\n",
+    "\n",
+    "items = ['All']+param_names\n",
+    "w = widgets.SelectMultiple(options=items, value=[items[1]], layout=Layout(width='50%'), description='Weights:')\n",
+    "acc_widget = widgets.RadioButtons(options={'top1': 0, 'top5': 1}, value=0, description='Accuracy:')\n",
+    "interactive(view, acc=acc_widget, weights=w)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Filter pruning sensitivity analysis\n",
+    "\n",
+    "Just as we perform element-wise pruning sensitivity analysis, we can also analyze a model's filter-wise pruning sensitivity.  Although the sparsity levels are reported in percentage steps, the actual pruning level might be somewhat lower, because when we prune filters the minimum granularity of pruning is ```1/numer_of_filters```.\n",
+    "\n",
+    "\n",
+    "We performed a filter-wise pruning sensitivity analysis on ResNet20-Cifar using the following command:\n",
+    "```\n",
+    "python3 compress_classifier.py -a resnet20_cifar ../../../data.cifar10/ -j 12 --resume=../ssl/checkpoints/checkpoint_trained_dense.pth.tar --sense=filter\n",
+    "```\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def view_sparsity(param_name):\n",
+    "    display(df_filter[df_filter['parameter']==param_name])\n",
+    "    \n",
+    "param_names = sorted(df_filter['parameter'].unique().tolist())\n",
+    "param_dropdown = widgets.Dropdown(description='Parameter:', options=param_names)\n",
+    "interact(view_sparsity, param_name=param_dropdown);"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now let's look at the sparsity vs. the compute:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "df_filter = df"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "def view_fliters(level, acc):\n",
+    "    filtered = df_filter[df_filter.sparsity == level]\n",
+    "    s = filtered.style.apply(highlight_min_max)    \n",
+    "    param_names = filtered['parameter']\n",
+    "    \n",
+    "    # Plot the sensitivities\n",
+    "    x = range(filtered[acc].shape[0])\n",
+    "    y = filtered[acc].values.tolist()\n",
+    "    #y2 = [macs_tbl[name]*level**2 * math. for i, name in enumerate(param_names)]\n",
+    "    y2 = []\n",
+    "    for i, name in enumerate(param_names):\n",
+    "        y2.append(compute_reward(macs_tbl[name], level, level, y[i]))\n",
+    "   \n",
+    "\n",
+    "    fig, ax1 = plt.subplots(figsize=(20,10))\n",
+    "    ax1.plot(x, y, label=param_names, marker=\"o\", markersize=10, markerfacecolor=\"C1\")\n",
+    "    ax1.set_ylabel(str(acc))\n",
+    "    ax1.tick_params(axis=x)\n",
+    "    ax1.set_xticks(x)\n",
+    "    ax1.set_xticklabels(param_names, rotation='vertical')\n",
+    "    ax1.set_title('Filter pruning sensitivity per layer ({}% sparsity)'.format(level*100)) \n",
+    "    \n",
+    "    ax2 = ax1.twinx()    \n",
+    "    ax2.plot(x, y2, label=param_names, marker=\"o\", markersize=10, markerfacecolor=\"C2\")\n",
+    "    ax2.set_ylabel(\"reward\")\n",
+    "    return s\n",
+    "\n",
+    "\n",
+    "\n",
+    "df_filter['sparsity'] = round(df_filter['sparsity'], 2)\n",
+    "sparsities = np.sort(get_sensitivity_levels(df_filter))\n",
+    "acc_radio = widgets.RadioButtons(options=['top1', 'top5'], value='top1', description='Accuracy:')\n",
+    "levels_dropdown = widgets.Dropdown(description='Sparsity:', options=sparsities)\n",
+    "interact(view_fliters, level=levels_dropdown, acc=acc_radio);"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.5.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/examples/automated_deep_compression/adc_vgg16_cifar_finetuning.yaml b/examples/automated_deep_compression/adc_vgg16_cifar_finetuning.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..932c665f3a141dc10559b7da7de30b14f39c0aa3
--- /dev/null
+++ b/examples/automated_deep_compression/adc_vgg16_cifar_finetuning.yaml
@@ -0,0 +1,12 @@
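+# Fine-tuning schedule for a model produced by the ADC agent.
+#
+# A hypothetical invocation, following the pattern of vgg16_cifar_baseline_training.yaml
+# (the checkpoint path and hyperparameters are placeholders, not from a recorded run):
+#
+# time python3 compress_classifier.py --arch vgg16_cifar ../../../data.cifar10 --resume=<adc_checkpoint>.pth.tar --lr=0.005 --epochs=100 --compress=../automated_deep_compression/adc_vgg16_cifar_finetuning.yaml
+#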
+lr_schedulers:
+  training_lr:
+    class: StepLR
+    step_size: 25 #15
+    gamma: 0.10
+
+policies:
+  - lr_scheduler:
+      instance_name: training_lr
+    starting_epoch: 0
+    ending_epoch: 300
+    frequency: 1
diff --git a/examples/automated_deep_compression/presets/ADC_DDPG.py b/examples/automated_deep_compression/presets/ADC_DDPG.py
index e7e5a1f5bc1ae27eb06c6b1e0d31c00680fd8487..2d6ce113f2add532b0b5f24325018c92f58b1ebf 100755
--- a/examples/automated_deep_compression/presets/ADC_DDPG.py
+++ b/examples/automated_deep_compression/presets/ADC_DDPG.py
@@ -8,19 +8,17 @@ from exploration_policies.additive_noise import AdditiveNoiseParameters
 from exploration_policies.truncated_normal import TruncatedNormalParameters
 from schedules import ConstantSchedule, PieceWiseSchedule, ExponentialSchedule
 from memories.memory import MemoryGranularity
+from base_parameters import EmbedderScheme
 from architectures.tensorflow_components.architecture import Dense
 
+
 ####################
 # Block Scheduling #
 ####################
 schedule_params = ScheduleParameters()
 schedule_params.improve_steps = EnvironmentEpisodes(400)
-if True:
-    schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(10)
-    schedule_params.evaluation_steps = EnvironmentEpisodes(3)
-else:
-    schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(1)
-    schedule_params.evaluation_steps = EnvironmentEpisodes(1)
+schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(1000)
+schedule_params.evaluation_steps = EnvironmentEpisodes(0)
 schedule_params.heatup_steps = EnvironmentSteps(2)
 
 #####################
@@ -31,39 +29,28 @@ agent_params.network_wrappers['actor'].input_embedders_parameters['observation']
 agent_params.network_wrappers['actor'].middleware_parameters.scheme = [Dense([300])]
 agent_params.network_wrappers['critic'].input_embedders_parameters['observation'].scheme = [Dense([300])]
 agent_params.network_wrappers['critic'].middleware_parameters.scheme = [Dense([300])]
-agent_params.network_wrappers['critic'].input_embedders_parameters['action'].scheme = [Dense([300])]
+agent_params.network_wrappers['critic'].input_embedders_parameters['action'].scheme = EmbedderScheme.Empty
 #agent_params.network_wrappers['critic'].clip_gradients = 100
 #agent_params.network_wrappers['actor'].clip_gradients = 100
 
 agent_params.algorithm.rate_for_copying_weights_to_target = 0.01  # Tau pg. 11
+agent_params.algorithm.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(1)
+agent_params.algorithm.discount = 1
 agent_params.memory.max_size = (MemoryGranularity.Transitions, 2000)
-# agent_params.memory.max_size = (MemoryGranularity.Episodes, 2000)
-agent_params.exploration = TruncatedNormalParameters() # AdditiveNoiseParameters()
+agent_params.exploration = TruncatedNormalParameters()  # AdditiveNoiseParameters()
 steps_per_episode = 13
 agent_params.exploration.noise_percentage_schedule = PieceWiseSchedule([(ConstantSchedule(0.5), EnvironmentSteps(100*steps_per_episode)),
-                                                                        (ExponentialSchedule(0.5, 0, 0.95), EnvironmentSteps(350*steps_per_episode))])
+                                                                        (ExponentialSchedule(0.5, 0, 0.996), EnvironmentSteps(300*steps_per_episode))])
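+# Exploration noise: constant 50% for the first 100 episodes (13 steps each), then an exponential schedule over the next 300 episodes.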
 agent_params.algorithm.num_consecutive_playing_steps = EnvironmentSteps(1)
 agent_params.input_filter = MujocoInputFilter()
 agent_params.output_filter = MujocoOutputFilter()
-# agent_params.network_wrappers['actor'].learning_rate = 0.0001
-# agent_params.network_wrappers['critic'].learning_rate = 0.0001
-# These seem like good values for Reward = -Error
 agent_params.network_wrappers['actor'].learning_rate = 0.0001
-agent_params.network_wrappers['critic'].learning_rate = 0.0001
-# agent_params.network_wrappers['actor'].learning_rate = 0.1
-# agent_params.network_wrappers['critic'].learning_rate = 0.1
-# agent_params.network_wrappers['actor'].learning_rate =  0.000001
-# agent_params.network_wrappers['critic'].learning_rate = 0.000001
+agent_params.network_wrappers['critic'].learning_rate = 0.001
 
 ##############################
 #      Gym                   #
 ##############################
 env_params = GymEnvironmentParameters()
-#env_params.level = '/home/cvds_lab/nzmora/pytorch_workspace/distiller/examples/automated_deep_compression/gym_env/distiller_adc/distiller_adc.py:AutomatedDeepCompression'
-# This path works when training from Coach
-#env_params.level = '../distiller/examples/automated_deep_compression/gym_env/distiller_adc/distiller_adc.py:AutomatedDeepCompression'
-# This path works when training from Distiller
-#env_params.level = '../automated_deep_compression/gym_env/distiller_adc/distiller_adc.py:AutomatedDeepCompression'
 env_params.level = '../automated_deep_compression/ADC.py:CNNEnvironment'
 
 
diff --git a/examples/automated_deep_compression/vgg16_cifar_baseline_training.yaml b/examples/automated_deep_compression/vgg16_cifar_baseline_training.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..f56f656d7d1d9daf398a7b6260ac05fbb25d52eb
--- /dev/null
+++ b/examples/automated_deep_compression/vgg16_cifar_baseline_training.yaml
@@ -0,0 +1,54 @@
+# We used this schedule to train CIFAR10-VGG16 from scratch.
+#
+# time python3 compress_classifier.py --arch vgg16_cifar  ../../../data.cifar10 -p=50 --lr=0.05 --epochs=180 --compress=../automated_deep_compression/vgg16_cifar_baseline_training.yaml -j=1 --deterministic
+#
+# Parameters:
+# +----------+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+
+# |          | Name                      | Shape            |   NNZ (dense) |   NNZ (sparse) |   Cols (%) |   Rows (%) |   Ch (%) |   2D (%) |   3D (%) |   Fine (%) |     Std |     Mean |   Abs-Mean |
+# |----------+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------|
+# |  0.00000 | features.module.0.weight  | (64, 3, 3, 3)    |          1728 |           1728 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.25026 | -0.00189 |    0.18302 |
+# |  1.00000 | features.module.2.weight  | (64, 64, 3, 3)   |         36864 |          36864 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.07487 | -0.01098 |    0.05490 |
+# |  2.00000 | features.module.5.weight  | (128, 64, 3, 3)  |         73728 |          73728 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.06412 | -0.00765 |    0.04841 |
+# |  3.00000 | features.module.7.weight  | (128, 128, 3, 3) |        147456 |         147456 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.05229 | -0.00949 |    0.04124 |
+# |  4.00000 | features.module.10.weight | (256, 128, 3, 3) |        294912 |         294912 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.04503 | -0.00606 |    0.03530 |
+# |  5.00000 | features.module.12.weight | (256, 256, 3, 3) |        589824 |         589824 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.03495 | -0.00444 |    0.02725 |
+# |  6.00000 | features.module.14.weight | (256, 256, 3, 3) |        589824 |         589824 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.03285 | -0.00550 |    0.02571 |
+# |  7.00000 | features.module.17.weight | (512, 256, 3, 3) |       1179648 |        1179648 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.02082 | -0.00241 |    0.01615 |
+# |  8.00000 | features.module.19.weight | (512, 512, 3, 3) |       2359296 |        2359296 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.01364 | -0.00101 |    0.01070 |
+# |  9.00000 | features.module.21.weight | (512, 512, 3, 3) |       2359296 |        2359296 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.01194 | -0.00085 |    0.00941 |
+# | 10.00000 | features.module.24.weight | (512, 512, 3, 3) |       2359296 |        2359296 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.01060 | -0.00013 |    0.00842 |
+# | 11.00000 | features.module.26.weight | (512, 512, 3, 3) |       2359296 |        2359296 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.01031 | -0.00002 |    0.00821 |
+# | 12.00000 | features.module.28.weight | (512, 512, 3, 3) |       2359296 |        2359296 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.01036 | -0.00019 |    0.00823 |
+# | 13.00000 | classifier.weight         | (10, 512)        |          5120 |           5120 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.06897 | -0.00003 |    0.04909 |
+# | 14.00000 | Total sparsity:           | -                |      14715584 |       14715584 |    0.00000 |    0.00000 |  0.00000 |  0.00000 |  0.00000 |    0.00000 | 0.00000 |  0.00000 |    0.00000 |
+# +----------+---------------------------+------------------+---------------+----------------+------------+------------+----------+----------+----------+------------+---------+----------+------------+
+# Total sparsity: 0.00
+#
+# --- validate (epoch=179)-----------
+# 5000 samples (256 per mini-batch)
+# ==> Top1: 90.160    Top5: 99.260    Loss: 0.633
+#
+# Saving checkpoint to: logs/2018.07.13-234036/checkpoint.pth.tar
+# --- test ---------------------
+# 10000 samples (256 per mini-batch)
+# ==> Top1: 90.930    Top5: 99.470    Loss: 0.669
+#
+#
+# Log file for this run: /home/cvds_lab/nzmora/pytorch_workspace/distiller/examples/classifier_compression/logs/2018.07.13-234036/2018.07.13-234036.log
+#
+# real    49m11.296s
+# user    82m20.495s
+# sys     15m36.971s
+
+lr_schedulers:
+  training_lr:
+    class: StepLR
+    step_size: 45
+    gamma: 0.10
+
+policies:
+    - lr_scheduler:
+        instance_name: training_lr
+      starting_epoch: 35
+      ending_epoch: 200
+      frequency: 1
diff --git a/examples/automated_deep_compression/vgg16_cifar_sensitivity_channels.csv b/examples/automated_deep_compression/vgg16_cifar_sensitivity_channels.csv
new file mode 100644
index 0000000000000000000000000000000000000000..e704513d94288cd7ae20cd6422e9771a0fc097f3
--- /dev/null
+++ b/examples/automated_deep_compression/vgg16_cifar_sensitivity_channels.csv
@@ -0,0 +1,248 @@
+parameter,sparsity,top1,top5,loss
+features.0.weight,0.0,89.22,99.58,0.7636774435639381
+features.0.weight,0.05,89.22,99.58,0.7636774435639381
+features.0.weight,0.1,89.22,99.58,0.7636774435639381
+features.0.weight,0.15000000000000002,89.22,99.58,0.7636774435639381
+features.0.weight,0.2,89.22,99.58,0.7636774435639381
+features.0.weight,0.25,89.22,99.58,0.7636774435639381
+features.0.weight,0.30000000000000004,89.22,99.58,0.7636774435639381
+features.0.weight,0.35000000000000003,68.15,95.98,2.6556635022163384
+features.0.weight,0.4,68.15,95.98,2.6556635022163384
+features.0.weight,0.45,68.15,95.98,2.6556635022163384
+features.0.weight,0.5,68.15,95.98,2.6556635022163384
+features.0.weight,0.55,68.15,95.98,2.6556635022163384
+features.0.weight,0.6000000000000001,68.15,95.98,2.6556635022163384
+features.0.weight,0.65,68.15,95.98,2.6556635022163384
+features.0.weight,0.7000000000000001,35.099999999999994,79.22,6.594213616847991
+features.0.weight,0.75,35.099999999999994,79.22,6.594213616847991
+features.0.weight,0.8,35.099999999999994,79.22,6.594213616847991
+features.0.weight,0.8500000000000001,35.099999999999994,79.22,6.594213616847991
+features.0.weight,0.9,35.099999999999994,79.22,6.594213616847991
+features.2.weight,0.0,89.22,99.58,0.7636774435639381
+features.2.weight,0.05,89.19,99.56,0.7651889458298684
+features.2.weight,0.1,89.18,99.55000000000001,0.7670430049300195
+features.2.weight,0.15000000000000002,89.09,99.62,0.7812006041407588
+features.2.weight,0.2,88.98,99.62,0.7917364612221719
+features.2.weight,0.25,88.55999999999999,99.62,0.8324924081563949
+features.2.weight,0.30000000000000004,88.3,99.59,0.861794525384903
+features.2.weight,0.35000000000000003,87.42,99.53,0.9269773110747336
+features.2.weight,0.4,86.69,99.55000000000001,1.003380635380745
+features.2.weight,0.45,86.55,99.49,1.0392508804798124
+features.2.weight,0.5,84.01,99.35000000000001,1.1889065220952029
+features.2.weight,0.55,82.83,99.19,1.2583119288086895
+features.2.weight,0.6000000000000001,80.4,98.92,1.4815124928951267
+features.2.weight,0.65,74.77000000000001,97.84,2.059452801942826
+features.2.weight,0.7000000000000001,71.06,97.00999999999999,2.372544836997985
+features.2.weight,0.75,63.0,94.96,3.3752232074737547
+features.2.weight,0.8,52.370000000000005,91.8,4.972057831287385
+features.2.weight,0.8500000000000001,46.220000000000006,87.36,5.8365840196609495
+features.2.weight,0.9,23.599999999999998,68.82,11.625418305397034
+features.5.weight,0.0,89.22,99.58,0.7636774435639381
+features.5.weight,0.05,89.2,99.58,0.7695665135979651
+features.5.weight,0.1,89.07000000000001,99.58,0.777985467016697
+features.5.weight,0.15000000000000002,88.8,99.59,0.7968418329954148
+features.5.weight,0.2,88.99000000000001,99.53999999999999,0.8164728745818136
+features.5.weight,0.25,88.58,99.56,0.8313623622059821
+features.5.weight,0.30000000000000004,88.37,99.53,0.8286508128046989
+features.5.weight,0.35000000000000003,87.33999999999999,99.45,0.9081800788640976
+features.5.weight,0.4,85.89,99.31,1.0216026619076728
+features.5.weight,0.45,83.39999999999999,98.87,1.1970515057444573
+features.5.weight,0.5,82.09,98.72,1.2643444284796712
+features.5.weight,0.55,80.93,98.57000000000001,1.402244460582733
+features.5.weight,0.6000000000000001,78.4,98.24000000000001,1.6622346818447113
+features.5.weight,0.65,76.1,97.92999999999999,1.821042415499687
+features.5.weight,0.7000000000000001,70.7,97.07000000000001,2.141398164629936
+features.5.weight,0.75,56.00000000000001,93.44,3.3105062603950506
+features.5.weight,0.8,57.42,95.12,2.8576306760311123
+features.5.weight,0.8500000000000001,19.169999999999998,87.91,9.00937354564667
+features.5.weight,0.9,20.279999999999998,87.05000000000001,7.018709814548493
+features.7.weight,0.0,89.22,99.58,0.7636774435639381
+features.7.weight,0.05,89.25,99.6,0.7612242087721828
+features.7.weight,0.1,89.33,99.59,0.7606694415211678
+features.7.weight,0.15000000000000002,89.08,99.63,0.7673289388418197
+features.7.weight,0.2,89.13,99.6,0.7778240397572519
+features.7.weight,0.25,88.88000000000001,99.53999999999999,0.7890061371028423
+features.7.weight,0.30000000000000004,88.72,99.52,0.8045065321028233
+features.7.weight,0.35000000000000003,88.33,99.55000000000001,0.8166114121675492
+features.7.weight,0.4,87.72,99.56,0.8861592426896094
+features.7.weight,0.45,87.7,99.53,0.8387746773660182
+features.7.weight,0.5,87.03,99.47,0.8264810189604759
+features.7.weight,0.55,86.53,99.42,0.8401749722659588
+features.7.weight,0.6000000000000001,85.37,99.36,0.829752704501152
+features.7.weight,0.65,84.53,99.25,0.8404730081558227
+features.7.weight,0.7000000000000001,81.17999999999999,98.83999999999999,1.0205668166279793
+features.7.weight,0.75,79.34,98.67,1.0138855665922164
+features.7.weight,0.8,73.58,97.69,1.3308990895748138
+features.7.weight,0.8500000000000001,69.06,97.38,1.3030580282211301
+features.7.weight,0.9,53.18000000000001,89.85,2.2446724444627772
+features.10.weight,0.0,89.22,99.58,0.7636774435639381
+features.10.weight,0.05,89.06,99.58,0.7635266423225402
+features.10.weight,0.1,89.12,99.57000000000001,0.7542888924479484
+features.10.weight,0.15000000000000002,88.92999999999999,99.53999999999999,0.7609276778995991
+features.10.weight,0.2,88.79,99.5,0.77874139547348
+features.10.weight,0.25,88.66000000000001,99.5,0.7760347723960876
+features.10.weight,0.30000000000000004,88.16,99.45,0.7967557474970819
+features.10.weight,0.35000000000000003,87.44000000000001,99.42999999999999,0.8225055634975433
+features.10.weight,0.4,85.3,99.37,0.9658117935061457
+features.10.weight,0.45,84.67,99.33999999999999,0.9441864915192125
+features.10.weight,0.5,83.3,99.32,0.9676069594919682
+features.10.weight,0.55,83.11,99.32,0.9194813281297682
+features.10.weight,0.6000000000000001,80.66,99.1,1.1133895620703695
+features.10.weight,0.65,74.46,98.33,1.4871227934956552
+features.10.weight,0.7000000000000001,69.29,97.13000000000001,1.9071772634983062
+features.10.weight,0.75,63.5,95.04,2.1230391800403594
+features.10.weight,0.8,51.83,90.86999999999999,2.8775381684303287
+features.10.weight,0.8500000000000001,43.400000000000006,86.31,3.10533235669136
+features.10.weight,0.9,29.349999999999998,68.19,4.870438265800476
+features.12.weight,0.0,89.22,99.58,0.7636774435639381
+features.12.weight,0.05,89.27000000000001,99.58,0.7646440446376801
+features.12.weight,0.1,89.29,99.57000000000001,0.7583929166197775
+features.12.weight,0.15000000000000002,89.21,99.56,0.762963669002056
+features.12.weight,0.2,89.01,99.56,0.7385730564594268
+features.12.weight,0.25,88.92999999999999,99.53999999999999,0.7342981152236461
+features.12.weight,0.30000000000000004,88.6,99.59,0.757648891210556
+features.12.weight,0.35000000000000003,88.31,99.49,0.7588193118572234
+features.12.weight,0.4,87.78,99.5,0.7745968967676164
+features.12.weight,0.45,86.92,99.49,0.8063072130084035
+features.12.weight,0.5,86.22,99.46000000000001,0.8100244864821435
+features.12.weight,0.55,85.72,99.35000000000001,0.8138590052723886
+features.12.weight,0.6000000000000001,83.6,99.22999999999999,0.8948326170444489
+features.12.weight,0.65,81.17999999999999,98.83,0.9743211939930914
+features.12.weight,0.7000000000000001,76.52,98.29,1.1834679618477826
+features.12.weight,0.75,73.09,97.68,1.2202372476458552
+features.12.weight,0.8,64.86000000000001,95.85000000000001,1.600474599003792
+features.12.weight,0.8500000000000001,55.16,93.10000000000001,2.0641291558742516
+features.12.weight,0.9,35.31999999999999,80.07,3.333579432964325
+features.14.weight,0.0,89.22,99.58,0.7636774435639381
+features.14.weight,0.05,89.18,99.57000000000001,0.7642287686467173
+features.14.weight,0.1,89.14,99.53999999999999,0.763840524852276
+features.14.weight,0.15000000000000002,88.91,99.56,0.7627260759472849
+features.14.weight,0.2,88.97,99.56,0.7475016176700593
+features.14.weight,0.25,88.83,99.55000000000001,0.7279256418347356
+features.14.weight,0.30000000000000004,88.6,99.55000000000001,0.7334924757480621
+features.14.weight,0.35000000000000003,88.3,99.48,0.7149026282131673
+features.14.weight,0.4,88.03999999999999,99.48,0.6982361137866975
+features.14.weight,0.45,87.16000000000001,99.4,0.7273787923157217
+features.14.weight,0.5,86.11999999999999,99.42,0.7806331239640713
+features.14.weight,0.55,85.65,99.42999999999999,0.7705157600343229
+features.14.weight,0.6000000000000001,84.58,99.39,0.7391175635159015
+features.14.weight,0.65,83.72,99.1,0.7629736252129078
+features.14.weight,0.7000000000000001,81.67999999999999,99.03,0.7983839586377143
+features.14.weight,0.75,78.05,98.53,0.9488561064004899
+features.14.weight,0.8,74.28,98.26,1.0382560193538666
+features.14.weight,0.8500000000000001,62.260000000000005,96.88,1.7040498256683347
+features.14.weight,0.9,54.63,95.19,2.015580037236214
+features.17.weight,0.0,89.22,99.58,0.7636774435639381
+features.17.weight,0.05,89.3,99.55000000000001,0.7643002554774284
+features.17.weight,0.1,89.12,99.53,0.75629862844944
+features.17.weight,0.15000000000000002,88.73,99.55000000000001,0.7490760326385498
+features.17.weight,0.2,88.94999999999999,99.48,0.7469013914465904
+features.17.weight,0.25,88.92,99.57000000000001,0.7395543828606607
+features.17.weight,0.30000000000000004,88.61,99.6,0.7252944745123386
+features.17.weight,0.35000000000000003,88.11,99.51,0.745880564302206
+features.17.weight,0.4,87.22,99.5,0.7651523858308793
+features.17.weight,0.45,86.42,99.55000000000001,0.7797240674495696
+features.17.weight,0.5,85.6,99.46000000000001,0.8063808932900429
+features.17.weight,0.55,85.13999999999999,99.26,0.7946531414985656
+features.17.weight,0.6000000000000001,84.71,99.22999999999999,0.7559326574206353
+features.17.weight,0.65,81.97,98.9,0.8353566706180572
+features.17.weight,0.7000000000000001,77.60000000000001,98.32,1.074595110118389
+features.17.weight,0.75,75.83,98.03,1.1351665094494818
+features.17.weight,0.8,67.91,97.13000000000001,1.5376916050910947
+features.17.weight,0.8500000000000001,56.65,92.96,2.404172331094741
+features.17.weight,0.9,38.81,82.69999999999999,3.5184253454208374
+features.19.weight,0.0,89.22,99.58,0.7636774435639381
+features.19.weight,0.05,89.24,99.6,0.7618191495537758
+features.19.weight,0.1,89.25,99.53999999999999,0.7589655950665476
+features.19.weight,0.15000000000000002,88.92999999999999,99.51,0.7426741398870944
+features.19.weight,0.2,89.07000000000001,99.51,0.7234230980277062
+features.19.weight,0.25,88.79,99.58,0.6930524609982966
+features.19.weight,0.30000000000000004,88.58,99.56,0.6842170007526875
+features.19.weight,0.35000000000000003,88.53,99.56,0.6687206380069257
+features.19.weight,0.4,88.48,99.57000000000001,0.6517048284411427
+features.19.weight,0.45,88.23,99.53,0.6357874415814878
+features.19.weight,0.5,88.08,99.55000000000001,0.6103163883090019
+features.19.weight,0.55,87.83,99.47,0.602372144907713
+features.19.weight,0.6000000000000001,87.41,99.44,0.593823041766882
+features.19.weight,0.65,86.64,99.47,0.5951531857252119
+features.19.weight,0.7000000000000001,85.49,99.48,0.602915671467781
+features.19.weight,0.75,83.46000000000001,99.37,0.6216656222939492
+features.19.weight,0.8,80.97999999999999,99.08,0.6921703658998012
+features.19.weight,0.8500000000000001,79.28999999999999,98.89,0.7110876709222793
+features.19.weight,0.9,75.01,98.34,0.7872768387198449
+features.21.weight,0.0,89.22,99.58,0.7636774435639381
+features.21.weight,0.05,89.24,99.56,0.7612854838371276
+features.21.weight,0.1,89.33,99.56,0.7543460696935653
+features.21.weight,0.15000000000000002,89.24,99.59,0.7417637541890146
+features.21.weight,0.2,89.14999999999999,99.56,0.7299148380756378
+features.21.weight,0.25,89.08,99.57000000000001,0.7093379102647305
+features.21.weight,0.30000000000000004,89.03,99.55000000000001,0.6862362027168273
+features.21.weight,0.35000000000000003,88.9,99.55000000000001,0.6694970756769182
+features.21.weight,0.4,88.86,99.56,0.6425345957279206
+features.21.weight,0.45,88.91,99.58,0.6317106172442436
+features.21.weight,0.5,88.72,99.53999999999999,0.6111330606043336
+features.21.weight,0.55,88.55999999999999,99.53,0.5959938228130341
+features.21.weight,0.6000000000000001,88.34,99.59,0.5689934313297272
+features.21.weight,0.65,88.0,99.56,0.5504519559442997
+features.21.weight,0.7000000000000001,87.31,99.5,0.5406161874532701
+features.21.weight,0.75,86.11999999999999,99.5,0.5501984223723412
+features.21.weight,0.8,84.65,99.4,0.5430169485509396
+features.21.weight,0.8500000000000001,82.98,99.31,0.5470309577882291
+features.21.weight,0.9,78.58000000000001,98.72,0.6479422532021998
+features.24.weight,0.0,89.22,99.58,0.7636774435639381
+features.24.weight,0.05,89.29,99.57000000000001,0.750853791832924
+features.24.weight,0.1,89.26,99.57000000000001,0.7314087107777596
+features.24.weight,0.15000000000000002,89.22,99.55000000000001,0.7105505846440792
+features.24.weight,0.2,89.28,99.52,0.695668651163578
+features.24.weight,0.25,89.22,99.55000000000001,0.6813249096274374
+features.24.weight,0.30000000000000004,89.18,99.53999999999999,0.6682336099445821
+features.24.weight,0.35000000000000003,89.07000000000001,99.53999999999999,0.6366836458444595
+features.24.weight,0.4,88.88000000000001,99.49,0.6114222474396228
+features.24.weight,0.45,88.85,99.48,0.6024916984140875
+features.24.weight,0.5,88.73,99.48,0.5679814592003822
+features.24.weight,0.55,88.6,99.51,0.5559957988560197
+features.24.weight,0.6000000000000001,88.12,99.47,0.5366946041584015
+features.24.weight,0.65,87.9,99.46000000000001,0.513071321696043
+features.24.weight,0.7000000000000001,87.25,99.44,0.49167033433914187
+features.24.weight,0.75,87.1,99.42999999999999,0.450179835408926
+features.24.weight,0.8,85.57000000000001,99.39,0.4781026504933834
+features.24.weight,0.8500000000000001,84.08,99.15,0.4855682924389839
+features.24.weight,0.9,81.17999999999999,98.97,0.551998709887266
+features.26.weight,0.0,89.22,99.58,0.7636774435639381
+features.26.weight,0.05,89.22,99.58,0.7478332489728926
+features.26.weight,0.1,89.16,99.59,0.7375122860074043
+features.26.weight,0.15000000000000002,89.23,99.57000000000001,0.7194580778479576
+features.26.weight,0.2,89.22,99.56,0.6933473661541939
+features.26.weight,0.25,89.24,99.59,0.6729100845754146
+features.26.weight,0.30000000000000004,89.25,99.58,0.64560554549098
+features.26.weight,0.35000000000000003,89.14,99.61,0.6163844108581544
+features.26.weight,0.4,89.09,99.63,0.5909451678395272
+features.26.weight,0.45,89.05,99.59,0.5593577705323695
+features.26.weight,0.5,89.05,99.62,0.5350267551839353
+features.26.weight,0.55,88.87,99.6,0.5076540984213354
+features.26.weight,0.6000000000000001,88.81,99.6,0.47115837484598155
+features.26.weight,0.65,88.87,99.59,0.4354794979095459
+features.26.weight,0.7000000000000001,88.94999999999999,99.61,0.4001040123403072
+features.26.weight,0.75,88.73,99.57000000000001,0.3735382024198771
+features.26.weight,0.8,88.7,99.63,0.33791394121944907
+features.26.weight,0.8500000000000001,88.08,99.59,0.35034140944480896
+features.26.weight,0.9,87.99,99.47,0.406125946342945
+features.28.weight,0.0,89.22,99.58,0.7636774435639381
+features.28.weight,0.05,89.24,99.58,0.7483089432120323
+features.28.weight,0.1,89.23,99.56,0.7294130131602288
+features.28.weight,0.15000000000000002,89.26,99.55000000000001,0.7089642792940138
+features.28.weight,0.2,89.2,99.56,0.6845127731561659
+features.28.weight,0.25,89.21,99.59,0.6567807316780089
+features.28.weight,0.30000000000000004,89.18,99.56,0.6314538806676866
+features.28.weight,0.35000000000000003,89.27000000000001,99.53999999999999,0.600998058915138
+features.28.weight,0.4,89.12,99.55000000000001,0.577172427624464
+features.28.weight,0.45,89.19,99.53,0.5453893564641474
+features.28.weight,0.5,89.09,99.57000000000001,0.5098466560244562
+features.28.weight,0.55,89.13,99.56,0.47850212901830663
+features.28.weight,0.6000000000000001,89.07000000000001,99.56,0.4394362322986126
+features.28.weight,0.65,89.11,99.6,0.40969536155462266
+features.28.weight,0.7000000000000001,88.88000000000001,99.58,0.37882810831069946
+features.28.weight,0.75,88.89,99.56,0.3571597643196583
+features.28.weight,0.8,88.33,99.57000000000001,0.3542283728718757
+features.28.weight,0.8500000000000001,87.85000000000001,99.47,0.38023133948445315
+features.28.weight,0.9,86.32,99.37,0.5236741743981836
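
Each row of the sensitivity CSV above records the top1/top5 accuracy and loss measured after pruning a single layer's channels to the given sparsity level; the sparsity=0.0 rows give the unpruned baseline. A minimal sketch of plotting per-layer sensitivity curves from such a file, assuming pandas and matplotlib are installed (the path matches the file added in this diff):

    import pandas as pd
    import matplotlib.pyplot as plt

    df = pd.read_csv("examples/automated_deep_compression/vgg16_cifar_sensitivity_channels.csv")
    # One curve per layer: top1 accuracy as a function of channel sparsity.
    for name, group in df.groupby("parameter"):
        plt.plot(group["sparsity"], group["top1"], label=name)
    plt.xlabel("channel sparsity")
    plt.ylabel("top1 accuracy [%]")
    plt.legend(fontsize="x-small")
    plt.show()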
diff --git a/examples/automated_deep_compression/vgg16_cifar_sensitivity_filters.csv b/examples/automated_deep_compression/vgg16_cifar_sensitivity_filters.csv
new file mode 100644
index 0000000000000000000000000000000000000000..5155e7b9943d5394900e5a734c9c8b3523bb085a
--- /dev/null
+++ b/examples/automated_deep_compression/vgg16_cifar_sensitivity_filters.csv
@@ -0,0 +1,248 @@
+parameter,sparsity,top1,top5,loss
+features.module.0.weight,0.0,90.93,99.47,0.6692559264600276
+features.module.0.weight,0.05,90.92,99.47,0.6690988287329673
+features.module.0.weight,0.1,90.91,99.47,0.6692533545196054
+features.module.0.weight,0.15000000000000002,90.8,99.51,0.667614870518446
+features.module.0.weight,0.2,90.74,99.45,0.6767612747848034
+features.module.0.weight,0.25,90.77,99.47,0.6721171900629997
+features.module.0.weight,0.30000000000000004,90.51,99.5,0.6861152164638042
+features.module.0.weight,0.35000000000000003,90.32,99.52,0.6949880458414555
+features.module.0.weight,0.4,90.01,99.51,0.7139997139573095
+features.module.0.weight,0.45,89.5,99.42999999999999,0.7504620119929313
+features.module.0.weight,0.5,89.03,99.4,0.8016829878091813
+features.module.0.weight,0.55,88.79,99.3,0.8072707198560237
+features.module.0.weight,0.6000000000000001,87.49,99.2,0.918370771408081
+features.module.0.weight,0.65,86.57000000000001,99.14,0.9536818996071813
+features.module.0.weight,0.7000000000000001,83.78,98.81,1.1387261122465133
+features.module.0.weight,0.75,77.08,97.66,1.560247530043125
+features.module.0.weight,0.8,67.58999999999999,95.26,2.167594322562217
+features.module.0.weight,0.8500000000000001,60.61000000000001,93.34,2.581526911258697
+features.module.0.weight,0.9,40.63,85.02000000000001,4.008871185779572
+features.module.2.weight,0.0,90.93,99.47,0.6692559219896795
+features.module.2.weight,0.05,90.89,99.48,0.669460067898035
+features.module.2.weight,0.1,90.86,99.44,0.6713570088148118
+features.module.2.weight,0.15000000000000002,90.83,99.4,0.6726143166422844
+features.module.2.weight,0.2,90.7,99.38,0.6837243437767029
+features.module.2.weight,0.25,90.25,99.45,0.7265349410474299
+features.module.2.weight,0.30000000000000004,89.21,99.38,0.7966136120259761
+features.module.2.weight,0.35000000000000003,88.35,99.33,0.8673222482204439
+features.module.2.weight,0.4,88.03,99.31,0.8825861036777498
+features.module.2.weight,0.45,87.22999999999999,99.2,0.950459983944893
+features.module.2.weight,0.5,84.44,98.91,1.1552318662405017
+features.module.2.weight,0.55,80.42,98.4,1.4645473361015318
+features.module.2.weight,0.6000000000000001,77.59,97.95,1.6982344359159471
+features.module.2.weight,0.65,73.29,96.89,1.995515561103821
+features.module.2.weight,0.7000000000000001,69.61,96.07,2.3076962500810625
+features.module.2.weight,0.75,59.56,93.41000000000001,3.028918415307999
+features.module.2.weight,0.8,50.59,89.38000000000001,3.415172547101974
+features.module.2.weight,0.8500000000000001,35.84,79.69,4.509777694940567
+features.module.2.weight,0.9,27.390000000000004,72.75,5.223302805423739
+features.module.5.weight,0.0,90.93,99.47,0.6692559219896795
+features.module.5.weight,0.05,90.95,99.46000000000001,0.6681679628789423
+features.module.5.weight,0.1,90.72,99.46000000000001,0.6689290963113308
+features.module.5.weight,0.15000000000000002,90.63,99.45,0.6755255162715913
+features.module.5.weight,0.2,90.58,99.44,0.6889681063592434
+features.module.5.weight,0.25,90.36,99.47,0.6984777480363847
+features.module.5.weight,0.30000000000000004,90.18,99.47,0.7110927015542983
+features.module.5.weight,0.35000000000000003,89.92,99.41,0.716961894184351
+features.module.5.weight,0.4,89.61,99.46000000000001,0.7260976850986481
+features.module.5.weight,0.45,88.63,99.38,0.7429451391100883
+features.module.5.weight,0.5,85.77,99.1,0.8889077246189118
+features.module.5.weight,0.55,82.94,98.9,1.0354220077395442
+features.module.5.weight,0.6000000000000001,79.86,98.5,1.1959240287542339
+features.module.5.weight,0.65,73.37,97.63,1.6120872646570208
+features.module.5.weight,0.7000000000000001,69.10000000000001,96.7,1.8702481120824814
+features.module.5.weight,0.75,55.06999999999999,93.22,2.8268324673175815
+features.module.5.weight,0.8,34.309999999999995,83.72,4.615156948566437
+features.module.5.weight,0.8500000000000001,23.61,75.48,5.859028172492981
+features.module.5.weight,0.9,14.92,66.66000000000001,6.933825767040253
+features.module.7.weight,0.0,90.93,99.47,0.6692559219896795
+features.module.7.weight,0.05,90.82000000000001,99.48,0.6706861115992069
+features.module.7.weight,0.1,90.72,99.42999999999999,0.6782251007854939
+features.module.7.weight,0.15000000000000002,90.53999999999999,99.44,0.6914359241724015
+features.module.7.weight,0.2,90.22,99.42999999999999,0.6971473284065722
+features.module.7.weight,0.25,89.98,99.36,0.7087507657706736
+features.module.7.weight,0.30000000000000004,89.56,99.36,0.712729112803936
+features.module.7.weight,0.35000000000000003,89.06,99.36,0.7362848788499831
+features.module.7.weight,0.4,88.28,99.29,0.7792428113520145
+features.module.7.weight,0.45,87.74,99.1,0.8087845697999
+features.module.7.weight,0.5,86.06,98.99,0.8717495843768117
+features.module.7.weight,0.55,83.42,98.65,1.0323444232344625
+features.module.7.weight,0.6000000000000001,81.32000000000001,98.33,1.204463630914688
+features.module.7.weight,0.65,78.05,97.83,1.3818809509277346
+features.module.7.weight,0.7000000000000001,72.31,96.54,1.7939699172973635
+features.module.7.weight,0.75,60.160000000000004,92.30000000000001,2.63507724404335
+features.module.7.weight,0.8,55.39000000000001,90.16,2.9101451873779296
+features.module.7.weight,0.8500000000000001,45.14,86.77,3.833881312608719
+features.module.7.weight,0.9,35.089999999999996,76.77000000000001,4.710112679004671
+features.module.10.weight,0.0,90.93,99.47,0.6692559219896795
+features.module.10.weight,0.05,90.95,99.46000000000001,0.6705086551606654
+features.module.10.weight,0.1,90.71000000000001,99.41,0.6711264587938788
+features.module.10.weight,0.15000000000000002,90.68,99.42999999999999,0.6777696236968042
+features.module.10.weight,0.2,90.53999999999999,99.37,0.6781696103513241
+features.module.10.weight,0.25,90.24,99.42,0.6863313406705858
+features.module.10.weight,0.30000000000000004,89.96,99.41,0.6885525263845922
+features.module.10.weight,0.35000000000000003,89.5,99.42,0.7048376530408857
+features.module.10.weight,0.4,88.64,99.33,0.7461733937263489
+features.module.10.weight,0.45,87.37,99.17,0.8162848562002182
+features.module.10.weight,0.5,86.45,99.17,0.8736617445945739
+features.module.10.weight,0.55,84.74,98.89,0.9555087387561797
+features.module.10.weight,0.6000000000000001,83.01,98.61,1.062857578694821
+features.module.10.weight,0.65,80.05,98.29,1.2567715153098107
+features.module.10.weight,0.7000000000000001,76.01,97.69,1.4884072214365005
+features.module.10.weight,0.75,74.56,97.55,1.5503956228494644
+features.module.10.weight,0.8,68.17,95.81,1.9940202355384824
+features.module.10.weight,0.8500000000000001,51.17,89.96,3.296700370311738
+features.module.10.weight,0.9,41.75,84.13,3.6957924604415897
+features.module.12.weight,0.0,90.93,99.47,0.6692559219896795
+features.module.12.weight,0.05,90.96,99.44,0.6679774925112725
+features.module.12.weight,0.1,90.82000000000001,99.42,0.67003466039896
+features.module.12.weight,0.15000000000000002,90.71000000000001,99.42999999999999,0.6702918030321597
+features.module.12.weight,0.2,90.33,99.42999999999999,0.6718512803316118
+features.module.12.weight,0.25,90.34,99.39,0.6750395081937313
+features.module.12.weight,0.30000000000000004,90.09,99.41,0.6719778679311275
+features.module.12.weight,0.35000000000000003,89.79,99.41,0.6831181965768336
+features.module.12.weight,0.4,89.3,99.39,0.6883666053414346
+features.module.12.weight,0.45,88.64,99.35000000000001,0.7329882495105265
+features.module.12.weight,0.5,87.44000000000001,99.32,0.7966521874070169
+features.module.12.weight,0.55,86.14999999999999,99.22999999999999,0.8666965961456297
+features.module.12.weight,0.6000000000000001,84.33,98.88,0.9393015101552009
+features.module.12.weight,0.65,81.05,98.50999999999999,1.1145302936434744
+features.module.12.weight,0.7000000000000001,75.02,97.82,1.4555630862712863
+features.module.12.weight,0.75,72.78999999999999,97.35000000000001,1.4904023647308347
+features.module.12.weight,0.8,65.55,96.23,1.916279849410057
+features.module.12.weight,0.8500000000000001,57.269999999999996,93.26,2.2968807488679883
+features.module.12.weight,0.9,44.720000000000006,89.22,3.0090278327465074
+features.module.14.weight,0.0,90.93,99.47,0.6692559219896795
+features.module.14.weight,0.05,90.9,99.42999999999999,0.6660525508224964
+features.module.14.weight,0.1,90.86999999999999,99.38,0.668628565222025
+features.module.14.weight,0.15000000000000002,90.8,99.38,0.6654499001801015
+features.module.14.weight,0.2,90.75999999999999,99.38,0.6629222929477692
+features.module.14.weight,0.25,90.64,99.39,0.6670274205505848
+features.module.14.weight,0.30000000000000004,90.31,99.47,0.6693187534809112
+features.module.14.weight,0.35000000000000003,90.0,99.42999999999999,0.6754926271736622
+features.module.14.weight,0.4,89.5,99.42999999999999,0.7073422700166703
+features.module.14.weight,0.45,89.07000000000001,99.46000000000001,0.7237449675798417
+features.module.14.weight,0.5,88.37,99.33999999999999,0.7446963548660278
+features.module.14.weight,0.55,87.51,99.2,0.7726304724812508
+features.module.14.weight,0.6000000000000001,85.38,98.92999999999999,0.87779576331377
+features.module.14.weight,0.65,83.95,98.78,0.9448466539382935
+features.module.14.weight,0.7000000000000001,81.11,98.16,1.057556112110615
+features.module.14.weight,0.75,72.61,96.63000000000001,1.5405680358409881
+features.module.14.weight,0.8,64.36,93.04,2.1472644239664076
+features.module.14.weight,0.8500000000000001,53.44,85.92999999999999,2.9526685476303105
+features.module.14.weight,0.9,47.089999999999996,84.05,3.471282905340194
+features.module.17.weight,0.0,90.93,99.47,0.6692559219896795
+features.module.17.weight,0.05,90.92,99.46000000000001,0.6684170112013814
+features.module.17.weight,0.1,90.85,99.44,0.6631406359374521
+features.module.17.weight,0.15000000000000002,90.88000000000001,99.42,0.6575572237372398
+features.module.17.weight,0.2,90.93,99.41,0.6481918901205062
+features.module.17.weight,0.25,90.75999999999999,99.39,0.6438624344766137
+features.module.17.weight,0.30000000000000004,90.79,99.4,0.6331793077290057
+features.module.17.weight,0.35000000000000003,90.75,99.42,0.6228962279856204
+features.module.17.weight,0.4,90.68,99.42,0.6127338252961634
+features.module.17.weight,0.45,90.59,99.4,0.6080214835703373
+features.module.17.weight,0.5,90.47,99.42,0.5982955045998096
+features.module.17.weight,0.55,90.25,99.42,0.5902760840952397
+features.module.17.weight,0.6000000000000001,89.98,99.4,0.5756854690611363
+features.module.17.weight,0.65,89.77,99.44,0.5583911545574665
+features.module.17.weight,0.7000000000000001,88.84,99.25,0.5669960446655753
+features.module.17.weight,0.75,88.22,99.11999999999999,0.5783131875097753
+features.module.17.weight,0.8,86.49,98.66,0.6255424864590168
+features.module.17.weight,0.8500000000000001,84.13,98.21,0.6861111536622048
+features.module.17.weight,0.9,76.94,97.27,0.9720454081892966
+features.module.19.weight,0.0,90.93,99.47,0.6692559219896795
+features.module.19.weight,0.05,90.94,99.46000000000001,0.6684708371758462
+features.module.19.weight,0.1,90.9,99.46000000000001,0.6665540747344495
+features.module.19.weight,0.15000000000000002,90.89,99.45,0.6624954134225844
+features.module.19.weight,0.2,90.91,99.42999999999999,0.6564127676188944
+features.module.19.weight,0.25,90.91,99.42,0.6511186212301254
+features.module.19.weight,0.30000000000000004,90.92,99.42,0.6449287861585616
+features.module.19.weight,0.35000000000000003,90.86,99.45,0.6337990917265414
+features.module.19.weight,0.4,90.94,99.42,0.6230915866792202
+features.module.19.weight,0.45,90.9,99.42,0.6135770685970784
+features.module.19.weight,0.5,90.86,99.44,0.6015985280275347
+features.module.19.weight,0.55,90.71000000000001,99.44,0.5907493270933629
+features.module.19.weight,0.6000000000000001,90.68,99.42,0.5779680147767066
+features.module.19.weight,0.65,90.53999999999999,99.42999999999999,0.5652678206562994
+features.module.19.weight,0.7000000000000001,90.02,99.44,0.5602145388722419
+features.module.19.weight,0.75,89.75,99.31,0.5480382151901723
+features.module.19.weight,0.8,88.74,99.22,0.5589367933571338
+features.module.19.weight,0.8500000000000001,87.28,98.95,0.5609808459877967
+features.module.19.weight,0.9,83.17,98.19,0.6770205676555634
+features.module.21.weight,0.0,90.93,99.47,0.6692559219896795
+features.module.21.weight,0.05,90.92,99.47,0.6683227315545083
+features.module.21.weight,0.1,90.89,99.47,0.6670222692191601
+features.module.21.weight,0.15000000000000002,90.89,99.44,0.6641224145889284
+features.module.21.weight,0.2,90.91,99.45,0.657768272608519
+features.module.21.weight,0.25,90.91,99.44,0.6513003595173358
+features.module.21.weight,0.30000000000000004,90.93,99.42999999999999,0.6455009885132316
+features.module.21.weight,0.35000000000000003,90.93,99.44,0.6367211237549781
+features.module.21.weight,0.4,90.9,99.42999999999999,0.6218342565000056
+features.module.21.weight,0.45,90.9,99.44,0.6115089938044547
+features.module.21.weight,0.5,90.94,99.47,0.5980993054807184
+features.module.21.weight,0.55,90.88000000000001,99.51,0.584993703663349
+features.module.21.weight,0.6000000000000001,90.68,99.5,0.5650006860494615
+features.module.21.weight,0.65,90.59,99.52,0.5536180756986141
+features.module.21.weight,0.7000000000000001,90.36,99.49,0.5418409958481787
+features.module.21.weight,0.75,90.23,99.48,0.5325972847640514
+features.module.21.weight,0.8,90.13,99.42,0.5136933751404287
+features.module.21.weight,0.8500000000000001,89.18,99.33,0.5227328665554523
+features.module.21.weight,0.9,87.3,99.3,0.5440353430807591
+features.module.24.weight,0.0,90.93,99.47,0.6692559219896795
+features.module.24.weight,0.05,90.91,99.47,0.66840605661273
+features.module.24.weight,0.1,90.91,99.46000000000001,0.6665401868522167
+features.module.24.weight,0.15000000000000002,90.93,99.45,0.6622105807065961
+features.module.24.weight,0.2,90.86,99.45,0.6560657866299151
+features.module.24.weight,0.25,90.86999999999999,99.44,0.6493664145469665
+features.module.24.weight,0.30000000000000004,90.91,99.45,0.6416803397238253
+features.module.24.weight,0.35000000000000003,90.89,99.46000000000001,0.6323044292628768
+features.module.24.weight,0.4,90.95,99.47,0.6217416435480118
+features.module.24.weight,0.45,90.89,99.49,0.6078125417232514
+features.module.24.weight,0.5,90.86,99.48,0.5926271870732307
+features.module.24.weight,0.55,90.9,99.5,0.5726493634283543
+features.module.24.weight,0.6000000000000001,90.89,99.51,0.550549419224262
+features.module.24.weight,0.65,90.93,99.51,0.5310931198298929
+features.module.24.weight,0.7000000000000001,90.82000000000001,99.52,0.5074854679405688
+features.module.24.weight,0.75,90.68,99.52,0.4790469661355019
+features.module.24.weight,0.8,90.5,99.46000000000001,0.45561300590634346
+features.module.24.weight,0.8500000000000001,89.98,99.39,0.42840092480182645
+features.module.24.weight,0.9,88.71,99.24,0.4082508966326714
+features.module.26.weight,0.0,90.93,99.47,0.6692559219896795
+features.module.26.weight,0.05,90.94,99.47,0.6645257070660592
+features.module.26.weight,0.1,90.92,99.45,0.6580530107021331
+features.module.26.weight,0.15000000000000002,90.91,99.46000000000001,0.649077967554331
+features.module.26.weight,0.2,90.91,99.46000000000001,0.6362913250923157
+features.module.26.weight,0.25,90.93,99.5,0.6227086402475833
+features.module.26.weight,0.30000000000000004,90.92,99.53,0.6098518386483192
+features.module.26.weight,0.35000000000000003,90.9,99.52,0.5931080147624016
+features.module.26.weight,0.4,90.86,99.51,0.5737856447696684
+features.module.26.weight,0.45,90.82000000000001,99.51,0.5504772827029231
+features.module.26.weight,0.5,90.86,99.53,0.522583256661892
+features.module.26.weight,0.55,90.86999999999999,99.53999999999999,0.5000082895159721
+features.module.26.weight,0.6000000000000001,90.86,99.55000000000001,0.47699484005570414
+features.module.26.weight,0.65,90.86,99.53,0.4562730476260185
+features.module.26.weight,0.7000000000000001,90.79,99.55000000000001,0.4265191875398158
+features.module.26.weight,0.75,90.61,99.48,0.39904330074787137
+features.module.26.weight,0.8,90.41,99.38,0.3716635003685951
+features.module.26.weight,0.8500000000000001,89.79,99.33,0.36077326089143763
+features.module.26.weight,0.9,88.07000000000001,99.16,0.40490268096327775
+features.module.28.weight,0.0,90.93,99.47,0.6692559219896795
+features.module.28.weight,0.05,90.92,99.47,0.6642911553382873
+features.module.28.weight,0.1,90.94,99.48,0.6563593268394471
+features.module.28.weight,0.15000000000000002,90.89,99.5,0.6469362922012806
+features.module.28.weight,0.2,90.91,99.5,0.6352568596601487
+features.module.28.weight,0.25,90.89,99.51,0.6218712389469145
+features.module.28.weight,0.30000000000000004,90.91,99.53,0.6074293464422227
+features.module.28.weight,0.35000000000000003,90.86999999999999,99.55000000000001,0.5928989790380002
+features.module.28.weight,0.4,90.91,99.57000000000001,0.5732079967856407
+features.module.28.weight,0.45,90.93,99.58,0.5560689799487589
+features.module.28.weight,0.5,90.81,99.58,0.539565099030733
+features.module.28.weight,0.55,90.86999999999999,99.53999999999999,0.51952208802104
+features.module.28.weight,0.6000000000000001,90.91,99.52,0.49096294045448297
+features.module.28.weight,0.65,90.86,99.53999999999999,0.4640089146792888
+features.module.28.weight,0.7000000000000001,90.60000000000001,99.53,0.43963218927383413
+features.module.28.weight,0.75,90.31,99.49,0.42467857301235196
+features.module.28.weight,0.8,89.75999999999999,99.36,0.4174311764538288
+features.module.28.weight,0.8500000000000001,88.08,98.92,0.4726337507367134
+features.module.28.weight,0.9,78.35,97.63,0.6953305929899215
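
One way these sensitivity tables can inform a pruning schedule is to pick, per layer, the largest sparsity whose top1 stays within a tolerance of the unpruned baseline. A sketch under assumed conventions (pandas installed; the 1-point tolerance is purely illustrative, not from the source):

    import pandas as pd

    df = pd.read_csv("examples/automated_deep_compression/vgg16_cifar_sensitivity_filters.csv")
    # Baseline top1 per layer is recorded in the sparsity == 0.0 rows.
    baseline = df[df["sparsity"] == 0.0].set_index("parameter")["top1"]
    tol = 1.0  # hypothetical: tolerate up to a 1-point top1 drop per layer
    for name, group in df.groupby("parameter"):
        safe = group[group["top1"] >= baseline[name] - tol]
        print(name, "-> max safe filter sparsity:", safe["sparsity"].max())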