From 5059419b8e436e773299031beedfd968cb84bff8 Mon Sep 17 00:00:00 2001
From: Neta Zmora <neta.zmora@intel.com>
Date: Wed, 23 Oct 2019 16:42:57 +0300
Subject: [PATCH] Fix AMC notebooks' sample command-line examples

As documented in issue #395, some of the command-line examples in the
AMC notebooks are incorrect.
Also, fix some bugs that were introduced with the refactoring of the
low-level pruning API.
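
In brief:
- distiller/norms.py: channels_norm() and rank_channels() now accept
  2D (Linear) parameters in addition to 4D (Conv) ones.
- LpRankedStructureParameterPruner and FMReconstructionChannelPruner
  now call the refactored distiller.norms ranking helpers instead of
  the removed class-method rankers.
- FMReconstructionChannelPruner: a chained assignment ("mask = _ =")
  is replaced with tuple unpacking ("mask, _ ="), so the mask no
  longer receives the entire tuple returned by expand_binary_map().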
---
 distiller/norms.py                            |  6 +++--
 distiller/pruning/ranked_structures_pruner.py | 24 ++++++++++---------
 .../amc/jupyter/amc_plain20.ipynb             | 18 +++++++-------
 .../amc/jupyter/amc_random.ipynb              | 22 ++++++++---------
 .../amc/jupyter/amc_resnet20.ipynb            |  4 ++--
 5 files changed, 39 insertions(+), 35 deletions(-)
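
Reviewer note (not part of the patch): below is a minimal,
self-contained sketch of the dispatch that the norms.py change
enables. These are simplified re-implementations of the real
channels_norm()/cols_norm() from distiller/norms.py, hard-coded to an
L1 norm and omitting the norm_fn, group_len, and length_normalized
parameters that the real functions take.

    import torch

    def cols_norm(param):
        # L1 norm of each column of a 2D (Linear) weight:
        # one value per input feature.
        return param.abs().sum(dim=0)

    def channels_norm(param):
        # After this patch: 2D parameters fall through to a column
        # norm instead of tripping the 4D-only assert.
        assert param.dim() in (2, 4), "param has invalid dimensions"
        if param.dim() == 2:
            return cols_norm(param)
        # 4D case: group all weights that read the same input channel.
        param = param.transpose(0, 1).contiguous()  # (in_ch, out_ch, kh, kw)
        return param.view(param.size(0), -1).abs().sum(dim=1)

    conv_w = torch.randn(8, 4, 3, 3)    # (out_ch, in_ch, kh, kw)
    fc_w = torch.randn(10, 4)           # (out_features, in_features)
    print(channels_norm(conv_w).shape)  # torch.Size([4]): one norm per channel
    print(channels_norm(fc_w).shape)    # torch.Size([4]): one norm per column

The column/channel correspondence is why the fallback is sound: column
j of a Linear weight multiplies input feature j, just as input channel
j of a Conv weight reads feature-map channel j, so ranking columns
ranks the same structures.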

diff --git a/distiller/norms.py b/distiller/norms.py
index 46cca86..bafda4e 100644
--- a/distiller/norms.py
+++ b/distiller/norms.py
@@ -152,7 +152,9 @@ def channels_norm(param, norm_fn, group_len=1, length_normalized=False):
     Returns:
         1D tensor with lp-norms of the groups
     """
-    assert param.dim() == 4, "param has invalid dimensions"
+    assert param.dim() in (2, 4), "param has invalid dimensions"
+    if param.dim() == 2:
+        return cols_norm(param, norm_fn, group_len, length_normalized)
     param = param.transpose(0, 1).contiguous()
     group_size = group_len * np.prod(param.shape[1:])
     return generic_norm(param.view(-1, group_size), norm_fn, group_size, length_normalized, dim=1)
@@ -296,7 +298,7 @@ def k_smallest_elems(mags, k, noise):
 
 
 def rank_channels(param, group_len, magnitude_fn, fraction_to_partition, rounding_fn, noise):
-    assert param.dim() == 4, "This ranking is only supported for 4D tensors"
+    assert param.dim() in (2, 4), "This ranking is only supported for 2D and 4D tensors"
     n_channels = param.size(1)
     n_ch_to_prune = num_structs_to_prune(n_channels, group_len, fraction_to_partition, rounding_fn)
     if n_ch_to_prune == 0:
diff --git a/distiller/pruning/ranked_structures_pruner.py b/distiller/pruning/ranked_structures_pruner.py
index 92336dc..1453031 100755
--- a/distiller/pruning/ranked_structures_pruner.py
+++ b/distiller/pruning/ranked_structures_pruner.py
@@ -147,10 +147,14 @@ class LpRankedStructureParameterPruner(_RankedStructureParameterPruner):
                                 model=None, binary_map=None, magnitude_fn=distiller.norms.l1_norm,
                                 noise=0.0, group_size=1, rounding_fn=math.floor):
         if binary_map is None:
+
+            if param.dim() == 2:
+                # 2D (Linear layer) parameters: delegate to row ranking and pruning
+                return LpRankedStructureParameterPruner.rank_and_prune_rows(fraction_to_prune, param, param_name,
+                                                                            zeros_mask_dict, model, binary_map,
+                                                                            magnitude_fn, group_size)
             bottomk_channels, channel_mags = distiller.norms.rank_channels(param, group_size, magnitude_fn,
                                                                            fraction_to_prune, rounding_fn, noise)
-            # bottomk_channels, channel_mags = LpRankedStructureParameterPruner.rank_channels(
-            #     magnitude_fn, fraction_to_prune, param, group_size, rounding_fn, noise)
             if bottomk_channels is None:
                 # Empty list means that fraction_to_prune is too low to prune anything
                 return
@@ -689,13 +693,12 @@ class FMReconstructionChannelPruner(_RankedStructureParameterPruner):
         if binary_map is None:
             op_type = 'conv' if param.dim() == 4 else 'fc'
             if op_type == 'conv':
-                bottomk_channels, channel_mags = LpRankedStructureParameterPruner.rank_channels(
-                    magnitude_fn, fraction_to_prune, param, group_size, rounding_fn, noise)
-
+                bottomk_channels, channel_mags = distiller.norms.rank_channels(param, group_size, magnitude_fn,
+                                                                               fraction_to_prune, rounding_fn, noise)
             else:
-                bottomk_channels, channel_mags = LpRankedStructureParameterPruner.rank_cols(
-                     magnitude_fn, fraction_to_prune, param)
-
+                bottomk_channels, channel_mags = distiller.norms.rank_cols(param, group_size, magnitude_fn,
+                                                                           fraction_to_prune, rounding_fn=math.floor,
+                                                                           noise=None)
             # Todo: this little piece of code can be refactored
             if bottomk_channels is None:
                 # Empty list means that fraction_to_prune is too low to prune anything
@@ -704,7 +707,6 @@ class FMReconstructionChannelPruner(_RankedStructureParameterPruner):
             threshold = bottomk_channels[-1]
             binary_map = channel_mags.gt(threshold)
 
-
             # These are the indices of channels we want to keep
             indices = binary_map.nonzero().squeeze()
             if len(indices.shape) == 0:
@@ -757,8 +759,8 @@ class FMReconstructionChannelPruner(_RankedStructureParameterPruner):
         if zeros_mask_dict is not None:
             binary_map = binary_map.type(param.type())
             if op_type == 'conv':
-                zeros_mask_dict[param_name].mask = _ = distiller.thresholding.expand_binary_map(param,
-                                                                                                'Channels', binary_map)
+                zeros_mask_dict[param_name].mask, _ = distiller.thresholding.expand_binary_map(param,
+                                                                                               'Channels', binary_map)
                 msglogger.info("FMReconstructionChannelPruner - param: %s pruned=%.3f goal=%.3f (%d/%d)",
                                param_name,
                                distiller.sparsity_ch(zeros_mask_dict[param_name].mask),
diff --git a/examples/auto_compression/amc/jupyter/amc_plain20.ipynb b/examples/auto_compression/amc/jupyter/amc_plain20.ipynb
index e58ec54..d2e2543 100644
--- a/examples/auto_compression/amc/jupyter/amc_plain20.ipynb
+++ b/examples/auto_compression/amc/jupyter/amc_plain20.ipynb
@@ -79,7 +79,7 @@
     "\n",
     "The command-line is provided below:\n",
     "    \n",
-    "    time python3 ../../classifier_compression/multi-run.py ${AMC_EXP_PATH}/plain20-ddpg-private amc.py  --arch=plain20_cifar ${CIFAR10_PATH} --resume=checkpoint.plain20_cifar.pth.tar --lr=0.05 --amc --amc-protocol=mac-constrained --amc-action-range 0.05 1.0 --amc-target-density=0.5 -p=50 --etes=0.075 --amc-ft-epochs=0 --amc-prune-pattern=channels --amc-prune-method=fm-reconstruction --amc-agent-algo=DDPG --amc-cfg=auto_compression_channels.yaml --evs=0.5 --etrs=0.5 --amc-rllib=hanlab -j=1\n",
+    "    time python3 ../../classifier_compression/multi-run.py ${AMC_EXP_PATH}/plain20-ddpg-private amc.py  --arch=plain20_cifar ${CIFAR10_PATH} --resume=${CHECKPOINTS_PATH}/checkpoint.plain20_cifar.pth.tar --lr=0.05 --amc-protocol=mac-constrained --amc-action-range 0.05 1.0 --amc-target-density=0.5 -p=50 --etes=0.075 --amc-ft-epochs=0 --amc-prune-pattern=channels --amc-prune-method=fm-reconstruction --amc-agent-algo=DDPG --amc-cfg=auto_compression_channels.yaml --evs=0.5 --etrs=0.5 --amc-rllib=hanlab -j=1\n",
     "    \n",
     "Each colored line represents one experiment execution instance. We plot the search-Top1 score of discovered networks as the RL-based AMC system learns to find better compressed networks. You might be impressed by:\n",
     "* The variability in behavior, which is typical for RL algorithms.\n",
@@ -133,12 +133,12 @@
    "source": [
     "Fine-tune for 2 epochs (using 16 processes): notice the low LR which we use because we only FT for 2 episodes.\n",
     "\n",
-    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-ddpg-private/2019.07.24-174636 --arch=plain20_cifar --lr=0.001 --vs=0 -p=50 --compress=plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --epochs=2 --output-csv=ft_2epoch_results.csv --processes=16\n",
+    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-ddpg-private/2019.07.24-174636 --arch=plain20_cifar --lr=0.001 --vs=0 -p=50 --compress=../plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --epochs=2 --output-csv=ft_2epoch_results.csv --processes=16\n",
     "\n",
     "\n",
     "Fine-tune for 60 epochs (using 16 processes): notice the x100 higher LR.\n",
     "\n",
-    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-ddpg-private/2019.07.24-174636 --arch=plain20_cifar --lr=0.1 --vs=0 -p=50 --compress=plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --epochs=60 --output-csv=ft_60epoch_results.csv --processes=16"
+    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-ddpg-private/2019.07.24-174636 --arch=plain20_cifar --lr=0.1 --vs=0 -p=50 --compress=../plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --epochs=60 --output-csv=ft_60epoch_results.csv --processes=16"
    ]
   },
   {
@@ -286,7 +286,7 @@
     "Results are interesting and encouraging as there is learning. However, this is less sample-efficient compared to DDPG, and therefore takes longer.\n",
     "There might be some room to tune the PPO hyper-params to generate better results.\n",
     "\n",
-    "    time python3 ../../classifier_compression/multi-run.py ${AMC_EXP_PATH}/plain20-ppo-coach amc.py --arch=plain20_cifar ${CIFAR10_PATH} --resume=checkpoint.plain20_cifar.pth.tar --lr=0.05 --amc --amc-protocol=mac-constrained --amc-action-range 0.05 1.0 --amc-target-density=0.5 -p=50 --etes=0.075 --amc-ft-epochs=0 --amc-prune-pattern=channels --amc-prune-method=fm-reconstruction --amc-agent-algo=ClippedPPO-continuous --amc-cfg=auto_compression_channels.yaml --evs=0.5 --etrs=0.5 --amc-rllib=coach -j=1"
+    "    time python3 ../../classifier_compression/multi-run.py ${AMC_EXP_PATH}/plain20-ppo-coach amc.py --arch=plain20_cifar ${CIFAR10_PATH} --resume=${CHECKPOINTS_PATH}/checkpoint.plain20_cifar.pth.tar --lr=0.05 --amc-protocol=mac-constrained --amc-action-range 0.05 1.0 --amc-target-density=0.5 -p=50 --etes=0.075 --amc-ft-epochs=0 --amc-prune-pattern=channels --amc-prune-method=fm-reconstruction --amc-agent-algo=ClippedPPO-continuous --amc-cfg=auto_compression_channels.yaml --evs=0.5 --etrs=0.5 --amc-rllib=coach -j=1"
    ]
   },
   {
@@ -321,7 +321,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-ppo-coach/2019.07.28-012356 --arch=plain20_cifar --lr=0.1 --vs=0 -p=50 --compress=plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --epochs=60 --output-csv=ft_60epoch_results.csv --processes=16"
+    "    time python3 parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-ppo-coach/2019.07.28-012356 --arch=plain20_cifar --lr=0.1 --vs=0 -p=50 --compress=../plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --epochs=60 --output-csv=ft_60epoch_results.csv --processes=16"
    ]
   },
   {
@@ -358,7 +358,7 @@
    "source": [
     "## Using a different reward function\n",
     "\n",
-    "    time python3 ../../classifier_compression/multi-run.py ${AMC_EXP_PATH}/plain20-ddpg-private-punish compress_classifier.py --arch=plain20_cifar ${CIFAR10_PATH} --resume=checkpoint.plain20_cifar.pth.tar --lr=0.05 --amc-protocol=punish-agent --amc-action-range 0.05 1.0 --amc-target-density=0.5 -p=50 --etes=0.075 --amc-ft-epochs=0 --amc-prune-pattern=channels --amc-prune-method=fm-reconstruction --amc-agent-algo=DDPG --amc-cfg=../automated_deep_compression/auto_compression_channels.yaml --evs=0.5 --etrs=0.5 --amc-rllib=hanlab -j=1"
+    "    time python3 ../../classifier_compression/multi-run.py ${AMC_EXP_PATH}/plain20-ddpg-private-punish amc.py --arch=plain20_cifar ${CIFAR10_PATH} --resume=${CHECKPOINTS_PATH}/checkpoint.plain20_cifar.pth.tar --lr=0.05 --amc-protocol=punish-agent --amc-action-range 0.05 1.0 --amc-target-density=0.5 -p=50 --etes=0.075 --amc-ft-epochs=0 --amc-prune-pattern=channels --amc-prune-method=fm-reconstruction --amc-agent-algo=DDPG --amc-cfg=auto_compression_channels.yaml --evs=0.5 --etrs=0.5 --amc-rllib=hanlab -j=1"
    ]
   },
   {
@@ -395,7 +395,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-ddpg-private-punish/2019.07.29-171102 --arch=plain20_cifar --lr=0.1 --vs=0 -p=50 --compress=plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --epochs=60 --output-csv=ft_60epoch_results.csv --processes=16"
+    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-ddpg-private-punish/2019.07.29-171102 --arch=plain20_cifar --lr=0.1 --vs=0 -p=50 --compress=../plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --epochs=60 --output-csv=ft_60epoch_results.csv --processes=16"
    ]
   },
   {
@@ -450,7 +450,7 @@
     "\n",
     "    time python3 ../../classifier_compression/multi-run.py ${AMC_EXP_PATH}/plain20-ddpg-private amc.py  --arch=plain20_cifar ${CIFAR10_PATH} --resume=${CHECKPOINTS_PATH}/checkpoint.plain20_cifar.pth.tar --lr=0.05 --amc-protocol=mac-constrained --amc-action-range 0.05 1.0 --amc-target-density=0.5 -p=50 --etes=0.075 --amc-ft-epochs=0 --amc-prune-pattern=channels --amc-prune-method=fm-reconstruction --amc-agent-algo=DDPG --amc-cfg=auto_compression_channels.yaml --evs=1.0 --etrs=0.01 --amc-rllib=hanlab -j=1\n",
     "    \n",
-    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-ddpg-private/2019.08.01-181040 --arch=plain20_cifar --lr=0.1 --vs=0 -p=50 --compress=plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --epochs=60 --output-csv=ft_60epoch_results.csv --processes=16"
+    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-ddpg-private/2019.08.01-181040 --arch=plain20_cifar --lr=0.1 --vs=0 -p=50 --compress=../plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --epochs=60 --output-csv=ft_60epoch_results.csv --processes=16"
    ]
   },
   {
@@ -493,7 +493,7 @@
     "\n",
     "    time python3 ../../classifier_compression/multi-run.py ${AMC_EXP_PATH}/plain20-ddpg-private amc.py  --arch=plain20_cifar ${CIFAR10_PATH} --resume=${CHECKPOINTS_PATH}/checkpoint.plain20_cifar.pth.tar --lr=0.05 --amc-protocol=mac-constrained --amc-action-range 0.05 1.0 --amc-target-density=0.5 -p=50 --etes=0.075 --amc-ft-epochs=0 --amc-prune-pattern=channels --amc-prune-method=fm-reconstruction --amc-agent-algo=DDPG --amc-cfg=auto_compression_channels.yaml --evs=0.5 --etrs=0.01 --amc-rllib=hanlab -j=1 --amc-save-chkpts\n",
     "    \n",
-    "    time python ../../classifier_compression/parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-ddpg-private/2019.08.03-000628 --arch=plain20_cifar --lr=0.1 --vs=0 -p=50 --compress=../plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --epochs=60 --output-csv=ft_60epoch_results.csv --processes=16 --top-performing-chkpts"
+    "    time python3 ../../classifier_compression/parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-ddpg-private/2019.08.03-000628 --arch=plain20_cifar --lr=0.1 --vs=0 -p=50 --compress=../plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --epochs=60 --output-csv=ft_60epoch_results.csv --processes=16 --top-performing-chkpts"
    ]
   },
   {
diff --git a/examples/auto_compression/amc/jupyter/amc_random.ipynb b/examples/auto_compression/amc/jupyter/amc_random.ipynb
index 0de236e..259d057 100644
--- a/examples/auto_compression/amc/jupyter/amc_random.ipynb
+++ b/examples/auto_compression/amc/jupyter/amc_random.ipynb
@@ -60,19 +60,19 @@
    "source": [
     "**Test**\n",
     "\n",
-    "    time python3 ../../classifier_compression/multi-run.py ${AMC_EXP_PATH}/plain20-random-reconstruction_nondeterministic amc.py --arch=plain20_cifar ${CIFAR10_PATH} --resume=checkpoint.plain20_cifar.pth.tar --lr=0.05 --amc-protocol=mac-constrained --amc-action-range 0.05 1.0 --amc-target-density=0.5 -p=50 --etes=0.075 --amc-ft-epochs=0 --amc-prune-pattern=channels --amc-prune-method=fm-reconstruction --amc-agent-algo=Random-policy --amc-cfg=../automated_deep_compression/auto_compression_channels.yaml --evs=0.5 --etrs=0.5 --amc-rllib=random -j=1\n",
+    "    time python3 ../../classifier_compression/multi-run.py ${AMC_EXP_PATH}/plain20-random-reconstruction_nondeterministic amc.py --arch=plain20_cifar ${CIFAR10_PATH} --resume=${CHECKPOINTS_PATH}/checkpoint.plain20_cifar.pth.tar --lr=0.05 --amc-protocol=mac-constrained --amc-action-range 0.05 1.0 --amc-target-density=0.5 -p=50 --etes=0.075 --amc-ft-epochs=0 --amc-prune-pattern=channels --amc-prune-method=fm-reconstruction --amc-agent-algo=Random-policy --amc-cfg=auto_compression_channels.yaml --evs=0.5 --etrs=0.5 --amc-rllib=random -j=1\n",
     "\n",
     "==> experiments/plain20-random-reconstruction/2019.07.22-120953/\n",
     "\n",
-    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-random-reconstruction_nondeterministic/2019.07.23-124600 --arch=plain20_cifar --lr=0.005 --vs=0 -p=50 --epochs=60 --compress=../automated_deep_compression/fine_tune.yaml ${CIFAR10_PATH} -j=1 --deterministic --ft-epochs=1 --output-csv=ft_1epoch_results.csv\n",
+    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-random-reconstruction_nondeterministic/2019.07.23-124600 --arch=plain20_cifar --lr=0.005 --vs=0 -p=50 --epochs=60 --compress=../plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --deterministic --epoch=1 --output-csv=ft_1epoch_results.csv\n",
     "    \n",
     "**Baseline**\n",
     "\n",
-    "    time python3 ../../classifier_compression/multi-run.py ${AMC_EXP_PATH}/plain20-random-l1_rank amc.py --arch=plain20_cifar ${CIFAR10_PATH} --resume=checkpoint.plain20_cifar.pth.tar --lr=0.05 --amc-protocol=mac-constrained --amc-action-range 0.05 1.0 --amc-target-density=0.5 -p=50 --etes=0.075 --amc-ft-epochs=0 --amc-prune-pattern=channels --amc-prune-method=l1-rank --amc-agent-algo=Random-policy --amc-cfg=../automated_deep_compression/auto_compression_channels.yaml --evs=0.5 --etrs=0.5 --amc-rllib=random -j=1\n",
+    "    time python3 ../../classifier_compression/multi-run.py ${AMC_EXP_PATH}/plain20-random-l1_rank amc.py --arch=plain20_cifar ${CIFAR10_PATH} --resume=${CHECKPOINTS_PATH}/checkpoint.plain20_cifar.pth.tar --lr=0.05 --amc-protocol=mac-constrained --amc-action-range 0.05 1.0 --amc-target-density=0.5 -p=50 --etes=0.075 --amc-ft-epochs=0 --amc-prune-pattern=channels --amc-prune-method=l1-rank --amc-agent-algo=Random-policy --amc-cfg=auto_compression_channels.yaml --evs=0.5 --etrs=0.5 --amc-rllib=random -j=1\n",
     "\n",
     "==> classifier_compression/experiments/plain20-random-l1_rank/2019.07.21-004045\n",
     "\n",
-    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-random-l1_rank/2019.07.21-004045/ --arch=plain20_cifar --lr=0.005 --vs=0 -p=50 --epochs=60 --compress=../automated_deep_compression/fine_tune.yaml ${CIFAR10_PATH} -j=1 --deterministic --ft-epochs=1 --output-csv=ft_1epoch_results.csv\n",
+    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-random-l1_rank/2019.07.21-004045/ --arch=plain20_cifar --lr=0.005 --vs=0 -p=50 --epochs=60 --compress=../plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --deterministic --epoch=1 --output-csv=ft_1epoch_results.csv\n",
     "\n",
     "==> classifier_compression/experiments/plain20-random-l1_rank/2019.07.21-004045/ft_1epoch_results.csv"
    ]
@@ -165,7 +165,7 @@
     "  NOTE: we fine-tune _AFTER_ the RL search is done, so this FT process does not help the agent.  And, in any case, we are using a random agent.\n",
     "  \n",
     "<code>\n",
-    "time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-random-reconstruction_nondeterministic/2019.07.23-124600 --arch=plain20_cifar --lr=0.005 --vs=0 -p=50 --epochs=60 --compress=plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --deterministic --ft-epochs=20 --output-csv=ft_20epoch_results.csv\n",
+    "time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-random-reconstruction_nondeterministic/2019.07.23-124600 --arch=plain20_cifar --lr=0.005 --vs=0 -p=50 --epochs=60 --compress=../plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --deterministic --epoch=20 --output-csv=ft_20epoch_results.csv\n",
     "</code>\n",
     "\n",
     "#### Answer:\n",
@@ -280,11 +280,11 @@
     "* In other words, how many fine-tuning epochs do we need to perform during the RL search to get a cleaner, less noisy (i.e. more stable) search-Top1 (used in the multi-objective reward signal)?\n",
     "\n",
     "\n",
-    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-random-l1_rank/2019.07.21-004045/ --arch=plain20_cifar --lr=0.005 --vs=0 -p=50 --epochs=60 --compress=../automated_deep_compression/fine_tune.yaml ${CIFAR10_PATH} -j=1 --deterministic --ft-epochs=3 --output-csv=ft_3epoch_results.csv\n",
+    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-random-l1_rank/2019.07.21-004045/ --arch=plain20_cifar --lr=0.005 --vs=0 -p=50 --epochs=60 --compress=../plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --deterministic --epoch=3 --output-csv=ft_3epoch_results.csv\n",
     "\n",
-    "    time python para,,e,-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-random-l1_rank/2019.07.21-004045/ --arch=plain20_cifar --lr=0.005 --vs=0 -p=50 --epochs=60 --compress=../automated_deep_compression/fine_tune.yaml ${CIFAR10_PATH} -j=1 --deterministic --ft-epochs=6 --output-csv=ft_6epoch_results.csv\n",
+    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-random-l1_rank/2019.07.21-004045/ --arch=plain20_cifar --lr=0.005 --vs=0 -p=50 --epochs=60 --compress=../plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --deterministic --epoch=6 --output-csv=ft_6epoch_results.csv\n",
     "\n",
-    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-random-l1_rank/2019.07.21-004045/ --arch=plain20_cifar --lr=0.005 --vs=0 -p=50 --epochs=60 --compress=../automated_deep_compression/fine_tune.yaml ${CIFAR10_PATH} -j=1 --deterministic --ft-epochs=20 --output-csv=ft_20epoch_results.csv"
+    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/plain20-random-l1_rank/2019.07.21-004045/ --arch=plain20_cifar --lr=0.005 --vs=0 -p=50 --epochs=60 --compress=../plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --deterministic --epoch=20 --output-csv=ft_20epoch_results.csv"
    ]
   },
   {
@@ -441,13 +441,13 @@
   "pycharm": {
    "stem_cell": {
     "cell_type": "raw",
+    "source": [],
     "metadata": {
      "collapsed": false
-    },
-    "source": []
+    }
    }
   }
  },
  "nbformat": 4,
  "nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/examples/auto_compression/amc/jupyter/amc_resnet20.ipynb b/examples/auto_compression/amc/jupyter/amc_resnet20.ipynb
index 4232c7b..29477ab 100644
--- a/examples/auto_compression/amc/jupyter/amc_resnet20.ipynb
+++ b/examples/auto_compression/amc/jupyter/amc_resnet20.ipynb
@@ -79,7 +79,7 @@
     "\n",
     "The command-line is provided below:\n",
     "    \n",
-    "    time python3 ../../classifier_compression/multi-run.py ${AMC_EXP_PATH}/resnet20-ddpg-private amc.py  --arch=resnet20_cifar ${CIFAR10_PATH} --resume=../ssl/checkpoints/checkpoint_trained_dense.pth.tar --amc-protocol=mac-constrained --amc-action-range 0.05 1.0 --amc-target-density=0.5 -p=50 --etes=0.075 --amc-ft-epochs=0 --amc-prune-pattern=channels --amc-prune-method=fm-reconstruction --amc-agent-algo=DDPG --amc-cfg=auto_compression_channels.yaml --amc-rllib=hanlab -j=1\n",
+    "    time python3 ../../classifier_compression/multi-run.py ${AMC_EXP_PATH}/resnet20-ddpg-private amc.py  --arch=resnet20_cifar ${CIFAR10_PATH} --resume=../../ssl/checkpoints/checkpoint_trained_dense.pth.tar --amc-protocol=mac-constrained --amc-action-range 0.05 1.0 --amc-target-density=0.5 -p=50 --etes=0.075 --amc-ft-epochs=0 --amc-prune-pattern=channels --amc-prune-method=fm-reconstruction --amc-agent-algo=DDPG --amc-cfg=auto_compression_channels.yaml --amc-rllib=hanlab -j=1\n",
     "    \n",
     "Each colored line represents one experiment execution instance. We plot the search-Top1 score of discovered networks as the RL-based AMC system learns to find better compressed networks. You might be impressed by:\n",
     "* The variability in behavior, which is typical for RL algorithms.\n",
@@ -120,7 +120,7 @@
     "\n",
     "Fine-tune for 60 epochs (using 16 processes): **notice the large LR**.\n",
     "\n",
-    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/resnet20-ddpg-private/2019.07.30-015225 --arch=resnet20_cifar --lr=0.1 --vs=0 -p=50 --compress=../automated_deep_compression/fine_tune.yaml ${CIFAR10_PATH} -j=1 --epochs=60 --output-csv=ft_60epoch_results.csv --processes=16"
+    "    time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/resnet20-ddpg-private/2019.07.30-015225 --arch=resnet20_cifar --lr=0.1 --vs=0 -p=50 --compress=../plain20_fine_tune.yaml ${CIFAR10_PATH} -j=1 --epochs=60 --output-csv=ft_60epoch_results.csv --processes=16"
    ]
   },
   {
-- 
GitLab