From 78144d4cbf76b5c6df9aa57cae3b47e8332f93d2 Mon Sep 17 00:00:00 2001
From: Guy Jacob <guy.jacob@intel.com>
Date: Wed, 6 Nov 2019 17:25:52 +0200
Subject: [PATCH] Bugfix: Deepcopy args when creating ClassifierCompressor
 (#421)

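ClassifierCompressor.__init__ modifies the args object it receives:
helpers such as _infer_implicit_args() and _config_compute_device()
add or change attributes (e.g. args.device). These side effects used
to leak into the caller's own args instance. __init__ now works on a
deep copy, kept in self.args, and all internal accesses go through
that copy. Callers that relied on the in-place modifications are
updated to read the processed values back from the instance
(cc.args / app.args).
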
Co-authored-by: Bar <29775567+barrh@users.noreply.github.com>
Co-authored-by: Guy Jacob <guy.jacob@intel.com>
---
 distiller/apputils/image_classifier.py         | 18 ++++++++++--------
 distiller/quantization/ptq_greedy_search.py    |  1 +
 .../auto_compression/amc/parallel-finetune.py  |  6 +++---
 .../compress_classifier.py                     |  2 +-
 4 files changed, 15 insertions(+), 12 deletions(-)
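
A minimal usage sketch of the new contract (hypothetical caller code:
'parser' and 'script_dir' are assumed stand-ins, while
classifier.load_data() is the real helper used in ptq_greedy_search.py
below):

    args = parser.parse_args()
    cc = classifier.ClassifierCompressor(args, script_dir=script_dir)
    # __init__ no longer mutates 'args'; the post-processed values
    # (e.g. args.device) live on the deep copy held in cc.args.
    loader = classifier.load_data(cc.args, load_train=False, load_val=False)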

diff --git a/distiller/apputils/image_classifier.py b/distiller/apputils/image_classifier.py
index 47287fc..451a0ec 100755
--- a/distiller/apputils/image_classifier.py
+++ b/distiller/apputils/image_classifier.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 #
 
+import copy
 import math
 import time
 import os
@@ -54,11 +55,11 @@ class ClassifierCompressor(object):
         - Classifier training, verification and testing
     """
     def __init__(self, args, script_dir):
-        self.args = args
-        _infer_implicit_args(args)
-        self.logdir = _init_logger(args, script_dir)
-        _config_determinism(args)
-        _config_compute_device(args)
+        self.args = copy.deepcopy(args)  # work on a private copy so __init__ doesn't mutate the caller's args
+        _infer_implicit_args(self.args)
+        self.logdir = _init_logger(self.args, script_dir)
+        _config_determinism(self.args)
+        _config_compute_device(self.args)
         
         # Create a couple of logging backends.  TensorBoardLogger writes log files in a format
         # that can be read by Google's Tensor Board.  PythonLogger writes to the Python logger.
@@ -68,12 +69,13 @@ class ClassifierCompressor(object):
             self.tflogger = TensorBoardLogger(msglogger.logdir)
             self.pylogger = PythonLogger(msglogger)
         (self.model, self.compression_scheduler, self.optimizer, 
-             self.start_epoch, self.ending_epoch) = _init_learner(args)
+             self.start_epoch, self.ending_epoch) = _init_learner(self.args)
 
         # Define loss function (criterion)
-        self.criterion = nn.CrossEntropyLoss().to(args.device)
+        self.criterion = nn.CrossEntropyLoss().to(self.args.device)
         self.train_loader, self.val_loader, self.test_loader = (None, None, None)
-        self.activations_collectors = create_activation_stats_collectors(self.model, *args.activation_stats)
+        self.activations_collectors = create_activation_stats_collectors(
+            self.model, *self.args.activation_stats)
     
     def load_datasets(self):
         """Load the datasets"""
diff --git a/distiller/quantization/ptq_greedy_search.py b/distiller/quantization/ptq_greedy_search.py
index 2d9d8a5..5333c4e 100644
--- a/distiller/quantization/ptq_greedy_search.py
+++ b/distiller/quantization/ptq_greedy_search.py
@@ -434,6 +434,7 @@ if __name__ == "__main__":
     args = get_default_args()
     args.epochs = float('inf')  # hack for args parsing so there's no error in epochs
     cc = classifier.ClassifierCompressor(args, script_dir=os.path.dirname(__file__))
+    args = deepcopy(cc.args)  # retrieve args with the modifications made in ClassifierCompressor.__init__
     eval_data_loader = classifier.load_data(args, load_train=False, load_val=False)
 
     # quant calibration dataloader:
diff --git a/examples/auto_compression/amc/parallel-finetune.py b/examples/auto_compression/amc/parallel-finetune.py
index 80c834f..0dedaaf 100755
--- a/examples/auto_compression/amc/parallel-finetune.py
+++ b/examples/auto_compression/amc/parallel-finetune.py
@@ -178,13 +178,13 @@ def finetune_checkpoint(ckpt_file, gpu, app_args, loaders):
     app = classifier.ClassifierCompressor(app_args, script_dir=os.path.dirname(__file__))
     app.train_loader, app.val_loader, app.test_loader = loaders
     best = [float("-inf"), float("-inf"), float("inf")]
-    for epoch in range(app_args.epochs):
-        validate = epoch >= math.floor((1-app_args.validate_enable_factor) * app_args.epochs)
+    for epoch in range(app.args.epochs):
+        validate = epoch >= math.floor((1 - app.args.validate_enable_factor) * app.args.epochs)
         top1, top5, loss = app.train_validate_with_scheduling(epoch, validate=validate, verbose=False)
         if validate:
             if top1 > best[0]:
                 best = [top1, top5, loss]
-    if app_args.validate_enable_factor == 0:
+    if app.args.validate_enable_factor == 0:
         # We did not validate, so our score is the performance on the Test dataset
         return (name, app.test())
     return (name, best)
diff --git a/examples/classifier_compression/compress_classifier.py b/examples/classifier_compression/compress_classifier.py
index 93ac393..5108dbf 100755
--- a/examples/classifier_compression/compress_classifier.py
+++ b/examples/classifier_compression/compress_classifier.py
@@ -73,7 +73,7 @@ def main():
     app = ClassifierCompressorSampleApp(args, script_dir=os.path.dirname(__file__))
     if app.handle_subapps():
         return
-    init_knowledge_distillation(app.args,  app.model, app.compression_scheduler)
+    init_knowledge_distillation(app.args, app.model, app.compression_scheduler)
     app.run_training_loop()
     # Finally run results on the test set
     return app.test()
-- 
GitLab