diff --git a/distiller/apputils/image_classifier.py b/distiller/apputils/image_classifier.py
index 47287fc9e6848a9ecba594dc339213f4389f8b76..451a0ec1b20feec6f10407cb9b6cd589f1d9a853 100755
--- a/distiller/apputils/image_classifier.py
+++ b/distiller/apputils/image_classifier.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 #
 
+import copy
 import math
 import time
 import os
@@ -54,11 +55,11 @@ class ClassifierCompressor(object):
         - Classifier training, verification and testing
     """
     def __init__(self, args, script_dir):
-        self.args = args
-        _infer_implicit_args(args)
-        self.logdir = _init_logger(args, script_dir)
-        _config_determinism(args)
-        _config_compute_device(args)
+        self.args = copy.deepcopy(args)
+        _infer_implicit_args(self.args)
+        self.logdir = _init_logger(self.args, script_dir)
+        _config_determinism(self.args)
+        _config_compute_device(self.args)
         
         # Create a couple of logging backends.  TensorBoardLogger writes log files in a format
         # that can be read by Google's Tensor Board.  PythonLogger writes to the Python logger.
@@ -68,12 +69,13 @@ class ClassifierCompressor(object):
             self.tflogger = TensorBoardLogger(msglogger.logdir)
             self.pylogger = PythonLogger(msglogger)
         (self.model, self.compression_scheduler, self.optimizer, 
-             self.start_epoch, self.ending_epoch) = _init_learner(args)
+             self.start_epoch, self.ending_epoch) = _init_learner(self.args)
 
         # Define loss function (criterion)
-        self.criterion = nn.CrossEntropyLoss().to(args.device)
+        self.criterion = nn.CrossEntropyLoss().to(self.args.device)
         self.train_loader, self.val_loader, self.test_loader = (None, None, None)
-        self.activations_collectors = create_activation_stats_collectors(self.model, *args.activation_stats)
+        self.activations_collectors = create_activation_stats_collectors(
+            self.model, *self.args.activation_stats)
     
     def load_datasets(self):
         """Load the datasets"""
diff --git a/distiller/quantization/ptq_greedy_search.py b/distiller/quantization/ptq_greedy_search.py
index 2d9d8a5b252f380375acda7abf0b4ddd9ef2894d..5333c4e0bfe0f9a52ab18b9a6a1b33984d15a9c2 100644
--- a/distiller/quantization/ptq_greedy_search.py
+++ b/distiller/quantization/ptq_greedy_search.py
@@ -434,6 +434,7 @@ if __name__ == "__main__":
     args = get_default_args()
     args.epochs = float('inf')  # hack for args parsing so there's no error in epochs
     cc = classifier.ClassifierCompressor(args, script_dir=os.path.dirname(__file__))
+    args = deepcopy(cc.args)  # Read back args as modified by ClassifierCompressor.__init__ (which now keeps its own deep copy)
     eval_data_loader = classifier.load_data(args, load_train=False, load_val=False)
 
     # quant calibration dataloader:
diff --git a/examples/auto_compression/amc/parallel-finetune.py b/examples/auto_compression/amc/parallel-finetune.py
index 80c834f9ba5dbc764fe1436edbceaad09a13d912..0dedaaf98c58e05ff0f3caa521d2300c78ec1915 100755
--- a/examples/auto_compression/amc/parallel-finetune.py
+++ b/examples/auto_compression/amc/parallel-finetune.py
@@ -178,13 +178,13 @@ def finetune_checkpoint(ckpt_file, gpu, app_args, loaders):
     app = classifier.ClassifierCompressor(app_args, script_dir=os.path.dirname(__file__))
     app.train_loader, app.val_loader, app.test_loader = loaders
     best = [float("-inf"), float("-inf"), float("inf")]
-    for epoch in range(app_args.epochs):
-        validate = epoch >= math.floor((1-app_args.validate_enable_factor) * app_args.epochs)
+    for epoch in range(app.args.epochs):
+        validate = epoch >= math.floor((1 - app.args.validate_enable_factor) * app.args.epochs)
         top1, top5, loss = app.train_validate_with_scheduling(epoch, validate=validate, verbose=False)
         if validate:
             if top1 > best[0]:
                 best = [top1, top5, loss]
-    if app_args.validate_enable_factor == 0:
+    if app.args.validate_enable_factor == 0:
         # We did not validate, so our score is the performance on the Test dataset
         return (name, app.test())
     return (name, best)
diff --git a/examples/classifier_compression/compress_classifier.py b/examples/classifier_compression/compress_classifier.py
index 93ac393c382ed993ebc4c82844870ce7a65471b6..5108dbfd78f3132a8a7c06afdc33e6f9c052754e 100755
--- a/examples/classifier_compression/compress_classifier.py
+++ b/examples/classifier_compression/compress_classifier.py
@@ -73,7 +73,7 @@ def main():
     app = ClassifierCompressorSampleApp(args, script_dir=os.path.dirname(__file__))
     if app.handle_subapps():
         return
-    init_knowledge_distillation(app.args,  app.model, app.compression_scheduler)
+    init_knowledge_distillation(app.args, app.model, app.compression_scheduler)
     app.run_training_loop()
     # Finally run results on the test set
     return app.test()
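
A minimal caller-side sketch of the pattern this patch introduces (illustrative only, mirroring the ptq_greedy_search.py hunk above): since ClassifierCompressor.__init__ now operates on a deep copy, the namespace passed in is no longer mutated, so any fields inferred or adjusted during construction must be read back from the instance's own copy.

    import copy
    # Build the compressor from a locally constructed argument namespace.
    cc = classifier.ClassifierCompressor(args, script_dir=os.path.dirname(__file__))
    # The caller's 'args' is left untouched; read the fully configured copy instead.
    args = copy.deepcopy(cc.args)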