From 74a4f7ab245a961129de1e42ea1d063dd35be114 Mon Sep 17 00:00:00 2001
From: Neta Zmora <neta.zmora@intel.com>
Date: Sun, 17 Mar 2019 15:50:27 +0200
Subject: [PATCH] Replace exit()s with ValueError()s

In several places we hit an error state and terminate by calling exit()
instead of raising a ValueError; this patch fixes that, so callers can
catch and handle the error instead of having the process killed.
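
For illustration, a minimal, self-contained sketch of the pattern this
patch applies (the helper parse_gpu_list below is hypothetical and not
part of compress_classifier.py; it only mirrors the --gpus validation):

    def parse_gpu_list(gpus_str, available_gpus):
        """Parse a comma-separated GPU list and validate it against the device count."""
        try:
            gpus = [int(s) for s in gpus_str.split(',')]
        except ValueError:
            # Re-raise with a clearer message instead of calling exit(1).
            raise ValueError('Argument --gpus must be a comma-separated list of integers only')
        for dev_id in gpus:
            if dev_id >= available_gpus:
                raise ValueError('GPU device ID {0} requested, but only {1} devices available'
                                 .format(dev_id, available_gpus))
        return gpus

    if __name__ == '__main__':
        # The caller can now recover from the error instead of the process exiting.
        try:
            print(parse_gpu_list('0,2', available_gpus=2))
        except ValueError as e:
            print('Caught:', e)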
---
 .../classifier_compression/compress_classifier.py     | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/examples/classifier_compression/compress_classifier.py b/examples/classifier_compression/compress_classifier.py
index 05c0361..c33cc3f 100755
--- a/examples/classifier_compression/compress_classifier.py
+++ b/examples/classifier_compression/compress_classifier.py
@@ -104,8 +104,7 @@ def main():
         # in his blog: https://petewarden.com/2018/03/19/the-machine-learning-reproducibility-crisis/
         # In Pytorch, support for deterministic execution is still a bit clunky.
         if args.workers > 1:
-            msglogger.error('ERROR: Setting --deterministic requires setting --workers/-j to 0 or 1')
-            exit(1)
+            raise ValueError('ERROR: Setting --deterministic requires setting --workers/-j to 0 or 1')
         # Use a well-known seed, for repeatability of experiments
         distiller.set_deterministic()
     else:
@@ -124,14 +123,12 @@ def main():
             try:
                 args.gpus = [int(s) for s in args.gpus.split(',')]
             except ValueError:
-                msglogger.error('ERROR: Argument --gpus must be a comma-separated list of integers only')
-                exit(1)
+                raise ValueError('ERROR: Argument --gpus must be a comma-separated list of integers only')
             available_gpus = torch.cuda.device_count()
             for dev_id in args.gpus:
                 if dev_id >= available_gpus:
-                    msglogger.error('ERROR: GPU device ID {0} requested, but only {1} devices available'
-                                    .format(dev_id, available_gpus))
-                    exit(1)
+                    raise ValueError('ERROR: GPU device ID {0} requested, but only {1} devices available'
+                                     .format(dev_id, available_gpus))
             # Set default device in case the first one on the list != 0
             torch.cuda.set_device(args.gpus[0])
 
-- 
GitLab