From 0edfb5a919e7c05e08563b71fb7117e25f177b38 Mon Sep 17 00:00:00 2001
From: Neta Zmora <neta.zmora@intel.com>
Date: Sun, 13 Jan 2019 17:08:29 +0200
Subject: [PATCH] compress_classifier.py: fix handling of --cpu application argument

---
 examples/classifier_compression/compress_classifier.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/classifier_compression/compress_classifier.py b/examples/classifier_compression/compress_classifier.py
index daf46fb..381a17c 100755
--- a/examples/classifier_compression/compress_classifier.py
+++ b/examples/classifier_compression/compress_classifier.py
@@ -142,7 +142,7 @@ parser.add_argument('--deterministic', '--det', action='store_true',
                     help='Ensure deterministic execution for re-producible results.')
 parser.add_argument('--gpus', metavar='DEV_ID', default=None,
                     help='Comma-separated list of GPU device IDs to be used (default is to use all available devices)')
-parser.add_argument('--cpu', action='store_true',
+parser.add_argument('--cpu', action='store_true', default=False,
                     help='Use CPU only. \n'
                          'Flag not set => uses GPUs according to the --gpus flag value.'
                          'Flag set => overrides the --gpus flag')
@@ -294,7 +294,7 @@ def main():
         # results are not re-produced when benchmark is set. So enabling only if deterministic mode disabled.
         cudnn.benchmark = True
 
-    if args.cpu is not None or not torch.cuda.is_available():
+    if args.cpu or not torch.cuda.is_available():
         # Set GPU index to -1 if using CPU
         args.device = 'cpu'
     else:
--
GitLab
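
Note (not part of the original patch): a minimal, self-contained sketch of the argparse behavior this fix relies on. With action='store_true', argparse defaults the flag to False, so the old check `args.cpu is not None` was always True and forced CPU mode even when --cpu was not passed; testing the boolean value directly is the correct check.

    # Standalone illustration only; not Distiller code.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--cpu', action='store_true', default=False)

    args = parser.parse_args([])            # --cpu not given
    assert args.cpu is False                # old check `args.cpu is not None` -> True (the bug)
    assert not args.cpu                     # new check `args.cpu` -> False (GPU path kept)

    args = parser.parse_args(['--cpu'])     # --cpu given
    assert args.cpu                         # new check correctly selects CPU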