From 55c2d1dc581ba1940d7a602005f6a15c0971af19 Mon Sep 17 00:00:00 2001
From: barrh <elhararb@gmail.com>
Date: Mon, 9 Sep 2019 00:30:10 +0300
Subject: [PATCH] Fix epochs limitation check

This patch moves the check on remaining training epochs into the training loop, as it is irrelevant for non-training invocations of ClassifierCompressor.
Thus, resumed checkpoints can be evaluated regardless of the epochs limitation.
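For example (a sketch of the intended usage; the test() entry point and the
constructor signature are assumed from the surrounding ClassifierCompressor
class, only run_training_loop() is touched by this patch):

    app = ClassifierCompressor(args, script_dir)
    app.test()               # evaluation-only run: no longer hits the epochs check
    app.run_training_loop()  # raises ValueError if start_epoch >= ending_epoch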
---
 distiller/apputils/image_classifier.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/distiller/apputils/image_classifier.py b/distiller/apputils/image_classifier.py
index f2f2a57..c5cd3f6 100755
--- a/distiller/apputils/image_classifier.py
+++ b/distiller/apputils/image_classifier.py
@@ -152,6 +152,12 @@ class ClassifierCompressor(object):
             validate_one_epoch
             finalize_epoch
         """
+        if self.start_epoch >= self.ending_epoch:
+            msglogger.error(
+                'epoch count is too low, starting epoch is {} but total epochs set to {}'.format(
+                self.start_epoch, self.ending_epoch))
+            raise ValueError('Epochs parameter is too low. Nothing to do.')
+
         # Load the datasets lazily
         self.load_datasets()
 
@@ -396,13 +402,7 @@ def _init_learner(args):
     elif compression_scheduler is None:
         compression_scheduler = distiller.CompressionScheduler(model)
 
-    ending_epoch = args.epochs
-    if start_epoch >= ending_epoch:
-        msglogger.error(
-            'epoch count is too low, starting epoch is {} but total epochs set to {}'.format(
-            start_epoch, ending_epoch))
-        raise ValueError('Epochs parameter is too low. Nothing to do.')
-    return model, compression_scheduler, optimizer, start_epoch, ending_epoch
+    return model, compression_scheduler, optimizer, start_epoch, args.epochs
 
 
 def create_activation_stats_collectors(model, *phases):
-- 
GitLab