From f922973a9d35622018e379ef97a868ae13dc21a7 Mon Sep 17 00:00:00 2001
From: Neta Zmora <neta.zmora@intel.com>
Date: Wed, 19 Dec 2018 15:58:06 +0200
Subject: [PATCH] Bug fix: set the overall loss when not using a compression
 scheduler

If compression_scheduler is None, we still need to update
losses[OVERALL_LOSS_KEY] so that it matches losses[OBJECTIVE_LOSS_KEY].
This case was previously overlooked.
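
For context, a minimal sketch of the affected training-loop logic with
the fix applied. This is abridged; anything not visible in the diff
below (the before_backward_pass call, its arguments, and the
loss_components attribute) is an assumption about the surrounding
code, not a verbatim excerpt:

    # Sketch of the relevant section of train(). losses is a dict of
    # tnt.AverageValueMeter objects keyed by loss name.
    losses[OBJECTIVE_LOSS_KEY].add(loss.item())
    if compression_scheduler:
        # The scheduler may add regularization terms, so the overall
        # loss can differ from the objective loss.
        agg_loss = compression_scheduler.before_backward_pass(
            epoch, train_step, steps_per_epoch, loss,
            optimizer=optimizer, return_loss_components=True)
        loss = agg_loss.overall_loss
        losses[OVERALL_LOSS_KEY].add(loss.item())
        for lc in agg_loss.loss_components:
            if lc.name not in losses:
                losses[lc.name] = tnt.AverageValueMeter()
            losses[lc.name].add(lc.value.item())
    else:
        # Fix: without a scheduler the overall loss equals the
        # objective loss, but the meter must still be updated.
        losses[OVERALL_LOSS_KEY].add(loss.item())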
---
 examples/classifier_compression/compress_classifier.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/examples/classifier_compression/compress_classifier.py b/examples/classifier_compression/compress_classifier.py
index 5ba8222..d0b53db 100755
--- a/examples/classifier_compression/compress_classifier.py
+++ b/examples/classifier_compression/compress_classifier.py
@@ -509,6 +509,8 @@ def train(train_loader, model, criterion, optimizer, epoch,
                 if lc.name not in losses:
                     losses[lc.name] = tnt.AverageValueMeter()
                 losses[lc.name].add(lc.value.item())
+        else:
+            losses[OVERALL_LOSS_KEY].add(loss.item())
 
         # Compute the gradient and do SGD step
         optimizer.zero_grad()
-- 
GitLab