From f762fef4285837f60a09e48848d01fc786d4eb27 Mon Sep 17 00:00:00 2001
From: Guy Jacob <guy.jacob@intel.com>
Date: Wed, 31 Jul 2019 12:48:57 +0300
Subject: [PATCH] Fix crash in dynamic post-train quant (caused by commit
 69b1452)

---
 distiller/quantization/range_linear.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
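
Note (annotation, not part of the commit): the two hunks below move the "no activation
stats" handling so that each stats-dependent step guards itself. With dynamic
post-training quantization no activation stats are collected, so the code that modifies
per-module stats for modules followed by ReLU/Tanh/Sigmoid has to bail out before
touching them, while the BN-folding stats update keeps a silent early return. The
Python sketch below is an illustration only: the function names, logger setup and
elided bodies are placeholders and do not come from distiller; only the two guards and
their log messages correspond to the hunks that follow.

import logging
from collections import OrderedDict

msglogger = logging.getLogger(__name__)

def update_stats_for_bn_folding(model_activation_stats):
    # First hunk: nothing to propagate without stats, so return quietly.
    # The user-facing message now lives next to the fusion optimizations instead.
    if not model_activation_stats:
        return
    # ... update the activation stats to reflect BN folding ...

def optimize_activation_fusions(model_activation_stats, model):
    # Second hunk: guard the fusion optimizations themselves, so the code that
    # modifies per-module stats is never reached when no stats were collected
    # (the dynamic post-training quantization case that crashed).
    if not model_activation_stats:
        msglogger.info("No activation stats - skipping optimizations for modules "
                       "followed by Relu/Tanh/Sigmoid")
        return

    msglogger.info('Optimizing output statistics for modules followed by ReLU/Tanh/Sigmoid')
    named_modules = OrderedDict(model.named_modules())
    # ... restrict each module's output stats to the range relevant to the activation ...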

diff --git a/distiller/quantization/range_linear.py b/distiller/quantization/range_linear.py
index 343f887..f31c945 100644
--- a/distiller/quantization/range_linear.py
+++ b/distiller/quantization/range_linear.py
@@ -982,7 +982,6 @@ class PostTrainLinearQuantizer(Quantizer):
         self.adjacency_map = summary_graph.adjacency_map(dedicated_modules_only=False)
 
         if not self.model_activation_stats:
-            msglogger.info("No activation stats - skipping optimizations for modules followed by Relu/Tanh/Sigmoid")
             return
 
         # Update the activation stats to reflect BN folding
@@ -1005,6 +1004,10 @@ class PostTrainLinearQuantizer(Quantizer):
         # Now we look for certain "fusions" of layers and activations
         # We modify stats to make sure we quantize only the ranges relevant to the activation function
         # By doing so we reduce quantization error while still keeping all
+        if not self.model_activation_stats:
+            msglogger.info("No activation stats - skipping optimizations for modules followed by Relu/Tanh/Sigmoid")
+            return
+
         msglogger.info('Optimizing output statistics for modules followed by ReLU/Tanh/Sigmoid')
 
         named_modules = OrderedDict(self.model.named_modules())
-- 
GitLab