From 34f9a55b68a588fcc8325dfc37534f12ef358673 Mon Sep 17 00:00:00 2001
From: Guy Jacob <guy.jacob@intel.com>
Date: Wed, 3 Jul 2019 15:01:11 +0300
Subject: [PATCH] Dump outputs in run log dir instead of script dir

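Summary artifacts (model.png, the CSV sparsity log, sensitivity.png and
sensitivity.csv) were previously written relative to the script's working
directory. Write them to the run's log directory instead:

* model_summary() accepts an optional 'logdir' argument (default '') and
  forwards it to draw_img_classifier_to_file() and CsvLogger()
* compress_classifier.py saves sensitivity.png / sensitivity.csv under
  msglogger.logdir
* Tests dump their collaterals into tests/pytest_collaterals/ (added to
  .gitignore); the full-flow checkers now receive the run directory and
  resolve collateral file paths against it

For example, an application using the app-level msglogger can now route the
sparsity summary to its own log directory (sketch; assumes a
compress_classifier-style logging setup that provides msglogger.logdir):

    distiller.model_summary(model, 'sparsity', dataset='cifar10',
                            logdir=msglogger.logdir)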
---
 .gitignore                                             |  1 +
 distiller/model_summaries.py                           |  7 ++++---
 examples/classifier_compression/compress_classifier.py |  4 ++--
 tests/common.py                                        | 10 ++++++++++
 tests/full_flow_tests.py                               |  9 +++++----
 tests/test_model_summary.py                            |  9 ++++++---
 6 files changed, 28 insertions(+), 12 deletions(-)

diff --git a/.gitignore b/.gitignore
index ccc482d..913ea4c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,6 +8,7 @@ logs/
 __pycache__/
 .pytest_cache
 .cache
+pytest_collaterals/
 
 # Virtual env
 env/
diff --git a/distiller/model_summaries.py b/distiller/model_summaries.py
index 5ee06fd..ec07004 100755
--- a/distiller/model_summaries.py
+++ b/distiller/model_summaries.py
@@ -42,12 +42,13 @@ __all__ = ['model_summary',
            'draw_model_to_file', 'draw_img_classifier_to_file', 'export_img_classifier_to_onnx']
 
 
-def model_summary(model, what, dataset=None):
+def model_summary(model, what, dataset=None, logdir=''):
     if what.startswith('png'):
-        draw_img_classifier_to_file(model, 'model.png', dataset, what == 'png_w_params')
+        png_fname = os.path.join(logdir, 'model.png')
+        draw_img_classifier_to_file(model, png_fname, dataset, what == 'png_w_params')
     elif what == 'sparsity':
         pylogger = PythonLogger(msglogger)
-        csvlogger = CsvLogger()
+        csvlogger = CsvLogger(logdir=logdir)
         distiller.log_weights_sparsity(model, -1, loggers=[pylogger, csvlogger])
     elif what == 'compute':
         try:
diff --git a/examples/classifier_compression/compress_classifier.py b/examples/classifier_compression/compress_classifier.py
index 0b13698..93f9298 100755
--- a/examples/classifier_compression/compress_classifier.py
+++ b/examples/classifier_compression/compress_classifier.py
@@ -667,8 +667,8 @@ def sensitivity_analysis(model, criterion, data_loader, loggers, args, sparsitie
                                                          sparsities=sparsities,
                                                          test_func=test_fnc,
                                                          group=args.sensitivity)
-    distiller.sensitivities_to_png(sensitivity, 'sensitivity.png')
-    distiller.sensitivities_to_csv(sensitivity, 'sensitivity.csv')
+    distiller.sensitivities_to_png(sensitivity, os.path.join(msglogger.logdir, 'sensitivity.png'))
+    distiller.sensitivities_to_csv(sensitivity, os.path.join(msglogger.logdir, 'sensitivity.csv'))
 
 
 def automated_deep_compression(model, criterion, optimizer, loggers, args):
diff --git a/tests/common.py b/tests/common.py
index 728d62f..1afabf6 100755
--- a/tests/common.py
+++ b/tests/common.py
@@ -14,10 +14,20 @@
 # limitations under the License.
 #
 import torch
+import os
+import errno
 import distiller
 from distiller.models import create_model
 
 
+PYTEST_COLLATERALS_DIR = os.path.join(os.path.dirname(__file__), 'pytest_collaterals')
+try:
+    os.makedirs(PYTEST_COLLATERALS_DIR)
+except OSError as e:
+    if e.errno != errno.EEXIST:
+        raise
+
+
 def setup_test(arch, dataset, parallel):
     model = create_model(False, dataset, arch, parallel=parallel)
     assert model is not None
diff --git a/tests/full_flow_tests.py b/tests/full_flow_tests.py
index 01f15c6..64fa60b 100755
--- a/tests/full_flow_tests.py
+++ b/tests/full_flow_tests.py
@@ -85,7 +85,7 @@ def compare_values(name, expected, actual):
         return True
 
 
-def accuracy_checker(log, expected_top1, expected_top5):
+def accuracy_checker(log, run_dir, expected_top1, expected_top5):
     tops = re.findall(r"Top1: (?P<top1>\d*\.\d*) *Top5: (?P<top5>\d*\.\d*)", log)
     if not tops:
         error('No accuracy results in log')
@@ -95,7 +95,7 @@ def accuracy_checker(log, expected_top1, expected_top5):
     return compare_values('Top-5', expected_top5, float(tops[-1][1]))
 
 
-def collateral_checker(log, *collateral_list):
+def collateral_checker(log, run_dir, *collateral_list):
     """Test that the test produced the expected collaterals.
 
     A collateral_list is a list of tuples, where tuple elements are:
@@ -103,7 +103,8 @@ def collateral_checker(log, *collateral_list):
         1: expected file size
     """
     for collateral in collateral_list:
-        statinfo = os.stat(collateral[0])
+        file_path = os.path.join(run_dir, collateral[0])
+        statinfo = os.stat(file_path)
         if statinfo.st_size != collateral[1]:
             return False
     return True
@@ -202,7 +203,7 @@ def run_tests():
                             format(p.returncode), idx, cmd, log_path, failed_tests, log)
             continue
         test_progress('Running checker: ' + colorize(tc.checker_fn.__name__, Colors.YELLOW))
-        if not tc.checker_fn(log, *tc.checker_args):
+        if not tc.checker_fn(log, os.path.split(log_path)[0], *tc.checker_args):
             process_failure('Checker failed', idx, cmd, log_path, failed_tests, log)
             continue
         success('TEST PASSED')
diff --git a/tests/test_model_summary.py b/tests/test_model_summary.py
index f187d12..a818689 100755
--- a/tests/test_model_summary.py
+++ b/tests/test_model_summary.py
@@ -17,6 +17,7 @@
 import logging
 import distiller
 import pytest
+import os
 import common  # common test code
 
 
@@ -29,13 +30,15 @@ logger.addHandler(fh)
 
 SUMMARY_CHOICES = ['sparsity', 'compute', 'model', 'modules', 'png', 'png_w_params']
 
+
 @pytest.mark.parametrize('display_param_nodes', [True, False])
 def test_png_generation(display_param_nodes):
     dataset = "cifar10"
     arch = "resnet20_cifar"
     model, _ = common.setup_test(arch, dataset, parallel=True)
     # 2 different ways to create a PNG
-    distiller.draw_img_classifier_to_file(model, 'model.png', dataset, display_param_nodes)
+    png_fname = os.path.join(common.PYTEST_COLLATERALS_DIR, 'model.png')
+    distiller.draw_img_classifier_to_file(model, png_fname, dataset, display_param_nodes)
 
 
 def test_compute_summary():
@@ -64,7 +67,7 @@ def test_summary(what):
     dataset = "cifar10"
     arch = "resnet20_cifar"
     model, _ = common.setup_test(arch, dataset, parallel=True)
-    distiller.model_summary(model, what, dataset=dataset)
+    distiller.model_summary(model, what, dataset=dataset, logdir=common.PYTEST_COLLATERALS_DIR)
 
 
 @pytest.mark.parametrize('what', SUMMARY_CHOICES)
@@ -72,4 +75,4 @@ def test_mnist(what):
     dataset = "mnist"
     arch = "simplenet_mnist"
     model, _ = common.setup_test(arch, dataset, parallel=True)
-    distiller.model_summary(model, what, dataset=dataset)
\ No newline at end of file
+    distiller.model_summary(model, what, dataset=dataset, logdir=common.PYTEST_COLLATERALS_DIR)
-- 
GitLab