From 0a8a3b3108456d1698c976f277b0783f32f70636 Mon Sep 17 00:00:00 2001
From: Neta Zmora <neta.zmora@intel.com>
Date: Thu, 18 Oct 2018 19:58:08 +0300
Subject: [PATCH] Bug fix: remove softmax layer from model loading code

We should only add softmax when we explicitly require it (as when
exporting to ONNX), because CrossEntropyLoss implicitly computes softmax
on the logits it receives as input. This code was left there by mistake
and should never have been pushed to git.
---
 models/__init__.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/models/__init__.py b/models/__init__.py
index 8d40c77..04b11b2 100755
--- a/models/__init__.py
+++ b/models/__init__.py
@@ -75,9 +75,5 @@ def create_model(pretrained, dataset, arch, parallel=True, device_ids=None):
     elif parallel:
         model = torch.nn.DataParallel(model, device_ids=device_ids)
 
-    # explicitly add a softmax layer, because it is useful when exporting to ONNX
-    model.original_forward = model.forward
-    softmax = torch.nn.Softmax(dim=1)
-    model.forward = lambda input: softmax(model.original_forward(input))
     model.cuda()
     return model
-- 
GitLab
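
Note (not part of the patch): a minimal standalone sketch, using only standard PyTorch APIs, of why the removed wrapper was harmful. torch.nn.CrossEntropyLoss already applies log-softmax to the logits it receives, so a model whose forward() returns softmaxed probabilities gets softmaxed twice during training. The tensor shapes and seed below are illustrative.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    torch.manual_seed(0)
    logits = torch.randn(4, 10)          # raw model outputs (logits) for a batch of 4
    target = torch.randint(0, 10, (4,))  # ground-truth class labels

    # CrossEntropyLoss expects raw logits: it is equivalent to LogSoftmax + NLLLoss.
    ce = nn.CrossEntropyLoss()(logits, target)
    nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), target)
    print(torch.allclose(ce, nll))   # True

    # Feeding already-softmaxed outputs (as the removed forward() wrapper did)
    # applies softmax twice and distorts the loss and its gradients.
    double = nn.CrossEntropyLoss()(F.softmax(logits, dim=1), target)
    print(torch.allclose(ce, double))  # False

When softmax outputs are genuinely needed, e.g. at ONNX export time, one option is to wrap the model explicitly and temporarily, for instance with torch.nn.Sequential(model, torch.nn.Softmax(dim=1)), rather than patching forward() when the model is loaded.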