diff --git a/examples/word_language_model/main.py b/examples/word_language_model/main.py
index 1ad14db5907a04a62f1238e64cf5194e44b23195..6e1cb71dc3008ab7dab94a04f1bd8e767e6e8249 100755
--- a/examples/word_language_model/main.py
+++ b/examples/word_language_model/main.py
@@ -271,7 +271,7 @@ def train(epoch, optimizer, compression_scheduler=None):
 
 def export_onnx(path, batch_size, seq_len):
     msglogger.info('The model is also exported in ONNX format at {}'.
-                format(os.path.realpath(args.onnx_export)))
+                   format(os.path.realpath(args.onnx_export)))
     model.eval()
     dummy_input = torch.LongTensor(seq_len * batch_size).zero_().view(-1, batch_size).to(device)
     hidden = model.init_hidden(batch_size)
@@ -297,7 +297,7 @@ if args.summary:
             bottomk, _ = torch.topk(param.abs().view(-1), int(percentile * param.numel()),
                                     largest=False, sorted=True)
             threshold = bottomk.data[-1]
-            msglogger.info("parameter %s: q = %.2f" %(name, threshold))
+            msglogger.info("parameter %s: q = %.2f" % (name, threshold))
     else:
         distiller.model_summary(model, which_summary, 'wikitext2')
         exit(0)