diff --git a/examples/ncf/ncf.py b/examples/ncf/ncf.py
index a32fa982044a0137b25d7032c1bbf1f86395790d..b3148ea97079a4c6e9f9c834e6f4743bcd012cce 100644
--- a/examples/ncf/ncf.py
+++ b/examples/ncf/ncf.py
@@ -103,7 +103,7 @@ def predict(model, users, items, batch_size=1024, use_cuda=True):
             if use_cuda:
                 x = x.cuda(async=True)
             return torch.autograd.Variable(x)
-        outp = model(proc(user), proc(item), sigmoid=True)
+        outp = model(proc(user), proc(item), torch.tensor([True], dtype=torch.bool))
         outp = outp.data.cpu().numpy()
         preds += list(outp.flatten())
     return preds
@@ -354,7 +354,8 @@ def main():
     if args.quantize_eval and args.qe_calibration is None:
         model.cpu()
         quantizer = quantization.PostTrainLinearQuantizer.from_args(model, args)
-        quantizer.prepare_model()
+        dummy_input = (torch.tensor([1]), torch.tensor([1]), torch.tensor([True], dtype=torch.bool))
+        quantizer.prepare_model(dummy_input)
         model.cuda()
 
     distiller.utils.assign_layer_fq_names(model)
@@ -406,7 +407,7 @@ def main():
             if compression_scheduler:
                 compression_scheduler.on_minibatch_begin(epoch, batch_index, steps_per_epoch, optimizer)
 
-            outputs = model(user, item)
+            outputs = model(user, item, torch.tensor([False], dtype=torch.bool))
             loss = criterion(outputs, label)
 
             if compression_scheduler:
diff --git a/examples/ncf/neumf.py b/examples/ncf/neumf.py
index dbf71b312b1cf799f2ba0d26ee19611ba3d3b0d3..93b50ca2db37316fb11433362cbed15cfeb24041 100644
--- a/examples/ncf/neumf.py
+++ b/examples/ncf/neumf.py
@@ -98,7 +98,7 @@ class NeuMF(nn.Module):
 
         super(NeuMF, self).load_state_dict(state_dict, strict)
 
-    def forward(self, user, item, sigmoid=False):
+    def forward(self, user, item, sigmoid):
         xmfu = self.mf_user_embed(user)  # .to(self.post_embed_device)
         xmfi = self.mf_item_embed(item)  # .to(self.post_embed_device)
         xmf = self.mf_mult(xmfu, xmfi)