class NeuMF(GeneralRecommender):
    r"""NeuMF is a neural network enhanced matrix factorization model.

    It replaces the dot product with an MLP for a more precise modelling of
    the user-item interaction.

    Note:
        Our implementation only contains a rough pretraining function.
    """

    input_type = InputType.POINTWISE

    def __init__(self, config, dataset):
        super(NeuMF, self).__init__(config, dataset)

        # load dataset info
        self.LABEL = config["LABEL_FIELD"]

        # load parameters info
        self.mf_embedding_size = config["mf_embedding_size"]
        self.mlp_embedding_size = config["mlp_embedding_size"]
        self.mlp_hidden_size = config["mlp_hidden_size"]
        self.dropout_prob = config["dropout_prob"]
        self.mf_train = config["mf_train"]
        self.mlp_train = config["mlp_train"]
        self.use_pretrain = config["use_pretrain"]
        self.mf_pretrain_path = config["mf_pretrain_path"]
        self.mlp_pretrain_path = config["mlp_pretrain_path"]

        # Fail fast on an invalid configuration.  Without this check,
        # ``predict_layer`` is never created when both flags are False and
        # the model only blows up later with a confusing AttributeError.
        if not self.mf_train and not self.mlp_train:
            raise ValueError(
                "mf_train and mlp_train can not be False at the same time"
            )

        # define layers and loss
        self.user_mf_embedding = nn.Embedding(self.n_users, self.mf_embedding_size)
        self.item_mf_embedding = nn.Embedding(self.n_items, self.mf_embedding_size)
        self.user_mlp_embedding = nn.Embedding(self.n_users, self.mlp_embedding_size)
        self.item_mlp_embedding = nn.Embedding(self.n_items, self.mlp_embedding_size)
        self.mlp_layers = MLPLayers(
            [2 * self.mlp_embedding_size] + self.mlp_hidden_size, self.dropout_prob
        )
        self.mlp_layers.logger = None  # remove logger to use torch.save()
        if self.mf_train and self.mlp_train:
            # Fusing both branches: GMF vector concatenated with the last
            # MLP hidden layer feeds a single scoring unit.
            self.predict_layer = nn.Linear(
                self.mf_embedding_size + self.mlp_hidden_size[-1], 1
            )
        elif self.mf_train:
            self.predict_layer = nn.Linear(self.mf_embedding_size, 1)
        else:
            # Only the MLP branch is enabled (guaranteed by the check above).
            self.predict_layer = nn.Linear(self.mlp_hidden_size[-1], 1)
        self.sigmoid = nn.Sigmoid()
        self.loss = nn.BCEWithLogitsLoss()

        # parameters initialization
        if self.use_pretrain:
            # load_pretrain is presumably defined alongside this class —
            # not visible in this chunk, so its contract is not documented here.
            self.load_pretrain()
        else:
            self.apply(self._init_weights)
[docs]defforward(self,user,item):user_mf_e=self.user_mf_embedding(user)item_mf_e=self.item_mf_embedding(item)user_mlp_e=self.user_mlp_embedding(user)item_mlp_e=self.item_mlp_embedding(item)ifself.mf_train:mf_output=torch.mul(user_mf_e,item_mf_e)# [batch_size, embedding_size]ifself.mlp_train:mlp_output=self.mlp_layers(torch.cat((user_mlp_e,item_mlp_e),-1))# [batch_size, layers[-1]]ifself.mf_trainandself.mlp_train:output=self.predict_layer(torch.cat((mf_output,mlp_output),-1))elifself.mf_train:output=self.predict_layer(mf_output)elifself.mlp_train:output=self.predict_layer(mlp_output)else:raiseRuntimeError("mf_train and mlp_train can not be False at the same time")returnoutput.squeeze(-1)
[docs]defdump_parameters(self):r"""A simple implementation of dumping model parameters for pretrain."""ifself.mf_trainandnotself.mlp_train:save_path=self.mf_pretrain_pathtorch.save(self,save_path)elifself.mlp_trainandnotself.mf_train:save_path=self.mlp_pretrain_pathtorch.save(self,save_path)