Sequential Recommendation for Cold-start Users with Meta Transitional Learning

models.py

from collections import OrderedDict

import torch
import torch.nn as nn
from torch.nn import functional as F


class Embedding(nn.Module):
    def __init__(self, num_ent, parameter):
        super(Embedding, self).__init__()
        self.device = torch.device(parameter['device'])
        self.es = parameter['embed_dim']

        # One extra row so that item ids can start from 1.
        self.embedding = nn.Embedding(num_ent + 1, self.es)
        nn.init.xavier_uniform_(self.embedding.weight)

    def forward(self, triples):
        # Each triple is (head item, relation, tail item); only the two item ids are embedded.
        idx = [[[t[0], t[2]] for t in batch] for batch in triples]
        idx = torch.LongTensor(idx).to(self.device)
        return self.embedding(idx)
class MetaLearner(nn.Module):
    def __init__(self, K, embed_size=100, num_hidden1=500, num_hidden2=200, out_size=100, dropout_p=0.5):
        super(MetaLearner, self).__init__()
        self.embed_size = embed_size
        self.K = K
        # num_hidden1, num_hidden2, out_size and dropout_p are accepted for interface
        # compatibility but unused: hidden and output sizes are tied to the embedding size.
        self.out_size = embed_size
        self.hidden_size = embed_size
        self.rnn = nn.LSTM(embed_size, self.hidden_size, 2, dropout=0.2)

    def forward(self, inputs):
        # inputs: (batch, K, 2, embed_size) head/tail embeddings of the support triples.
        size = inputs.shape
        # Rebuild the item sequence from the first two support triples and run it
        # through the LSTM (sequence dimension first).
        x = torch.stack([inputs[:, 0, 0, :], inputs[:, 0, 1, :], inputs[:, 1, 1, :]], dim=1)
        x = x.transpose(0, 1)
        _, (x, c) = self.rnn(x)
        x = x[-1]          # final hidden state of the last LSTM layer
        x = x.squeeze(0)
        return x.view(size[0], 1, 1, self.out_size)
class EmbeddingLearner(nn.Module):
    def __init__(self):
        super(EmbeddingLearner, self).__init__()

    def forward(self, h, t, r, pos_num):
        # TransE-style score: -||h + r - t||_2, so a higher score means a more plausible triple.
        score = -torch.norm(h + r - t, 2, -1).squeeze(2)
        p_score = score[:, :pos_num]   # positive triples
        n_score = score[:, pos_num:]   # negative triples
        return p_score, n_score
def bpr_loss(p_scores, n_values, device):
    # BPR: -log sigmoid(positive score - negative score), averaged over the negatives
    # and summed over the batch.
    ratio = int(n_values.shape[1] / p_scores.shape[1])
    # Repeat each positive score so it is paired with its `ratio` negatives.
    temp_pvalues = torch.empty(p_scores.shape[0], 0, device=device)
    for i in range(p_scores.shape[1]):
        temp_pvalues = torch.cat((temp_pvalues, p_scores[:, i, None].expand(-1, ratio)), dim=1)
    d = torch.sub(temp_pvalues, n_values)
    t = F.logsigmoid(d)
    loss = -1 * (1.0 / n_values.shape[1]) * t.sum(dim=1)
    loss = loss.sum(dim=0)
    return loss
def bpr_max_loss(p_scores, n_values, device):
    # BPR-max: each (positive, negative) score difference is weighted by a softmax
    # over the negative scores.
    s = F.softmax(n_values, dim=1)
    ratio = int(n_values.shape[1] / p_scores.shape[1])
    temp_pvalues = torch.empty(p_scores.shape[0], 0, device=device)
    for i in range(p_scores.shape[1]):
        temp_pvalues = torch.cat((temp_pvalues, p_scores[:, i, None].expand(-1, ratio)), dim=1)
    d = torch.sigmoid(torch.sub(temp_pvalues, n_values))
    t = torch.mul(s, d)
    loss = -1 * torch.log(t.sum(dim=1))
    loss = loss.sum()
    return loss
def top_loss(p_scores, n_values):
    # Alternative pairwise loss (unused by default): sigmoid of score differences
    # plus a sigmoid(n^2) regularisation term on the negative scores.
    p1 = p_scores[:, 0, None]
    p2 = p_scores[:, 1, None]
    num_neg = n_values.shape[1]
    half_index = int(num_neg / 2)
    # Compare the first positive against the first half of the negatives and the
    # second positive against the second half.
    d1 = torch.sub(p1, n_values[:, 0:half_index])
    d2 = torch.sub(p2, n_values[:, half_index:])
    t1 = torch.sigmoid(torch.cat((d1, d2), dim=1))
    t2 = torch.sigmoid(torch.pow(n_values, 2))
    t3 = torch.add(t1, t2)
    loss = t3.sum()
    loss /= n_values.shape[1]
    return loss
class MetaTL(nn.Module):
    def __init__(self, itemnum, parameter):
        super(MetaTL, self).__init__()
        self.device = torch.device(parameter['device'])
        self.beta = parameter['beta']
        self.embed_dim = parameter['embed_dim']
        self.margin = parameter['margin']
        self.embedding = Embedding(itemnum, parameter)

        self.relation_learner = MetaLearner(parameter['K'] - 1, embed_size=self.embed_dim,
                                            num_hidden1=500, num_hidden2=200, out_size=100, dropout_p=0)
        self.embedding_learner = EmbeddingLearner()
        # Alternative: nn.MarginRankingLoss(self.margin)
        self.loss_func = bpr_loss
        self.rel_q_sharing = dict()

    def split_concat(self, positive, negative):
        # Concatenate positive and negative triples along the triple dimension and
        # split them into head and tail embedding tensors.
        pos_neg_e1 = torch.cat([positive[:, :, 0, :],
                                negative[:, :, 0, :]], 1).unsqueeze(2)
        pos_neg_e2 = torch.cat([positive[:, :, 1, :],
                                negative[:, :, 1, :]], 1).unsqueeze(2)
        return pos_neg_e1, pos_neg_e2

    def forward(self, task, iseval=False, curr_rel=''):
        # Turn the task's item-id triples into embeddings.
        support, support_negative, query, negative = [self.embedding(t) for t in task]

        K = support.shape[1]                 # number of support triples
        num_sn = support_negative.shape[1]   # number of support negatives
        num_q = query.shape[1]               # number of query triples
        num_n = negative.shape[1]            # number of query negatives

        # Infer the user-specific transition embedding from the support set.
        rel = self.relation_learner(support)
        rel.retain_grad()

        rel_s = rel.expand(-1, K + num_sn, -1, -1)

        if iseval and curr_rel != '' and curr_rel in self.rel_q_sharing.keys():
            rel_q = self.rel_q_sharing[curr_rel]
        else:
            # Score the support set, then take one gradient step on the transition
            # embedding (the task-specific meta update) before scoring the query set.
            sup_neg_e1, sup_neg_e2 = self.split_concat(support, support_negative)
            p_score, n_score = self.embedding_learner(sup_neg_e1, sup_neg_e2, rel_s, K)

            self.zero_grad()
            loss = self.loss_func(p_score, n_score, device=self.device)
            loss.backward(retain_graph=True)

            grad_meta = rel.grad
            rel_q = rel - self.beta * grad_meta
            self.rel_q_sharing[curr_rel] = rel_q

        rel_q = rel_q.expand(-1, num_q + num_n, -1, -1)

        que_neg_e1, que_neg_e2 = self.split_concat(query, negative)
        p_score, n_score = self.embedding_learner(que_neg_e1, que_neg_e2, rel_q, num_q)

        return p_score, n_score
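
Below is a minimal usage sketch of how MetaTL might be instantiated and called. The parameter values, item ids, and task sizes are illustrative assumptions, not taken from the original training script; the only structure inferred from the code above is a parameter dict with the keys device, beta, embed_dim, margin and K, and a task given as (support, support_negative, query, negative) lists of (head, relation, tail) item-id triples.

# Usage sketch (not part of models.py); all values below are made-up assumptions.
params = {'device': 'cpu', 'beta': 0.05, 'embed_dim': 100, 'margin': 1.0, 'K': 3}
model = MetaTL(itemnum=50, parameter=params)

# One task for one user: two support triples with matching support negatives,
# one query triple and two query negatives. The middle element of each triple
# is a relation placeholder that Embedding.forward ignores.
support          = [[(1, 0, 2), (2, 0, 3)]]
support_negative = [[(1, 0, 7), (2, 0, 9)]]
query            = [[(3, 0, 4)]]
negative         = [[(3, 0, 11), (3, 0, 12)]]

p_score, n_score = model((support, support_negative, query, negative))
print(p_score.shape, n_score.shape)  # expected: torch.Size([1, 1]) torch.Size([1, 2])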