@@ -52,6 +52,20 @@ class EmbeddingLearner(nn.Module):
         n_score = score[:, pos_num:]
 
         return p_score, n_score
 
+
+def bpr_loss(p_scores, n_values):
+    # The two positive scores per task (the indexing assumes exactly two).
+    p1 = p_scores[:, 0, None]
+    p2 = p_scores[:, 1, None]
+
+    # Split the negatives in half; each positive is ranked against one half.
+    num_neg = n_values.shape[1]
+    half_index = num_neg // 2
+
+    d1 = torch.sub(p1, n_values[:, 0:half_index])
+    d2 = torch.sub(p2, n_values[:, half_index:])
+
+    # BPR-style objective: push the summed score differences (one per pair of
+    # negatives) through log-sigmoid, then normalize by the negative count.
+    t = F.logsigmoid(torch.add(d1, d2))
+
+    loss = -t.sum() / num_neg
+    return loss
+
+
 class MetaTL(nn.Module):
     def __init__(self, itemnum, parameter):
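
# --- Sketch (editorial, not part of the patch): exercising the new bpr_loss
# on dummy tensors. The shapes are assumptions read off the indexing above:
# exactly two positive scores per task and an even number of negatives.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
p_score = torch.randn(4, 2)    # 4 tasks, 2 positive scores each
n_score = torch.randn(4, 10)   # 10 negative scores each, split 5/5 inside bpr_loss
print(bpr_loss(p_score, n_score))  # scalar; shrinks as positives outscore negatives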
@@ -68,6 +82,7 @@ class MetaTL(nn.Module):
         self.embedding_learner = EmbeddingLearner()
         self.loss_func = nn.MarginRankingLoss(self.margin)
 
         self.rel_q_sharing = dict()
 
     def split_concat(self, positive, negative):
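
# --- Sketch (editorial, not part of the patch): the objective being swapped
# out. nn.MarginRankingLoss(margin)(x1, x2, y) computes
# mean(max(0, -y * (x1 - x2) + margin)), so with y = 1 it only penalizes
# positives that fail to beat their negatives by at least `margin`, whereas
# the log-sigmoid in bpr_loss rewards every score gap smoothly. Values below
# are illustrative.
import torch
import torch.nn as nn

x1 = torch.tensor([[0.9, 0.2]])   # stand-in positive scores
x2 = torch.tensor([[0.1, 0.4]])   # stand-in negative scores
y = torch.Tensor([1])
print(nn.MarginRankingLoss(margin=1.0)(x1, x2, y))  # tensor(0.7000)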
@@ -101,15 +116,15 @@ class MetaTL(nn.Module):
         y = torch.Tensor([1]).to(self.device)
         self.zero_grad()
 
-        sorted,indecies = torch.sort(n_score, descending=True,dim=1)
-        n_values = sorted[:,0:p_score.shape[1]]
-        loss = self.loss_func(p_score, n_values, y)
+        # Swap margin ranking over the hardest negatives for a BPR loss over
+        # all sampled negatives; the old lines are kept below for reference.
+        # sorted,indecies = torch.sort(n_score, descending=True,dim=1)
+        # n_values = sorted[:,0:p_score.shape[1]]
+        n_values = n_score
+        loss = bpr_loss(p_score, n_values)
+        # loss = self.loss_func(p_score, n_values, y)
         loss.backward(retain_graph=True)
 
         grad_meta = rel.grad
         rel_q = rel - self.beta * grad_meta
 
         self.rel_q_sharing[curr_rel] = rel_q
 
         rel_q = rel_q.expand(-1, num_q + num_n, -1, -1)
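
# --- Sketch (editorial, not part of the patch): the one-step, MAML-style
# adaptation pattern used above. The support-set gradient refines the relation
# embedding, and the refined copy (rel_q) is what scores the query set. Shapes
# and the toy loss below are assumptions for illustration only.
import torch

beta = 0.1                                            # inner-loop step size (self.beta)
rel = torch.randn(2, 1, 1, 100, requires_grad=True)   # assumed [batch, 1, 1, embed_dim]
support_loss = rel.pow(2).sum()                       # stand-in for bpr_loss on the support set
support_loss.backward(retain_graph=True)
rel_q = rel - beta * rel.grad                         # adapted relation, as in the patch
rel_q = rel_q.expand(-1, 5, -1, -1)                   # broadcast over num_q + num_n items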