| @@ -0,0 +1,27 @@ | |||
| Sequential Recommendation for Cold-start Users with Meta Transitional Learning(SIGIR2021) | |||
| ============ | |||
## MetaTL
| Code of paper "[Sequential Recommendation for Cold-start Users with Meta Transitional Learning](http://people.tamu.edu/~jwang713/pubs/MetaTL-sigir2021)". | |||
| ## Requirements | |||
| python==3.6.8 | |||
| ## Usage | |||
```
python main.py --K 3
```
| ## Cite | |||
| Please cite our paper if you use this code in your own work: | |||
| ``` | |||
| @inproceedings{wang2021sequential, | |||
| title={Sequential Recommendation for Cold-start Users with Meta Transitional Learning}, | |||
| author={Wang, Jianling and Ding, Kaize and Caverlee, James}, | |||
| booktitle={Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval}, | |||
| pages={1783--1787}, | |||
| year={2021} | |||
| } | |||
| ``` | |||
| @@ -0,0 +1,63 @@ | |||
| from trainer import * | |||
| from utils import * | |||
| from sampler import * | |||
| import json | |||
| import argparse | |||
def get_params():
    """Parse command-line hyper-parameters.

    Returns:
        params (dict): argument name -> value, with 'device' replaced by a
            resolved ``torch.device``.
        args (argparse.Namespace): the raw parsed namespace.
    """
    args = argparse.ArgumentParser()
    args.add_argument("-data", "--dataset", default="electronics", type=str)
    args.add_argument("-seed", "--seed", default=None, type=int)
    args.add_argument("-K", "--K", default=3, type=int)  # number of shots (support size)
    args.add_argument("-dim", "--embed_dim", default=100, type=int)
    args.add_argument("-bs", "--batch_size", default=1024, type=int)
    args.add_argument("-lr", "--learning_rate", default=0.001, type=float)
    args.add_argument("-epo", "--epoch", default=100000, type=int)
    args.add_argument("-prt_epo", "--print_epoch", default=100, type=int)
    args.add_argument("-eval_epo", "--eval_epoch", default=1000, type=int)
    args.add_argument("-b", "--beta", default=5, type=float)
    args.add_argument("-m", "--margin", default=1, type=float)
    args.add_argument("-p", "--dropout_p", default=0.5, type=float)
    args.add_argument("-gpu", "--device", default=0, type=int)

    args = args.parse_args()
    params = dict(vars(args))

    # Bug fix: the original unconditionally built a CUDA device, which raises
    # on CPU-only hosts; fall back to CPU when CUDA is unavailable.
    if torch.cuda.is_available():
        params['device'] = torch.device('cuda:' + str(args.device))
    else:
        params['device'] = torch.device('cpu')
    return params, args
if __name__ == '__main__':
    params, args = get_params()

    # Make runs reproducible when an explicit seed is supplied.
    if params['seed'] is not None:
        SEED = params['seed']
        torch.manual_seed(SEED)
        torch.cuda.manual_seed(SEED)
        torch.backends.cudnn.deterministic = True
        np.random.seed(SEED)
        random.seed(SEED)

    # Load interactions; args.K is the number of shots kept per cold-start user.
    # NOTE(review): torch/np/random arrive here via the star imports above — confirm.
    user_train, usernum_train, itemnum, user_input_test, user_test, user_input_valid, user_valid = data_load(args.dataset, args.K)

    # Multiprocess sampler feeds training batches; plain loaders drive evaluation.
    sampler = WarpSampler(user_train, usernum_train, itemnum, batch_size=args.batch_size, maxlen=args.K, n_workers=3)
    sampler_test = DataLoader(user_input_test, user_test, itemnum, params)
    sampler_valid = DataLoader(user_input_valid, user_valid, itemnum, params)

    trainer = Trainer([sampler, sampler_valid, sampler_test], itemnum, params)
    trainer.train()

    # Terminate the daemon worker processes.
    sampler.close()
| @@ -0,0 +1,130 @@ | |||
| from collections import OrderedDict | |||
| import torch | |||
| import torch.nn as nn | |||
| from torch.nn import functional as F | |||
class Embedding(nn.Module):
    """Item embedding table shared across all users.

    The table holds ``num_ent + 1`` rows because index 0 is reserved for
    padding.
    """

    def __init__(self, num_ent, parameter):
        super(Embedding, self).__init__()
        self.device = parameter['device']
        self.es = parameter['embed_dim']
        self.embedding = nn.Embedding(num_ent + 1, self.es)
        nn.init.xavier_uniform_(self.embedding.weight)

    def forward(self, triples):
        # Each triple is (head, relation, tail); only head and tail items are
        # embedded — the relation (user id) is handled elsewhere.
        pairs = [[[triple[0], triple[2]] for triple in batch] for batch in triples]
        indices = torch.LongTensor(pairs).to(self.device)
        return self.embedding(indices)
class MetaLearner(nn.Module):
    """Maps K (head, tail) embedding pairs to a single relation embedding.

    Input is (batch, K, 2, embed_size); each pair is flattened, pushed through
    three fully-connected stages, and averaged over the K positions, producing
    a (batch, 1, 1, out_size) relation embedding.
    """

    def __init__(self, K, embed_size=100, num_hidden1=500, num_hidden2=200, out_size=100, dropout_p=0.5):
        super(MetaLearner, self).__init__()
        self.embed_size = embed_size
        self.K = K
        self.out_size = out_size
        # BatchNorm1d(K) normalizes over the K support positions.
        self.rel_fc1 = nn.Sequential(OrderedDict([
            ('fc', nn.Linear(2 * embed_size, num_hidden1)),
            ('bn', nn.BatchNorm1d(K)),
            ('relu', nn.LeakyReLU()),
            ('drop', nn.Dropout(p=dropout_p)),
        ]))
        self.rel_fc2 = nn.Sequential(OrderedDict([
            ('fc', nn.Linear(num_hidden1, num_hidden2)),
            ('bn', nn.BatchNorm1d(K)),
            ('relu', nn.LeakyReLU()),
            ('drop', nn.Dropout(p=dropout_p)),
        ]))
        self.rel_fc3 = nn.Sequential(OrderedDict([
            ('fc', nn.Linear(num_hidden2, out_size)),
            ('bn', nn.BatchNorm1d(K)),
        ]))
        # Same order as the original explicit calls, so RNG consumption and
        # checkpoint layout stay identical.
        for stage in (self.rel_fc1, self.rel_fc2, self.rel_fc3):
            nn.init.xavier_normal_(stage.fc.weight)

    def forward(self, inputs):
        batch = inputs.shape[0]
        num_pairs = inputs.shape[1]
        # Flatten each (head, tail) pair into one feature vector.
        hidden = inputs.contiguous().view(batch, num_pairs, -1)
        for stage in (self.rel_fc1, self.rel_fc2, self.rel_fc3):
            hidden = stage(hidden)
        pooled = torch.mean(hidden, 1)
        return pooled.view(batch, 1, 1, self.out_size)
class EmbeddingLearner(nn.Module):
    """Scores triples TransE-style: score = -||h + r - t||_2."""

    def __init__(self):
        super(EmbeddingLearner, self).__init__()

    def forward(self, h, t, r, pos_num):
        # Higher (less negative) score means a more plausible transition.
        distance = torch.norm(h + r - t, 2, -1)
        score = -distance.squeeze(2)
        # The first pos_num columns hold positive triples, the rest negatives.
        return score[:, :pos_num], score[:, pos_num:]
class MetaTL(nn.Module):
    """Meta Transitional Learning model (SIGIR'21).

    Each user is treated as a relation: a transition embedding is inferred
    from the user's few support transitions, adapted with one task-level
    gradient step, then used to score the query transition against negatives.
    """
    def __init__(self, itemnum, parameter):
        super(MetaTL, self).__init__()
        self.device = parameter['device']
        self.beta = parameter['beta']  # step size of the task-level (inner) update
        self.dropout_p = parameter['dropout_p']
        self.embed_dim = parameter['embed_dim']
        self.margin = parameter['margin']  # margin of the ranking loss
        self.embedding = Embedding(itemnum, parameter)
        # K interactions yield K - 1 transitions, hence parameter['K'] - 1.
        # NOTE(review): hidden/output sizes are hard-coded (100/500/200) and
        # ignore parameter['embed_dim'] != 100 — confirm this is intended.
        self.relation_learner = MetaLearner(parameter['K'] - 1, embed_size=100, num_hidden1=500,
                                            num_hidden2=200, out_size=100, dropout_p=self.dropout_p)
        self.embedding_learner = EmbeddingLearner()
        self.loss_func = nn.MarginRankingLoss(self.margin)
        # Cache of adapted relation embeddings keyed by relation (user) id,
        # reused across evaluation queries for the same user.
        self.rel_q_sharing = dict()

    def split_concat(self, positive, negative):
        """Concatenate positive and negative triples along the pair axis and
        split into head / tail tensors, each with a singleton dim inserted."""
        pos_neg_e1 = torch.cat([positive[:, :, 0, :],
                                negative[:, :, 0, :]], 1).unsqueeze(2)
        pos_neg_e2 = torch.cat([positive[:, :, 1, :],
                                negative[:, :, 1, :]], 1).unsqueeze(2)
        return pos_neg_e1, pos_neg_e2

    def forward(self, task, iseval=False, curr_rel=''):
        # transfer task string into embedding
        # task = (support, support_negative, query, negative) triple lists;
        # each becomes a tensor of (head, tail) item embeddings.
        support, support_negative, query, negative = [self.embedding(t) for t in task]

        K = support.shape[1]  # num of K (support transitions per task)
        num_sn = support_negative.shape[1]  # num of support negative
        num_q = query.shape[1]  # num of query
        num_n = negative.shape[1]  # num of query negative

        # Infer the task-specific transition embedding from the support set.
        rel = self.relation_learner(support)
        # rel is a non-leaf tensor: retain its gradient for the inner update below.
        rel.retain_grad()
        rel_s = rel.expand(-1, K + num_sn, -1, -1)

        if iseval and curr_rel != '' and curr_rel in self.rel_q_sharing.keys():
            # During eval, reuse the relation already adapted for this user.
            rel_q = self.rel_q_sharing[curr_rel]
        else:
            # One gradient step on the support loss adapts the relation (MAML-style).
            sup_neg_e1, sup_neg_e2 = self.split_concat(support, support_negative)
            p_score, n_score = self.embedding_learner(sup_neg_e1, sup_neg_e2, rel_s, K)
            y = torch.Tensor([1]).to(self.device)
            self.zero_grad()
            loss = self.loss_func(p_score, n_score, y)
            # retain_graph so the trainer's outer backward can reuse this graph.
            loss.backward(retain_graph=True)
            grad_meta = rel.grad
            rel_q = rel - self.beta * grad_meta
            self.rel_q_sharing[curr_rel] = rel_q

        rel_q = rel_q.expand(-1, num_q + num_n, -1, -1)
        que_neg_e1, que_neg_e2 = self.split_concat(query, negative)
        # Score the query transition against its negatives with the adapted relation.
        p_score, n_score = self.embedding_learner(que_neg_e1, que_neg_e2, rel_q, num_q)
        return p_score, n_score
| @@ -0,0 +1,124 @@ | |||
| import sys | |||
| import copy | |||
| import torch | |||
| import random | |||
| import numpy as np | |||
| from collections import defaultdict, Counter | |||
| from multiprocessing import Process, Queue | |||
def random_neq(l, r, s):
    """Draw a uniform random integer from [l, r) that is not a member of s.

    Assumes s does not cover the whole range; otherwise this loops forever.
    """
    while True:
        candidate = np.random.randint(l, r)
        if candidate not in s:
            return candidate
def sample_function_mixed(user_train, usernum, itemnum, batch_size, maxlen, result_queue, SEED):
    """Worker loop that endlessly builds training batches onto result_queue.

    Each task is (support, support_negative, query, negative, user); the user
    id plays the role of the relation. Runs forever — intended to be the
    target of a daemon multiprocessing.Process.
    """

    def pick_user():
        # Resample until we find a user with at least two interactions.
        user = np.random.randint(1, usernum + 1)
        while len(user_train[user]) <= 1:
            user = np.random.randint(1, usernum + 1)
        return user

    def sample():
        if random.random() < 0.5:
            # Contiguous sampling: a run of consecutive interactions.
            user = pick_user()
            if len(user_train[user]) < maxlen:
                nxt_idx = len(user_train[user]) - 1
            else:
                nxt_idx = np.random.randint(maxlen, len(user_train[user]))
            nxt = user_train[user][nxt_idx]
            # Bug fix: the original used min(0, nxt_idx - 1 - maxlen), which
            # yields a negative slice start (wrapping around / emptying the
            # window) whenever nxt_idx <= maxlen; max keeps the window valid.
            # NOTE(review): the window ends at nxt_idx - 1 (exclusive), so
            # item nxt_idx - 1 is skipped — preserved from the original;
            # confirm whether that is intended.
            window = user_train[user][max(0, nxt_idx - 1 - maxlen): nxt_idx - 1]
        else:
            # Sparse sampling: maxlen + 1 random interactions, kept in order.
            user = pick_user()
            list_idx = random.sample(range(len(user_train[user])), maxlen + 1)
            list_item = [user_train[user][i] for i in sorted(list_idx)]
            nxt = list_item[-1]
            window = list_item[:-1]

        seq = np.zeros([maxlen], dtype=np.int32)
        pos = np.zeros([maxlen], dtype=np.int32)
        neg = np.zeros([maxlen], dtype=np.int32)
        ts = set(user_train[user])
        idx = maxlen - 1
        # Fill right-aligned: seq holds the current item, pos the next one,
        # neg a sampled item the user never interacted with.
        for i in reversed(window):
            seq[idx] = i
            pos[idx] = nxt
            if nxt != 0:
                neg[idx] = random_neq(1, itemnum + 1, ts)
            nxt = i
            idx -= 1
            if idx == -1:
                break

        curr_rel = user
        support_triples, support_negative_triples, query_triples, negative_triples = [], [], [], []
        for idx in range(maxlen - 1):
            support_triples.append([seq[idx], curr_rel, pos[idx]])
            support_negative_triples.append([seq[idx], curr_rel, neg[idx]])
        # The last position is held out as the query transition.
        query_triples.append([seq[-1], curr_rel, pos[-1]])
        negative_triples.append([seq[-1], curr_rel, neg[-1]])
        return support_triples, support_negative_triples, query_triples, negative_triples, curr_rel

    np.random.seed(SEED)
    while True:
        one_batch = [sample() for _ in range(batch_size)]
        support, support_negative, query, negative, curr_rel = zip(*one_batch)
        result_queue.put(([support, support_negative, query, negative], curr_rel))
class WarpSampler(object):
    """Owns a pool of daemon worker processes that pre-generate training batches."""

    def __init__(self, User, usernum, itemnum, batch_size=64, maxlen=10, n_workers=1):
        # Bounded queue so workers pause once enough batches are buffered.
        self.result_queue = Queue(maxsize=n_workers * 10)
        self.processors = []
        for _ in range(n_workers):
            worker = Process(
                target=sample_function_mixed,
                args=(User, usernum, itemnum, batch_size, maxlen,
                      self.result_queue, np.random.randint(2e9)),
            )
            worker.daemon = True  # die together with the parent process
            worker.start()
            self.processors.append(worker)

    def next_batch(self):
        # Blocks until a worker has produced a batch.
        return self.result_queue.get()

    def close(self):
        # Workers loop forever, so terminate them before joining.
        for worker in self.processors:
            worker.terminate()
            worker.join()
| @@ -0,0 +1,140 @@ | |||
| from models import * | |||
| import os | |||
| import sys | |||
| import torch | |||
| import shutil | |||
| import logging | |||
| import numpy as np | |||
class Trainer:
    """Runs meta-training of MetaTL and ranking evaluation on valid/test users."""

    def __init__(self, data_loaders, itemnum, parameter):
        self.parameter = parameter
        # Data loaders: [train sampler, validation loader, test loader].
        self.train_data_loader = data_loaders[0]
        self.dev_data_loader = data_loaders[1]
        self.test_data_loader = data_loaders[2]
        # Hyper-parameters.
        self.batch_size = parameter['batch_size']
        self.learning_rate = parameter['learning_rate']
        self.epoch = parameter['epoch']
        self.print_epoch = parameter['print_epoch']
        self.eval_epoch = parameter['eval_epoch']
        self.device = parameter['device']

        self.MetaTL = MetaTL(itemnum, parameter)
        self.MetaTL.to(self.device)
        self.optimizer = torch.optim.Adam(self.MetaTL.parameters(), self.learning_rate)

    def rank_predict(self, data, x, ranks):
        """Accumulate ranking metrics for one evaluation task.

        x holds the scores of [negatives..., positive]; the positive triple's
        score is the LAST entry. Metric sums go into `data`, the positive's
        rank is appended to `ranks`.
        """
        # query_idx is the idx of the positive score.
        query_idx = x.shape[0] - 1
        # Sort descending: a more plausible triple has a higher score.
        _, idx = torch.sort(x, descending=True)
        rank = list(idx.cpu().numpy()).index(query_idx) + 1
        ranks.append(rank)
        if rank <= 10:
            data['Hits@10'] += 1
            data['NDCG@10'] += 1 / np.log2(rank + 1)
        if rank <= 5:
            data['Hits@5'] += 1
            data['NDCG@5'] += 1 / np.log2(rank + 1)
        if rank == 1:
            data['Hits@1'] += 1
            data['NDCG@1'] += 1 / np.log2(rank + 1)
        data['MRR'] += 1.0 / rank

    def do_one_step(self, task, iseval=False, curr_rel=''):
        """One optimizer step (training) or one scoring pass (evaluation).

        Returns (loss, p_score, n_score); all zeros if eval is requested
        without a relation id.
        """
        loss, p_score, n_score = 0, 0, 0
        if not iseval:
            self.optimizer.zero_grad()
            p_score, n_score = self.MetaTL(task, iseval, curr_rel)
            y = torch.Tensor([1]).to(self.device)
            loss = self.MetaTL.loss_func(p_score, n_score, y)
            loss.backward()
            self.optimizer.step()
        elif curr_rel != '':
            p_score, n_score = self.MetaTL(task, iseval, curr_rel)
            y = torch.Tensor([1]).to(self.device)
            loss = self.MetaTL.loss_func(p_score, n_score, y)
        return loss, p_score, n_score

    def train(self):
        """Main loop: one batch per epoch with periodic logging and evaluation."""
        for e in range(self.epoch):
            # Sample one batch from the training data loader.
            train_task, curr_rel = self.train_data_loader.next_batch()
            loss, _, _ = self.do_one_step(train_task, iseval=False, curr_rel=curr_rel)
            if e % self.print_epoch == 0:
                print("Epoch: {}\tLoss: {:.4f}".format(e, loss.item()))
            if e % self.eval_epoch == 0 and e != 0:
                print('Epoch {} Validating...'.format(e))
                self.eval(istest=False, epoch=e)
                print('Epoch {} Testing...'.format(e))
                self.eval(istest=True, epoch=e)
                # Bug fix: eval() switches the model to eval mode (disabling
                # dropout and batch-norm updates); restore training mode
                # before continuing to train.
                self.MetaTL.train()
        print('Finish')

    def eval(self, istest=False, epoch=None):
        """Rank the held-out item for every valid/test user; return the metric dict."""
        self.MetaTL.eval()
        # Drop relations adapted during any previous evaluation.
        self.MetaTL.rel_q_sharing = dict()

        data_loader = self.test_data_loader if istest else self.dev_data_loader
        data_loader.curr_tri_idx = 0

        data = {'MRR': 0, 'Hits@1': 0, 'Hits@5': 0, 'Hits@10': 0, 'NDCG@1': 0, 'NDCG@5': 0, 'NDCG@10': 0}
        ranks = []
        t = 0
        temp = dict()
        while True:
            eval_task, curr_rel = data_loader.next_one_on_eval()
            # The loader signals exhaustion with the sentinel 'EOT'.
            if eval_task == 'EOT':
                break
            t += 1
            _, p_score, n_score = self.do_one_step(eval_task, iseval=True, curr_rel=curr_rel)
            x = torch.cat([n_score, p_score], 1).squeeze()
            self.rank_predict(data, x, ranks)
            # Print running averages in place.
            for k in data.keys():
                temp[k] = data[k] / t
            sys.stdout.write("{}\tMRR: {:.3f}\tNDCG@10: {:.3f}\tNDCG@5: {:.3f}\tNDCG@1: {:.3f}\tHits@10: {:.3f}\tHits@5: {:.3f}\tHits@1: {:.3f}\r".format(
                t, temp['MRR'], temp['NDCG@10'], temp['NDCG@5'], temp['NDCG@1'], temp['Hits@10'], temp['Hits@5'], temp['Hits@1']))
            sys.stdout.flush()

        # Bug fix: guard against an empty loader to avoid ZeroDivisionError.
        if t == 0:
            return data
        for k in data.keys():
            data[k] = round(data[k] / t, 3)
        if istest:
            print("TEST: \tMRR: {:.3f}\tNDCG@10: {:.3f}\tNDCG@5: {:.3f}\tNDCG@1: {:.3f}\tHits@10: {:.3f}\tHits@5: {:.3f}\tHits@1: {:.3f}\r".format(
                temp['MRR'], temp['NDCG@10'], temp['NDCG@5'], temp['NDCG@1'], temp['Hits@10'], temp['Hits@5'], temp['Hits@1']))
        else:
            print("VALID: \tMRR: {:.3f}\tNDCG@10: {:.3f}\tNDCG@5: {:.3f}\tNDCG@1: {:.3f}\tHits@10: {:.3f}\tHits@5: {:.3f}\tHits@1: {:.3f}\r".format(
                temp['MRR'], temp['NDCG@10'], temp['NDCG@5'], temp['NDCG@1'], temp['Hits@10'], temp['Hits@5'], temp['Hits@1']))
        return data
| @@ -0,0 +1,142 @@ | |||
| import sys | |||
| import copy | |||
| import torch | |||
| import random | |||
| import numpy as np | |||
| from collections import defaultdict, Counter | |||
| from multiprocessing import Process, Queue | |||
| # sampler for batch generation | |||
def random_neq(l, r, s):
    """Sample a uniform random integer in [l, r) that does not appear in s.

    Assumes s does not cover the whole range; otherwise this loops forever.
    """
    while True:
        candidate = np.random.randint(l, r)
        if candidate not in s:
            return candidate
def trans_to_cuda(variable):
    """Move *variable* to the GPU when CUDA is available; otherwise return it unchanged."""
    return variable.cuda() if torch.cuda.is_available() else variable
def trans_to_cpu(variable):
    """Move *variable* to host memory when CUDA is available; otherwise return it unchanged."""
    return variable.cpu() if torch.cuda.is_available() else variable
# train/val/test data generation
def data_load(fname, num_sample):
    """Load training interactions and split new (cold-start) users into valid/test.

    Args:
        fname: dataset name; expects tab-separated (user, item, time) rows in
            data/<fname>/<fname>_train.csv and data/<fname>/<fname>_test_new_user.csv.
        num_sample: K — number of support interactions kept per new user; the
            (K+1)-th interaction becomes the held-out query item.

    Returns:
        [user_train, usernum, itemnum, user_input_test, user_test,
         user_input_valid, user_valid]
    """
    usernum = 0
    itemnum = 0
    user_train = defaultdict(list)
    # Assume user/item indices start from 1; 0 is reserved for padding.
    # Bug fix: use context managers so the file handles are always closed.
    with open('data/%s/%s_train.csv' % (fname, fname), 'r') as f:
        for line in f:
            u, i, t = line.rstrip().split('\t')
            u = int(u)
            i = int(i)
            usernum = max(u, usernum)
            itemnum = max(i, itemnum)
            user_train[u].append(i)

    # Read in new (cold-start) users for validation/testing.
    user_input_test = {}
    user_input_valid = {}
    user_valid = {}
    user_test = {}
    User_test_new = defaultdict(list)
    # NOTE(review): itemnum only grows from the training file; items seen only
    # by new users are assumed to also occur in training — confirm.
    with open('data/%s/%s_test_new_user.csv' % (fname, fname), 'r') as f:
        for line in f:
            u, i, t = line.rstrip().split('\t')
            User_test_new[int(u)].append(int(i))

    # Random ~30/70 split of new users into validation/test. Only users with
    # more than num_sample interactions can supply K support + 1 query item.
    for user in User_test_new:
        if len(User_test_new[user]) > num_sample:
            if random.random() < 0.3:
                user_input_valid[user] = User_test_new[user][:num_sample]
                user_valid[user] = [User_test_new[user][num_sample]]
            else:
                user_input_test[user] = User_test_new[user][:num_sample]
                user_test[user] = [User_test_new[user][num_sample]]

    return [user_train, usernum, itemnum, user_input_test, user_test,
            user_input_valid, user_valid]
class DataLoader(object):
    """Sequential eval loader: one (support, query, 100 negatives) task per user."""

    def __init__(self, user_train, user_test, itemnum, parameter):
        # Bug fix: the original initialized 'curr_rel_idx' while the iterator
        # reads 'curr_tri_idx' (it only worked because Trainer.eval assigned
        # the attribute externally). Initialize the attribute actually used,
        # and keep the old name for backward compatibility.
        self.curr_tri_idx = 0
        self.curr_rel_idx = 0
        self.bs = parameter['batch_size']
        self.maxlen = parameter['K']
        # Keep only users with enough support interactions and a held-out item.
        self.valid_user = []
        for u in user_train:
            if len(user_train[u]) < self.maxlen or len(user_test[u]) < 1:
                continue
            self.valid_user.append(u)
        self.num_tris = len(self.valid_user)
        self.train = user_train
        self.test = user_test
        self.itemnum = itemnum

    def next_one_on_eval(self):
        """Return the next user's eval task, or ("EOT", "EOT") when exhausted.

        The task is [support, support_negative, query, negative] (each wrapped
        in a batch dimension of 1); query holds the held-out test item and
        negative holds 100 sampled items the user never interacted with.
        """
        if self.curr_tri_idx == self.num_tris:
            return "EOT", "EOT"
        u = self.valid_user[self.curr_tri_idx]
        self.curr_tri_idx += 1

        seq = np.zeros([self.maxlen], dtype=np.int32)
        pos = np.zeros([self.maxlen - 1], dtype=np.int32)
        neg = np.zeros([self.maxlen - 1], dtype=np.int32)
        idx = self.maxlen - 1
        ts = set(self.train[u])
        # Fill seq right-aligned with the most recent interactions; pos/neg
        # hold the next item / a sampled negative for each support position.
        for i in reversed(self.train[u]):
            seq[idx] = i
            if idx > 0:
                pos[idx - 1] = i
                if i != 0:
                    neg[idx - 1] = random_neq(1, self.itemnum + 1, ts)
            idx -= 1
            if idx == -1:
                break

        curr_rel = u
        support_triples, support_negative_triples = [], []
        for idx in range(self.maxlen - 1):
            support_triples.append([seq[idx], curr_rel, pos[idx]])
            support_negative_triples.append([seq[idx], curr_rel, neg[idx]])

        rated = ts
        rated.add(0)  # never sample the padding item as a negative
        query_triples = [[seq[-1], curr_rel, self.test[u][0]]]
        negative_triples = []
        # 100 negatives the user has not interacted with, for the ranking eval.
        for _ in range(100):
            t = np.random.randint(1, self.itemnum + 1)
            while t in rated:
                t = np.random.randint(1, self.itemnum + 1)
            negative_triples.append([seq[-1], curr_rel, t])

        # Wrap every list in a batch dimension of 1.
        return [[support_triples], [support_negative_triples],
                [query_triples], [negative_triples]], curr_rel