import random

import numpy as np
from sklearn.metrics import ndcg_score
from torch.nn import L1Loss

from fast_adapt import fast_adapt


def hyper_test(embedding, head, total_dataset, adaptation_step):
    """Evaluate the meta-learned model on a set of test tasks.

    Each task in `total_dataset` is a (support_x, support_y, query_x, query_y)
    tuple. A clone of `head` is adapted on each task's support set, then scored
    on its query set. Returns the mean MAE, NDCG@1, and NDCG@3 across tasks.
    """
    test_set_size = len(total_dataset)
    random.shuffle(total_dataset)  # note: shuffles the caller's list in place
    a, b, c, d = zip(*total_dataset)

    losses_q = []
    ndcgs1 = []
    ndcgs3 = []
    l1 = L1Loss(reduction='mean')  # hoisted out of the loop; it is stateless

    head.eval()
    for i in range(test_set_size):
        try:
            supp_xs = a[i].cuda()
            supp_ys = b[i].cuda()
            query_xs = c[i].cuda()
            query_ys = d[i].cuda()
        except IndexError:
            print("index error in test method")
            continue

        # Clone the head so per-task adaptation never mutates the shared weights.
        learner = head.clone()
        temp_sxs = embedding(supp_xs)
        temp_qxs = embedding(query_xs)
        _, predictions = fast_adapt(
            learner, temp_sxs, temp_qxs, supp_ys, query_ys,
            adaptation_step, get_predictions=True,
        )

        # Mean absolute error on the query set.
        loss_q = l1(predictions.view(-1), query_ys)
        losses_q.append(float(loss_q))

        # NDCG treats each task's query set as a single ranked list.
        y_true = query_ys.cpu().detach().numpy()
        y_pred = predictions.view(-1).cpu().detach().numpy()
        ndcgs1.append(float(ndcg_score([y_true], [y_pred], k=1)))
        ndcgs3.append(float(ndcg_score([y_true], [y_pred], k=3)))

        # Free per-task tensors before the next iteration.
        del supp_xs, supp_ys, query_xs, query_ys, predictions, y_true, y_pred, loss_q

    # Aggregate per-task metrics, with sentinel fallbacks when no task was
    # evaluated. Explicit empty checks are needed here: np.mean([]) silently
    # returns nan with a RuntimeWarning rather than raising, so a bare
    # try/except would never trigger the fallback.
    loss_q_mean = float(np.mean(losses_q)) if losses_q else 100.0
    ndcg1 = float(np.mean(ndcgs1)) if ndcgs1 else 0.0
    ndcg3 = float(np.mean(ndcgs3)) if ndcgs3 else 0.0

    head.train()
    return loss_q_mean, ndcg1, ndcg3
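

# --- Hypothetical reference sketch (assumption, not the project's code) ---
# The real `fast_adapt` is imported from the local fast_adapt module above;
# this sketch only illustrates the contract hyper_test relies on. It assumes
# the learn2learn MAML pattern (the cloned learner exposes .adapt() for
# inner-loop updates) and an MSE inner objective; both are illustrative
# assumptions, and the actual helper may differ.
import torch.nn.functional as F


def fast_adapt_sketch(learner, support_x, query_x, support_y, query_y,
                      adaptation_steps, get_predictions=False):
    # Inner loop: a few gradient steps on the support set (assumed objective).
    for _ in range(adaptation_steps):
        support_loss = F.mse_loss(learner(support_x).view(-1), support_y)
        learner.adapt(support_loss)  # learn2learn MAML: update the clone in place
    # Score the adapted learner on the held-out query set.
    predictions = learner(query_x)
    evaluation_error = F.mse_loss(predictions.view(-1), query_y)
    if get_predictions:
        return evaluation_error, predictions
    return evaluation_error

# Example call, mirroring how hyper_test uses the real helper:
#     learner = head.clone()
#     err, preds = fast_adapt_sketch(learner, sx, qx, sy, qy,
#                                    adaptation_steps=5, get_predictions=True)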