12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091 |
- import os
- import torch
- import pickle
- import random
- from options import config, states
- from torch.nn import functional as F
- from torch.nn import L1Loss
- import matchzoo as mz
- import numpy as np
- from fast_adapt import fast_adapt
- from sklearn.metrics import ndcg_score
-
-
def test(embedding, head, total_dataset, batch_size, num_epoch, adaptation_step=config['inner']):
    """Evaluate a meta-learned recommender head on a set of test tasks.

    For each task, clones ``head``, adapts the clone on the task's support
    set via ``fast_adapt``, then scores its predictions on the query set.
    Prints and returns the mean L1 (MAE) loss and mean nDCG@1 / nDCG@3
    across all tasks.

    Args:
        embedding: module mapping raw inputs to embedded representations.
        head: meta-learned model exposing ``.clone()`` (presumably a
            learn2learn-style MAML head — TODO confirm against caller).
        total_dataset: list of (support_x, support_y, query_x, query_y)
            tuples, one per task. Shuffled in place.
        batch_size: unused; kept for backward-compatible interface.
        num_epoch: unused; kept for backward-compatible interface.
        adaptation_step: number of inner-loop adaptation steps passed to
            ``fast_adapt``. Defaults to ``config['inner']``.

    Returns:
        Tuple of floats ``(mean_l1, mean_ndcg1, mean_ndcg3)``.
    """
    random.shuffle(total_dataset)
    supp_x_list, supp_y_list, query_x_list, query_y_list = zip(*total_dataset)

    # Hoisted out of the loop: the loss module is stateless and
    # loop-invariant. (Original rebuilt it every iteration.)
    l1 = L1Loss(reduction='mean')

    losses_q = []
    ndcgs1 = []
    ndcgs3 = []

    # No IndexError guard needed: zip(*total_dataset) yields four
    # equal-length tuples, so every index in range(len(...)) is valid.
    for task_idx in range(len(total_dataset)):
        supp_xs = supp_x_list[task_idx]
        supp_ys = supp_y_list[task_idx]
        query_xs = query_x_list[task_idx]
        query_ys = query_y_list[task_idx]
        if config['use_cuda']:
            supp_xs = supp_xs.cuda()
            supp_ys = supp_ys.cuda()
            query_xs = query_xs.cuda()
            query_ys = query_ys.cuda()

        learner = head.clone()
        temp_sxs = embedding(supp_xs)
        temp_qxs = embedding(query_xs)

        # BUG FIX: the original passed config['inner'] here, silently
        # ignoring the ``adaptation_step`` parameter (which it copied into
        # an unused local). Honor the parameter instead.
        evaluation_error, predictions = fast_adapt(learner,
                                                   temp_sxs,
                                                   temp_qxs,
                                                   supp_ys,
                                                   query_ys,
                                                   adaptation_step,
                                                   get_predictions=True)

        predictions = predictions.view(-1)
        loss_q = l1(predictions, query_ys)
        losses_q.append(float(loss_q))

        y_true = query_ys.cpu().detach().numpy()
        y_pred = predictions.cpu().detach().numpy()
        ndcgs1.append(float(ndcg_score([y_true], [y_pred], k=1, sample_weight=None, ignore_ties=False)))
        ndcgs3.append(float(ndcg_score([y_true], [y_pred], k=3, sample_weight=None, ignore_ties=False)))

        # Release per-task tensors before the next iteration to keep
        # peak (GPU) memory down.
        del supp_xs, supp_ys, query_xs, query_ys, predictions, y_true, y_pred, loss_q

    mean_l1 = np.array(losses_q).mean()
    mean_ndcg1 = np.array(ndcgs1).mean()
    mean_ndcg3 = np.array(ndcgs3).mean()
    # FIX: original label said "mean of mse" but the metric is L1 (MAE).
    print("mean of mae (L1): ", mean_l1)
    print("nDCG1: ", mean_ndcg1)
    print("nDCG3: ", mean_ndcg3)
    return mean_l1, mean_ndcg1, mean_ndcg3
-
|