import os
import torch
import pickle
import random
from options import config, states
from torch.nn import functional as F
from torch.nn import L1Loss
import matchzoo as mz
import numpy as np
from fast_adapt import fast_adapt


def test(embedding, head, total_dataset, batch_size, num_epoch):
    """Evaluate the meta-learned model on a set of test tasks.

    Each task is adapted locally on its support set, then scored on its
    query set with MAE (L1) and nDCG@1 / nDCG@3. Note that batch_size and
    num_epoch are accepted for interface parity but are not used here.
    """
    test_set_size = len(total_dataset)
    random.shuffle(total_dataset)
    # Unpack tasks into support inputs/targets and query inputs/targets.
    a, b, c, d = zip(*total_dataset)
    losses_q = []
    ndcgs1 = []
    ndcgs3 = []

    l1 = L1Loss(reduction='mean')

    for iterator in range(test_set_size):
        try:
            supp_xs = a[iterator].cuda()
            supp_ys = b[iterator].cuda()
            query_xs = c[iterator].cuda()
            query_ys = d[iterator].cuda()
        except IndexError:
            print("index error in test method")
            continue

        # Clone the head so per-task adaptation does not modify the shared
        # weights, then adapt on the support set for the configured number
        # of inner-loop steps.
        num_local_update = config['inner']
        learner = head.clone()
        temp_sxs = embedding(supp_xs)
        temp_qxs = embedding(query_xs)

        evaluation_error, predictions = fast_adapt(learner,
                                                   temp_sxs,
                                                   temp_qxs,
                                                   supp_ys,
                                                   query_ys,
                                                   num_local_update,
                                                   get_predictions=True)

        # Mean absolute error on the query set.
        loss_q = l1(predictions.view(-1), query_ys)
        losses_q.append(float(loss_q))

        # Ranking quality on the query set.
        y_true = query_ys.cpu().detach().numpy()
        y_pred = predictions.view(-1).cpu().detach().numpy()
        ndcgs1.append(float(mz.metrics.NormalizedDiscountedCumulativeGain(k=1)(y_true, y_pred)))
        ndcgs3.append(float(mz.metrics.NormalizedDiscountedCumulativeGain(k=3)(y_true, y_pred)))

        # Release per-task tensors before the next iteration.
        del supp_xs, supp_ys, query_xs, query_ys, predictions, y_true, y_pred, loss_q
        # torch.cuda.empty_cache()

    # Aggregate metrics over all test tasks.
    print("mean MAE (L1): ", np.array(losses_q).mean())
    print("nDCG@1: ", np.array(ndcgs1).mean())
    print("nDCG@3: ", np.array(ndcgs3).mean())
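
A minimal driver sketch showing how test() might be invoked. It assumes the evaluation tasks were pickled as a list of (support_x, support_y, query_x, query_y) tensor tuples and that the embedding and head modules are built elsewhere in the project; the file path and the build_* constructors below are placeholders, not part of this module.

if __name__ == '__main__':
    # Hypothetical usage sketch; the path and the constructors are placeholders.
    with open('test_dataset.pkl', 'rb') as f:        # assumed pickle layout
        total_dataset = pickle.load(f)               # list of (supp_x, supp_y, query_x, query_y)

    embedding = build_embedding_module()             # placeholder: project-specific embedding model
    head = build_head_module()                       # placeholder: must support .clone() for adaptation

    test(embedding, head, total_dataset, batch_size=32, num_epoch=1)  # batch_size/num_epoch unused here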