Extend the MeLU code to run different meta-learning algorithms and hyperparameters

learnToLearnTest.py
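The script below reads two run-time switches from options.py: config['inner'] (the number of local adaptation steps used at test time) and config['use_cuda']. A minimal options.py sketch that would satisfy these imports is shown here; the values are placeholders for illustration, not the repository's actual settings.

# options.py (sketch with placeholder values, not the repository's real configuration)
import torch

config = {
    'inner': 1,                             # local (inner-loop) adaptation steps used at test time
    'use_cuda': torch.cuda.is_available(),  # move task tensors to the GPU when available
}

# learnToLearnTest.py also imports `states`, although it is unused in that file;
# an empty placeholder keeps the import working in this sketch.
states = []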

import os
import torch
import pickle
import random
from options import config, states
from torch.nn import functional as F
from torch.nn import L1Loss
import matchzoo as mz
import numpy as np
from fast_adapt import fast_adapt
from sklearn.metrics import ndcg_score
# note: os, pickle, F, mz, and states are imported in the original file but not used in this function


def test(embedding, head, total_dataset, batch_size, num_epoch, adaptation_step=config['inner']):
    """Adapt the meta-learned head on each task's support set and report
    MAE and nDCG@1 / nDCG@3 on the corresponding query set."""
    test_set_size = len(total_dataset)
    random.shuffle(total_dataset)
    # each task is a (support_x, support_y, query_x, query_y) tuple
    a, b, c, d = zip(*total_dataset)
    losses_q = []
    ndcgs11 = []  # nDCG@1 per task (sklearn's ndcg_score replaced the earlier matchzoo-based ndcgs1/ndcgs3 metrics)
    ndcgs33 = []  # nDCG@3 per task
    for iterator in range(test_set_size):
        if config['use_cuda']:
            try:
                supp_xs = a[iterator].cuda()
                supp_ys = b[iterator].cuda()
                query_xs = c[iterator].cuda()
                query_ys = d[iterator].cuda()
            except IndexError:
                print("index error in test method")
                continue
        else:
            try:
                supp_xs = a[iterator]
                supp_ys = b[iterator]
                query_xs = c[iterator]
                query_ys = d[iterator]
            except IndexError:
                print("index error in test method")
                continue
        num_local_update = adaptation_step
        # clone the meta-learned head so per-task adaptation does not modify the shared parameters
        learner = head.clone()
        temp_sxs = embedding(supp_xs)
        temp_qxs = embedding(query_xs)
        evaluation_error, predictions = fast_adapt(learner,
                                                   temp_sxs,
                                                   temp_qxs,
                                                   supp_ys,
                                                   query_ys,
                                                   adaptation_step,
                                                   get_predictions=True)
        # query-set error; L1Loss computes the mean absolute error (despite the "mse" label printed below)
        l1 = L1Loss(reduction='mean')
        loss_q = l1(predictions.view(-1), query_ys)
        losses_q.append(float(loss_q))
        predictions = predictions.view(-1)
        y_true = query_ys.cpu().detach().numpy()
        y_pred = predictions.cpu().detach().numpy()
        ndcgs11.append(float(ndcg_score([y_true], [y_pred], k=1, sample_weight=None, ignore_ties=False)))
        ndcgs33.append(float(ndcg_score([y_true], [y_pred], k=3, sample_weight=None, ignore_ties=False)))
        del supp_xs, supp_ys, query_xs, query_ys, predictions, y_true, y_pred, loss_q
        # torch.cuda.empty_cache()

    # aggregate metrics over all test tasks
    losses_q = np.array(losses_q).mean()
    print("mean of mse: ", losses_q)
    print("nDCG1: ", np.array(ndcgs11).mean())
    print("nDCG3: ", np.array(ndcgs33).mean())
    print("is there nan? " + str(np.any(np.isnan(ndcgs11))))
    print("is there nan? " + str(np.any(np.isnan(ndcgs33))))
    print("is there nan? " + str(np.any(np.isnan(losses_q))))