
utils.py

import numpy as np
import scipy.sparse as sp
import torch
from sklearn.metrics import accuracy_score, f1_score
from os import listdir
from os.path import isfile, join
from sklearn.model_selection import train_test_split
def encode_onehot(labels):
    """Map an array of class labels to one-hot rows."""
    classes = set(labels)
    classes_dict = {c: np.identity(len(classes))[i, :]
                    for i, c in enumerate(classes)}
    labels_onehot = np.array(list(map(classes_dict.get, labels)),
                             dtype=np.int32)
    return labels_onehot
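# Illustrative sketch: encode_onehot(np.array(["a", "b", "a"])) yields a
# (3, 2) one-hot matrix such as [[1, 0], [0, 1], [1, 0]]. Note that the
# class-to-column assignment depends on set iteration order, so it is not
# stable across runs.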
def load_bet_adj(layer_num1, layer_num2, idx_map_l1, idx_map_l2, path="../data/cora/", dataset="cora"):
    """Load the between-layer (bipartite) adjacency for layers layer_num1 and layer_num2."""
    temp = "{}{}.bet" + str(layer_num1) + "_" + str(layer_num2)
    if not isfile(temp.format(path, dataset)):
        # No between-layer file for this pair; callers unpack a 2-tuple.
        return None, None
    edges_unordered = np.genfromtxt(temp.format(path, dataset), dtype=np.int32)
    N1 = len(idx_map_l1)
    N2 = len(idx_map_l2)
    # Remap raw node ids to row indices (layer 1) and column indices (layer 2).
    edges = np.array(list(map(idx_map_l1.get, edges_unordered[:, 0]))
                     + list(map(idx_map_l2.get, edges_unordered[:, 1])),
                     dtype=np.int32).reshape(edges_unordered.shape, order='F')
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(N1, N2), dtype=np.float32)
    adj_orig = sparse_mx_to_torch_sparse_tensor(adj)
    adj = normalize(adj)
    adj = sparse_mx_to_torch_sparse_tensor(adj)
    return adj, adj_orig
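# The "{path}{dataset}.bet{i}_{j}" file is read with np.genfromtxt, so it is
# expected to hold one edge per line as two whitespace-separated integer node
# ids, the first from layer i and the second from layer j, e.g.:
#   12  907
#   12  1358
# (the ids here are illustrative).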
def load_in_adj(layer_num, idx_map=None, path="../data/cora/", dataset="cora"):
    """Load the within-layer adjacency for layer layer_num."""
    temp = "{}{}.adj" + str(layer_num)
    edges_unordered = np.genfromtxt(temp.format(path, dataset),
                                    dtype=np.int32)
    if idx_map is None:
        idx = np.array(np.unique(edges_unordered.flatten()), dtype=np.int32)
        N = len(idx)
        idx_map = {j: i for i, j in enumerate(idx)}
    else:
        N = len(idx_map)
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                     dtype=np.int32).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(N, N), dtype=np.float32)
    # Build a symmetric adjacency matrix by taking the elementwise maximum
    # of adj and adj.T.
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    adj_orig = sparse_mx_to_torch_sparse_tensor(adj)
    # Add self-loops before row-normalizing, as in standard GCN preprocessing.
    adj = normalize(adj + sp.eye(adj.shape[0]))
    adj = sparse_mx_to_torch_sparse_tensor(adj)
    return adj, adj_orig
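# Worked example of the symmetrization line above: if adj[0, 1] == 3 and
# adj[1, 0] == 0, then (adj.T > adj) is True only at (1, 0), so 3 is added
# there and nothing is subtracted, leaving both entries equal to 3, i.e. the
# elementwise max of adj and adj.T.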
def load_features_labels(layer_num, path, dataset, N=-1):
    print('Loading {} dataset...'.format(dataset))
    temp = "{}{}.feat" + str(layer_num)
    idx_features_labels = np.genfromtxt(temp.format(path, dataset),
                                        dtype=np.dtype(str))
    temp = idx_features_labels[:, 1:-1]
    if temp.size == 0:
        # No feature columns in the file: fall back to N x N identity features.
        features = sp.csr_matrix(np.identity(N), dtype=np.float32)
    else:
        features = sp.csr_matrix(temp, dtype=np.float32)
    labels = encode_onehot(idx_features_labels[:, -1])
    # Map raw node ids (first column) to consecutive row indices.
    idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
    idx_map = {j: i for i, j in enumerate(idx)}
    features = normalize(features)
    features = sp.csr_matrix(features)
    features = sparse_mx_to_torch_sparse_tensor(features)
    labels = torch.LongTensor(np.where(labels)[1])
    return features, labels, idx_map
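# Each row of "{path}{dataset}.feat{i}" is expected to look like
#   <node id> <feature_1> ... <feature_d> <class label>
# i.e. an id column, zero or more feature columns, and a trailing label column.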
def train_val_test_split(N, val_size=0.2, test_size=0.2, random_state=1):
    idx_train_temp, idx_test = train_test_split(range(N), test_size=test_size,
                                                random_state=random_state)
    if val_size == 0:
        idx_train = idx_train_temp
        idx_val = []  # no validation split requested
    else:
        idx_train, idx_val = train_test_split(idx_train_temp, test_size=val_size,
                                              random_state=random_state)
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)
    return idx_train, idx_val, idx_test
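# Usage sketch: with N == 10 and the default sizes, this returns roughly
# 6/2/2 train/val/test index tensors (the validation split is taken out of
# the 80% that remains after the test split).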
def trains_vals_tests_split(n_layers, labels_sizes, val_size, test_size, random_state):
    idx_trains = []
    idx_vals = []
    idx_tests = []
    for i in range(n_layers):
        idx_train, idx_val, idx_test = train_val_test_split(labels_sizes[i], val_size, test_size, random_state)
        idx_trains.append(idx_train)
        idx_vals.append(idx_val)
        idx_tests.append(idx_test)
    return idx_trains, idx_vals, idx_tests
def load_data(path="../data/cora/", dataset="cora"):
    layers = 0
    adjs = []
    adjs_orig = []
    adjs_sizes = []
    adjs_pos_weights = []
    adjs_norms = []
    bet_adjs = []
    bet_adjs_orig = []
    bet_adjs_sizes = []
    bet_pos_weights = []
    bet_norms = []
    features = []
    features_sizes = []
    labels = []
    labels_nclass = []
    idx_maps = []
    # One ".adj<i>" file per layer, so counting them gives the layer count.
    for f in listdir(path):
        if isfile(join(path, f)) and 'adj' in f:
            layers += 1
    for i in range(layers):
        feature, label, idx_map = load_features_labels(i, path, dataset)
        adj, adj_orig = load_in_adj(i, idx_map, path, dataset)
        # Loss reweighting terms: pos_weight is the ratio of non-edges to
        # edges; norm rescales the reconstruction loss.
        pos_weight = float(adj.shape[0] * adj.shape[1] - adj.to_dense().sum()) / adj.to_dense().sum()
        norm = adj.shape[0] * adj.shape[1] / float((adj.shape[0] * adj.shape[1] - adj.to_dense().sum()) * 2)
        idx_maps.append(idx_map)
        adjs.append(adj)
        adjs_orig.append(adj_orig)
        adjs_sizes.append(tuple(adj.size()))
        adjs_pos_weights.append(pos_weight)
        adjs_norms.append(norm)
        features.append(feature)
        features_sizes.append(feature.shape[1])
        labels.append(label)
        labels_nclass.append(label.max().item() + 1)
    # Between-layer adjacencies for every pair (i, j) with i < j.
    for i in range(layers):
        for j in range(i + 1, layers):
            bet_adj, bet_adj_orig = load_bet_adj(i, j, idx_maps[i], idx_maps[j], path, dataset)
            bet_adjs.append(bet_adj)
            bet_adjs_orig.append(bet_adj_orig)
            bet_adjs_sizes.append(tuple(bet_adj.size()) if bet_adj is not None else (0, 0))
            if bet_adj is not None:
                pos_weight = float(
                    bet_adj.shape[0] * bet_adj.shape[1] - bet_adj.to_dense().sum()) / bet_adj.to_dense().sum()
                norm = bet_adj.shape[0] * bet_adj.shape[1] / float((bet_adj.shape[0] * bet_adj.shape[1] - bet_adj.to_dense().sum()) * 2)
            else:
                pos_weight = None
                norm = None
            bet_pos_weights.append(pos_weight)
            bet_norms.append(norm)
    return adjs, adjs_orig, adjs_sizes, adjs_pos_weights, adjs_norms, \
        bet_pos_weights, bet_norms, bet_adjs, bet_adjs_orig, \
        bet_adjs_sizes, features, features_sizes, labels, labels_nclass
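# Usage sketch (assuming the ../data/cora/ layout described above):
#   (adjs, adjs_orig, adjs_sizes, adjs_pos_weights, adjs_norms,
#    bet_pos_weights, bet_norms, bet_adjs, bet_adjs_orig, bet_adjs_sizes,
#    features, features_sizes, labels, labels_nclass) = load_data()
# The pos_weight/norm pairs mirror the class-balancing terms commonly used
# with a weighted BCE reconstruction loss in (V)GAE-style models.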
def normalize(mx):
    """Row-normalize sparse matrix"""
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.  # leave all-zero rows at zero
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx)
    return mx
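# Worked example: normalize turns each row into a distribution over its
# nonzero entries, e.g. the row [1, 3] becomes [0.25, 0.75]; in matrix form
# this is D^{-1} A with D the diagonal matrix of row sums.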
def class_accuracy(output, labels, type=None):
    preds = output.max(1)[1].type_as(labels)
    return accuracy_score(labels.data, preds)

def class_f1(output, labels, type='micro'):
    preds = output.max(1)[1].type_as(labels)
    return f1_score(labels.data, preds, average=type)
def layer_accuracy(output, real, type=None):
    # Binarize predictions at 0.5 and targets at > 0 before scoring.
    preds = output.data.clone()
    true = real.data.clone()
    preds[output < 0.5] = 0
    preds[output >= 0.5] = 1
    true[real > 0] = 1
    return accuracy_score(true, preds)

def layer_f1(output, real, type='micro'):
    # Same binarization as layer_accuracy, scored with F1 instead.
    preds = output.data.clone()
    true = real.data.clone()
    preds[output < 0.5] = 0
    preds[output >= 0.5] = 1
    true[real > 0] = 1
    return f1_score(true, preds, average=type)
def writer_data(values, writer, epoch, type, name):
    try:
        for i, j in enumerate(values):
            writer.add_scalar(type + "/" + name + str(i), j, epoch)
    except TypeError:
        # values is a scalar rather than an iterable
        writer.add_scalar(type + "/" + name, values, epoch)

# type = in_class, in_struc, bet_struc
def dict_to_writer(stats, writer, epoch, type, train_test_val):
    for key, value in stats.items():
        type_str = train_test_val + "/" + type
        writer_data(value, writer, epoch, type_str, key)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    return torch.sparse.FloatTensor(indices, values, shape)
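# Note: torch.sparse.FloatTensor is the legacy constructor; on recent PyTorch
# versions torch.sparse_coo_tensor(indices, values, shape) builds the
# equivalent COO tensor.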
def sparse_to_tuple(sparse_mx):
    if not sp.isspmatrix_coo(sparse_mx):
        sparse_mx = sparse_mx.tocoo()
    coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()
    values = sparse_mx.data
    shape = sparse_mx.shape
    return coords, values, shape
def between_index(n_inputs, i, j):
    # Flat index of the pair (i, j), i < j, in the row-major enumeration of
    # all unordered layer pairs (matching the order used in load_data).
    return int(i * n_inputs - (i * (i + 1) / 2) + (j - i - 1))
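# Example: with n_inputs == 3 the pairs enumerate as (0, 1) -> 0,
# (0, 2) -> 1, (1, 2) -> 2, matching between_index(3, i, j).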
def gather_edges(pos_edges, neg_edges):
    # Concatenate positive and negative edge lists into a single
    # [row_indices, col_indices] pair.
    all_edges = [[], []]
    all_edges[0].extend([idx_i[0] for idx_i in pos_edges])
    all_edges[1].extend([idx_i[1] for idx_i in pos_edges])
    all_edges[0].extend([idx_i[0] for idx_i in neg_edges])
    all_edges[1].extend([idx_i[1] for idx_i in neg_edges])
    return all_edges
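# Example: gather_edges([(0, 1), (2, 3)], [(4, 5)]) returns
# [[0, 2, 4], [1, 3, 5]] (positives first, then negatives).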