
models.py

import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import sys

# Make the project root importable so the drug/ and model/ packages resolve.
PROJ_DIR = os.path.dirname(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.insert(0, PROJ_DIR)

from drug.models import GCN
from drug.datasets import DDInteractionDataset
from model.utils import get_FP_by_negative_index


class Connector(nn.Module):
    """Builds the joint (drug1, drug2, cell line) feature vector fed to the MLP."""

    def __init__(self, gpu_id=None):
        super(Connector, self).__init__()
        self.gpu_id = gpu_id
        self.ddiDataset = DDInteractionDataset()
        # GCN over the drug-drug interaction graph; output width is half the input width.
        self.gcn = GCN(self.ddiDataset.num_features, self.ddiDataset.num_features // 2, gpu_id)
        # Cell line features
        # np.load('cell_feat.npy')

    def forward(self, drug1_idx, drug2_idx, cell_feat):
        # Run the GCN over the full DDI graph to get one embedding per drug node.
        x = self.ddiDataset.get().x
        edge_index = self.ddiDataset.get().edge_index
        x = self.gcn(x, edge_index)

        drug1_idx = torch.flatten(drug1_idx)
        drug2_idx = torch.flatten(drug2_idx)

        # Gather one embedding row per requested drug index.
        # drug1_feat = x[drug1_idx]
        # drug2_feat = x[drug2_idx]
        drug1_feat = torch.empty((len(drug1_idx), len(x[0])))
        drug2_feat = torch.empty((len(drug2_idx), len(x[0])))
        for index, element in enumerate(drug1_idx):
            drug1_feat[index] = x[element]
        for index, element in enumerate(drug2_idx):
            drug2_feat[index] = x[element]

        if self.gpu_id is not None:
            drug1_feat = drug1_feat.cuda(self.gpu_id)
            drug2_feat = drug2_feat.cuda(self.gpu_id)

        # Drugs with a negative index are not in the DDI graph; fall back to
        # their fingerprint features instead of the GCN embedding.
        for i, idx in enumerate(drug1_idx):
            if idx < 0:
                drug1_feat[i] = get_FP_by_negative_index(idx)
        for i, idx in enumerate(drug2_idx):
            if idx < 0:
                drug2_feat[i] = get_FP_by_negative_index(idx)

        feat = torch.cat([drug1_feat, drug2_feat, cell_feat], 1)
        return feat
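
# NOTE: feat is the per-sample input to the MLP below. Its width works out to
# 2 * (num_features // 2) + cell_feat_dim: each drug contributes one GCN
# embedding (or, for negative indices, a fingerprint from
# get_FP_by_negative_index, which presumably has the same width so the row
# assignment above is shape-compatible), and the cell line features are
# appended last.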


class MLP(nn.Module):
    def __init__(self, input_size: int, hidden_size: int, gpu_id=None):
        super(MLP, self).__init__()
        self.layers = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_size),
            nn.Linear(hidden_size, hidden_size // 2),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_size // 2),
            nn.Linear(hidden_size // 2, 1)
        )
        self.connector = Connector(gpu_id)

    def forward(self, drug1_idx, drug2_idx, cell_feat):  # prev input: drug1_feat: torch.Tensor, drug2_feat: torch.Tensor, cell_feat: torch.Tensor
        feat = self.connector(drug1_idx, drug2_idx, cell_feat)
        out = self.layers(feat)
        return out

# other PRODeepSyn models have been deleted for now
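
# --- Minimal usage sketch (illustrative only) ---------------------------------
# The dimensions below are assumptions for demonstration, not values taken from
# this repo: gcn_out_dim stands in for DDInteractionDataset().num_features // 2
# and cell_feat_dim for the width of the cell line feature vectors. Actually
# running this requires the DDI dataset files used by DDInteractionDataset.
if __name__ == "__main__":
    gcn_out_dim = 64              # assumed GCN output width per drug
    cell_feat_dim = 768           # assumed cell line feature width
    input_size = 2 * gcn_out_dim + cell_feat_dim
    model = MLP(input_size=input_size, hidden_size=2048)

    drug1_idx = torch.tensor([[0], [3]])        # row indices into the DDI graph
    drug2_idx = torch.tensor([[1], [-2]])       # negative index -> fingerprint fallback
    cell_feat = torch.randn(2, cell_feat_dim)   # placeholder cell line features
    scores = model(drug1_idx, drug2_idx, cell_feat)
    print(scores.shape)                         # expected: torch.Size([2, 1])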