
models.py 2.2KB

import os
import sys

import torch
import torch.nn as nn
import torch.nn.functional as F

PROJ_DIR = os.path.dirname(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.insert(0, PROJ_DIR)

from drug.models import GCN
from drug.datasets import DDInteractionDataset
from model.utils import get_FP_by_negative_index


class Connector(nn.Module):
    """Builds the per-sample feature vector from two drug embeddings and the cell-line features."""

    def __init__(self, gpu_id=None):
        super(Connector, self).__init__()
        # self.ddiDataset = DDInteractionDataset(gpu_id=gpu_id)
        self.gcn = None
        # Cell line features
        # np.load('cell_feat.npy')

    def forward(self, drug1_idx, drug2_idx, cell_feat, subgraph):
        # Lazily construct the GCN once the subgraph (and its feature width) is known.
        if self.gcn is None:
            self.gcn = GCN(subgraph.num_features, subgraph.num_features // 2)

        x = subgraph.get().x
        edge_index = subgraph.edge_index
        x = self.gcn(x, edge_index)

        drug1_idx = torch.flatten(drug1_idx)
        drug2_idx = torch.flatten(drug2_idx)
        drug1_feat = x[drug1_idx]
        drug2_feat = x[drug2_idx]

        # Negative indices mark drugs that are not in the subgraph; fall back to their fingerprints.
        for i, idx in enumerate(drug1_idx):
            if idx < 0:
                drug1_feat[i] = get_FP_by_negative_index(idx)
        for i, idx in enumerate(drug2_idx):
            if idx < 0:
                drug2_feat[i] = get_FP_by_negative_index(idx)

        feat = torch.cat([drug1_feat, drug2_feat, cell_feat], 1)
        return feat


class MLP(nn.Module):
    """Synergy prediction head: Connector features -> three linear layers -> scalar score."""

    def __init__(self, input_size: int, hidden_size: int, gpu_id=None):
        super(MLP, self).__init__()
        self.layers = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_size),
            nn.Linear(hidden_size, hidden_size // 2),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_size // 2),
            nn.Linear(hidden_size // 2, 1)
        )
        self.connector = Connector(gpu_id)

    # prev input: self, drug1_feat: torch.Tensor, drug2_feat: torch.Tensor,
    #             cell_feat: torch.Tensor, subgraph: related subgraph for the batch
    def forward(self, drug1_idx, drug2_idx, cell_feat, subgraph):
        feat = self.connector(drug1_idx, drug2_idx, cell_feat, subgraph)
        out = self.layers(feat)
        return out


# other PRODeepSyn models have been deleted for now
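As a rough usage sketch (not part of the repository): the snippet below drives MLP end-to-end with a toy subgraph object, to show the expected call signature forward(drug1_idx, drug2_idx, cell_feat, subgraph) and how input_size relates to the GCN output width and the cell-line feature width. The ToySubgraph class, the tensor sizes, the import path, and the assumption that drug.models.GCN projects node features to num_features // 2 dimensions are all hypothetical; the real pipeline presumably builds the subgraph from DDInteractionDataset.

    # Hypothetical smoke test -- assumes the repo is on sys.path and that
    # drug.models.GCN maps node features from num_features to num_features // 2.
    import torch
    from model.models import MLP  # adjust to wherever this file lives in the repo

    class ToySubgraph:
        """Minimal stand-in for the batch subgraph object Connector expects."""
        def __init__(self, x, edge_index):
            self.x = x
            self.edge_index = edge_index
            self.num_features = x.size(1)

        def get(self):
            # Connector reads node features via subgraph.get().x
            return self

    num_nodes, num_features, cell_dim, batch = 10, 32, 8, 4
    subgraph = ToySubgraph(
        x=torch.randn(num_nodes, num_features),
        edge_index=torch.randint(0, num_nodes, (2, 20)),
    )

    # With a GCN output of num_features // 2 per drug, the concatenated feature
    # is 2 * (num_features // 2) + cell_dim wide.
    model = MLP(input_size=2 * (num_features // 2) + cell_dim, hidden_size=64)

    drug1_idx = torch.randint(0, num_nodes, (batch, 1))
    drug2_idx = torch.randint(0, num_nodes, (batch, 1))
    cell_feat = torch.randn(batch, cell_dim)

    scores = model(drug1_idx, drug2_idx, cell_feat, subgraph)
    print(scores.shape)  # expected: torch.Size([4, 1])

One consequence of the lazy GCN construction is that the GCN's parameters only exist after the first forward pass, so an optimizer created before that pass will not include them and any device placement has to happen (or be repeated) afterwards.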