DeepTraCDR: Predicting Cancer Drug Response using multimodal deep learning with Transformers
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

DeepTraCDR_model.py 11KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268
  1. import numpy as np
  2. import torch
  3. import torch.nn as nn
  4. import torch.nn.functional as F
  5. from torch.nn import MultiheadAttention, TransformerEncoder, TransformerEncoderLayer
  6. from utils import torch_corr_x_y, cross_entropy_loss, prototypical_loss
  7. from sklearn.metrics import roc_auc_score, average_precision_score, precision_score, recall_score, f1_score
  8. class ConstructAdjMatrix(nn.Module):
  9. """Constructs normalized adjacency matrices for graph-based computations."""
  10. def __init__(self, original_adj_mat, device="cpu"):
  11. super().__init__()
  12. self.adj = torch.from_numpy(original_adj_mat).float().to(device) if isinstance(original_adj_mat, np.ndarray) else original_adj_mat.to(device)
  13. self.device = device
  14. def forward(self):
  15. """Computes normalized Laplacian matrices for cells and drugs."""
  16. with torch.no_grad():
  17. # Compute degree matrices for normalization
  18. d_x = torch.diag(torch.pow(torch.sum(self.adj, dim=1) + 1, -0.5))
  19. d_y = torch.diag(torch.pow(torch.sum(self.adj, dim=0) + 1, -0.5))
  20. # Aggregate cell and drug Laplacian matrices
  21. agg_cell_lp = torch.mm(torch.mm(d_x, self.adj), d_y)
  22. agg_drug_lp = torch.mm(torch.mm(d_y, self.adj.T), d_x)
  23. # Self-loop matrices for cells and drugs
  24. self_cell_lp = torch.diag(torch.add(torch.pow(torch.sum(self.adj, dim=1) + 1, -1), 1))
  25. self_drug_lp = torch.diag(torch.add(torch.pow(torch.sum(self.adj, dim=0) + 1, -1), 1))
  26. return agg_cell_lp.to(self.device), agg_drug_lp.to(self.device), self_cell_lp.to(self.device), self_drug_lp.to(self.device)
  27. class LoadFeature(nn.Module):
  28. """Loads and processes cell expression and drug fingerprint features."""
  29. def __init__(self, cell_exprs, drug_fingerprints, device="cpu"):
  30. super().__init__()
  31. self.device = device
  32. self.cell_exprs = torch.from_numpy(cell_exprs).float().to(device)
  33. self.drug_fingerprints = [torch.from_numpy(fp).float().to(device) for fp in drug_fingerprints]
  34. # Drug feature projection layers
  35. self.drug_proj = nn.ModuleList([
  36. nn.Sequential(
  37. nn.Linear(fp.shape[1], 512),
  38. nn.BatchNorm1d(512),
  39. nn.GELU(),
  40. nn.Dropout(0.3)
  41. ).to(device) for fp in drug_fingerprints
  42. ])
  43. # Transformer for drug feature encoding
  44. self.transformer = TransformerEncoder(
  45. TransformerEncoderLayer(d_model=512, nhead=8, dim_feedforward=2048, batch_first=True),
  46. num_layers=3
  47. ).to(device)
  48. # Normalization layers
  49. self.cell_norm = nn.LayerNorm(cell_exprs.shape[1]).to(device)
  50. self.drug_norm = nn.LayerNorm(512).to(device)
  51. # Cell feature encoder
  52. self.cell_encoder = nn.Sequential(
  53. nn.Linear(cell_exprs.shape[1], 1024),
  54. nn.BatchNorm1d(1024),
  55. nn.GELU(),
  56. nn.Dropout(0.3),
  57. nn.Linear(1024, 512)
  58. ).to(device)
  59. def forward(self):
  60. """Encodes cell and drug features into a unified embedding space."""
  61. cell_feat = self.cell_norm(self.cell_exprs)
  62. cell_encoded = self.cell_encoder(cell_feat)
  63. # Project and transform drug features
  64. projected = [proj(fp) for proj, fp in zip(self.drug_proj, self.drug_fingerprints)]
  65. stacked = torch.stack(projected, dim=1)
  66. drug_feat = self.transformer(stacked)
  67. drug_feat = self.drug_norm(drug_feat.mean(dim=1))
  68. return cell_encoded, drug_feat
  69. class GEncoder(nn.Module):
  70. """Graph encoder for cell and drug feature aggregation with attention."""
  71. def __init__(self, agg_c_lp, agg_d_lp, self_c_lp, self_d_lp, device="cpu"):
  72. super().__init__()
  73. self.agg_c_lp = agg_c_lp
  74. self.agg_d_lp = agg_d_lp
  75. self.self_c_lp = self_c_lp
  76. self.self_d_lp = self_d_lp
  77. self.device = device
  78. # Cell feature encoder
  79. self.cell_encoder = nn.Sequential(
  80. nn.Linear(512, 1024),
  81. nn.BatchNorm1d(1024),
  82. nn.GELU(),
  83. nn.Dropout(0.3),
  84. nn.Linear(1024, 512)
  85. ).to(device)
  86. # Drug feature encoder
  87. self.drug_encoder = nn.Sequential(
  88. nn.Linear(512, 1024),
  89. nn.BatchNorm1d(1024),
  90. nn.GELU(),
  91. nn.Dropout(0.3),
  92. nn.Linear(1024, 512)
  93. ).to(device)
  94. # Attention mechanism for cross-modal interaction
  95. self.attention = MultiheadAttention(embed_dim=512, num_heads=8, batch_first=True).to(device)
  96. self.residual = nn.Linear(512, 512).to(device)
  97. # Final feature fusion
  98. self.fc = nn.Sequential(
  99. nn.Linear(1024, 512),
  100. nn.BatchNorm1d(512),
  101. nn.GELU(),
  102. nn.Dropout(0.2)
  103. ).to(device)
  104. def forward(self, cell_f, drug_f):
  105. """Aggregates and encodes cell and drug features using graph convolution and attention."""
  106. # Aggregate features via graph convolution
  107. cell_agg = torch.mm(self.agg_c_lp, drug_f)
  108. drug_agg = torch.mm(self.agg_d_lp, cell_f)
  109. # Encode aggregated features
  110. cell_fc = self.cell_encoder(cell_agg)
  111. drug_fc = self.drug_encoder(drug_agg)
  112. # Apply attention mechanism
  113. attn_output, _ = self.attention(
  114. query=cell_fc.unsqueeze(0),
  115. key=drug_fc.unsqueeze(0),
  116. value=drug_fc.unsqueeze(0)
  117. )
  118. attn_output = attn_output.squeeze(0)
  119. cell_emb = cell_fc + self.residual(attn_output)
  120. # Apply final activation
  121. return F.gelu(cell_emb), F.gelu(drug_fc)
  122. class GDecoder(nn.Module):
  123. """Decodes cell and drug embeddings into interaction scores."""
  124. def __init__(self, emb_dim, gamma):
  125. super().__init__()
  126. self.gamma = gamma
  127. self.decoder = nn.Sequential(
  128. nn.Linear(2 * emb_dim, 1024),
  129. nn.BatchNorm1d(1024),
  130. nn.GELU(),
  131. nn.Dropout(0.2),
  132. nn.Linear(1024, 1)
  133. )
  134. self.corr_weight = nn.Parameter(torch.tensor(0.5))
  135. def forward(self, cell_emb, drug_emb):
  136. """Predicts interaction scores using combined embeddings and correlation."""
  137. cell_exp = cell_emb.unsqueeze(1).repeat(1, drug_emb.size(0), 1)
  138. drug_exp = drug_emb.unsqueeze(0).repeat(cell_emb.size(0), 1, 1)
  139. combined = torch.cat([cell_exp, drug_exp], dim=-1)
  140. scores = self.decoder(combined.view(-1, 2 * cell_emb.size(1))).view(cell_emb.size(0), drug_emb.size(0))
  141. corr = torch_corr_x_y(cell_emb, drug_emb)
  142. return torch.sigmoid(self.gamma * (self.corr_weight * scores + (1 - self.corr_weight) * corr))
  143. class DeepTraCDR(nn.Module):
  144. """Main model integrating adjacency matrix construction, feature loading, encoding, and decoding."""
  145. def __init__(self, adj_mat, cell_exprs, drug_finger, layer_size, gamma, device="cpu"):
  146. super().__init__()
  147. self.device = device
  148. self.adj_mat = torch.from_numpy(adj_mat).float().to(device) if isinstance(adj_mat, np.ndarray) else adj_mat.to(device)
  149. self.construct_adj = ConstructAdjMatrix(self.adj_mat, device=device)
  150. self.load_feat = LoadFeature(cell_exprs, drug_finger, device=device)
  151. # Precompute adjacency matrices
  152. agg_c, agg_d, self_c, self_d = self.construct_adj()
  153. self.encoder = GEncoder(agg_c, agg_d, self_c, self_d, device=device).to(device)
  154. self.decoder = GDecoder(layer_size[-1], gamma).to(device)
  155. def forward(self):
  156. """Executes the full forward pass of the model."""
  157. cell_f, drug_f = self.load_feat()
  158. cell_emb, drug_emb = self.encoder(cell_f, drug_f)
  159. return self.decoder(cell_emb, drug_emb), cell_emb, drug_emb
  160. class Optimizer:
  161. """Handles model training and evaluation with performance metrics."""
  162. def __init__(self, model, train_data, test_data, test_mask, train_mask, adj_matrix, evaluate_fun, lr=0.001, wd=1e-05, epochs=200, test_freq=20, device="cpu"):
  163. self.model = model.to(device)
  164. self.train_data = train_data.float().to(device)
  165. self.test_data = test_data.float().to(device)
  166. self.train_mask = train_mask.to(device)
  167. self.test_mask_bool = test_mask.to(device).bool()
  168. self.adj_matrix = adj_matrix.to(device)
  169. self.evaluate_fun = evaluate_fun
  170. self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr, weight_decay=wd)
  171. self.epochs = epochs
  172. self.test_freq = test_freq
  173. def train(self):
  174. """Trains the model and evaluates performance on test data."""
  175. true_data = torch.masked_select(self.test_data, self.test_mask_bool).cpu().numpy()
  176. best_metrics = {'auc': 0.0, 'auprc': 0.0, 'precision': 0.0, 'recall': 0.0, 'f1': 0.0}
  177. best_pred = None
  178. for epoch in range(self.epochs):
  179. self.model.train()
  180. pred, cell_emb, drug_emb = self.model()
  181. # Compute losses
  182. ce_loss = cross_entropy_loss(self.train_data, pred, self.train_mask)
  183. proto_loss = prototypical_loss(cell_emb, drug_emb, self.adj_matrix)
  184. total_loss = 0.7 * ce_loss + 0.3 * proto_loss
  185. # Backpropagation
  186. self.optimizer.zero_grad()
  187. total_loss.backward()
  188. self.optimizer.step()
  189. # Evaluate periodically
  190. if epoch % self.test_freq == 0:
  191. self.model.eval()
  192. with torch.no_grad():
  193. pred_masked = torch.masked_select(pred, self.test_mask_bool).cpu().numpy()
  194. metrics = self._compute_metrics(true_data, pred_masked)
  195. # Update best metrics
  196. if metrics['auc'] > best_metrics['auc']:
  197. best_metrics.update(metrics)
  198. best_pred = pred_masked.copy()
  199. print(f"Epoch {epoch}: Loss={total_loss.item():.4f}, AUC={metrics['auc']:.4f}, "
  200. f"AUPRC={metrics['auprc']:.4f}, Precision={metrics['precision']:.4f}, "
  201. f"Recall={metrics['recall']:.4f}, F1-Score={metrics['f1']:.4f}")
  202. # Print final best metrics
  203. print("\nBest Metrics:")
  204. for metric, value in best_metrics.items():
  205. print(f"{metric.upper()}: {value:.4f}")
  206. return true_data, best_pred, *best_metrics.values()
  207. def _compute_metrics(self, true_data, pred_masked):
  208. """Computes evaluation metrics for model predictions."""
  209. try:
  210. auc = roc_auc_score(true_data, pred_masked)
  211. auprc = average_precision_score(true_data, pred_masked)
  212. except ValueError:
  213. auc = auprc = 0.0
  214. pred_labels = (pred_masked >= 0.5).astype(int)
  215. return {
  216. 'auc': auc,
  217. 'auprc': auprc,
  218. 'precision': precision_score(true_data, pred_labels, zero_division=0),
  219. 'recall': recall_score(true_data, pred_labels, zero_division=0),
  220. 'f1': f1_score(true_data, pred_labels, zero_division=0)
  221. }