DeepTraCDR: Prediction of Cancer Drug Response Using Multimodal Deep Learning with Transformers

model.py 15KB

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import MultiheadAttention, TransformerEncoder, TransformerEncoderLayer
from sklearn.metrics import roc_auc_score, average_precision_score
from utils import torch_corr_x_y, cross_entropy_loss, prototypical_loss


class AdjacencyMatrixConstructor(nn.Module):
    """
    Constructs normalized adjacency matrices for graph-based computations.
    These matrices are used for aggregating cell and drug features in the GCN model.
    """
    def __init__(self, original_adj_mat, device="cpu"):
        super().__init__()
        # Convert numpy array to torch tensor if necessary and move to the specified device
        if isinstance(original_adj_mat, np.ndarray):
            original_adj_mat = torch.from_numpy(original_adj_mat).float()
        self.adj = original_adj_mat.to(device)
        self.device = device

    def forward(self):
        """
        Computes normalized adjacency matrices for cell and drug aggregation.
        Returns four matrices: aggregated-cell, aggregated-drug, self-cell, and self-drug.
        """
        with torch.no_grad():
            # Degree-based normalization factors (+1 accounts for self-loops)
            degree_x = torch.pow(torch.sum(self.adj, dim=1) + 1, -0.5)
            degree_y = torch.pow(torch.sum(self.adj, dim=0) + 1, -0.5)
            d_x = torch.diag(degree_x)
            d_y = torch.diag(degree_y)
            # Symmetrically normalized cross-aggregation matrices
            agg_cell_lp = torch.mm(torch.mm(d_x, self.adj), d_y)    # [num_cells x num_drugs]
            agg_drug_lp = torch.mm(torch.mm(d_y, self.adj.T), d_x)  # [num_drugs x num_cells]
            # Self-loop matrices
            self_cell_lp = torch.diag(torch.add(torch.pow(torch.sum(self.adj, dim=1) + 1, -1), 1))
            self_drug_lp = torch.diag(torch.add(torch.pow(torch.sum(self.adj, dim=0) + 1, -1), 1))
            return (
                agg_cell_lp.to(self.device),
                agg_drug_lp.to(self.device),
                self_cell_lp.to(self.device),
                self_drug_lp.to(self.device)
            )
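

# --- Editor's usage sketch (not part of the original repository) -------------
# A minimal, hedged example of AdjacencyMatrixConstructor on a toy binary
# cell-drug matrix; the 4x6 shape and random values are illustrative assumptions.
def _demo_adjacency_constructor():
    adj = np.random.randint(0, 2, size=(4, 6)).astype(np.float32)  # 4 cells x 6 drugs
    agg_cell, agg_drug, self_cell, self_drug = AdjacencyMatrixConstructor(adj)()
    # Cross-aggregation entries are degree-normalized:
    # agg_cell[i, j] = adj[i, j] / sqrt((row_degree[i] + 1) * (col_degree[j] + 1))
    assert agg_cell.shape == (4, 6) and agg_drug.shape == (6, 4)
    assert self_cell.shape == (4, 4) and self_drug.shape == (6, 6)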
class FeatureLoader(nn.Module):
    """
    Loads and preprocesses cell expression and drug fingerprint features.
    Applies transformations to project features into a common embedding space.
    """
    def __init__(self, cell_exprs, drug_fingerprints, device="cpu"):
        super().__init__()
        self.device = device
        # Convert input features to torch tensors and move them to the device
        self.cell_exprs = torch.from_numpy(cell_exprs).float().to(device)
        self.drug_fingerprints = [torch.from_numpy(fp).float().to(device) for fp in drug_fingerprints]
        # Projection layers, one per fingerprint type, each mapping to 512 dimensions
        self.drug_proj = nn.ModuleList([
            nn.Sequential(
                nn.Linear(fp.shape[1], 512),
                nn.BatchNorm1d(512),
                nn.GELU(),
                nn.Dropout(0.5)
            ).to(device) for fp in drug_fingerprints
        ])
        # Transformer encoder over the sequence of fingerprint-type embeddings
        self.transformer = TransformerEncoder(
            TransformerEncoderLayer(
                d_model=512,
                nhead=8,
                dim_feedforward=2048,
                batch_first=True
            ),
            num_layers=1
        ).to(device)
        # Normalization layers
        self.cell_norm = nn.LayerNorm(cell_exprs.shape[1]).to(device)
        self.drug_norm = nn.LayerNorm(512).to(device)
        # Encoder for cell expression features
        self.cell_encoder = nn.Sequential(
            nn.Linear(cell_exprs.shape[1], 1024),
            nn.BatchNorm1d(1024),
            nn.GELU(),
            nn.Dropout(0.5),
            nn.Linear(1024, 512)
        ).to(device)

    def forward(self):
        """
        Processes cell and drug features to produce encoded representations.
        Returns encoded cell and drug features in a common 512-dimensional space.
        """
        # Normalize and encode cell expression features
        cell_feat = self.cell_norm(self.cell_exprs)  # [num_cells x num_cell_features]
        cell_encoded = self.cell_encoder(cell_feat)  # [num_cells x 512]
        # Project each fingerprint type, then fuse the types with the transformer
        projected = [proj(fp) for proj, fp in zip(self.drug_proj, self.drug_fingerprints)]  # list of [num_drugs x 512]
        stacked = torch.stack(projected, dim=1)            # [num_drugs x num_fp_types x 512]
        drug_feat = self.transformer(stacked)              # [num_drugs x num_fp_types x 512]
        drug_feat = self.drug_norm(drug_feat.mean(dim=1))  # [num_drugs x 512]
        return cell_encoded, drug_feat
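

# --- Editor's usage sketch (not part of the original repository) -------------
# Hedged example of FeatureLoader with dummy data: 8 cell lines x 100 genes and
# two fingerprint types for 6 drugs; all dimensions are illustrative assumptions.
def _demo_feature_loader():
    cell_exprs = np.random.randn(8, 100).astype(np.float32)
    drug_fps = [np.random.randn(6, 881).astype(np.float32),   # e.g. a PubChem-sized fingerprint
                np.random.randn(6, 167).astype(np.float32)]   # e.g. a MACCS-sized fingerprint
    loader = FeatureLoader(cell_exprs, drug_fps)
    cell_encoded, drug_feat = loader()
    # Both modalities end up in the shared 512-dimensional embedding space.
    assert cell_encoded.shape == (8, 512) and drug_feat.shape == (6, 512)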
class GraphEncoder(nn.Module):
    """
    Encodes cell and drug features using graph-based aggregation and attention mechanisms.
    Produces the final embeddings for cells and drugs.
    """
    def __init__(self, agg_cell_lp, agg_drug_lp, self_cell_lp, self_drug_lp, device="cpu"):
        super().__init__()
        self.agg_cell_lp = agg_cell_lp
        self.agg_drug_lp = agg_drug_lp
        self.self_cell_lp = self_cell_lp
        self.self_drug_lp = self_drug_lp
        self.device = device
        # Encoder for aggregated cell features
        self.cell_encoder = nn.Sequential(
            nn.Linear(512, 1024),
            nn.BatchNorm1d(1024),
            nn.GELU(),
            nn.Dropout(0.5),
            nn.Linear(1024, 512)
        ).to(device)
        # Encoder for aggregated drug features
        self.drug_encoder = nn.Sequential(
            nn.Linear(512, 1024),
            nn.BatchNorm1d(1024),
            nn.GELU(),
            nn.Dropout(0.5),
            nn.Linear(1024, 512)
        ).to(device)
        # Attention mechanism for cell-drug interactions
        self.attention = MultiheadAttention(embed_dim=512, num_heads=8, batch_first=True).to(device)
        self.residual = nn.Linear(512, 512).to(device)
        # Final fully connected layer (defined here but not used in forward)
        self.fc = nn.Sequential(
            nn.Linear(1024, 512),
            nn.BatchNorm1d(512),
            nn.GELU(),
            nn.Dropout(0.5)
        ).to(device)

    def forward(self, cell_features, drug_features):
        """
        Encodes cell and drug features using graph aggregation and attention.
        Returns the final cell and drug embeddings.
        """
        # Aggregate features over the bipartite graph using the Laplacian matrices
        cell_agg = torch.mm(self.agg_cell_lp, drug_features) + torch.mm(self.self_cell_lp, cell_features)  # [num_cells x 512]
        drug_agg = torch.mm(self.agg_drug_lp, cell_features) + torch.mm(self.self_drug_lp, drug_features)  # [num_drugs x 512]
        # Encode the aggregated features
        cell_fc = self.cell_encoder(cell_agg)  # [num_cells x 512]
        drug_fc = self.drug_encoder(drug_agg)  # [num_drugs x 512]
        # Cross-attention: cells attend to drugs
        attn_output, _ = self.attention(
            query=cell_fc.unsqueeze(0),  # [1 x num_cells x 512]
            key=drug_fc.unsqueeze(0),    # [1 x num_drugs x 512]
            value=drug_fc.unsqueeze(0)   # [1 x num_drugs x 512]
        )
        attn_output = attn_output.squeeze(0)  # [num_cells x 512]
        # Combine the attention output with a residual connection
        cell_emb = cell_fc + self.residual(attn_output)  # [num_cells x 512]
        # Final activation
        cell_emb = F.gelu(cell_emb)  # [num_cells x 512]
        drug_emb = F.gelu(drug_fc)   # [num_drugs x 512]
        return cell_emb, drug_emb
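

# --- Editor's usage sketch (not part of the original repository) -------------
# Hedged example wiring AdjacencyMatrixConstructor into GraphEncoder with random
# 512-d features; the 4 cells x 6 drugs graph is an illustrative assumption.
def _demo_graph_encoder():
    adj = np.random.randint(0, 2, size=(4, 6)).astype(np.float32)
    agg_cell, agg_drug, self_cell, self_drug = AdjacencyMatrixConstructor(adj)()
    encoder = GraphEncoder(agg_cell, agg_drug, self_cell, self_drug)
    cell_emb, drug_emb = encoder(torch.randn(4, 512), torch.randn(6, 512))
    assert cell_emb.shape == (4, 512) and drug_emb.shape == (6, 512)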
class GraphDecoder(nn.Module):
    """
    Decodes cell and drug embeddings to predict interaction scores.
    Combines embeddings and applies a correlation-based adjustment.
    """
    def __init__(self, emb_dim, gamma):
        super().__init__()
        self.gamma = gamma
        # Decoder network for the combined embeddings
        self.decoder = nn.Sequential(
            nn.Linear(2 * emb_dim, 1024),
            nn.BatchNorm1d(1024),
            nn.GELU(),
            nn.Dropout(0.2),
            nn.Linear(1024, 1)
        )
        # Learnable weight balancing decoder scores against the correlation term
        self.corr_weight = nn.Parameter(torch.tensor(0.5))

    def forward(self, cell_emb, drug_emb):
        """
        Decodes cell and drug embeddings to produce interaction scores.
        Returns a matrix of interaction probabilities.
        """
        # Expand embeddings to form all pairwise combinations
        cell_exp = cell_emb.unsqueeze(1).repeat(1, drug_emb.size(0), 1)  # [num_cells x num_drugs x emb_dim]
        drug_exp = drug_emb.unsqueeze(0).repeat(cell_emb.size(0), 1, 1)  # [num_cells x num_drugs x emb_dim]
        # Concatenate cell and drug embeddings per pair
        combined = torch.cat([cell_exp, drug_exp], dim=-1)  # [num_cells x num_drugs x 2*emb_dim]
        # Compute interaction scores
        scores = self.decoder(combined.view(-1, 2 * cell_emb.size(1))).view(cell_emb.size(0), drug_emb.size(0))  # [num_cells x num_drugs]
        # Correlation between cell and drug embeddings
        corr = torch_corr_x_y(cell_emb, drug_emb)  # [num_cells x num_drugs]
        # Blend scores and correlation with the learnable weight, then squash
        return torch.sigmoid(self.gamma * (self.corr_weight * scores + (1 - self.corr_weight) * corr))
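

# --- Editor's usage sketch (not part of the original repository) -------------
# Hedged example of GraphDecoder on random embeddings. gamma=8.0 is an assumed
# hyperparameter, not a value taken from the repository; the correlation term
# relies on utils.torch_corr_x_y imported above.
def _demo_graph_decoder():
    decoder = GraphDecoder(emb_dim=512, gamma=8.0)
    probs = decoder(torch.randn(4, 512), torch.randn(6, 512))
    # One sigmoid-squashed score per cell-drug pair.
    assert probs.shape == (4, 6)
    assert float(probs.min()) >= 0.0 and float(probs.max()) <= 1.0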
class DeepTraCDR(nn.Module):
    """
    Main graph convolutional network for predicting cell-drug interactions.
    Combines feature loading, graph encoding, and decoding.
    """
    def __init__(self, adj_mat, cell_exprs, drug_fingerprints, layer_size, gamma, device="cpu"):
        super().__init__()
        self.device = device
        # Convert the adjacency matrix to a tensor if necessary
        if isinstance(adj_mat, np.ndarray):
            adj_mat = torch.from_numpy(adj_mat).float()
        self.adj_mat = adj_mat.to(device)
        # Initialize components (layer_size is accepted but unused here:
        # the embedding dimension is fixed to 512 throughout)
        self.construct_adj = AdjacencyMatrixConstructor(self.adj_mat, device=device)
        self.load_feat = FeatureLoader(cell_exprs, drug_fingerprints, device=device)
        # Precompute the fixed adjacency matrices
        agg_cell, agg_drug, self_cell, self_drug = self.construct_adj()
        # Initialize the encoder and decoder
        self.encoder = GraphEncoder(agg_cell, agg_drug, self_cell, self_drug, device=device).to(device)
        self.decoder = GraphDecoder(512, gamma).to(device)  # emb_dim fixed to 512

    def forward(self):
        """
        Performs a full forward pass through the DeepTraCDR model.
        Returns predicted interaction scores and the final embeddings.
        """
        # Load and encode features
        cell_features, drug_features = self.load_feat()
        # Encode features using the graph structure
        cell_emb, drug_emb = self.encoder(cell_features, drug_features)
        # Decode to predict interaction scores
        return self.decoder(cell_emb, drug_emb), cell_emb, drug_emb
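

# --- Editor's usage sketch (not part of the original repository) -------------
# Hedged end-to-end forward pass with dummy data. gamma=8.0 and all shapes are
# illustrative assumptions; layer_size is passed only to satisfy the signature.
def _demo_deeptracdr_forward():
    adj = np.random.randint(0, 2, size=(8, 6)).astype(np.float32)
    cell_exprs = np.random.randn(8, 100).astype(np.float32)
    drug_fps = [np.random.randn(6, 881).astype(np.float32)]
    model = DeepTraCDR(adj, cell_exprs, drug_fps, layer_size=[512], gamma=8.0)
    pred, cell_emb, drug_emb = model()
    assert pred.shape == (8, 6)  # one interaction probability per cell-drug pair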
class ModelOptimizer:
    """
    Handles training and evaluation of the DeepTraCDR model.
    Implements early stopping and tracks the best performance metrics.
    """
    def __init__(self, model, train_data, test_data, test_mask, train_mask, adj_matrix,
                 evaluate_fun, lr=0.001, wd=1e-05, epochs=200, test_freq=20,
                 patience=100, device="cpu"):
        self.model = model.to(device)
        self.train_data = train_data.float().to(device)
        self.test_data = test_data.float().to(device)
        # Boolean versions of the masks are required by torch.masked_select
        self.train_mask = train_mask.to(device)
        self.train_mask_bool = self.train_mask.bool()
        self.test_mask_bool = test_mask.to(device).bool()
        self.device = device
        # Convert the adjacency matrix to a tensor if necessary
        if isinstance(adj_matrix, np.ndarray):
            adj_matrix = torch.from_numpy(adj_matrix).float()
        self.adj_matrix = adj_matrix.to(device)
        self.evaluate_fun = evaluate_fun
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr, weight_decay=wd)
        self.epochs = epochs
        self.test_freq = test_freq
        self.patience = patience
        self.best_auc = 0.0
        self.best_auprc = 0.0
        self.best_weights = None
        self.counter = 0  # Early-stopping counter
        self.best_epoch_auc = None    # AUC recorded at the best epoch
        self.best_epoch_auprc = None  # AUPRC recorded at the best epoch

    def train(self):
        """
        Trains the model with early stopping and evaluates performance.
        Returns the best AUC and AUPRC achieved during training.
        """
        true_data = torch.masked_select(self.test_data, self.test_mask_bool).cpu().numpy()
        for epoch in range(self.epochs):
            self.model.train()
            # Forward pass and loss computation
            pred_train, cell_emb, drug_emb = self.model()
            ce_loss = cross_entropy_loss(self.train_data, pred_train, self.train_mask)
            proto_loss = prototypical_loss(cell_emb, drug_emb, self.adj_matrix)
            total_loss = 0.7 * ce_loss + 0.3 * proto_loss
            # Backward pass and optimization
            self.optimizer.zero_grad()
            total_loss.backward()
            self.optimizer.step()
            # Evaluate the model (one forward pass serves both splits)
            self.model.eval()
            with torch.no_grad():
                pred_eval, _, _ = self.model()
                # Metrics on the training entries
                train_pred_masked = torch.masked_select(pred_eval, self.train_mask_bool).cpu().numpy()
                train_true_data = torch.masked_select(self.train_data, self.train_mask_bool).cpu().numpy()
                try:
                    train_auc = roc_auc_score(train_true_data, train_pred_masked)
                    train_auprc = average_precision_score(train_true_data, train_pred_masked)
                except ValueError:
                    train_auc, train_auprc = 0.0, 0.0
                # Metrics on the test entries
                pred_masked = torch.masked_select(pred_eval, self.test_mask_bool).cpu().numpy()
                try:
                    auc = roc_auc_score(true_data, pred_masked)
                    auprc = average_precision_score(true_data, pred_masked)
                except ValueError:
                    auc, auprc = 0.0, 0.0
            # Update the best metrics and snapshot the weights
            if auc > self.best_auc:
                self.best_auc = auc
                self.best_auprc = auprc
                # Clone each tensor: state_dict().copy() would only copy references,
                # so the snapshot would silently track later updates
                self.best_weights = {k: v.detach().clone() for k, v in self.model.state_dict().items()}
                self.counter = 0
                self.best_epoch_auc = auc
                self.best_epoch_auprc = auprc
            else:
                self.counter += 1
            # Log progress
            if epoch % self.test_freq == 0 or epoch == self.epochs - 1:
                print(f"Epoch {epoch}: Loss={total_loss.item():.4f}, Train AUC={train_auc:.4f}, Train AUPRC={train_auprc:.4f}, Test AUC={auc:.4f}, Test AUPRC={auprc:.4f}")
            # Check early stopping
            if self.counter >= self.patience:
                print(f"\nEarly stopping triggered at epoch {epoch}!")
                print(f"No improvement in AUC for {self.patience} consecutive epochs.")
                break
        # Restore the best weights
        if self.best_weights is not None:
            self.model.load_state_dict(self.best_weights)
        # Final evaluation with the restored weights (should match the tracked best)
        self.model.eval()
        with torch.no_grad():
            final_pred, _, _ = self.model()
            final_pred_masked = torch.masked_select(final_pred, self.test_mask_bool).cpu().numpy()
            final_auc = roc_auc_score(true_data, final_pred_masked)
            final_auprc = average_precision_score(true_data, final_pred_masked)
        # Print the final results
        print("\nBest Metrics After Training (on Test Data):")
        print(f"AUC: {final_auc:.4f}")
        print(f"AUPRC: {final_auprc:.4f}")
        return self.best_auc, self.best_auprc
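

# --- Editor's usage sketch (not part of the original repository) -------------
# Hedged training sketch on random data. It assumes utils.cross_entropy_loss and
# utils.prototypical_loss (imported above) accept the arguments used in train();
# lr/epochs/mask split are illustrative, and sklearn's roc_auc_score stands in
# for evaluate_fun, which the optimizer stores but never calls in train().
def _demo_optimizer():
    np.random.seed(0)
    torch.manual_seed(0)
    adj = np.random.randint(0, 2, size=(8, 6)).astype(np.float32)
    cell_exprs = np.random.randn(8, 100).astype(np.float32)
    drug_fps = [np.random.randn(6, 881).astype(np.float32)]
    model = DeepTraCDR(adj, cell_exprs, drug_fps, layer_size=[512], gamma=8.0)
    labels = torch.from_numpy(adj)
    train_mask = torch.rand(8, 6) < 0.8  # ~80% of known entries for training
    test_mask = ~train_mask              # remaining entries held out
    optimizer = ModelOptimizer(model, labels, labels, test_mask, train_mask, adj,
                               evaluate_fun=roc_auc_score, lr=1e-3, epochs=5,
                               test_freq=1, patience=5, device="cpu")
    best_auc, best_auprc = optimizer.train()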