DeepTraCDR: Prediction of Cancer Drug Response using multimodal deep learning with Transformers

main_target.py 6.2KB

# main_target.py
import argparse
import numpy as np
import pandas as pd
import scipy.sparse as sp
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score, average_precision_score, precision_score, recall_score, f1_score
from model import DeepTraCDR, ModelOptimizer
from utils import evaluate_auc, common_data_index
from data_sampler import TargetSampler
from data_loader import load_data
import torch
from torch.optim.lr_scheduler import OneCycleLR

# Clear CUDA cache to optimize GPU memory usage
torch.cuda.empty_cache()


def main():
    """
    Main function to execute the DeepTraCDR model training and evaluation pipeline.
    Parses command-line arguments, loads data, performs k-fold cross-validation,
    and reports performance metrics.
    """
    # Initialize argument parser for command-line arguments
    parser = argparse.ArgumentParser(description="DeepTraCDR Advanced Model Training")
    parser.add_argument('-device', type=str, default="cuda:0" if torch.cuda.is_available() else "cpu",
                        help="Device to run the model on (cuda:0 or cpu)")
    parser.add_argument('-data', type=str, default='gdsc', help="Dataset to use (gdsc or ccle)")
    parser.add_argument('--wd', type=float, default=1e-4, help="Weight decay for optimizer")
    parser.add_argument('--layer_size', nargs='+', type=int, default=[512],
                        help="List of layer sizes for the GCN model")
    parser.add_argument('--gamma', type=float, default=15, help="Gamma parameter for model")
    parser.add_argument('--epochs', type=int, default=1000, help="Number of training epochs")
    parser.add_argument('--test_freq', type=int, default=50, help="Frequency of evaluation during training")
    parser.add_argument('--lr', type=float, default=0.0005, help="Learning rate for optimizer")
    args = parser.parse_args()

    # Load target drug data based on the specified dataset
    if args.data == "gdsc":
        target_drug_cids = np.array([5330286, 11338033, 24825971])
        # Load cell-drug binary matrix for GDSC dataset
        cell_drug = pd.read_csv("/media/external_16TB_1/ali_kianfar/Data/GDSC/cell_drug_binary.csv",
                                index_col=0, header=0)
        cell_drug.columns = cell_drug.columns.astype(np.int32)
        drug_cids = cell_drug.columns.values
        cell_target_drug = np.array(cell_drug.loc[:, target_drug_cids], dtype=np.float32)
        target_pos_num = sp.coo_matrix(cell_target_drug).data.shape[0]
        target_indexes = common_data_index(drug_cids, target_drug_cids)
    elif args.data == "ccle":
        target_drug_cids = np.array([5330286])
        # Load cell-drug binary matrix for CCLE dataset
        cell_drug = pd.read_csv("/media/external_16TB_1/ali_kianfar/Data/CCLE/cell_drug_binary.csv",
                                index_col=0, header=0)
        cell_drug.columns = cell_drug.columns.astype(np.int32)
        drug_cids = cell_drug.columns.values
        cell_target_drug = np.array(cell_drug.loc[:, target_drug_cids], dtype=np.float32)
        target_pos_num = sp.coo_matrix(cell_target_drug).data.shape[0]
        target_indexes = common_data_index(drug_cids, target_drug_cids)

    # Load dataset components including adjacency matrix, fingerprints, and expression data
    full_adj, drug_fingerprints, exprs, null_mask, pos_num, args = load_data(args)
    full_adj_np = full_adj.copy()

    # Log original adjacency matrix shape for debugging
    print(f"Original adj_mat shape: {full_adj.shape}")

    # Log shapes of loaded data for verification
    print("\n--- Data Shapes ---")
    print(f"Expression data shape: {exprs.shape}")
    print(f"Null mask shape: {null_mask.shape}")

    # Convert adjacency matrix to PyTorch tensor if it is a NumPy array
    if isinstance(full_adj, np.ndarray):
        full_adj = torch.from_numpy(full_adj).float().to(args.device)

    # Log converted adjacency matrix shape for verification
    print(f"Converted adj_mat shape: {full_adj.shape}")

    # Initialize k-fold cross-validation parameters
    k = 5
    n_kfolds = 5
    all_metrics = {
        'auc': [],
        'auprc': [],
    }

    # Perform k-fold cross-validation
    for n_kfold in range(n_kfolds):
        kfold = KFold(n_splits=k, shuffle=True, random_state=n_kfold)
        for fold, (train_idx, test_idx) in enumerate(kfold.split(np.arange(target_pos_num))):
            # Initialize data sampler for train/test split
            sampler = TargetSampler(response_mat=full_adj_np, null_mask=null_mask, target_indexes=target_indexes,
                                    pos_train_index=train_idx, pos_test_index=test_idx)
            # Initialize DeepTraCDR model
            model = DeepTraCDR(
                adj_mat=full_adj,
                cell_exprs=exprs,
                drug_fingerprints=drug_fingerprints,
                layer_size=args.layer_size,
                gamma=args.gamma,
                device=args.device
            )
            # Initialize optimizer for model training
            opt = ModelOptimizer(
                model=model,
                train_data=sampler.train_data,
                test_data=sampler.test_data,
                test_mask=sampler.test_mask,
                train_mask=sampler.train_mask,
                adj_matrix=full_adj,
                evaluate_fun=evaluate_auc,
                lr=args.lr,
                wd=args.wd,
                epochs=args.epochs,
                test_freq=args.test_freq,
                device=args.device
            )
            # Train model and retrieve evaluation metrics
            true, pred, best_auc, best_auprc = opt.train()

            # Store metrics for this fold
            all_metrics['auc'].append(best_auc)
            all_metrics['auprc'].append(best_auprc)

            # Log performance for the current fold
            print(f"Fold {n_kfold * k + fold + 1}: AUC={best_auc:.4f}, AUPRC={best_auprc:.4f}")

    # Calculate and log mean and standard deviation of metrics
    print("\nFinal Average Metrics:")
    for metric, values in all_metrics.items():
        mean = np.mean(values)
        std = np.std(values)
        print(f"{metric.upper()}: {mean:.4f} ± {std:.4f}")


if __name__ == "__main__":
    # Set precision for matrix multiplication to optimize performance
    torch.set_float32_matmul_precision('high')
    main()
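
The helpers imported from model, utils, data_sampler, and data_loader are not shown in this file. For illustration only, below is a minimal sketch of how common_data_index is assumed to behave here: it maps the selected target drug CIDs to their column positions in the full drug panel, so that target_indexes can be used to slice the response matrix. The actual implementation in utils.py may differ.

    # sketch.py -- illustrative only; assumed behaviour of utils.common_data_index
    import numpy as np

    def common_data_index(all_ids: np.ndarray, target_ids: np.ndarray) -> np.ndarray:
        """Return the positions of target_ids within all_ids, in target order (assumption)."""
        return np.array([int(np.where(all_ids == t)[0][0]) for t in target_ids])

    # Example with the three GDSC target drug CIDs and a hypothetical drug panel
    drug_cids = np.array([11338033, 5330286, 24825971, 9999999])
    target_drug_cids = np.array([5330286, 11338033, 24825971])
    print(common_data_index(drug_cids, target_drug_cids))  # -> [1 0 2]

Under this assumption, target_indexes selects the columns of the cell-drug binary matrix that correspond to the held-out target drugs, which is what TargetSampler uses to build the train/test splits in the script above.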