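"""Case study script for DeepTraCDR drug response prediction.

Runs repeated k-fold cross-validation on the GDSC or CCLE cell-drug binary
response matrix for a small set of target drugs, reports the mean AUC/AUPRC
across folds, and then ranks cell lines by the model's predicted sensitivity
to Dasatinib (CID 5330286) and GSK690693 (CID 11338033).

Example invocation (the script name is illustrative):
    python case_study.py -data gdsc --epochs 1000 --lr 0.0005
"""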
import argparse
import numpy as np
import pandas as pd
import torch
import scipy.sparse as sp
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score, average_precision_score
from model import DeepTraCDR, Optimizer
from utils import evaluate_auc, common_data_index
from data_sampler import TargetSampler
from data_loader import load_data

# Clear CUDA cache to optimize memory usage
torch.cuda.empty_cache()

def main():
    # Parse command-line arguments for model configuration
    parser = argparse.ArgumentParser(description="DeepTraCDR Case Study for Drug Response Prediction")
    parser.add_argument('-device', type=str, default="cuda:0" if torch.cuda.is_available() else "cpu",
                        help="Device to run the model (cuda:0 or cpu)")
    parser.add_argument('-data', type=str, default='gdsc',
                        help="Dataset to use (e.g., gdsc or ccle)")
    parser.add_argument('--wd', type=float, default=1e-4, help="Weight decay for optimizer")
    parser.add_argument('--layer_size', nargs='+', type=int, default=[512],
                        help="List of layer sizes for the GCN model")
    parser.add_argument('--gamma', type=float, default=15, help="Gamma parameter for loss function")
    parser.add_argument('--epochs', type=int, default=1000, help="Number of training epochs")
    parser.add_argument('--test_freq', type=int, default=50, help="Frequency of evaluation during training")
    parser.add_argument('--patience', type=int, default=100, help="Patience for early stopping")
    parser.add_argument('--lr', type=float, default=0.0005, help="Learning rate for optimizer")
    parser.add_argument('--k_fold', type=int, default=5, help="Number of folds for cross-validation")
    args = parser.parse_args()

    # Load dataset-specific drug response data
    if args.data == "gdsc":
        # Define target drug CIDs (e.g., Dasatinib=5330286, GSK690693=11338033)
        target_drug_cids = np.array([5330286, 11338033, 24825971])

        # Load cell-drug binary response matrix
        cell_drug = pd.read_csv(
            "/media/external_16TB_1/ali_kianfar/Data/GDSC/cell_drug_binary.csv",
            index_col=0, header=0
        )
        cell_drug.columns = cell_drug.columns.astype(np.int32)
        drug_cids = cell_drug.columns.values

        # Extract target drug responses and compute positive sample count
        cell_target_drug = np.array(cell_drug.loc[:, target_drug_cids], dtype=np.float32)
        target_pos_num = sp.coo_matrix(cell_target_drug).data.shape[0]
        target_indexes = common_data_index(drug_cids, target_drug_cids)

    elif args.data == "ccle":
        # Define target drug CIDs for the CCLE dataset
        target_drug_cids = np.array([5330286])

        # Load cell-drug binary response matrix
        cell_drug = pd.read_csv(
            "/media/external_16TB_1/ali_kianfar/Data/CCLE/cell_drug_binary.csv",
            index_col=0, header=0
        )
        cell_drug.columns = cell_drug.columns.astype(np.int32)
        drug_cids = cell_drug.columns.values

        # Extract target drug responses and compute positive sample count
        cell_target_drug = np.array(cell_drug.loc[:, target_drug_cids], dtype=np.float32)
        target_pos_num = sp.coo_matrix(cell_target_drug).data.shape[0]
        target_indexes = common_data_index(drug_cids, target_drug_cids)

    else:
        raise ValueError(f"Unsupported dataset: {args.data} (expected 'gdsc' or 'ccle')")

    # Load additional data (adjacency matrix, fingerprints, expression, etc.)
    full_adj, drug_fingerprints, exprs, null_mask, pos_num, args = load_data(args)
    full_adj_np = full_adj.copy()  # Keep a NumPy copy of the adjacency matrix for the sampler

    # Print data shapes for verification
    print(f"Adjacency matrix shape: {full_adj.shape}")
    print(f"Expression data shape: {exprs.shape}")
    print(f"Null mask shape: {null_mask.shape}")

    # Convert adjacency matrix to a PyTorch tensor
    if isinstance(full_adj, np.ndarray):
        full_adj = torch.from_numpy(full_adj).float().to(args.device)

    # Initialize repeated k-fold cross-validation
    k = args.k_fold
    n_kfolds = 5  # Number of repetitions of k-fold cross-validation
    all_metrics = {'auc': [], 'auprc': []}

    # Perform repeated k-fold cross-validation (a different split seed per repetition)
    for n_kfold in range(n_kfolds):
        kfold = KFold(n_splits=k, shuffle=True, random_state=n_kfold)
        idx_all = np.arange(target_pos_num)

        for fold, (train_idx, test_idx) in enumerate(kfold.split(idx_all)):
            print(f"\n--- Fold {fold+1}/{k} (Iteration {n_kfold+1}/{n_kfolds}) ---")

            # Initialize data sampler for training and testing
            sampler = TargetSampler(
                response_mat=full_adj_np,
                null_mask=null_mask,
                target_indexes=target_indexes,
                pos_train_index=train_idx,
                pos_test_index=test_idx
            )

            # Initialize DeepTraCDR model
            model = DeepTraCDR(
                adj_mat=full_adj,
                cell_exprs=exprs,
                drug_finger=drug_fingerprints,
                layer_size=args.layer_size,
                gamma=args.gamma,
                device=args.device
            )

            # Initialize optimizer for training
            opt = Optimizer(
                model=model,
                train_data=sampler.train_data,
                test_data=sampler.test_data,
                test_mask=sampler.test_mask,
                train_mask=sampler.train_mask,
                adj_matrix=full_adj,
                evaluate_fun=evaluate_auc,
                lr=args.lr,
                wd=args.wd,
                epochs=args.epochs,
                test_freq=args.test_freq,
                patience=args.patience,
                device=args.device
            )

            # Train the model and retrieve best metrics
            true, pred, best_auc, best_auprc = opt.train()
            all_metrics['auc'].append(best_auc)
            all_metrics['auprc'].append(best_auprc)

            print(f"Fold {fold+1}: AUC={best_auc:.4f}, AUPRC={best_auprc:.4f}")

    # Compute and display average metrics across all folds
    print(f"\nFinal Average Metrics (Across {n_kfolds*k} Folds):")
    for metric, values in all_metrics.items():
        mean = np.mean(values)
        std = np.std(values)
        print(f"{metric.upper()}: {mean:.4f} ± {std:.4f}")

    # Case study: predict missing responses for the target drugs,
    # using the model trained in the final cross-validation fold
    print("\n--- Case Study: Predicting Missing Responses for Target Drugs ---")
    model.eval()
    with torch.no_grad():
        final_pred, cell_emb, drug_emb = model()  # final_pred shape: [num_cells, num_drugs]

    # Map matrix indices back to cell line names and drug CIDs
    num_cells, num_drugs = final_pred.size()
    cell_names = cell_drug.index.values  # Cell line names
    cid_list = cell_drug.columns.values  # Drug CIDs

    # Identify the top 10 most sensitive cell lines for each target drug
    for d in range(num_drugs):
        cid = cid_list[d]
        if cid in [5330286, 11338033]:  # Focus on Dasatinib or GSK690693
            drug_preds = final_pred[:, d].cpu().numpy()
            sorted_idx = np.argsort(-drug_preds)  # Sort in descending order
            top_10_cells = [(cell_names[i], drug_preds[i]) for i in sorted_idx[:10]]

            drug_name = "Dasatinib" if cid == 5330286 else "GSK690693"
            print(f"\nTop 10 Sensitive Cell Lines for {drug_name} (CID={cid}):")
            for rank, (cell, score) in enumerate(top_10_cells, start=1):
                print(f"{rank}. Cell: {cell}, Score: {score:.4f}")

if __name__ == "__main__":
    # Use the 'high' float32 matmul precision setting (enables TF32 on supported GPUs)
    torch.set_float32_matmul_precision('high')
    main()