|
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657 |
- from tqdm import tqdm
- import numpy as np
- import torch
-
- import os
- import sys
- sys.path.insert(1, os.path.join(sys.path[0], '..'))
-
- from _datasets import AutoLoad
- from _trainer import auto_train
- from _mydelta import auto_mutate
- from _models import auto_model
- from _config import Config, load_config
- from _utils import print_system_info, silent_logs
-
-
# Prefer the first CUDA GPU when one is available, otherwise run on CPU.
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
def run_experminent(config, task_name):
    """Run one training experiment for *task_name* under *config*.

    Seeds the NumPy and torch RNGs for reproducibility, builds the
    model/tokenizer pair, optionally applies the configured PEFT
    mutation, loads and maps the task's dataset, then trains.

    Args:
        config: run configuration; this function reads ``random_seed``,
            ``model_name``, ``peft_params`` and ``remove_dropout``.
        task_name: name of the task/dataset to train on.
    """
    # Seed both RNG sources so repeated runs with the same config match.
    np.random.seed(config.random_seed)
    torch.manual_seed(config.random_seed)

    # ______________________LOAD MODEL_____________________________
    model, tokenizer = auto_model(config.model_name, AutoLoad.get_task_output(task_name))

    # ______________________MUTATE MODEL_____________________________
    n_prefix_token = 0
    if config.peft_params is not None:
        n_prefix_token = config.peft_params.n_tokens
        # NOTE(review): auto_mutate presumably mutates `model` in place —
        # its return value was bound to an unused local before, so it is
        # deliberately discarded here. Confirm against _mydelta.
        auto_mutate(
            model=model,
            tokenizer=tokenizer,
            peft_params=config.peft_params.to_dict(),
            remove_dropout=config.remove_dropout
        )

    # ______________________LOAD DATA_____________________________
    autoload = AutoLoad(tokenizer, n_prefix_token=n_prefix_token)

    # ______________________TRAIN_____________________________
    dataset = autoload.get_and_map(task_name)
    auto_train(model, tokenizer, dataset, config, device=DEVICE)
-
-
if __name__ == '__main__':
    # Report environment details and mute noisy library logging before work.
    print_system_info()
    silent_logs()

    # First CLI argument is the path to the experiment configuration file.
    configs = load_config(sys.argv[1])

    # Outer progress bar: one tick per run configuration.
    experiment_bar = tqdm(configs.run_configs, position=0, desc="Experiment")
    for run_config in experiment_bar:
        # Inner progress bar: one tick per task; relabelled as tasks change.
        task_bar = tqdm(run_config.tasks, position=1, desc="Task:", leave=False)
        for task_name in task_bar:
            task_bar.set_description(f'Task: {task_name}')
            # Release cached GPU memory between experiments.
            torch.cuda.empty_cache()

            run_experminent(run_config, task_name)
|