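"""Run a single fine-tuning experiment.

Loads a model and tokenizer, optionally injects PEFT parameters via
auto_mutate, prepares the dataset for the requested task, and launches
training. The two command-line arguments are an encoded config JSON and an
encoded task name, both decoded with sp_decode.
"""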
import numpy as np
import torch

import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))

from _utils import silent_logs, sp_decode
from _datasets import AutoLoad
from _trainer import auto_train
from _mydelta import auto_mutate
from _models import auto_model
from _config import Config


DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def run_experiment(config, task_name):
    silent_logs()
    np.random.seed(config.random_seed)
    torch.manual_seed(config.random_seed)
    # ______________________LOAD MODEL_____________________________
    model, tokenizer = auto_model(config.model_name, AutoLoad.get_task_output(task_name))

    # ______________________MUTATE MODEL_____________________________
    n_prefix_token = 0
    if config.peft_params is not None:
        n_prefix_token = config.peft_params.n_tokens
        delta_module = auto_mutate(
            model=model,
            tokenizer=tokenizer,
            peft_params=config.peft_params.to_dict(),
            remove_dropout=config.remove_dropout
        )

    # ______________________LOAD DATA_____________________________
    autoload = AutoLoad(tokenizer, n_prefix_token=n_prefix_token)

    # ______________________TRAIN_____________________________
    dataset = autoload.get_and_map(task_name)
    auto_train(model, tokenizer, dataset, config, device=DEVICE)


if __name__ == '__main__':
    config_json = sp_decode(sys.argv[1])
    config = Config(config_json, '')
    task_name = sp_decode(sys.argv[2])
    run_experiment(config, task_name)
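
# Example invocation (hypothetical script name; assumes the two arguments were
# produced by an encoder matching sp_decode):
#   python run_experiment.py <encoded_config_json> <encoded_task_name>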