Meta Byte Track
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

meta_trainer.py 12KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366
  1. # Mahdi Abdollahpour, 27/11/2021, 02:35 PM, PyCharm, ByteTrack
  2. from loguru import logger
  3. import torch
  4. from torch.nn.parallel import DistributedDataParallel as DDP
  5. from torch.utils.tensorboard import SummaryWriter
  6. from yolox.data import DataPrefetcher
  7. from yolox.utils import (
  8. MeterBuffer,
  9. ModelEMA,
  10. all_reduce_norm,
  11. get_model_info,
  12. get_rank,
  13. get_world_size,
  14. gpu_mem_usage,
  15. load_ckpt,
  16. occupy_mem,
  17. save_checkpoint,
  18. setup_logger,
  19. synchronize
  20. )
  21. import datetime
  22. import os
  23. import time
  24. import learn2learn as l2l
class MetaTrainer:
    """Meta-learning (MAML) variant of the YOLOX/ByteTrack trainer.

    Wraps the detector in a ``learn2learn`` MAML module so that each task
    (one data loader) gets a fresh ``clone()`` of the meta-model; inner-loop
    ``adapt`` steps run periodically inside the normal train loop, and the
    outer optimizer updates the meta-parameters.
    """

    def __init__(self, exp, args):
        # init function only defines some basic attr, other attrs like model,
        # optimizer are built in before_train methods.
        self.exp = exp
        self.args = args

        # training related attr
        self.max_epoch = exp.max_epoch
        self.amp_training = args.fp16
        # GradScaler is a no-op when fp16 is disabled
        self.scaler = torch.cuda.amp.GradScaler(enabled=args.fp16)
        self.is_distributed = get_world_size() > 1
        self.rank = get_rank()
        self.local_rank = args.local_rank
        self.device = "cuda:{}".format(self.local_rank)
        self.use_model_ema = exp.ema

        # data/dataloader related attr
        self.data_type = torch.float16 if args.fp16 else torch.float32
        self.input_size = exp.input_size
        self.best_ap = 0
        # run a MAML inner-loop adaptation every `adaptation_period` iterations
        self.adaptation_period = args.adaptation_period

        # metric record
        self.meter = MeterBuffer(window_size=exp.print_interval)
        self.file_name = os.path.join(exp.output_dir, args.experiment_name)

        if self.rank == 0:
            os.makedirs(self.file_name, exist_ok=True)

        setup_logger(
            self.file_name,
            distributed_rank=self.rank,
            filename="train_log.txt",
            mode="a",
        )

    def train(self):
        """Run the whole training session; `after_train` always runs."""
        self.before_train()
        try:
            self.train_in_epoch()
        except Exception:
            # re-raise so the failure is visible; cleanup happens in finally
            raise
        finally:
            self.after_train()

    def train_in_epoch(self):
        # Outer loop over epochs; `self.epoch` is the current epoch index.
        for self.epoch in range(self.start_epoch, self.max_epoch):
            self.before_epoch()
            self.train_in_task()
            self.after_epoch()

    def train_in_iter(self, task):
        # One pass over a single task's loader; `self.iter` is the iter index.
        for self.iter in range(len(task)):
            self.before_iter()
            self.train_one_iter()
            self.after_iter()

    def train_in_task(self):
        # Within an epoch, iterate over the per-task data loaders.
        for task in self.train_loaders:
            self.before_task(task)
            self.train_in_iter(task)
            self.after_task()

    def before_task(self, train_loader):
        """Set up the prefetcher and a fresh MAML learner for one task."""
        logger.info("init prefetcher, this might take one minute or less...")
        self.train_loader = train_loader
        self.prefetcher = DataPrefetcher(train_loader)
        # clone() gives a task-local learner so inner-loop updates do not
        # directly mutate the meta-parameters
        self.learner = self.model.clone()

    def after_task(self):
        # no per-task teardown currently needed
        pass

    def adapt(self, inps, targets):
        """One MAML inner-loop adaptation step on the cloned learner."""
        # adapt_inps =inps[:1, ...]
        # targets_inps =targets[:1, ...]
        # print(adapt_inps.shape)
        # print(targets_inps.shape)
        outputs = self.learner(inps, targets)
        loss = outputs["total_loss"]
        self.learner.adapt(loss)

    def train_one_iter(self):
        """Forward/backward/step for one iteration, with periodic adaptation."""
        iter_start_time = time.time()

        inps, targets = self.prefetcher.next()
        inps = inps.to(self.data_type)
        targets = targets.to(self.data_type)
        targets.requires_grad = False
        data_end_time = time.time()

        with torch.cuda.amp.autocast(enabled=self.amp_training):
            # periodic inner-loop adaptation before the meta forward pass
            if self.iter % self.adaptation_period == 0:
                self.adapt(inps, targets)
            outputs = self.learner(inps, targets)
            loss = outputs["total_loss"]

        # NOTE(review): this rescaling runs BEFORE zero_grad()/backward(), so
        # it touches only gradients left over from `adapt` (if any) and its
        # effect is then wiped by zero_grad(). If the intent was to average
        # accumulated meta-gradients over the batch, the loop should run after
        # backward() — confirm against the learn2learn MAML recipe.
        for p in self.exp.all_parameters:
            if p.grad is not None:
                p.grad.data.mul_(1.0 / self.args.batch_size)

        self.optimizer.zero_grad()
        self.scaler.scale(loss).backward()
        self.scaler.step(self.optimizer)
        self.scaler.update()

        if self.use_model_ema:
            self.ema_model.update(self.model)

        lr = self.lr_scheduler.update_lr(self.progress_in_iter + 1)
        for param_group in self.optimizer.param_groups:
            param_group["lr"] = lr

        iter_end_time = time.time()
        self.meter.update(
            iter_time=iter_end_time - iter_start_time,
            data_time=data_end_time - iter_start_time,
            lr=lr,
            **outputs,
        )

    def before_train(self):
        """Build model, optimizer, data loaders, scheduler, EMA and loggers."""
        logger.info("args: {}".format(self.args))
        # logger.info("exp value:\n{}".format(self.exp))

        # model related init
        torch.cuda.set_device(self.local_rank)
        model = self.exp.get_model()
        logger.info(
            "Model Summary: {}".format(get_model_info(model, self.exp.test_size))
        )
        model.to(self.device)

        # value of epoch will be set in `resume_train`
        model = self.resume_train(model)
        # wrap in MAML so `clone()` / `adapt()` are available per task
        self.model = l2l.algorithms.MAML(model, lr=self.exp.inner_lr, first_order=self.exp.first_order)

        # solver related init
        # NOTE(review): `resume_train` above attempts to restore
        # self.optimizer's state, but the optimizer is only created here —
        # verify the resume path ordering.
        self.optimizer = self.exp.get_optimizer(self.args.batch_size)

        # data related init
        self.no_aug = self.start_epoch >= self.max_epoch - self.exp.no_aug_epochs
        print('Getting data loaders')
        self.train_loaders = self.exp.get_data_loaders(
            batch_size=self.args.batch_size,
            is_distributed=self.is_distributed,
            no_aug=self.no_aug,
        )

        # max_iter means iters per epoch (summed over all task loaders)
        self.max_iter = 0
        for train_loader in self.train_loaders:
            self.max_iter += len(train_loader)

        self.lr_scheduler = self.exp.get_lr_scheduler(
            self.exp.basic_lr_per_img * self.args.batch_size, self.max_iter
        )
        if self.args.occupy:
            occupy_mem(self.local_rank)

        if self.is_distributed:
            self.model = DDP(self.model, device_ids=[self.local_rank], broadcast_buffers=False)

        if self.use_model_ema:
            self.ema_model = ModelEMA(self.model, 0.9998)
            self.ema_model.updates = self.max_iter * self.start_epoch

        # self.model = model
        self.model.train()
        self.evaluator = self.exp.get_evaluator(
            batch_size=self.args.batch_size, is_distributed=self.is_distributed
        )
        # Tensorboard logger
        if self.rank == 0:
            self.tblogger = SummaryWriter(self.file_name)

        logger.info("Training start...")
        # logger.info("\n{}".format(model))

    def after_train(self):
        logger.info(
            "Training of experiment is done and the best AP is {:.2f}".format(
                self.best_ap * 100
            )
        )

    def before_epoch(self):
        """Per-epoch setup: disable mosaic aug / enable L1 loss near the end."""
        logger.info("---> start train epoch{}".format(self.epoch + 1))

        if self.epoch + 1 == self.max_epoch - self.exp.no_aug_epochs or self.no_aug:
            logger.info("--->No mosaic aug now!")
            for train_loader in self.train_loaders:
                train_loader.close_mosaic()
            logger.info("--->Add additional L1 loss now!")
            if self.is_distributed:
                # DDP wraps the model, so the head lives under .module
                self.model.module.head.use_l1 = True
            else:
                self.model.head.use_l1 = True
            # evaluate every epoch once augmentation is off
            self.exp.eval_interval = 1
            if not self.no_aug:
                self.save_ckpt(ckpt_name="last_mosaic_epoch")

    def after_epoch(self):
        if self.use_model_ema:
            self.ema_model.update_attr(self.model)

        self.save_ckpt(ckpt_name="latest")

        if (self.epoch + 1) % self.exp.eval_interval == 0:
            all_reduce_norm(self.model)
            self.evaluate_and_save_model()

    def before_iter(self):
        pass

    def after_iter(self):
        """
        `after_iter` contains two parts of logic:
            * log information
            * reset setting of resize
        """
        # log needed information
        if (self.iter + 1) % self.exp.print_interval == 0:
            # TODO check ETA logic
            left_iters = self.max_iter * self.max_epoch - (self.progress_in_iter + 1)
            eta_seconds = self.meter["iter_time"].global_avg * left_iters
            eta_str = "ETA: {}".format(datetime.timedelta(seconds=int(eta_seconds)))

            progress_str = "epoch: {}/{}, iter: {}/{}".format(
                self.epoch + 1, self.max_epoch, self.iter + 1, self.max_iter
            )
            loss_meter = self.meter.get_filtered_meter("loss")
            loss_str = ", ".join(
                ["{}: {:.3f}".format(k, v.latest) for k, v in loss_meter.items()]
            )

            time_meter = self.meter.get_filtered_meter("time")
            time_str = ", ".join(
                ["{}: {:.3f}s".format(k, v.avg) for k, v in time_meter.items()]
            )

            logger.info(
                "{}, mem: {:.0f}Mb, {}, {}, lr: {:.3e}".format(
                    progress_str,
                    gpu_mem_usage(),
                    time_str,
                    loss_str,
                    self.meter["lr"].latest,
                )
                + (", size: {:d}, {}".format(self.input_size[0], eta_str))
            )
            self.meter.clear_meters()

        # random resizing
        if self.exp.random_size is not None and (self.progress_in_iter + 1) % 10 == 0:
            self.input_size = self.exp.random_resize(
                self.train_loader, self.epoch, self.rank, self.is_distributed
            )

    @property
    def progress_in_iter(self):
        # global iteration counter across epochs
        return self.epoch * self.max_iter + self.iter

    def resume_train(self, model):
        """Load weights for resume or fine-tune and set `self.start_epoch`."""
        if self.args.resume:
            logger.info("resume training")
            if self.args.ckpt is None:
                ckpt_file = os.path.join(self.file_name, "latest" + "_ckpt.pth.tar")
            else:
                ckpt_file = self.args.ckpt

            ckpt = torch.load(ckpt_file, map_location=self.device)
            # resume the model/optimizer state dict
            model.load_state_dict(ckpt["model"])
            self.optimizer.load_state_dict(ckpt["optimizer"])
            start_epoch = (
                self.args.start_epoch - 1
                if self.args.start_epoch is not None
                else ckpt["start_epoch"]
            )
            self.start_epoch = start_epoch
            logger.info(
                "loaded checkpoint '{}' (epoch {})".format(
                    self.args.resume, self.start_epoch
                )
            )  # noqa
        else:
            if self.args.ckpt is not None:
                logger.info("loading checkpoint for fine tuning")
                ckpt_file = self.args.ckpt
                ckpt = torch.load(ckpt_file, map_location=self.device)["model"]
                model = load_ckpt(model, ckpt)
            self.start_epoch = 0

        return model

    def evaluate_and_save_model(self):
        """Evaluate (EMA weights when enabled), log AP, and checkpoint."""
        evalmodel = self.ema_model.ema if self.use_model_ema else self.model
        ap50_95, ap50, summary = self.exp.eval(
            evalmodel, self.evaluator, self.is_distributed
        )
        self.model.train()
        if self.rank == 0:
            self.tblogger.add_scalar("val/COCOAP50", ap50, self.epoch + 1)
            self.tblogger.add_scalar("val/COCOAP50_95", ap50_95, self.epoch + 1)
            logger.info("\n" + summary)
        synchronize()

        # self.best_ap = max(self.best_ap, ap50_95)
        # NOTE(review): best_ap tracks AP50 here while the commented line used
        # AP50:95 — confirm which metric is intended for "best" selection.
        self.save_ckpt("last_epoch", ap50 > self.best_ap)
        self.best_ap = max(self.best_ap, ap50)

    def save_ckpt(self, ckpt_name, update_best_ckpt=False):
        """Save model/optimizer state on rank 0 only."""
        if self.rank == 0:
            # save EMA weights when EMA is in use
            save_model = self.ema_model.ema if self.use_model_ema else self.model
            logger.info("Save weights to {}".format(self.file_name))
            ckpt_state = {
                "start_epoch": self.epoch + 1,
                "model": save_model.state_dict(),
                "optimizer": self.optimizer.state_dict(),
            }
            save_checkpoint(
                ckpt_state,
                update_best_ckpt,
                self.file_name,
                ckpt_name,
            )