Meta Byte Track

yolox_x_mot17_on_mot17.py (5.8 KB)

# encoding: utf-8
import os
import random

import torch
import torch.nn as nn
import torch.distributed as dist

from yolox.exp import MetaExp as MyMetaExp
from yolox.data import get_yolox_datadir
from os import listdir
from os.path import isfile, join
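

# YOLOX-X sized experiment (depth 1.33, width 1.25, a single object class)
# for training and evaluating on the MOT17 FRCNN train sequences. MetaExp is
# assumed to be this fork's variant of the stock YOLOX Exp base class that
# works with lists of loaders/evaluators (one per annotation file) rather
# than a single one.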
class Exp(MyMetaExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.num_classes = 1
        self.depth = 1.33
        self.width = 1.25
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]

        self.train_dir = '/home/abdollahpour.ce.sharif/ByteTrackData/MOT17/annotations'
        onlyfiles = [f for f in listdir(self.train_dir) if isfile(join(self.train_dir, f))]
        self.train_anns = [file for file in onlyfiles if 'train' in file and 'FRCNN' in file]
        # # TODO: remove
        # self.train_anns = self.train_anns[3:]

        self.val_dir = '/home/abdollahpour.ce.sharif/ByteTrackData/MOT17/annotations'
        onlyfiles = [f for f in listdir(self.val_dir) if isfile(join(self.val_dir, f))]
        self.val_anns = [file for file in onlyfiles if 'train' in file and 'FRCNN' in file]
        # self.val_anns = self.val_anns[-1:]

        print('train_anns', self.train_anns)
        print('val_anns', self.val_anns)

        self.input_size = (800, 1440)
        self.test_size = (800, 1440)
        # self.test_size = (736, 1920)
        self.random_size = (20, 36)
        self.max_epoch = 80
        self.print_interval = 250
        self.eval_interval = 5
        self.test_conf = 0.1
        self.nmsthre = 0.7
        self.no_aug_epochs = 10
        # self.basic_lr_per_img = 0.001 / 64.0
        self.basic_lr_per_img = 0.0001 / 64.0
        self.warmup_epochs = 1
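
    # Build one training DataLoader per annotation file in self.train_anns:
    # each MOT17-*-FRCNN train split becomes its own MOTDataset wrapped in
    # MosaicDetection and drawn from with an InfiniteSampler.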
    def get_data_loaders(self, batch_size, is_distributed, no_aug=False):
        from yolox.data import (
            MOTDataset,
            TrainTransform,
            YoloBatchSampler,
            DataLoader,
            InfiniteSampler,
            MosaicDetection,
        )

        if is_distributed:
            # Split the global batch across GPUs once, not once per loader.
            batch_size = batch_size // dist.get_world_size()

        train_loaders = []
        for train_ann in self.train_anns:
            dataset = MOTDataset(
                data_dir=os.path.join(get_yolox_datadir(), "MOT17"),
                json_file=train_ann,
                name='train',
                img_size=self.input_size,
                preproc=TrainTransform(
                    rgb_means=(0.485, 0.456, 0.406),
                    std=(0.229, 0.224, 0.225),
                    max_labels=500,
                ),
            )

            dataset = MosaicDetection(
                dataset,
                mosaic=not no_aug,
                img_size=self.input_size,
                preproc=TrainTransform(
                    rgb_means=(0.485, 0.456, 0.406),
                    std=(0.229, 0.224, 0.225),
                    max_labels=1000,
                ),
                degrees=self.degrees,
                translate=self.translate,
                scale=self.scale,
                shear=self.shear,
                perspective=self.perspective,
                enable_mixup=self.enable_mixup,
            )

            self.dataset = dataset

            sampler = InfiniteSampler(
                len(self.dataset), seed=self.seed if self.seed else 0
            )

            batch_sampler = YoloBatchSampler(
                sampler=sampler,
                batch_size=batch_size,
                drop_last=False,
                input_dimension=self.input_size,
                mosaic=not no_aug,
            )

            dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
            dataloader_kwargs["batch_sampler"] = batch_sampler
            train_loader = DataLoader(self.dataset, **dataloader_kwargs)
            train_loaders.append(train_loader)

        return train_loaders
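
    # Evaluation counterpart of get_data_loaders: one loader per annotation
    # file in self.val_anns, using ValTransformWithPseudo and a sequential
    # (or distributed) sampler instead of mosaic/mixup augmentation.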
    def get_eval_loaders(self, batch_size, is_distributed, testdev=False):
        from yolox.data import MOTDataset, ValTransform, ValTransformWithPseudo

        if is_distributed:
            # Per-GPU batch size, computed once for all evaluation loaders.
            batch_size = batch_size // dist.get_world_size()

        val_loaders = []
        for val_ann in self.val_anns:
            valdataset = MOTDataset(
                data_dir=os.path.join(get_yolox_datadir(), "MOT17"),
                json_file=val_ann,
                img_size=self.test_size,
                name='train',  # change to train when running on training set
                preproc=ValTransformWithPseudo(
                    rgb_means=(0.485, 0.456, 0.406),
                    std=(0.229, 0.224, 0.225),
                ),
            )

            if is_distributed:
                sampler = torch.utils.data.distributed.DistributedSampler(
                    valdataset, shuffle=False
                )
            else:
                sampler = torch.utils.data.SequentialSampler(valdataset)

            dataloader_kwargs = {
                "num_workers": self.data_num_workers,
                "pin_memory": True,
                "sampler": sampler,
            }
            dataloader_kwargs["batch_size"] = batch_size
            val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)
            val_loaders.append(val_loader)

        return val_loaders
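
    # One COCOEvaluator per evaluation loader, all sharing the experiment's
    # test size, confidence threshold and NMS threshold.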
    def get_evaluator(self, batch_size, is_distributed, testdev=False):
        from yolox.evaluators import COCOEvaluator

        val_loaders = self.get_eval_loaders(batch_size, is_distributed, testdev=testdev)
        evaluators = []
        for val_loader in val_loaders:
            evaluator = COCOEvaluator(
                dataloader=val_loader,
                img_size=self.test_size,
                confthre=self.test_conf,
                nmsthre=self.nmsthre,
                num_classes=self.num_classes,
                testdev=testdev,
            )
            evaluators.append(evaluator)
        return evaluators
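
A minimal, hypothetical smoke test for the experiment class above. It assumes the file is importable as yolox_x_mot17_on_mot17, that this repository's yolox package is on the Python path, and that the hard-coded MOT17 annotation directories exist; the batch size and single-process setting are illustrative only, not values taken from the file.

    # Hypothetical usage sketch (not part of the original file).
    from yolox_x_mot17_on_mot17 import Exp  # assumed module name

    exp = Exp()

    # One training loader and one evaluator per "*train*FRCNN*" annotation
    # file found in the hard-coded MOT17 annotation directory.
    train_loaders = exp.get_data_loaders(batch_size=8, is_distributed=False)
    evaluators = exp.get_evaluator(batch_size=8, is_distributed=False)
    print(len(train_loaders), 'training loaders,', len(evaluators), 'evaluators')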