Meta Byte Track

opts.py 22KB

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os
import sys
import json


class opts(object):
    def __init__(self):
        self.parser = argparse.ArgumentParser()
        # basic experiment setting
        self.parser.add_argument('task', default='',
            help='ctdet | ddd | multi_pose | tracking, '
                 'or a combination joined with ","')
        self.parser.add_argument('--dataset', default='coco',
            help='see lib/dataset/dataset_factory for available datasets')
        self.parser.add_argument('--test_dataset', default='',
            help='coco | kitti | coco_hp | pascal')
        self.parser.add_argument('--exp_id', default='default')
        self.parser.add_argument('--test', action='store_true')
        self.parser.add_argument('--debug', type=int, default=0,
            help='level of visualization: '
                 '1: only show the final detection results; '
                 '2: show the network output features; '
                 '3: use matplotlib to display (useful when launching '
                 'training from an ipython notebook); '
                 '4: save all visualizations to disk')
        self.parser.add_argument('--no_pause', action='store_true')
        self.parser.add_argument('--demo', default='',
            help='path to an image, image folder, or video, or "webcam"')
        self.parser.add_argument('--load_model', default='',
            help='path to pretrained model')
        self.parser.add_argument('--resume', action='store_true',
            help='resume an experiment: reloads the optimizer state and '
                 'sets load_model to model_last.pth in the exp dir '
                 'if load_model is empty.')
        # system
        self.parser.add_argument('--gpus', default='0',
            help='-1 for CPU, use comma for multiple gpus')
        self.parser.add_argument('--num_workers', type=int, default=4,
            help='dataloader threads. 0 for single-thread.')
        self.parser.add_argument('--not_cuda_benchmark', action='store_true',
            help='disable when the input size is not fixed.')
        self.parser.add_argument('--seed', type=int, default=317,
            help='random seed')  # from CornerNet
        self.parser.add_argument('--not_set_cuda_env', action='store_true',
            help='used when training on slurm clusters.')
        # log
        self.parser.add_argument('--print_iter', type=int, default=0,
            help='disable progress bar and print to screen.')
        self.parser.add_argument('--save_all', action='store_true',
            help='save model to disk every 5 epochs.')
        self.parser.add_argument('--vis_thresh', type=float, default=0.3,
            help='visualization threshold.')
        self.parser.add_argument('--debugger_theme', default='white',
            choices=['white', 'black'])
        self.parser.add_argument('--eval_val', action='store_true')
        self.parser.add_argument('--save_imgs', default='', help='')
        self.parser.add_argument('--save_img_suffix', default='', help='')
        self.parser.add_argument('--skip_first', type=int, default=-1, help='')
        self.parser.add_argument('--save_video', action='store_true')
        self.parser.add_argument('--save_framerate', type=int, default=30)
        self.parser.add_argument('--resize_video', action='store_true')
        self.parser.add_argument('--video_h', type=int, default=512, help='')
        self.parser.add_argument('--video_w', type=int, default=512, help='')
        self.parser.add_argument('--transpose_video', action='store_true')
        self.parser.add_argument('--show_track_color', action='store_true')
        self.parser.add_argument('--not_show_bbox', action='store_true')
        self.parser.add_argument('--not_show_number', action='store_true')
        self.parser.add_argument('--qualitative', action='store_true')
        self.parser.add_argument('--tango_color', action='store_true')
        # model
        self.parser.add_argument('--arch', default='dla_34',
            help='model architecture. Currently tested: '
                 'res_18 | res_101 | resdcn_18 | resdcn_101 | '
                 'dlav0_34 | dla_34 | hourglass')
        self.parser.add_argument('--dla_node', default='dcn')
        self.parser.add_argument('--head_conv', type=int, default=-1,
            help='conv layer channels for the output heads: '
                 '0 for no conv layer, '
                 '-1 for the default setting '
                 '(64 for resnets and 256 for dla).')
        self.parser.add_argument('--num_head_conv', type=int, default=1)
        self.parser.add_argument('--head_kernel', type=int, default=3, help='')
        self.parser.add_argument('--down_ratio', type=int, default=4,
            help='output stride. Currently only supports 4.')
        self.parser.add_argument('--not_idaup', action='store_true')
        self.parser.add_argument('--num_classes', type=int, default=-1)
        self.parser.add_argument('--num_layers', type=int, default=101)
        self.parser.add_argument('--backbone', default='dla34')
        self.parser.add_argument('--neck', default='dlaup')
        self.parser.add_argument('--msra_outchannel', type=int, default=256)
        self.parser.add_argument('--efficient_level', type=int, default=0)
        self.parser.add_argument('--prior_bias', type=float, default=-4.6)  # -2.19
        self.parser.add_argument('--embedding', action='store_true')
        self.parser.add_argument('--box_nms', type=float, default=-1)
        self.parser.add_argument('--inference', action='store_true')
        self.parser.add_argument('--clip_len', type=int, default=1,
            help='number of images used in TraDeS, '
                 'including the current image')
        self.parser.add_argument('--no_repeat', action='store_true', default=True)
        self.parser.add_argument('--seg', action='store_true', default=False)
        self.parser.add_argument('--seg_feat_channel', default=8, type=int, help='.')
        self.parser.add_argument('--deform_kernel_size', type=int, default=3)
        self.parser.add_argument('--trades', action='store_true',
            help='Track to Detect and Segment: '
                 'An Online Multi-Object Tracker (TraDeS)')
        # input
        self.parser.add_argument('--input_res', type=int, default=-1,
            help='input height and width. -1 for the dataset default. '
                 'Overridden by input_h | input_w.')
        self.parser.add_argument('--input_h', type=int, default=-1,
            help='input height. -1 for the dataset default.')
        self.parser.add_argument('--input_w', type=int, default=-1,
            help='input width. -1 for the dataset default.')
        self.parser.add_argument('--dataset_version', default='')
        # train
        self.parser.add_argument('--optim', default='adam')
        self.parser.add_argument('--lr', type=float, default=1.25e-4,
            help='learning rate for batch size 32.')
        self.parser.add_argument('--lr_step', type=str, default='60',
            help='epochs at which to drop the learning rate by a factor of 10.')
        self.parser.add_argument('--save_point', type=str, default='90',
            help='epochs at which to save the model to disk.')
        self.parser.add_argument('--num_epochs', type=int, default=70,
            help='total training epochs.')
        self.parser.add_argument('--batch_size', type=int, default=32,
            help='batch size')
        self.parser.add_argument('--master_batch_size', type=int, default=-1,
            help='batch size on the master gpu.')
        self.parser.add_argument('--num_iters', type=int, default=-1,
            help='default: #samples / batch_size.')
        self.parser.add_argument('--val_intervals', type=int, default=10000,
            help='number of epochs between validation runs.')
        self.parser.add_argument('--trainval', action='store_true',
            help='include validation data in training and '
                 'test on the test set')
        self.parser.add_argument('--ltrb', action='store_true', help='')
        self.parser.add_argument('--ltrb_weight', type=float, default=0.1, help='')
        self.parser.add_argument('--reset_hm', action='store_true')
        self.parser.add_argument('--reuse_hm', action='store_true')
        self.parser.add_argument('--use_kpt_center', action='store_true')
        self.parser.add_argument('--add_05', action='store_true')
        self.parser.add_argument('--dense_reg', type=int, default=1, help='')
        # test
        self.parser.add_argument('--flip_test', action='store_true',
            help='flip data augmentation at test time.')
        self.parser.add_argument('--test_scales', type=str, default='1',
            help='multi-scale test augmentation.')
        self.parser.add_argument('--nms', action='store_true',
            help='run nms in testing.')
        self.parser.add_argument('--K', type=int, default=100,
            help='max number of output objects.')
        self.parser.add_argument('--not_prefetch_test', action='store_true',
            help='do not use parallel data pre-processing.')
        self.parser.add_argument('--fix_short', type=int, default=-1)
        self.parser.add_argument('--keep_res', action='store_true',
            help='keep the original resolution during validation.')
        self.parser.add_argument('--map_argoverse_id', action='store_true',
            help='if trained on nuscenes and evaluated on kitti')
        self.parser.add_argument('--out_thresh', type=float, default=-1, help='')
        self.parser.add_argument('--depth_scale', type=float, default=1, help='')
        self.parser.add_argument('--save_results', action='store_true')
        self.parser.add_argument('--load_results', default='')
        self.parser.add_argument('--use_loaded_results', action='store_true')
        self.parser.add_argument('--ignore_loaded_cats', default='')
        self.parser.add_argument('--model_output_list', action='store_true',
            help='used when converting the model to ONNX')
        self.parser.add_argument('--non_block_test', action='store_true')
        self.parser.add_argument('--vis_gt_bev', default='', help='')
        self.parser.add_argument('--kitti_split', default='3dop',
            help='different validation split for kitti: 3dop | subcnn')
        self.parser.add_argument('--test_focal_length', type=int, default=-1)
        # dataset
        self.parser.add_argument('--not_rand_crop', action='store_true',
            help='do not use the random crop data augmentation '
                 'from CornerNet.')
        self.parser.add_argument('--not_max_crop', action='store_true',
            help='used when the training dataset has '
                 'imbalanced aspect ratios.')
        self.parser.add_argument('--shift', type=float, default=0,
            help='when not using random crop, '
                 'apply shift augmentation (e.g. 0.1).')
        self.parser.add_argument('--scale', type=float, default=0,
            help='when not using random crop, '
                 'apply scale augmentation (e.g. 0.4).')
        self.parser.add_argument('--aug_rot', type=float, default=0,
            help='probability of applying rotation augmentation.')
        self.parser.add_argument('--rotate', type=float, default=0,
            help='when not using random crop, '
                 'apply rotation augmentation.')
        self.parser.add_argument('--flip', type=float, default=0.5,
            help='probability of applying flip augmentation.')
        self.parser.add_argument('--no_color_aug', action='store_true',
            help='do not use the color augmentation from CornerNet')
        # Tracking
        self.parser.add_argument('--tracking', action='store_true')
        self.parser.add_argument('--pre_hm', action='store_true')
        self.parser.add_argument('--same_aug_pre', action='store_true')
        self.parser.add_argument('--zero_pre_hm', action='store_true')
        self.parser.add_argument('--hm_disturb', type=float, default=0)
        self.parser.add_argument('--lost_disturb', type=float, default=0)
        self.parser.add_argument('--fp_disturb', type=float, default=0)
        self.parser.add_argument('--pre_thresh', type=float, default=-1)
        self.parser.add_argument('--track_thresh', type=float, default=0.3)
        self.parser.add_argument('--match_thresh', type=float, default=0.8)
        self.parser.add_argument('--track_buffer', type=int, default=30)
        self.parser.add_argument('--new_thresh', type=float, default=0.0)
        self.parser.add_argument('--max_frame_dist', type=int, default=3)
        self.parser.add_argument('--ltrb_amodal', action='store_true')
        self.parser.add_argument('--ltrb_amodal_weight', type=float, default=0.1)
        self.parser.add_argument('--window_size', type=int, default=20)
        self.parser.add_argument('--public_det', action='store_true')
        self.parser.add_argument('--no_pre_img', action='store_true')
        self.parser.add_argument('--zero_tracking', action='store_true')
        self.parser.add_argument('--hungarian', action='store_true')
        self.parser.add_argument('--max_age', type=int, default=-1)
        # loss
        self.parser.add_argument('--tracking_weight', type=float, default=1)
        self.parser.add_argument('--reg_loss', default='l1',
            help='regression loss: sl1 | l1 | l2')
        self.parser.add_argument('--hm_weight', type=float, default=1,
            help='loss weight for keypoint heatmaps.')
        self.parser.add_argument('--off_weight', type=float, default=1,
            help='loss weight for keypoint local offsets.')
        self.parser.add_argument('--wh_weight', type=float, default=0.1,
            help='loss weight for bounding box size.')
        self.parser.add_argument('--hp_weight', type=float, default=1,
            help='loss weight for human pose offset.')
        self.parser.add_argument('--hm_hp_weight', type=float, default=1,
            help='loss weight for human keypoint heatmap.')
        self.parser.add_argument('--amodel_offset_weight', type=float, default=1,
            help='Please forgive the typo.')
        self.parser.add_argument('--dep_weight', type=float, default=1,
            help='loss weight for depth.')
        self.parser.add_argument('--dim_weight', type=float, default=1,
            help='loss weight for 3d bounding box size.')
        self.parser.add_argument('--rot_weight', type=float, default=1,
            help='loss weight for orientation.')
        self.parser.add_argument('--nuscenes_att', action='store_true')
        self.parser.add_argument('--nuscenes_att_weight', type=float, default=1)
        self.parser.add_argument('--velocity', action='store_true')
        self.parser.add_argument('--velocity_weight', type=float, default=1)
        self.parser.add_argument('--nID', type=int, default=-1)
        # custom dataset
        self.parser.add_argument('--custom_dataset_img_path', default='')
        self.parser.add_argument('--custom_dataset_ann_path', default='')

    def parse(self, args=''):
        if args == '':
            opt = self.parser.parse_args()
        else:
            opt = self.parser.parse_args(args)

        if opt.test_dataset == '':
            opt.test_dataset = opt.dataset

        opt.gpus_str = opt.gpus
        opt.gpus = [int(gpu) for gpu in opt.gpus.split(',')]
        opt.gpus = [i for i in range(len(opt.gpus))] if opt.gpus[0] >= 0 else [-1]
        opt.lr_step = [int(i) for i in opt.lr_step.split(',')]
        opt.save_point = [int(i) for i in opt.save_point.split(',')]
        opt.test_scales = [float(i) for i in opt.test_scales.split(',')]
        opt.save_imgs = [i for i in opt.save_imgs.split(',')] \
            if opt.save_imgs != '' else []
        opt.ignore_loaded_cats = \
            [int(i) for i in opt.ignore_loaded_cats.split(',')] \
            if opt.ignore_loaded_cats != '' else []

        opt.num_workers = max(opt.num_workers, 2 * len(opt.gpus))
        opt.pre_img = False
        if 'tracking' in opt.task:
            print('Running tracking')
            opt.tracking = True
            # opt.out_thresh = max(opt.track_thresh, opt.out_thresh)
            # opt.pre_thresh = max(opt.track_thresh, opt.pre_thresh)
            # opt.new_thresh = max(opt.track_thresh, opt.new_thresh)
            opt.pre_img = not opt.no_pre_img
            print('Using tracking threshold for out threshold!', opt.track_thresh)
            # if 'ddd' in opt.task:
            opt.show_track_color = True
            if opt.dataset in ['mot', 'mots', 'youtube_vis']:
                opt.overlap_thresh = 0.05
            elif opt.dataset == 'nuscenes':
                opt.window_size = 7
                opt.overlap_thresh = -1
            else:
                opt.overlap_thresh = 0.05

        opt.fix_res = not opt.keep_res
        print('Fix size testing.' if opt.fix_res else 'Keep resolution testing.')

        if opt.head_conv == -1:  # init default head_conv
            opt.head_conv = 256 if 'dla' in opt.arch else 64
        opt.pad = 127 if 'hourglass' in opt.arch else 31
        opt.num_stacks = 2 if opt.arch == 'hourglass' else 1

        if opt.master_batch_size == -1:
            opt.master_batch_size = opt.batch_size // len(opt.gpus)
        rest_batch_size = (opt.batch_size - opt.master_batch_size)
        opt.chunk_sizes = [opt.master_batch_size]
        for i in range(len(opt.gpus) - 1):
            slave_chunk_size = rest_batch_size // (len(opt.gpus) - 1)
            if i < rest_batch_size % (len(opt.gpus) - 1):
                slave_chunk_size += 1
            opt.chunk_sizes.append(slave_chunk_size)
        print('training chunk_sizes:', opt.chunk_sizes)

        if opt.debug > 0:
            opt.num_workers = 0
            opt.batch_size = 1
            opt.gpus = [opt.gpus[0]]
            opt.master_batch_size = -1

        # log dirs
        opt.root_dir = os.path.join(os.path.dirname(__file__), '..', '..')
        opt.data_dir = os.path.join(opt.root_dir, 'data')
        opt.exp_dir = os.path.join(opt.root_dir, 'exp', opt.task)
        opt.save_dir = os.path.join(opt.exp_dir, opt.exp_id)
        opt.debug_dir = os.path.join(opt.save_dir, 'debug')

        if opt.resume and opt.load_model == '':
            opt.load_model = os.path.join(opt.save_dir, 'model_last.pth')
        return opt
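
    # Example (illustrative, not part of the original file): parse() also
    # accepts an explicit argument list, so a tracking configuration could be
    # built programmatically as
    #   opt = opts().parse(['tracking', '--gpus', '0,1', '--lr_step', '30,45'])
    # after which opt.tracking is True, opt.gpus == [0, 1] and
    # opt.lr_step == [30, 45], following the conversions above.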

    def update_dataset_info_and_set_heads(self, opt, dataset):
        opt.num_classes = dataset.num_categories \
            if opt.num_classes < 0 else opt.num_classes

        # input_h(w): opt.input_h overrides opt.input_res overrides dataset default
        input_h, input_w = dataset.default_resolution
        input_h = opt.input_res if opt.input_res > 0 else input_h
        input_w = opt.input_res if opt.input_res > 0 else input_w
        opt.input_h = opt.input_h if opt.input_h > 0 else input_h
        opt.input_w = opt.input_w if opt.input_w > 0 else input_w
        opt.output_h = opt.input_h // opt.down_ratio
        opt.output_w = opt.input_w // opt.down_ratio
        opt.input_res = max(opt.input_h, opt.input_w)
        opt.output_res = max(opt.output_h, opt.output_w)

        opt.heads = {'hm': opt.num_classes, 'reg': 2, 'wh': 2}
        if not opt.trades:
            if 'tracking' in opt.task:
                opt.heads.update({'tracking': 2})
        if 'ddd' in opt.task:
            opt.heads.update({'dep': 1, 'rot': 8, 'dim': 3, 'amodel_offset': 2})
        if 'multi_pose' in opt.task:
            opt.heads.update({
                'hps': dataset.num_joints * 2, 'hm_hp': dataset.num_joints,
                'hp_offset': 2})
        if opt.ltrb:
            opt.heads.update({'ltrb': 4})
        if opt.ltrb_amodal:
            opt.heads.update({'ltrb_amodal': 4})
        if opt.nuscenes_att:
            opt.heads.update({'nuscenes_att': 8})
        if opt.velocity:
            opt.heads.update({'velocity': 3})
        if opt.embedding:
            opt.heads.update({'embedding': 128})
        if opt.seg:
            opt.heads.update({'conv_weight':
                2 * opt.seg_feat_channel ** 2 + 5 * opt.seg_feat_channel + 1})
            opt.heads.update({'seg_feat': opt.seg_feat_channel})

        weight_dict = {
            'hm': opt.hm_weight, 'wh': opt.wh_weight,
            'reg': opt.off_weight, 'hps': opt.hp_weight,
            'hm_hp': opt.hm_hp_weight, 'hp_offset': opt.off_weight,
            'dep': opt.dep_weight, 'rot': opt.rot_weight,
            'dim': opt.dim_weight,
            'amodel_offset': opt.amodel_offset_weight,
            'ltrb': opt.ltrb_weight,
            'tracking': opt.tracking_weight,
            'ltrb_amodal': opt.ltrb_amodal_weight,
            'nuscenes_att': opt.nuscenes_att_weight,
            'velocity': opt.velocity_weight,
            'embedding': 1.0,
            'conv_weight': 1.0,
            'seg_feat': 1.0}
        opt.weights = {head: weight_dict[head] for head in opt.heads}
        if opt.trades:
            opt.weights['cost_volume'] = 1.0
        if opt.seg:
            opt.weights['mask_loss'] = 1.0
        for head in opt.weights:
            if opt.weights[head] == 0:
                del opt.heads[head]
        opt.head_conv = {
            head: [opt.head_conv
                   for i in range(opt.num_head_conv if head != 'reg' else 1)]
            for head in opt.heads}

        print('input h w:', opt.input_h, opt.input_w)
        print('heads', opt.heads)
        print('weights', opt.weights)
        print('head conv', opt.head_conv)
        return opt
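
    # Example (illustrative, not from the original file): for a plain
    # 'tracking' task on a one-class dataset, with default loss weights and
    # head_conv resolved to 256 (dla_34), the prints above would report
    #   heads {'hm': 1, 'reg': 2, 'wh': 2, 'tracking': 2}
    #   weights {'hm': 1, 'reg': 1, 'wh': 0.1, 'tracking': 1}
    #   head conv {'hm': [256], 'reg': [256], 'wh': [256], 'tracking': [256]}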

    def init(self, args=''):
        # only used in demo
        default_dataset_info = {
            'ctdet': 'coco', 'multi_pose': 'coco_hp', 'ddd': 'nuscenes',
            'tracking,ctdet': 'coco', 'tracking,multi_pose': 'coco_hp',
            'tracking,ddd': 'nuscenes'
        }
        opt = self.parse()
        from dataset.dataset_factory import dataset_factory
        train_dataset = default_dataset_info[opt.task] \
            if opt.task in default_dataset_info else 'coco'
        if opt.dataset != 'coco':
            dataset = dataset_factory[opt.dataset]
        else:
            dataset = dataset_factory[train_dataset]
        opt = self.update_dataset_info_and_set_heads(opt, dataset)
        return opt
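
The snippet below is a minimal usage sketch, not part of opts.py. It assumes the repository layout implied by the file itself (opts.py importable as `opts`, with `dataset/dataset_factory.py` next to it) and uses a hypothetical `Dataset` placeholder for the training-style path.

# Minimal usage sketch (illustrative; assumes this repo's layout).
from opts import opts

# Demo-style initialization: parse the CLI (the positional `task` argument is
# required), pick a default dataset for the task, and populate opt.heads /
# opt.weights via update_dataset_info_and_set_heads().
opt = opts().init()

# Training-style initialization typically parses first and then updates the
# options with the concrete dataset class (here a hypothetical `Dataset`):
# opt = opts().parse()
# opt = opts().update_dataset_info_and_set_heads(opt, Dataset)

print(opt.save_dir)   # .../exp/<task>/<exp_id>
print(opt.heads)      # e.g. {'hm': ..., 'reg': 2, 'wh': 2, ...}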