Meta Byte Track

mot_evaluator.py 25KB

import contextlib
import io
import itertools
import json
import os
import tempfile
import time
from collections import defaultdict

import torch
from loguru import logger
from tqdm import tqdm

from yolox.utils import (
    gather,
    is_main_process,
    postprocess,
    synchronize,
    time_synchronized,
    xyxy2xywh
)
from yolox.tracker.byte_tracker import BYTETracker
from yolox.sort_tracker.sort import Sort
from yolox.deepsort_tracker.deepsort import DeepSort
from yolox.motdt_tracker.motdt_tracker import OnlineTracker
def write_results(filename, results):
    """Write tracking results (with confidence scores) in the MOTChallenge text format."""
    save_format = '{frame},{id},{x1},{y1},{w},{h},{s},-1,-1,-1\n'
    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids, scores in results:
            for tlwh, track_id, score in zip(tlwhs, track_ids, scores):
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                line = save_format.format(
                    frame=frame_id, id=track_id,
                    x1=round(x1, 1), y1=round(y1, 1),
                    w=round(w, 1), h=round(h, 1), s=round(score, 2))
                f.write(line)
    logger.info('save results to {}'.format(filename))
def write_results_no_score(filename, results):
    """Write tracking results (without scores) in the MOTChallenge text format."""
    save_format = '{frame},{id},{x1},{y1},{w},{h},-1,-1,-1,-1\n'
    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids in results:
            for tlwh, track_id in zip(tlwhs, track_ids):
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                line = save_format.format(
                    frame=frame_id, id=track_id,
                    x1=round(x1, 1), y1=round(y1, 1),
                    w=round(w, 1), h=round(h, 1))
                f.write(line)
    logger.info('save results to {}'.format(filename))
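
# Illustrative MOTChallenge lines (hypothetical values): for frame 1, track 2,
# tlwh = (10.0, 20.0, 30.0, 40.0) and score 0.9, write_results emits
#   1,2,10.0,20.0,30.0,40.0,0.9,-1,-1,-1
# and write_results_no_score emits
#   1,2,10.0,20.0,30.0,40.0,-1,-1,-1,-1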
class MOTEvaluator:
    """
    MOT evaluation class. Detections are converted to COCO format and
    evaluated by the COCO API, while tracking results are written out
    in the MOTChallenge text format.
    """

    def __init__(
        self, args, dataloader, img_size, confthre, nmsthre, num_classes):
        """
        Args:
            args: tracking arguments (thresholds, buffers, etc.).
            dataloader (Dataloader): evaluate dataloader.
            img_size (int): image size after preprocess. images are resized
                to squares whose shape is (img_size, img_size).
            confthre (float): confidence threshold ranging from 0 to 1, which
                is defined in the config file.
            nmsthre (float): IoU threshold of non-max suppression ranging from 0 to 1.
            num_classes (int): number of detection classes.
        """
        self.dataloader = dataloader
        self.img_size = img_size
        self.confthre = confthre
        self.nmsthre = nmsthre
        self.num_classes = num_classes
        self.args = args
    def evaluate(
        self,
        model,
        distributed=False,
        half=False,
        trt_file=None,
        decoder=None,
        test_size=None,
        result_folder=None
    ):
        """
        COCO average precision (AP) evaluation. Runs inference over the test
        dataset, evaluates the detections with the COCO API, and tracks with
        BYTETracker, writing one MOTChallenge result file per sequence.
        NOTE: This function switches the model to eval mode; save training
        states beforehand if needed.
        Args:
            model: model to evaluate.
        Returns:
            ap50_95 (float): COCO AP of IoU=50:95
            ap50 (float): COCO AP of IoU=50
            summary (str): summary info of evaluation.
        """
        # TODO half to amp_test
        tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor
        model = model.eval()
        if half:
            model = model.half()
        ids = []
        data_list = []
        results = []
        video_names = defaultdict()
        progress_bar = tqdm if is_main_process() else iter

        inference_time = 0
        track_time = 0
        n_samples = len(self.dataloader) - 1

        if trt_file is not None:
            from torch2trt import TRTModule

            model_trt = TRTModule()
            model_trt.load_state_dict(torch.load(trt_file))

            x = torch.ones(1, 3, test_size[0], test_size[1]).cuda()
            model(x)
            model = model_trt

        tracker = BYTETracker(self.args)
        ori_thresh = self.args.track_thresh
        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(
            progress_bar(self.dataloader)
        ):
            with torch.no_grad():
                # init tracker
                frame_id = info_imgs[2].item()
                video_id = info_imgs[3].item()
                img_file_name = info_imgs[4]
                video_name = img_file_name[0].split('/')[0]

                # per-sequence track buffer (MOT17 test sequences)
                if video_name == 'MOT17-05-FRCNN' or video_name == 'MOT17-06-FRCNN':
                    self.args.track_buffer = 14
                elif video_name == 'MOT17-13-FRCNN' or video_name == 'MOT17-14-FRCNN':
                    self.args.track_buffer = 25
                else:
                    self.args.track_buffer = 30

                # per-sequence track threshold (MOT17 / MOT20 test sequences)
                if video_name == 'MOT17-01-FRCNN':
                    self.args.track_thresh = 0.65
                elif video_name == 'MOT17-06-FRCNN':
                    self.args.track_thresh = 0.65
                elif video_name == 'MOT17-12-FRCNN':
                    self.args.track_thresh = 0.7
                elif video_name == 'MOT17-14-FRCNN':
                    self.args.track_thresh = 0.67
                elif video_name == 'MOT20-06' or video_name == 'MOT20-08':
                    self.args.track_thresh = 0.3
                else:
                    self.args.track_thresh = ori_thresh

                if video_name not in video_names:
                    video_names[video_id] = video_name

                # a new sequence starts: re-init the tracker and flush the
                # previous sequence's results
                if frame_id == 1:
                    tracker = BYTETracker(self.args)
                    if len(results) != 0:
                        result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id - 1]))
                        write_results(result_filename, results)
                        results = []

                imgs = imgs.type(tensor_type)

                # skip the last iters since batchsize might be not enough for batch inference
                is_time_record = cur_iter < len(self.dataloader) - 1
                if is_time_record:
                    start = time.time()

                outputs = model(imgs)
                if decoder is not None:
                    outputs = decoder(outputs, dtype=outputs.type())

                outputs = postprocess(outputs, self.num_classes, self.confthre, self.nmsthre)

                if is_time_record:
                    infer_end = time_synchronized()
                    inference_time += infer_end - start

            output_results = self.convert_to_coco_format(outputs, info_imgs, ids)
            data_list.extend(output_results)

            # run tracking
            if outputs[0] is not None:
                online_targets = tracker.update(outputs[0], info_imgs, self.img_size)
                online_tlwhs = []
                online_ids = []
                online_scores = []
                for t in online_targets:
                    tlwh = t.tlwh
                    tid = t.track_id
                    # drop tiny boxes and overly wide ("vertical") boxes
                    vertical = tlwh[2] / tlwh[3] > 1.6
                    if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical:
                        online_tlwhs.append(tlwh)
                        online_ids.append(tid)
                        online_scores.append(t.score)
                # save results
                results.append((frame_id, online_tlwhs, online_ids, online_scores))

            if is_time_record:
                track_end = time_synchronized()
                track_time += track_end - infer_end

            if cur_iter == len(self.dataloader) - 1:
                result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id]))
                write_results(result_filename, results)

        statistics = torch.cuda.FloatTensor([inference_time, track_time, n_samples])
        if distributed:
            data_list = gather(data_list, dst=0)
            data_list = list(itertools.chain(*data_list))
            torch.distributed.reduce(statistics, dst=0)

        eval_results = self.evaluate_prediction(data_list, statistics)
        synchronize()
        return eval_results
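
    # A minimal usage sketch (hypothetical names; assumes a YOLOX-style `exp`
    # object and an `args` namespace carrying track_thresh, track_buffer,
    # match_thresh, mot20 and min_box_area, as BYTETracker expects):
    #
    #   evaluator = MOTEvaluator(args, val_loader, exp.test_size,
    #                            exp.test_conf, exp.nmsthre, exp.num_classes)
    #   ap50_95, ap50, summary = evaluator.evaluate(model, result_folder="results")
    #   logger.info(summary)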
    def evaluate_sort(
        self,
        model,
        distributed=False,
        half=False,
        trt_file=None,
        decoder=None,
        test_size=None,
        result_folder=None
    ):
        """
        Same evaluation loop as `evaluate`, but tracking with SORT.
        NOTE: This function switches the model to eval mode; save training
        states beforehand if needed.
        Args:
            model: model to evaluate.
        Returns:
            ap50_95 (float): COCO AP of IoU=50:95
            ap50 (float): COCO AP of IoU=50
            summary (str): summary info of evaluation.
        """
        # TODO half to amp_test
        tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor
        model = model.eval()
        if half:
            model = model.half()
        ids = []
        data_list = []
        results = []
        video_names = defaultdict()
        progress_bar = tqdm if is_main_process() else iter

        inference_time = 0
        track_time = 0
        n_samples = len(self.dataloader) - 1

        if trt_file is not None:
            from torch2trt import TRTModule

            model_trt = TRTModule()
            model_trt.load_state_dict(torch.load(trt_file))

            x = torch.ones(1, 3, test_size[0], test_size[1]).cuda()
            model(x)
            model = model_trt

        tracker = Sort(self.args.track_thresh)
        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(
            progress_bar(self.dataloader)
        ):
            with torch.no_grad():
                # init tracker
                frame_id = info_imgs[2].item()
                video_id = info_imgs[3].item()
                img_file_name = info_imgs[4]
                video_name = img_file_name[0].split('/')[0]

                if video_name not in video_names:
                    video_names[video_id] = video_name

                if frame_id == 1:
                    tracker = Sort(self.args.track_thresh)
                    if len(results) != 0:
                        result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id - 1]))
                        write_results_no_score(result_filename, results)
                        results = []

                imgs = imgs.type(tensor_type)

                # skip the last iters since batchsize might be not enough for batch inference
                is_time_record = cur_iter < len(self.dataloader) - 1
                if is_time_record:
                    start = time.time()

                outputs = model(imgs)
                if decoder is not None:
                    outputs = decoder(outputs, dtype=outputs.type())
                outputs = postprocess(outputs, self.num_classes, self.confthre, self.nmsthre)

                if is_time_record:
                    infer_end = time_synchronized()
                    inference_time += infer_end - start

            output_results = self.convert_to_coco_format(outputs, info_imgs, ids)
            data_list.extend(output_results)

            # run tracking
            online_targets = tracker.update(outputs[0], info_imgs, self.img_size)
            online_tlwhs = []
            online_ids = []
            for t in online_targets:
                # SORT returns rows of (x1, y1, x2, y2, track_id); convert xyxy to tlwh
                tlwh = [t[0], t[1], t[2] - t[0], t[3] - t[1]]
                tid = t[4]
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
            # save results
            results.append((frame_id, online_tlwhs, online_ids))

            if is_time_record:
                track_end = time_synchronized()
                track_time += track_end - infer_end

            if cur_iter == len(self.dataloader) - 1:
                result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id]))
                write_results_no_score(result_filename, results)

        statistics = torch.cuda.FloatTensor([inference_time, track_time, n_samples])
        if distributed:
            data_list = gather(data_list, dst=0)
            data_list = list(itertools.chain(*data_list))
            torch.distributed.reduce(statistics, dst=0)

        eval_results = self.evaluate_prediction(data_list, statistics)
        synchronize()
        return eval_results
    def evaluate_deepsort(
        self,
        model,
        distributed=False,
        half=False,
        trt_file=None,
        decoder=None,
        test_size=None,
        result_folder=None,
        model_folder=None
    ):
        """
        Same evaluation loop as `evaluate`, but tracking with DeepSORT, whose
        re-identification model is loaded from `model_folder`.
        NOTE: This function switches the model to eval mode; save training
        states beforehand if needed.
        Args:
            model: model to evaluate.
        Returns:
            ap50_95 (float): COCO AP of IoU=50:95
            ap50 (float): COCO AP of IoU=50
            summary (str): summary info of evaluation.
        """
        # TODO half to amp_test
        tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor
        model = model.eval()
        if half:
            model = model.half()
        ids = []
        data_list = []
        results = []
        video_names = defaultdict()
        progress_bar = tqdm if is_main_process() else iter

        inference_time = 0
        track_time = 0
        n_samples = len(self.dataloader) - 1

        if trt_file is not None:
            from torch2trt import TRTModule

            model_trt = TRTModule()
            model_trt.load_state_dict(torch.load(trt_file))

            x = torch.ones(1, 3, test_size[0], test_size[1]).cuda()
            model(x)
            model = model_trt

        tracker = DeepSort(model_folder, min_confidence=self.args.track_thresh)
        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(
            progress_bar(self.dataloader)
        ):
            with torch.no_grad():
                # init tracker
                frame_id = info_imgs[2].item()
                video_id = info_imgs[3].item()
                img_file_name = info_imgs[4]
                video_name = img_file_name[0].split('/')[0]

                if video_name not in video_names:
                    video_names[video_id] = video_name

                if frame_id == 1:
                    tracker = DeepSort(model_folder, min_confidence=self.args.track_thresh)
                    if len(results) != 0:
                        result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id - 1]))
                        write_results_no_score(result_filename, results)
                        results = []

                imgs = imgs.type(tensor_type)

                # skip the last iters since batchsize might be not enough for batch inference
                is_time_record = cur_iter < len(self.dataloader) - 1
                if is_time_record:
                    start = time.time()

                outputs = model(imgs)
                if decoder is not None:
                    outputs = decoder(outputs, dtype=outputs.type())
                outputs = postprocess(outputs, self.num_classes, self.confthre, self.nmsthre)

                if is_time_record:
                    infer_end = time_synchronized()
                    inference_time += infer_end - start

            output_results = self.convert_to_coco_format(outputs, info_imgs, ids)
            data_list.extend(output_results)

            # run tracking
            online_targets = tracker.update(outputs[0], info_imgs, self.img_size, img_file_name[0])
            online_tlwhs = []
            online_ids = []
            for t in online_targets:
                # DeepSORT returns rows of (x1, y1, x2, y2, track_id); convert xyxy to tlwh
                tlwh = [t[0], t[1], t[2] - t[0], t[3] - t[1]]
                tid = t[4]
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
            # save results
            results.append((frame_id, online_tlwhs, online_ids))

            if is_time_record:
                track_end = time_synchronized()
                track_time += track_end - infer_end

            if cur_iter == len(self.dataloader) - 1:
                result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id]))
                write_results_no_score(result_filename, results)

        statistics = torch.cuda.FloatTensor([inference_time, track_time, n_samples])
        if distributed:
            data_list = gather(data_list, dst=0)
            data_list = list(itertools.chain(*data_list))
            torch.distributed.reduce(statistics, dst=0)

        eval_results = self.evaluate_prediction(data_list, statistics)
        synchronize()
        return eval_results
    def evaluate_motdt(
        self,
        model,
        distributed=False,
        half=False,
        trt_file=None,
        decoder=None,
        test_size=None,
        result_folder=None,
        model_folder=None
    ):
        """
        Same evaluation loop as `evaluate`, but tracking with MOTDT
        (OnlineTracker), whose re-identification model is loaded from
        `model_folder`.
        NOTE: This function switches the model to eval mode; save training
        states beforehand if needed.
        Args:
            model: model to evaluate.
        Returns:
            ap50_95 (float): COCO AP of IoU=50:95
            ap50 (float): COCO AP of IoU=50
            summary (str): summary info of evaluation.
        """
        # TODO half to amp_test
        tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor
        model = model.eval()
        if half:
            model = model.half()
        ids = []
        data_list = []
        results = []
        video_names = defaultdict()
        progress_bar = tqdm if is_main_process() else iter

        inference_time = 0
        track_time = 0
        n_samples = len(self.dataloader) - 1

        if trt_file is not None:
            from torch2trt import TRTModule

            model_trt = TRTModule()
            model_trt.load_state_dict(torch.load(trt_file))

            x = torch.ones(1, 3, test_size[0], test_size[1]).cuda()
            model(x)
            model = model_trt

        tracker = OnlineTracker(model_folder, min_cls_score=self.args.track_thresh)
        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(
            progress_bar(self.dataloader)
        ):
            with torch.no_grad():
                # init tracker
                frame_id = info_imgs[2].item()
                video_id = info_imgs[3].item()
                img_file_name = info_imgs[4]
                video_name = img_file_name[0].split('/')[0]

                if video_name not in video_names:
                    video_names[video_id] = video_name

                if frame_id == 1:
                    tracker = OnlineTracker(model_folder, min_cls_score=self.args.track_thresh)
                    if len(results) != 0:
                        result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id - 1]))
                        write_results(result_filename, results)
                        results = []

                imgs = imgs.type(tensor_type)

                # skip the last iters since batchsize might be not enough for batch inference
                is_time_record = cur_iter < len(self.dataloader) - 1
                if is_time_record:
                    start = time.time()

                outputs = model(imgs)
                if decoder is not None:
                    outputs = decoder(outputs, dtype=outputs.type())
                outputs = postprocess(outputs, self.num_classes, self.confthre, self.nmsthre)

                if is_time_record:
                    infer_end = time_synchronized()
                    inference_time += infer_end - start

            output_results = self.convert_to_coco_format(outputs, info_imgs, ids)
            data_list.extend(output_results)

            # run tracking
            online_targets = tracker.update(outputs[0], info_imgs, self.img_size, img_file_name[0])
            online_tlwhs = []
            online_ids = []
            online_scores = []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
                    online_scores.append(t.score)
            # save results
            results.append((frame_id, online_tlwhs, online_ids, online_scores))

            if is_time_record:
                track_end = time_synchronized()
                track_time += track_end - infer_end

            if cur_iter == len(self.dataloader) - 1:
                result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id]))
                write_results(result_filename, results)

        statistics = torch.cuda.FloatTensor([inference_time, track_time, n_samples])
        if distributed:
            data_list = gather(data_list, dst=0)
            data_list = list(itertools.chain(*data_list))
            torch.distributed.reduce(statistics, dst=0)

        eval_results = self.evaluate_prediction(data_list, statistics)
        synchronize()
        return eval_results
    def convert_to_coco_format(self, outputs, info_imgs, ids):
        data_list = []
        for (output, img_h, img_w, img_id) in zip(
            outputs, info_imgs[0], info_imgs[1], ids
        ):
            if output is None:
                continue
            output = output.cpu()

            bboxes = output[:, 0:4]

            # preprocessing: resize
            scale = min(
                self.img_size[0] / float(img_h), self.img_size[1] / float(img_w)
            )
            bboxes /= scale
            bboxes = xyxy2xywh(bboxes)

            cls = output[:, 6]
            scores = output[:, 4] * output[:, 5]
            for ind in range(bboxes.shape[0]):
                label = self.dataloader.dataset.class_ids[int(cls[ind])]
                pred_data = {
                    "image_id": int(img_id),
                    "category_id": label,
                    "bbox": bboxes[ind].numpy().tolist(),
                    "score": scores[ind].numpy().item(),
                    "segmentation": [],
                }  # COCO json format
                data_list.append(pred_data)
        return data_list
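
    # Row layout assumed from `postprocess`: each detection row is
    # (x1, y1, x2, y2, obj_conf, cls_conf, cls), so `score` above is
    # obj_conf * cls_conf. An illustrative entry (hypothetical numbers):
    #   {"image_id": 42, "category_id": 1, "bbox": [10.0, 20.0, 30.0, 40.0],
    #    "score": 0.87, "segmentation": []}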
    def evaluate_prediction(self, data_dict, statistics):
        if not is_main_process():
            return 0, 0, None

        logger.info("Evaluate in main process...")

        annType = ["segm", "bbox", "keypoints"]

        inference_time = statistics[0].item()
        track_time = statistics[1].item()
        n_samples = statistics[2].item()

        a_infer_time = 1000 * inference_time / (n_samples * self.dataloader.batch_size)
        a_track_time = 1000 * track_time / (n_samples * self.dataloader.batch_size)

        time_info = ", ".join(
            [
                "Average {} time: {:.2f} ms".format(k, v)
                for k, v in zip(
                    ["forward", "track", "inference"],
                    [a_infer_time, a_track_time, (a_infer_time + a_track_time)],
                )
            ]
        )

        info = time_info + "\n"

        # Evaluate the Dt (detection) json comparing with the ground truth
        if len(data_dict) > 0:
            cocoGt = self.dataloader.dataset.coco
            # TODO: since pycocotools can't process dict in py36, write data to json file.
            _, tmp = tempfile.mkstemp()
            with open(tmp, "w") as f:
                json.dump(data_dict, f)
            cocoDt = cocoGt.loadRes(tmp)
            # prefer the optimized COCOeval, fall back to the standard one
            try:
                from yolox.layers import COCOeval_opt as COCOeval
            except ImportError:
                from pycocotools.cocoeval import COCOeval
                logger.warning("Use standard COCOeval.")

            cocoEval = COCOeval(cocoGt, cocoDt, annType[1])
            cocoEval.evaluate()
            cocoEval.accumulate()
            redirect_string = io.StringIO()
            with contextlib.redirect_stdout(redirect_string):
                cocoEval.summarize()
            info += redirect_string.getvalue()
            return cocoEval.stats[0], cocoEval.stats[1], info
        else:
            return 0, 0, info
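
    # Worked example of the timing math above (hypothetical numbers): with
    # inference_time = 30 s accumulated over n_samples = 1500 timed iterations
    # at batch_size = 1, a_infer_time = 1000 * 30 / 1500 = 20 ms per image;
    # the reported "inference" time is the sum of the "forward" and "track"
    # averages.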