Meta Byte Track
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

fast_coco_eval_api.py 5.6KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150
  1. #!/usr/bin/env python3
  2. # -*- coding:utf-8 -*-
  3. # This file comes from
  4. # https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/fast_eval_api.py
  5. # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
  6. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  7. import numpy as np
  8. from pycocotools.cocoeval import COCOeval
  9. ##TODO: check this
  10. # import torch first to make yolox._C work without ImportError of libc10.so
  11. # in YOLOX, env is already set in __init__.py.
  12. from yolox import _C
  13. import copy
  14. import time
  15. class COCOeval_opt(COCOeval):
  16. """
  17. This is a slightly modified version of the original COCO API, where the functions evaluateImg()
  18. and accumulate() are implemented in C++ to speedup evaluation
  19. """
  20. def evaluate(self):
  21. """
  22. Run per image evaluation on given images and store results in self.evalImgs_cpp, a
  23. datastructure that isn't readable from Python but is used by a c++ implementation of
  24. accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure
  25. self.evalImgs because this datastructure is a computational bottleneck.
  26. :return: None
  27. """
  28. tic = time.time()
  29. print("Running per image evaluation...")
  30. p = self.params
  31. # add backward compatibility if useSegm is specified in params
  32. if p.useSegm is not None:
  33. p.iouType = "segm" if p.useSegm == 1 else "bbox"
  34. print(
  35. "useSegm (deprecated) is not None. Running {} evaluation".format(
  36. p.iouType
  37. )
  38. )
  39. print("Evaluate annotation type *{}*".format(p.iouType))
  40. p.imgIds = list(np.unique(p.imgIds))
  41. if p.useCats:
  42. p.catIds = list(np.unique(p.catIds))
  43. p.maxDets = sorted(p.maxDets)
  44. self.params = p
  45. self._prepare()
  46. # loop through images, area range, max detection number
  47. catIds = p.catIds if p.useCats else [-1]
  48. if p.iouType == "segm" or p.iouType == "bbox":
  49. computeIoU = self.computeIoU
  50. elif p.iouType == "keypoints":
  51. computeIoU = self.computeOks
  52. self.ious = {
  53. (imgId, catId): computeIoU(imgId, catId)
  54. for imgId in p.imgIds
  55. for catId in catIds
  56. }
  57. maxDet = p.maxDets[-1]
  58. # <<<< Beginning of code differences with original COCO API
  59. def convert_instances_to_cpp(instances, is_det=False):
  60. # Convert annotations for a list of instances in an image to a format that's fast
  61. # to access in C++
  62. instances_cpp = []
  63. for instance in instances:
  64. instance_cpp = _C.InstanceAnnotation(
  65. int(instance["id"]),
  66. instance["score"] if is_det else instance.get("score", 0.0),
  67. instance["area"],
  68. bool(instance.get("iscrowd", 0)),
  69. bool(instance.get("ignore", 0)),
  70. )
  71. instances_cpp.append(instance_cpp)
  72. return instances_cpp
  73. # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++
  74. ground_truth_instances = [
  75. [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds]
  76. for imgId in p.imgIds
  77. ]
  78. detected_instances = [
  79. [
  80. convert_instances_to_cpp(self._dts[imgId, catId], is_det=True)
  81. for catId in p.catIds
  82. ]
  83. for imgId in p.imgIds
  84. ]
  85. ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds]
  86. if not p.useCats:
  87. # For each image, flatten per-category lists into a single list
  88. ground_truth_instances = [
  89. [[o for c in i for o in c]] for i in ground_truth_instances
  90. ]
  91. detected_instances = [
  92. [[o for c in i for o in c]] for i in detected_instances
  93. ]
  94. # Call C++ implementation of self.evaluateImgs()
  95. self._evalImgs_cpp = _C.COCOevalEvaluateImages(
  96. p.areaRng,
  97. maxDet,
  98. p.iouThrs,
  99. ious,
  100. ground_truth_instances,
  101. detected_instances,
  102. )
  103. self._evalImgs = None
  104. self._paramsEval = copy.deepcopy(self.params)
  105. toc = time.time()
  106. print("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic))
  107. # >>>> End of code differences with original COCO API
  108. def accumulate(self):
  109. """
  110. Accumulate per image evaluation results and store the result in self.eval. Does not
  111. support changing parameter settings from those used by self.evaluate()
  112. """
  113. print("Accumulating evaluation results...")
  114. tic = time.time()
  115. if not hasattr(self, "_evalImgs_cpp"):
  116. print("Please run evaluate() first")
  117. self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)
  118. # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections
  119. self.eval["recall"] = np.array(self.eval["recall"]).reshape(
  120. self.eval["counts"][:1] + self.eval["counts"][2:]
  121. )
  122. # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X
  123. # num_area_ranges X num_max_detections
  124. self.eval["precision"] = np.array(self.eval["precision"]).reshape(
  125. self.eval["counts"]
  126. )
  127. self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"])
  128. toc = time.time()
  129. print(
  130. "COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic)
  131. )