# Mahdi Abdollahpour, 27/11/2021, 02:42 PM, PyCharm, ByteTrack

import os
import random

import torch
import torch.distributed as dist
import torch.nn as nn

import learn2learn as l2l  # noqa: F401

from .base_meta_exp import BaseMetaExp


class MetaExp(BaseMetaExp):
    def __init__(self):
        super().__init__()

        # ---------------- model config ---------------- #
        self.num_classes = 80
        self.depth = 1.00
        self.width = 1.00

        # ---------------- dataloader config ---------------- #
        # set workers to 4 for shorter dataloader init time
        # TODO: deal with this multi-threading
        self.data_num_workers = 4
        self.input_size = (640, 640)
        self.random_size = (14, 26)
        self.train_anns = ["instances_train2017.json"]
        self.val_anns = ["instances_val2017.json"]

        # --------------- transform config ----------------- #
        self.degrees = 10.0
        self.translate = 0.1
        self.scale = (0.1, 2)
        self.mscale = (0.8, 1.6)
        self.shear = 2.0
        self.perspective = 0.0
        self.enable_mixup = True

        # -------------- training config --------------------- #
        self.warmup_epochs = 5
        self.max_epoch = 300
        self.warmup_lr = 0
        self.basic_lr_per_img = 0.01 / 64.0
        self.scheduler = "yoloxwarmcos"
        self.no_aug_epochs = 15
        self.min_lr_ratio = 0.05
        self.ema = True
        self.weight_decay = 5e-4
        self.momentum = 0.9
        self.print_interval = 10
        self.eval_interval = 10
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]

        # ----------------- testing config ------------------ #
        self.test_size = (640, 640)
        self.test_conf = 0.001
        self.nmsthre = 0.65

        # ----------------- meta-learning config ------------------ #
        self.first_order = True
        self.inner_lr = 1e-6
        # self.inner_lr = 1e-8

    def get_model(self):
        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead

        def init_yolo(M):
            for m in M.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eps = 1e-3
                    m.momentum = 0.03

        if getattr(self, "model", None) is None:
            in_channels = [256, 512, 1024]
            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
            head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels)
            self.model = YOLOX(backbone, head)

        self.model.apply(init_yolo)
        self.model.head.initialize_biases(1e-2)
        return self.model

    def get_data_loaders(self, batch_size, is_distributed, no_aug=False):
        from yolox.data import (
            COCODataset,
            DataLoader,
            InfiniteSampler,
            MosaicDetection,
            TrainTransform,
            YoloBatchSampler,
        )

        # Compute the per-GPU batch size once, before the loop. Dividing
        # inside the loop (as before) would shrink the batch size again
        # for every additional annotation file.
        if is_distributed:
            batch_size = batch_size // dist.get_world_size()

        train_loaders = []
        for train_ann in self.train_anns:
            dataset = COCODataset(
                data_dir=None,
                json_file=train_ann,
                img_size=self.input_size,
                preproc=TrainTransform(
                    rgb_means=(0.485, 0.456, 0.406),
                    std=(0.229, 0.224, 0.225),
                    max_labels=50,
                ),
            )

            dataset = MosaicDetection(
                dataset,
                mosaic=not no_aug,
                img_size=self.input_size,
                preproc=TrainTransform(
                    rgb_means=(0.485, 0.456, 0.406),
                    std=(0.229, 0.224, 0.225),
                    max_labels=120,
                ),
                degrees=self.degrees,
                translate=self.translate,
                scale=self.scale,
                shear=self.shear,
                perspective=self.perspective,
                enable_mixup=self.enable_mixup,
            )

            # NOTE: self.dataset ends up referencing the most recently built dataset.
            self.dataset = dataset

            sampler = InfiniteSampler(
                len(self.dataset), seed=self.seed if self.seed else 0
            )
            batch_sampler = YoloBatchSampler(
                sampler=sampler,
                batch_size=batch_size,
                drop_last=False,
                input_dimension=self.input_size,
                mosaic=not no_aug,
            )

            dataloader_kwargs = {
                "num_workers": self.data_num_workers,
                "pin_memory": True,
            }
            dataloader_kwargs["batch_sampler"] = batch_sampler
            train_loader = DataLoader(self.dataset, **dataloader_kwargs)
            train_loaders.append(train_loader)

        return train_loaders

    def random_resize(self, data_loader, epoch, rank, is_distributed):
        tensor = torch.LongTensor(2).cuda()

        if rank == 0:
            size_factor = self.input_size[1] * 1.0 / self.input_size[0]
            size = random.randint(*self.random_size)
            size = (int(32 * size), 32 * int(size * size_factor))
            tensor[0] = size[0]
            tensor[1] = size[1]

        if is_distributed:
            dist.barrier()
            dist.broadcast(tensor, 0)

        input_size = data_loader.change_input_dim(
            multiple=(tensor[0].item(), tensor[1].item()), random_range=None
        )
        return input_size
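    # Usage sketch for random_resize (an assumption mirroring the stock
    # YOLOX trainer, not anything shown in this file): rank 0 draws a new
    # multiple-of-32 input size and broadcasts it so every process resizes
    # identically, typically every 10 iterations, e.g.
    #
    #     if (self.progress_in_iter + 1) % 10 == 0:
    #         self.input_size = self.exp.random_resize(
    #             self.train_loader, self.epoch, self.rank, self.is_distributed
    #         )
    #
    # `progress_in_iter` and the 10-iteration cadence come from the stock
    # YOLOX trainer and are assumptions here.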
    def get_optimizer(self, batch_size):
        if "optimizer" not in self.__dict__:
            if self.warmup_epochs > 0:
                lr = self.warmup_lr
            else:
                lr = self.basic_lr_per_img * batch_size

            pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
            for k, v in self.model.named_modules():
                if hasattr(v, "bias") and isinstance(v.bias, nn.Parameter):
                    pg2.append(v.bias)  # biases
                if isinstance(v, nn.BatchNorm2d) or "bn" in k:
                    pg0.append(v.weight)  # no decay
                elif hasattr(v, "weight") and isinstance(v.weight, nn.Parameter):
                    pg1.append(v.weight)  # apply decay

            optimizer = torch.optim.SGD(
                pg0, lr=lr, momentum=self.momentum, nesterov=True
            )
            optimizer.add_param_group(
                {"params": pg1, "weight_decay": self.weight_decay}
            )  # add pg1 with weight_decay
            optimizer.add_param_group({"params": pg2})
            self.all_parameters = pg0 + pg1 + pg2
            self.optimizer = optimizer

        return self.optimizer

    def get_lr_scheduler(self, lr, iters_per_epoch):
        from yolox.utils import LRScheduler

        scheduler = LRScheduler(
            self.scheduler,
            lr,
            iters_per_epoch,
            self.max_epoch,
            warmup_epochs=self.warmup_epochs,
            warmup_lr_start=self.warmup_lr,
            no_aug_epochs=self.no_aug_epochs,
            min_lr_ratio=self.min_lr_ratio,
        )
        return scheduler

    def get_eval_loaders(self, batch_size, is_distributed, testdev=False):
        from yolox.data import COCODataset, ValTransform

        # Same fix as in get_data_loaders: divide the batch size once, not
        # once per annotation file.
        if is_distributed:
            batch_size = batch_size // dist.get_world_size()

        val_loaders = []
        for val_ann in self.val_anns:
            valdataset = COCODataset(
                data_dir=None,
                json_file=val_ann if not testdev else "image_info_test-dev2017.json",
                name="val2017" if not testdev else "test2017",
                img_size=self.test_size,
                preproc=ValTransform(
                    rgb_means=(0.485, 0.456, 0.406),
                    std=(0.229, 0.224, 0.225),
                ),
            )

            if is_distributed:
                sampler = torch.utils.data.distributed.DistributedSampler(
                    valdataset, shuffle=False
                )
            else:
                sampler = torch.utils.data.SequentialSampler(valdataset)

            dataloader_kwargs = {
                "num_workers": self.data_num_workers,
                "pin_memory": True,
                "sampler": sampler,
            }
            dataloader_kwargs["batch_size"] = batch_size
            val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)
            val_loaders.append(val_loader)

        return val_loaders

    def get_evaluators(self, batch_size, is_distributed, testdev=False):
        from yolox.evaluators import COCOEvaluator

        val_loaders = self.get_eval_loaders(batch_size, is_distributed, testdev=testdev)
        evaluators = []
        for val_loader in val_loaders:
            evaluator = COCOEvaluator(
                dataloader=val_loader,
                img_size=self.test_size,
                confthre=self.test_conf,
                nmsthre=self.nmsthre,
                num_classes=self.num_classes,
                testdev=testdev,
            )
            evaluators.append(evaluator)
        return evaluators

    def eval(self, model, evaluators, is_distributed, half=False):
        # Average AP50:95 and AP50 over all evaluators (one per val annotation file)
        # and concatenate their summary strings.
        ap50_95s = 0.0
        ap50s = 0.0
        summarys = ""
        for evaluator in evaluators:
            ap50_95, ap50, summary = evaluator.evaluate(model, is_distributed, half)
            ap50_95s += ap50_95
            ap50s += ap50
            summarys += "\n" + summary
        n = len(evaluators)
        return (ap50_95s / n), (ap50s / n), summarys
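

# ---------------------------------------------------------------------- #
# Usage sketch (not part of the original experiment). Assumptions: the
# COCO annotation files listed in train_anns are on disk, a CUDA device
# is available, and the actual meta-training loop lives in a separate
# trainer. This only shows how the pieces exposed above are meant to fit
# together with a learn2learn MAML wrapper around the detector, which is
# presumably why inner_lr and first_order are configured in __init__.
# ---------------------------------------------------------------------- #
if __name__ == "__main__":
    exp = MetaExp()
    model = exp.get_model().cuda()

    # One train loader per annotation file, i.e. one loader per meta-task.
    train_loaders = exp.get_data_loaders(batch_size=8, is_distributed=False)

    # Outer-loop optimizer over the raw parameters; the inner loop adapts
    # a MAML clone with exp.inner_lr.
    optimizer = exp.get_optimizer(batch_size=8)
    maml = l2l.algorithms.MAML(model, lr=exp.inner_lr, first_order=exp.first_order)
    learner = maml.clone()  # per-task fast weights for the inner loop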