
problem is solved with adapt on GT

master
Mahdi Abdollah Pour, 2 years ago
commit 10a8270e28

experiments/track_on_20.sh (+2, -2)

- #PBS -N track_17_on_20_ada_12
+ #PBS -N track_17_on_20_ada_4
  #PBS -m abe
  #PBS -M [email protected]
  #PBS -l nodes=1:ppn=1:gpus=1

- python tools/track.py -t metamot -f exps/example/metamot/yolox_x_mot17_on_mot20.py -d 1 -b 1 -c /home/abdollahpour.ce.sharif/ByteTrack/meta_experiments/train_17_on_20_resume2/best_ckpt.pth.tar --local_rank 0 -expn track_17_on_20_ada_12 --mot20 --adaptation_period 12
+ python tools/track.py -t metamot -f exps/example/metamot/yolox_x_mot17_on_mot20.py -d 1 -b 1 -c /home/abdollahpour.ce.sharif/ByteTrack/meta_experiments/train_17_on_20_resume2/best_ckpt.pth.tar --local_rank 0 -expn track_17_on_20_ada_4 --mot20 --adaptation_period 4 --fp16
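Net effect of this hunk: the PBS job and experiment name change from ada_12 to ada_4, the adaptation period drops from 12 frames to 4 so test-time adaptation presumably runs three times as often, and --fp16 switches tracking to mixed precision. (This reads the flags by their names; the metamot internals are not part of this diff.)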

experiments/track_on_20_with_gt.sh (+1, -1)

- python tools/track.py -t metamot -f exps/example/metamot/yolox_x_mot17_on_mot20.py -d 1 -b 1 -c /home/abdollahpour.ce.sharif/ByteTrack/meta_experiments/train_17_on_20_resume2/best_ckpt.pth.tar --local_rank 0 -expn track_17_on_20_ada_4_with_GT --mot20 --adaptation_period 4 --fp16 --use_existing_files
+ python tools/track.py -t metamot -f exps/example/metamot/yolox_x_mot17_on_mot20.py -d 1 -b 1 -c /home/abdollahpour.ce.sharif/ByteTrack/meta_experiments/train_17_on_20_resume2/best_ckpt.pth.tar --local_rank 0 -expn track_17_on_20_ada_4_with_GT --mot20 --adaptation_period 4 --fp16
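The only change in the GT variant is dropping --use_existing_files, so this run presumably regenerates its detection and tracking outputs rather than reusing cached files; the rest of the command matches the non-GT script above.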

yolox/core/meta_trainer.py (+5, -5)

  self.optimizer.zero_grad()

- logger.info("loss Norm: {} , scale {}".format(torch.norm(loss), self.scaler.get_scale()))
- loss = self.scaler.scale(loss)
- logger.info("loss Norm: {} , scale {}".format(torch.norm(loss), self.scaler.get_scale()))
- # self.scaler.scale(loss).backward()
- loss.backward()
+ # logger.info("loss Norm: {} , scale {}".format(torch.norm(loss), self.scaler.get_scale()))
+ # loss = self.scaler.scale(loss)
+ # logger.info("loss Norm: {} , scale {}".format(torch.norm(loss), self.scaler.get_scale()))
+ self.scaler.scale(loss).backward()
+ # loss.backward()
  self.scaler.step(self.optimizer)
  self.scaler.update()
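This is the substantive fix: the previous code rebound the scaled loss (loss = self.scaler.scale(loss)) and then called plain loss.backward(), with debug logging of the scaled loss norm in between; the commit restores the standard scaler.scale(loss).backward() one-liner and comments the debug lines out. For reference, a minimal, self-contained sketch of the canonical torch.cuda.amp update step this returns to; the toy model and data are illustrative only, not metamot code:

import torch
import torch.nn as nn

# Sketch of the standard torch.cuda.amp training step.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = nn.Linear(8, 1).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
scaler = torch.cuda.amp.GradScaler(enabled=(device == "cuda"))

x = torch.randn(4, 8, device=device)
y = torch.randn(4, 1, device=device)

optimizer.zero_grad()
with torch.cuda.amp.autocast(enabled=(device == "cuda")):
    loss = nn.functional.mse_loss(model(x), y)
scaler.scale(loss).backward()  # backprop through the scaled loss
scaler.step(optimizer)         # unscales grads, skips the step on inf/NaN
scaler.update()                # grows or backs off the loss scale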



yolox/evaluators/mot_evaluator.py (+1, -1)

  else:
      learner = model
  # TODO half to amp_test
- self.scaler = torch.cuda.amp.GradScaler(enabled=half,init_scale=8192)
+ self.scaler = torch.cuda.amp.GradScaler(enabled=half, init_scale=2730)

  learner = learner.eval()
  self.amp_training = False
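The initial loss scale for test-time adaptation drops from 8192 to 2730, presumably to cut early fp16 overflows (each overflow makes GradScaler skip the optimizer step and back the scale off). For reference, a sketch of the GradScaler knobs relevant to this change; only init_scale is taken from the commit, the other values are PyTorch's documented defaults:

import torch

# GradScaler arguments relevant to this change.
scaler = torch.cuda.amp.GradScaler(
    enabled=True,
    init_scale=2730,       # this commit's value (was 8192)
    growth_factor=2.0,     # default: scale doubles after a clean growth_interval
    backoff_factor=0.5,    # default: scale halves when grads hit inf/NaN
    growth_interval=2000,  # default: successful steps between growth attempts
)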

yolox/exp/meta_yolox_base.py (+1, -1)

  # ----------------- Meta-learning ------------------ #
  self.first_order = True
  self.inner_lr = 1e-6
- # self.inner_lr = 1e-10
+ # self.inner_lr = 1e-8

  def get_model(self):
      from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead

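Only a commented-out alternative changes here (1e-10 becomes 1e-8); the active inner learning rate stays at 1e-6. For context, a hypothetical sketch of the first-order inner-loop step such an inner_lr usually drives in MAML-style code; all names below are illustrative, not the metamot implementation:

import torch
import torch.nn as nn

# Hypothetical first-order inner update: gradients are detached
# (no second-order graph), matching what first_order=True typically means.
def inner_update(model, loss, inner_lr=1e-6):
    grads = torch.autograd.grad(loss, list(model.parameters()))
    with torch.no_grad():
        for p, g in zip(model.parameters(), grads):
            p -= inner_lr * g  # plain SGD step with the inner learning rate

# Toy usage with a placeholder model and random data.
model = nn.Linear(8, 1)
loss = nn.functional.mse_loss(model(torch.randn(4, 8)), torch.randn(4, 1))
inner_update(model, loss)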