
Problem is solved with adaptation on GT

master
Mahdi Abdollah Pour 2 years ago
commit 10a8270e28

experiments/track_on_20.sh (+2, -2)

@@ -1,4 +1,4 @@
-#PBS -N track_17_on_20_ada_12
+#PBS -N track_17_on_20_ada_4
#PBS -m abe
#PBS -M [email protected]
#PBS -l nodes=1:ppn=1:gpus=1
@@ -15,4 +15,4 @@ cd /home/abdollahpour.ce.sharif/ByteTrack



-python tools/track.py -t metamot -f exps/example/metamot/yolox_x_mot17_on_mot20.py -d 1 -b 1 -c /home/abdollahpour.ce.sharif/ByteTrack/meta_experiments/train_17_on_20_resume2/best_ckpt.pth.tar --local_rank 0 -expn track_17_on_20_ada_12 --mot20 --adaptation_period 12
+python tools/track.py -t metamot -f exps/example/metamot/yolox_x_mot17_on_mot20.py -d 1 -b 1 -c /home/abdollahpour.ce.sharif/ByteTrack/meta_experiments/train_17_on_20_resume2/best_ckpt.pth.tar --local_rank 0 -expn track_17_on_20_ada_4 --mot20 --adaptation_period 4 --fp16

experiments/track_on_20_with_gt.sh (+1, -1)

@@ -13,4 +13,4 @@ cd /home/abdollahpour.ce.sharif/ByteTrack



-python tools/track.py -t metamot -f exps/example/metamot/yolox_x_mot17_on_mot20.py -d 1 -b 1 -c /home/abdollahpour.ce.sharif/ByteTrack/meta_experiments/train_17_on_20_resume2/best_ckpt.pth.tar --local_rank 0 -expn track_17_on_20_ada_4_with_GT --mot20 --adaptation_period 4 --fp16 --use_existing_files
+python tools/track.py -t metamot -f exps/example/metamot/yolox_x_mot17_on_mot20.py -d 1 -b 1 -c /home/abdollahpour.ce.sharif/ByteTrack/meta_experiments/train_17_on_20_resume2/best_ckpt.pth.tar --local_rank 0 -expn track_17_on_20_ada_4_with_GT --mot20 --adaptation_period 4 --fp16

yolox/core/meta_trainer.py (+5, -5)

@@ -138,11 +138,11 @@ class MetaTrainer:

self.optimizer.zero_grad()

logger.info("loss Norm: {} , scale {}".format(torch.norm(loss), self.scaler.get_scale()))
loss = self.scaler.scale(loss)
logger.info("loss Norm: {} , scale {}".format(torch.norm(loss), self.scaler.get_scale()))
# self.scaler.scale(loss).backward()
loss.backward()
# logger.info("loss Norm: {} , scale {}".format(torch.norm(loss), self.scaler.get_scale()))
# loss = self.scaler.scale(loss)
# logger.info("loss Norm: {} , scale {}".format(torch.norm(loss), self.scaler.get_scale()))
self.scaler.scale(loss).backward()
# loss.backward()
self.scaler.step(self.optimizer)
self.scaler.update()
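For context, the rewritten block above follows the standard torch.cuda.amp recipe: scale the loss once with the GradScaler, call backward() on the scaled loss, then let step() unscale the gradients and update() adjust the scale factor. A minimal, self-contained sketch of that pattern follows; the toy model, data, and optimizer are illustrative and not taken from this repository.

# Sketch of the usual GradScaler training step (illustrative only).
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.nn.Linear(8, 1).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
scaler = torch.cuda.amp.GradScaler(enabled=(device == "cuda"))

x = torch.randn(4, 8, device=device)
y = torch.randn(4, 1, device=device)

optimizer.zero_grad()
with torch.cuda.amp.autocast(enabled=(device == "cuda")):
    loss = torch.nn.functional.mse_loss(model(x), y)  # forward in mixed precision
scaler.scale(loss).backward()  # scale the loss once, then backpropagate
scaler.step(optimizer)         # unscales gradients; skips the step on inf/NaN
scaler.update()                # adjusts the scale factor for the next iteration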


yolox/evaluators/mot_evaluator.py (+1, -1)

@@ -112,7 +112,7 @@ class MOTEvaluator:
else:
learner = model
# TODO half to amp_test
-self.scaler = torch.cuda.amp.GradScaler(enabled=half,init_scale=8192)
+self.scaler = torch.cuda.amp.GradScaler(enabled=half, init_scale=2730)

learner = learner.eval()
self.amp_training = False
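Note that init_scale only sets the GradScaler's starting scale; the scale is still halved on overflow and grown again after a run of successful steps. A small sketch, where the value 2730 is taken from the diff above and the enabled flag is gated on CUDA availability purely for illustration:

import torch

scaler = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available(), init_scale=2730)
# Starting scale when enabled; it halves on overflow and doubles after
# growth_interval consecutive successful steps.
print(scaler.get_scale())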

yolox/exp/meta_yolox_base.py (+1, -1)

@@ -63,7 +63,7 @@ class MetaExp(BaseMetaExp):
# ----------------- Meta-learning ------------------ #
self.first_order = True
self.inner_lr = 1e-6
-# self.inner_lr = 1e-10
+# self.inner_lr = 1e-8

def get_model(self):
from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead
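Here inner_lr is the step size of the meta-learning inner loop, and first_order = True above suggests a first-order (no second-derivative) update. As a rough illustration only, and not the MetaTrainer's actual code, a single first-order inner adaptation step with plain SGD might look like the hypothetical function below; the toy model and loss in the usage line are likewise made up.

import copy
import torch

def inner_adapt(model, loss_fn, batch, inner_lr=1e-6):
    # Clone the model so the original (meta) weights stay untouched.
    learner = copy.deepcopy(model)
    loss = loss_fn(learner, batch)
    # First-order update: plain gradients, no create_graph / higher-order terms.
    grads = torch.autograd.grad(loss, list(learner.parameters()))
    with torch.no_grad():
        for p, g in zip(learner.parameters(), grads):
            p -= inner_lr * g  # one SGD step at the inner learning rate
    return learner

# Toy usage with a hypothetical model and loss.
model = torch.nn.Linear(4, 1)
adapted = inner_adapt(model, lambda m, b: m(b[0]).pow(2).mean(), (torch.randn(2, 4),))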
