# ------------------------------------------------------------------------
# SiameseIM
# Copyright (c) SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from MAE (https://github.com/facebookresearch/mae)
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
# ------------------------------------------------------------------------
# References:
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# DeiT: https://github.com/facebookresearch/deit
# ------------------------------------------------------------------------
import math
import os
import sys
from typing import Iterable
from pathlib import Path

import torch

import util.misc as misc
import util.lr_sched as lr_sched


def train_one_epoch(model: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler,
                    log_writer=None,
                    args=None):
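    """Run one epoch of SiameseIM pretraining.

    Each iteration adjusts the learning rate on a per-iteration schedule,
    computes the momentum coefficient ``mm`` (constant or cosine, depending
    on ``args.mmschedule``), runs the forward pass under AMP autocast unless
    ``args.fp32`` is set, and accumulates gradients over ``args.accum_iter``
    iterations before the optimizer step taken inside ``loss_scaler``.
    Returns the globally averaged meters for the epoch.
    """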
    model.train(True)
    metric_logger = misc.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 50

    accum_iter = args.accum_iter

    optimizer.zero_grad()

    if log_writer is not None:
        print('log_dir: {}'.format(log_writer.log_dir))
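
    # Iterate over the pretraining loader. Each batch yields (samples, labels),
    # plus an additional blockwise mask when `args.with_blockwise_mask` is set;
    # gradients are accumulated over `accum_iter` micro-batches.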
    for data_iter_step, data in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        if args.with_blockwise_mask:
            samples, labels, mask = data
        else:
            samples, labels = data
            mask = None

        # we use a per iteration (instead of per epoch) lr scheduler
        if data_iter_step % accum_iter == 0:
            lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)

        if args.mmschedule == 'const':
            mm = args.mm
        elif args.mmschedule == 'cosine':
            mm = 1. - 0.5 * (1. + math.cos(math.pi * (data_iter_step / len(data_loader) + epoch) / args.epochs)) * (1. - args.mm)
        metric_logger.update(mm=mm)
        update_mm = (data_iter_step % accum_iter == 0)
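
        # For the SiameseIM ('sim') loss, `samples` packs two augmented views
        # x1/x2 of each image together with the relative crop offsets
        # (delta_i, delta_j), crop extents (delta_h, delta_w) and flip
        # information describing how the second view relates to the first.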
        if args.loss_type in ['sim',]:
            x1, x2, delta_i, delta_j, delta_h, delta_w, relative_flip, flip_delta_j = samples
            x1 = x1.to(device, non_blocking=True)
            x2 = x2.to(device, non_blocking=True)
            delta_i = delta_i.to(x1)
            delta_j = delta_j.to(x1)
            delta_h = delta_h.to(x1)
            delta_w = delta_w.to(x1)
            flip_delta_j = flip_delta_j.to(x1)
            rel_pos_21 = (delta_i, delta_j, delta_h, delta_w, relative_flip, flip_delta_j)
            with torch.cuda.amp.autocast(enabled=(not args.fp32)):
                loss, outputs = model(x1, x2, rel_pos_21, mm, update_mm, mask=mask)
            metric_logger.update(**outputs)
        else:
            samples = samples.to(device, non_blocking=True)
            with torch.cuda.amp.autocast(enabled=(not args.fp32)):
                loss, _, _ = model(samples, mask_ratio=args.mask_ratio)
            outputs = {}  # no auxiliary outputs in this branch; keeps the reduction below valid
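
        # Sanity-check the loss, then backpropagate. The loss is divided by
        # `accum_iter` so that gradients summed over the accumulation window
        # match a single large-batch update; `loss_scaler` handles loss scaling
        # (when AMP is enabled), optional gradient clipping, and only performs
        # the optimizer step on the last micro-batch of the window.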
        loss_value = loss.item()

        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            sys.exit(1)

        loss /= accum_iter
        grad_norm = loss_scaler(loss, optimizer, parameters=model.parameters(),
                                update_grad=(data_iter_step + 1) % accum_iter == 0, clip_grad=args.clip_grad)
        if args.fp32:
            loss_scale = None
        else:
            loss_scale = loss_scaler.state_dict()['scale']
        metric_logger.update(grad_norm=grad_norm)
        metric_logger.update(loss_scale=loss_scale)
        if (data_iter_step + 1) % accum_iter == 0:
            optimizer.zero_grad()

        torch.cuda.synchronize()
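
        # Logging: meters are updated every iteration; the loss and auxiliary
        # outputs are averaged across processes, and TensorBoard scalars are
        # written once per effective (accumulated) step.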
        metric_logger.update(loss=loss_value)

        lr = optimizer.param_groups[0]["lr"]
        metric_logger.update(lr=lr)

        loss_value_reduce = misc.all_reduce_mean(loss_value)
        outputs_reduced = {k_: misc.all_reduce_mean(v_) for k_, v_ in outputs.items()}
        if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
            """ We use epoch_1000x as the x-axis in tensorboard.
            This calibrates different curves when batch size changes.
            """
            epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
            log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)
            log_writer.add_scalar('lr', lr, epoch_1000x)
            log_writer.add_scalar('grad_norm', grad_norm, epoch_1000x)
            if loss_scale is not None:
                log_writer.add_scalar('loss_scale', loss_scale, epoch_1000x)
            log_writer.add_scalar('mm', mm, epoch_1000x)
            for k_, v_ in outputs_reduced.items():
                log_writer.add_scalar(f'train/{k_}', v_, epoch_1000x)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}