# optim_factory.py
from torch import optim
import torch.nn as nn

def add_weight_decay_and_lr(model: nn.Module, weight_decay=1e-5, text_lr=5e-4, skip_list=()):
    """Split parameters into visual/text groups and assign `text_lr` to the
    text branch; biases, 1-D parameters (e.g. norm scales), and names in
    `skip_list` get no weight decay."""
    visual_decay = []
    visual_nodecay = []
    text_decay = []
    text_nodecay = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        no_decay = len(param.shape) == 1 or name.endswith(".bias") or name in skip_list
        visual = 'visual' in name  # any parameter of the visual tower
        if visual and no_decay:
            print('visual key:', name)
            visual_nodecay.append(param)
        elif visual:
            print('visual key:', name)
            visual_decay.append(param)
        elif no_decay:
            print('text key:', name)
            text_nodecay.append(param)
        else:
            print('text key:', name)
            text_decay.append(param)
    return [
        {'params': visual_decay, 'weight_decay': weight_decay},
        {'params': visual_nodecay, 'weight_decay': 0.},
        {'params': text_decay, 'weight_decay': weight_decay, 'lr': text_lr},
        {'params': text_nodecay, 'weight_decay': 0., 'lr': text_lr}]
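
# Usage sketch (assumes a CLIP-style model whose visual-tower parameter names
# contain "visual"; the learning-rate values are illustrative, not from the
# original source):
#
#   param_groups = add_weight_decay_and_lr(clip_model, weight_decay=0.05, text_lr=5e-5)
#   optimizer = optim.AdamW(param_groups, lr=5e-4)
#
# The optimizer-level lr applies to the visual groups; the text groups carry
# their own 'lr' key and override it.
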
def add_weight_decay(model, weight_decay=1e-5, skip_list=()):
    """Standard two-group split: no weight decay for biases, 1-D parameters
    (e.g. norm scales), or names in `skip_list`."""
    decay = []
    no_decay = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
            no_decay.append(param)
        else:
            decay.append(param)
    return [
        {'params': no_decay, 'weight_decay': 0.},
        {'params': decay, 'weight_decay': weight_decay}]
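
# Usage sketch (works for any nn.Module; values are illustrative):
#
#   param_groups = add_weight_decay(model, weight_decay=0.05)
#   optimizer = optim.AdamW(param_groups, lr=1e-3)
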
def add_weight_decay_fusion(model, weight_decay=1e-5, skip_list=(), fusion_lr=5e-4):
    """Like add_weight_decay, but parameters whose names contain
    "fusion_model" form separate groups trained at `fusion_lr`."""
    decay = []
    no_decay = []
    fusion_decay = []
    fusion_nodecay = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        no_decay_bool = len(param.shape) == 1 or name.endswith(".bias") or name in skip_list
        fusion = 'fusion_model' in name  # any parameter of the fusion head
        if fusion and no_decay_bool:
            print('fusion key:', name)
            fusion_nodecay.append(param)
        elif fusion:
            print('fusion key:', name)
            fusion_decay.append(param)
        elif no_decay_bool:
            no_decay.append(param)
        else:
            decay.append(param)
    return [
        {'params': no_decay, 'weight_decay': 0.},
        {'params': decay, 'weight_decay': weight_decay},
        {'params': fusion_decay, 'weight_decay': weight_decay, 'lr': fusion_lr},
        {'params': fusion_nodecay, 'weight_decay': 0., 'lr': fusion_lr}]
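
# Usage sketch (assumes the fusion head lives under a "fusion_model" attribute
# so its parameter names contain that substring; values are illustrative):
#
#   param_groups = add_weight_decay_fusion(model, weight_decay=0.05, fusion_lr=1e-4)
#   optimizer = optim.AdamW(param_groups, lr=5e-4)
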
def add_weight_decay_and_lr_fusion(model: nn.Module, weight_decay=1e-5, text_lr=5e-4, fusion_lr=5e-4, skip_list=()):
    """Three-way split: visual parameters at the optimizer's base lr,
    "fusion_model" parameters at `fusion_lr`, and everything else (text)
    at `text_lr`, each with and without weight decay."""
    visual_decay = []
    visual_nodecay = []
    text_decay = []
    text_nodecay = []
    fusion_decay = []
    fusion_nodecay = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        no_decay = len(param.shape) == 1 or name.endswith(".bias") or name in skip_list
        visual = 'visual' in name
        fusion = 'fusion_model' in name
        if visual and no_decay:
            print('visual key:', name)
            visual_nodecay.append(param)
        elif visual:
            print('visual key:', name)
            visual_decay.append(param)
        elif fusion and no_decay:
            print('fusion key:', name)
            fusion_nodecay.append(param)
        elif fusion:
            print('fusion key:', name)
            fusion_decay.append(param)
        elif no_decay:
            print('text key:', name)
            text_nodecay.append(param)
        else:
            print('text key:', name)
            text_decay.append(param)
    return [
        {'params': visual_decay, 'weight_decay': weight_decay},
        {'params': visual_nodecay, 'weight_decay': 0.},
        {'params': text_decay, 'weight_decay': weight_decay, 'lr': text_lr},
        {'params': text_nodecay, 'weight_decay': 0., 'lr': text_lr},
        {'params': fusion_decay, 'weight_decay': weight_decay, 'lr': fusion_lr},
        {'params': fusion_nodecay, 'weight_decay': 0., 'lr': fusion_lr}]
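
# Usage sketch (three learning rates at once; illustrative values, and the
# runnable smoke test at the bottom of this file drives the same path through
# create_optimizer):
#
#   param_groups = add_weight_decay_and_lr_fusion(model, weight_decay=0.05,
#                                                 text_lr=5e-5, fusion_lr=1e-4)
#   optimizer = optim.AdamW(param_groups, lr=5e-4)  # base lr -> visual groups
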
def create_optimizer(args, model, filter_bias_and_bn=True):
    """Build an optimizer from `args`, routing parameters through the
    grouping helpers above when weight decay and bias/BN filtering apply."""
    opt_lower = args.opt.lower()
    weight_decay = args.weight_decay
    if args.consider_fusion_module and weight_decay and filter_bias_and_bn:
        skip = {}
        assert hasattr(args, "fusion_lr") and args.fusion_lr > 0
        if hasattr(model, 'no_weight_decay'):
            skip = model.no_weight_decay()
        if hasattr(args, "text_lr") and args.text_lr > 0:
            print("lr: {:.2g} text_lr: {:.2g}".format(args.lr, args.text_lr))
            parameters = add_weight_decay_and_lr_fusion(model, weight_decay, text_lr=args.text_lr, skip_list=skip, fusion_lr=args.fusion_lr)
        else:
            parameters = add_weight_decay_fusion(model, weight_decay, skip, fusion_lr=args.fusion_lr)
        weight_decay = 0.  # decay is already set per group; disable the optimizer-level default
    elif weight_decay and filter_bias_and_bn:
        skip = {}
        if hasattr(model, 'no_weight_decay'):
            skip = model.no_weight_decay()
        if hasattr(args, "text_lr") and args.text_lr > 0:
            print("lr: {:.2g} text_lr: {:.2g}".format(args.lr, args.text_lr))
            parameters = add_weight_decay_and_lr(model, weight_decay, text_lr=args.text_lr, skip_list=skip)
        else:
            parameters = add_weight_decay(model, weight_decay, skip)
        weight_decay = 0.  # decay is already set per group; disable the optimizer-level default
    else:
        parameters = model.parameters()
    opt_args = dict(lr=args.lr, weight_decay=weight_decay)
    if hasattr(args, 'opt_eps') and args.opt_eps is not None:
        opt_args['eps'] = args.opt_eps
    if hasattr(args, 'opt_betas') and args.opt_betas is not None:
        opt_args['betas'] = args.opt_betas
    # Accept prefixed names (e.g. "foo_sgd"); only the last token selects the optimizer.
    opt_split = opt_lower.split('_')
    opt_lower = opt_split[-1]
    if opt_lower == 'sgd' or opt_lower == 'nesterov':
        opt_args.pop('eps', None)  # SGD takes no eps argument
        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'momentum':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
    elif opt_lower == 'adam':
        optimizer = optim.Adam(parameters, **opt_args)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters, **opt_args)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(parameters, **opt_args)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
    else:
        raise ValueError("Invalid optimizer: {}".format(args.opt))
    return optimizer
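
# Minimal smoke test: a toy stand-in model whose attribute names follow the
# "visual"/"fusion_model" naming convention the grouping functions key on.
# The model and all hyperparameter values below are illustrative assumptions,
# not part of the original source.
if __name__ == "__main__":
    from types import SimpleNamespace

    class ToyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.visual_proj = nn.Linear(8, 8)    # name contains "visual"
            self.fusion_model = nn.Linear(8, 8)   # name contains "fusion_model"
            self.text_proj = nn.Linear(8, 8)      # falls through to the text groups

    args = SimpleNamespace(opt='adamw', lr=5e-4, weight_decay=0.05,
                           momentum=0.9, consider_fusion_module=True,
                           fusion_lr=1e-4, text_lr=5e-5,
                           opt_eps=None, opt_betas=None)
    optimizer = create_optimizer(args, ToyModel())
    # Expect six groups: visual (base lr), text (text_lr), fusion (fusion_lr),
    # each split into decay / no-decay.
    for i, group in enumerate(optimizer.param_groups):
        print(i, 'lr:', group['lr'], 'weight_decay:', group['weight_decay'])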