import torch.optim as optim

# Use different learning rates for different layers:
# the pretrained BERT encoder gets a small lr, the downstream head a larger one.
model = MyModel(**args)
param_optimizer = list(model.named_parameters())

# pretrained (BERT) model parameters
param_pre = [(n, p) for n, p in param_optimizer if 'bert' in n]
# downstream (task-specific) model parameters
param_downstream = [(n, p) for n, p in param_optimizer if 'bert' not in n]

# parameters that should not receive weight decay
no_decay = ['bias', 'LayerNorm', 'layer_norm']

optimizer_grouped_parameters = [
    # pretrained parameters, with weight decay
    {'params': [p for n, p in param_pre if not any(nd in n for nd in no_decay)],
     'weight_decay': self.config.weight_decay_rate, 'lr': self.config.bert_lr},
    # pretrained parameters, without weight decay
    {'params': [p for n, p in param_pre if any(nd in n for nd in no_decay)],
     'weight_decay': 0.0, 'lr': self.config.bert_lr},
    # downstream parameters, with weight decay
    {'params': [p for n, p in param_downstream if not any(nd in n for nd in no_decay)],
     'weight_decay': self.config.weight_decay_rate, 'lr': self.config.downstream_lr},
    # downstream parameters, without weight decay
    {'params': [p for n, p in param_downstream if any(nd in n for nd in no_decay)],
     'weight_decay': 0.0, 'lr': self.config.downstream_lr},
]

# Define the optimizer; each group's 'lr' overrides the default bert_lr passed below.
# optimizer = optim.Adam(filter(lambda p: p.requires_grad, ori_model.parameters()), lr=self.config.learning_rate)  # alternative: train with part of the network's parameters frozen
optimizer = optim.Adam(optimizer_grouped_parameters, lr=self.config.bert_lr, betas=(0.9, 0.99))
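
# A minimal sketch of how this grouped optimizer would be stepped in a training loop.
# `train_loader` and `loss_fn` are hypothetical placeholders not defined above;
# each parameter group keeps its own lr (bert_lr vs. downstream_lr) and weight decay.
model.train()
for batch in train_loader:
    optimizer.zero_grad()                    # clear gradients from the previous step
    logits = model(**batch)                  # forward pass
    loss = loss_fn(logits, batch['labels'])  # compute the task loss
    loss.backward()                          # backprop through BERT and the downstream head
    optimizer.step()                         # update; each group uses its own lr / weight decay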