BERT Source Code Analysis PART III
Posted on AINLP
Affiliation: Shanghai Jiao Tong University
Research area: Natural Language Processing
Before we start
Let's pick up the pre-training part that was left unfinished. In the previous post we finished building the input data, so now we look at how BERT actually trains on its two tasks: Masked LM and Next Sentence Prediction.
√run_pretraining
Task #1: Masked LM
get_masked_lm_output
This function computes the training loss for Task #1. Its input is sequence_output, the final-layer output of BertModel with shape [batch_size, seq_length, hidden_size]: predicting the masked tokens of a sequence is a token-level labeling problem, so the output states of the whole sequence are needed.
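Before reading the code, it may help to trace the tensor shapes with some hypothetical values (batch_size=2, seq_length=128, hidden_size=768, max_predictions_per_seq=20 are assumptions for illustration only):

# Hypothetical shape trace through get_masked_lm_output:
#
# input_tensor (sequence_output): [2, 128, 768]
# positions (masked_lm_positions): [2, 20]  indices of the masked tokens
# label_ids (masked_lm_ids):       [2, 20]  original ids of the masked tokens
# label_weights:                   [2, 20]  1.0 for real masks, 0.0 for padding
#
# gather_indexes(input_tensor, positions)      -> [2 * 20, 768]
# logits = transformed @ embedding_table^T     -> [40, vocab_size]
# per_example_loss                             -> [40]
# loss = weighted mean over the 40 positions, padding excluded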
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
                         label_ids, label_weights):
  """Get loss and log probs for the masked LM."""
  # Gather the encoder outputs at the masked positions
  input_tensor = gather_indexes(input_tensor, positions)

  with tf.variable_scope("cls/predictions"):
    # Apply one more non-linear transformation before the output layer;
    # this is only used during pre-training
    with tf.variable_scope("transform"):
      input_tensor = tf.layers.dense(
          input_tensor,
          units=bert_config.hidden_size,
          activation=modeling.get_activation(bert_config.hidden_act),
          kernel_initializer=modeling.create_initializer(
              bert_config.initializer_range))
      input_tensor = modeling.layer_norm(input_tensor)

    # output_weights is the same matrix as the input word embeddings,
    # so only an output-only bias is added here
    output_bias = tf.get_variable(
        "output_bias",
        shape=[bert_config.vocab_size],
        initializer=tf.zeros_initializer())
    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    # label_ids are the ids of the masked-out tokens
    label_ids = tf.reshape(label_ids, [-1])
    label_weights = tf.reshape(label_weights, [-1])

    one_hot_labels = tf.one_hot(
        label_ids, depth=bert_config.vocab_size, dtype=tf.float32)

    # The actual number of masked tokens may be less than
    # max_predictions_per_seq (e.g. only 18 instead of 20). In that case
    # label_ids ends with 2 padding zeros and label_weights = [1, 1, ..., 0, 0],
    # so the padded positions must be excluded from the loss.
    per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
    numerator = tf.reduce_sum(label_weights * per_example_loss)
    denominator = tf.reduce_sum(label_weights) + 1e-5
    loss = numerator / denominator

  return (loss, per_example_loss, log_probs)
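The first thing get_masked_lm_output does is call gather_indexes to pull out the hidden states at the masked positions. For reference, this is roughly how run_pretraining.py implements that helper: it flattens the [batch_size, seq_length, hidden_size] tensor and adds a per-example offset so a single tf.gather can collect all masked positions in the batch at once.

def gather_indexes(sequence_tensor, positions):
  """Gathers the vectors at the given positions over a minibatch."""
  sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
  batch_size = sequence_shape[0]
  seq_length = sequence_shape[1]
  width = sequence_shape[2]

  # Turn per-sequence positions into indices into the flattened
  # [batch_size * seq_length, width] tensor
  flat_offsets = tf.reshape(
      tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
  flat_positions = tf.reshape(positions + flat_offsets, [-1])
  flat_sequence_tensor = tf.reshape(sequence_tensor,
                                    [batch_size * seq_length, width])
  output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
  return output_tensor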
Task #2: Next Sentence Prediction
get_next_sentence_output
This function computes the training loss for Task #2. Its input is pooled_output, the pooled output of BertModel with shape [batch_size, hidden_size]: since this task is binary classification, only the representation of each sequence's first token, [CLS], is needed.
def get_next_sentence_output(bert_config, input_tensor, labels):
  """Get loss and log probs for the next sentence prediction."""

  # Label 0 means sentence B really is the next sentence;
  # label 1 means sentence B is a random sentence.
  # The parameters of this classifier are discarded during fine-tuning.
  with tf.variable_scope("cls/seq_relationship"):
    output_weights = tf.get_variable(
        "output_weights",
        shape=[2, bert_config.hidden_size],
        initializer=modeling.create_initializer(bert_config.initializer_range))
    output_bias = tf.get_variable(
        "output_bias", shape=[2], initializer=tf.zeros_initializer())

    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    labels = tf.reshape(labels, [-1])
    one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, log_probs)
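For context, pooled_output is not simply the raw [CLS] hidden state: in modeling.py the final hidden state of the first token is passed through an extra dense layer with a tanh activation. A rough sketch of that pooling step (simplified and paraphrased from modeling.py, with bert_config standing in for the model's config object):

with tf.variable_scope("pooler"):
  # Take the final hidden state of the first token ([CLS]) ...
  first_token_tensor = tf.squeeze(sequence_output[:, 0:1, :], axis=1)
  # ... and project it with a tanh-activated dense layer
  pooled_output = tf.layers.dense(
      first_token_tensor,
      bert_config.hidden_size,
      activation=tf.tanh,
      kernel_initializer=modeling.create_initializer(
          bert_config.initializer_range))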
Custom model
model_fn_builder
This function constructs the model_fn used by the Estimator. With the two training tasks defined above, we can write out the training procedure and then hand the training data to the Estimator for automatic training.
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):

  def model_fn(features, labels, mode, params):

    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    masked_lm_positions = features["masked_lm_positions"]
    masked_lm_ids = features["masked_lm_ids"]
    masked_lm_weights = features["masked_lm_weights"]
    next_sentence_labels = features["next_sentence_labels"]

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    # Build the BERT Transformer model
    model = modeling.BertModel(
        config=bert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=segment_ids,
        use_one_hot_embeddings=use_one_hot_embeddings)

    # Batch loss, per-example loss and log-prob matrix for the masked LM task
    (masked_lm_loss,
     masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(
         bert_config, model.get_sequence_output(), model.get_embedding_table(),
         masked_lm_positions, masked_lm_ids, masked_lm_weights)

    # Batch loss, per-example loss and log-prob matrix for the next sentence
    # prediction task
    (next_sentence_loss, next_sentence_example_loss,
     next_sentence_log_probs) = get_next_sentence_output(
         bert_config, model.get_pooled_output(), next_sentence_labels)

    # The total loss is the sum of the two task losses
    total_loss = masked_lm_loss + next_sentence_loss

    # Collect all trainable variables
    tvars = tf.trainable_variables()

    initialized_variable_names = {}
    scaffold_fn = None
    # If a checkpoint is provided, restore the variables from it
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:

        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    # Training: build the optimizer and the training spec
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)

      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    # Evaluation spec
    elif mode == tf.estimator.ModeKeys.EVAL:

      def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
                    masked_lm_weights, next_sentence_example_loss,
                    next_sentence_log_probs, next_sentence_labels):
        """Computes the loss and accuracy of the model."""
        masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
                                         [-1, masked_lm_log_probs.shape[-1]])
        masked_lm_predictions = tf.argmax(
            masked_lm_log_probs, axis=-1, output_type=tf.int32)
        masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
        masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
        masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
        masked_lm_accuracy = tf.metrics.accuracy(
            labels=masked_lm_ids,
            predictions=masked_lm_predictions,
            weights=masked_lm_weights)
        masked_lm_mean_loss = tf.metrics.mean(
            values=masked_lm_example_loss, weights=masked_lm_weights)

        next_sentence_log_probs = tf.reshape(
            next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
        next_sentence_predictions = tf.argmax(
            next_sentence_log_probs, axis=-1, output_type=tf.int32)
        next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
        next_sentence_accuracy = tf.metrics.accuracy(
            labels=next_sentence_labels, predictions=next_sentence_predictions)
        next_sentence_mean_loss = tf.metrics.mean(
            values=next_sentence_example_loss)

        return {
            "masked_lm_accuracy": masked_lm_accuracy,
            "masked_lm_loss": masked_lm_mean_loss,
            "next_sentence_accuracy": next_sentence_accuracy,
            "next_sentence_loss": next_sentence_mean_loss,
        }

      eval_metrics = (metric_fn, [
          masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
          masked_lm_weights, next_sentence_example_loss,
          next_sentence_log_probs, next_sentence_labels
      ])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))

    return output_spec

  return model_fn
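In the training branch above, the train_op comes from optimization.create_optimizer, which combines Adam with weight decay and a linear warmup followed by linear decay of the learning rate. A simplified sketch of just the learning-rate schedule, roughly following optimization.py (init_lr, num_train_steps and num_warmup_steps are the arguments passed in; the AdamWeightDecayOptimizer itself is omitted):

# Linearly decay the learning rate to 0 over num_train_steps
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
learning_rate = tf.train.polynomial_decay(
    learning_rate,
    global_step,
    num_train_steps,
    end_learning_rate=0.0,
    power=1.0,
    cycle=False)

# During the first num_warmup_steps steps, linearly ramp the learning
# rate up from 0 to init_lr instead of decaying it
if num_warmup_steps:
  global_steps_float = tf.cast(global_step, tf.float32)
  warmup_steps_float = tf.constant(num_warmup_steps, dtype=tf.float32)
  warmup_percent_done = global_steps_float / warmup_steps_float
  warmup_learning_rate = init_lr * warmup_percent_done
  is_warmup = tf.cast(global_steps_float < warmup_steps_float, tf.float32)
  learning_rate = (
      (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)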
Main function
The main function wires the pieces above together into the full training process.
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
  if not FLAGS.do_train and not FLAGS.do_eval:
    raise ValueError("At least one of `do_train` or `do_eval` must be True.")
  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
  tf.gfile.MakeDirs(FLAGS.output_dir)

  input_files = []
  for input_pattern in FLAGS.input_file.split(","):
    input_files.extend(tf.gfile.Glob(input_pattern))

  tf.logging.info("*** Input Files ***")
  for input_file in input_files:
    tf.logging.info("  %s" % input_file)

  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))

  # Build the custom model_fn for the Estimator
  model_fn = model_fn_builder(
      bert_config=bert_config,
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=FLAGS.num_train_steps,
      num_warmup_steps=FLAGS.num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)

  # If no TPU is available, this falls back to a normal CPU/GPU Estimator
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size)

  if FLAGS.do_train:
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    train_input_fn = input_fn_builder(
        input_files=input_files,
        max_seq_length=FLAGS.max_seq_length,
        max_predictions_per_seq=FLAGS.max_predictions_per_seq,
        is_training=True)
    estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)

  if FLAGS.do_eval:
    tf.logging.info("***** Running evaluation *****")
    tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)

    eval_input_fn = input_fn_builder(
        input_files=input_files,
        max_seq_length=FLAGS.max_seq_length,
        max_predictions_per_seq=FLAGS.max_predictions_per_seq,
        is_training=False)

    result = estimator.evaluate(
        input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)

    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))
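main() relies on input_fn_builder to turn the TFRecord files produced in the previous post into a tf.data pipeline for the Estimator. Below is a simplified sketch of that builder; the real version in run_pretraining.py additionally does parallel "sloppy" file interleaving for training and casts int64 features to int32 for the TPU, which this sketch omits.

def input_fn_builder(input_files, max_seq_length, max_predictions_per_seq,
                     is_training):
  """Creates an `input_fn` closure to be passed to the (TPU)Estimator."""

  def input_fn(params):
    batch_size = params["batch_size"]

    # Feature spec matching the TFRecords written by create_pretraining_data.py
    name_to_features = {
        "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
        "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
        "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
        "masked_lm_positions":
            tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
        "masked_lm_ids":
            tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
        "masked_lm_weights":
            tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
        "next_sentence_labels": tf.FixedLenFeature([1], tf.int64),
    }

    d = tf.data.TFRecordDataset(input_files)
    if is_training:
      # Training runs until max_steps, so repeat and shuffle the data
      d = d.repeat()
      d = d.shuffle(buffer_size=100)

    d = d.map(lambda record: tf.parse_single_example(record, name_to_features))
    # drop_remainder=True keeps batch shapes static (required on TPU)
    d = d.batch(batch_size, drop_remainder=True)
    return d

  return input_fn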
Running the code
Pre-training launch script:
python run_pretraining.py \
  --input_file=/tmp/tf_examples.tfrecord \
  --output_dir=/tmp/pretraining_output \
  --do_train=True \
  --do_eval=True \
  --bert_config_file=$BERT_BASE_DIR/bert_config.json \
  --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
  --train_batch_size=32 \
  --max_seq_length=128 \
  --max_predictions_per_seq=20 \
  --num_train_steps=20 \
  --num_warmup_steps=10 \
  --learning_rate=2e-5
You should then see an output log similar to the following:
***** Eval results *****
  global_step = 20
  loss = 0.0979674
  masked_lm_accuracy = 0.985479
  masked_lm_loss = 0.0979328
  next_sentence_accuracy = 1.0
  next_sentence_loss = 3.45724e-05
Finally, a few tips on the pre-training process [I can't run full pre-training myself anyway, so just take them as reference =.=].