Text Classification - 06 Transformer

Posted yifanrensheng

tags:


Contents

  1. Overview
  2. Dataset
  3. Main code

1. Overview

This text-classification series will consist of roughly eight posts. The code can be downloaded directly from GitHub and the training data from Baidu Cloud; import the project into PyCharm and it is ready to run. The series covers classifiers built on word2vec pre-trained embeddings as well as on recent pre-trained models (ELMo, BERT, etc.). The full series:

word2vec pre-trained word vectors

textCNN model

charCNN model

Bi-LSTM model

Bi-LSTM + Attention model

Transformer model

ELMo pre-trained model

BERT pre-trained model

2. Dataset

The dataset is the IMDB movie review corpus. There are three files under /data/rawData: unlabeledTrainData.tsv, labeledTrainData.tsv, and testData.tsv. Text classification itself requires the labeled data (labeledTrainData), but since training the word2vec model is unsupervised, the unlabeled data can be used there as well.

Training data download: https://pan.baidu.com/s/1-XEwx1ai8kkGsMagIFKX_g (extraction code: rtz8)
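Before preprocessing, the labeled file can be inspected quickly with pandas. This is only a sketch: the tab-separated layout and the id/sentiment/review columns follow the standard Kaggle IMDB release, so adjust the path and read options if your copy differs.

# Minimal sketch: peek at the labeled training data (assumes the standard Kaggle IMDB column layout)
import pandas as pd

df = pd.read_csv("data/rawData/labeledTrainData.tsv", sep="\t")
print(df.shape)                         # expected roughly (25000, 3)
print(df.columns.tolist())              # e.g. ['id', 'sentiment', 'review']
print(df["sentiment"].value_counts())   # the two classes are balanced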


Background on the Transformer model can be found in earlier articles in this series: an introduction and a source-code walkthrough.

3. Main code

3.1 Training parameters: parameter_config.py


# Author:yifan
# All required imports are kept together for reuse; they can be used directly after converting to Jupyter.

# 1 Training parameter configuration
class TrainingConfig(object):
    epoches = 4
    evaluateEvery = 100
    checkpointEvery = 100
    learningRate = 0.001


class ModelConfig(object):
    embeddingSize = 200
    filters = 128          # number of filters in the inner 1-D convolution; the outer convolution's filter count should equal the input dimension so each layer's output matches its input
    numHeads = 8           # number of attention heads
    numBlocks = 1          # number of transformer blocks
    epsilon = 1e-8         # small constant used in the LayerNorm denominator
    keepProp = 0.9         # dropout keep probability inside multi-head attention
    dropoutKeepProb = 0.5  # dropout keep probability before the fully connected layer
    l2RegLambda = 0.0


class Config(object):
    sequenceLength = 200   # roughly the mean sequence length over the dataset
    batchSize = 128
    dataSource = "../data/preProcess/labeledTrain.csv"
    stopWordSource = "../data/english"
    numClasses = 1         # 1 for binary classification, otherwise the number of classes
    rate = 0.8             # proportion of the data used for training
    training = TrainingConfig()
    model = ModelConfig()


# instantiate the configuration object
config = Config()
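The other scripts in this project simply import this module and read the nested fields. A quick illustration of how the values are accessed (the output comments reflect the defaults above):

import parameter_config

config = parameter_config.config        # the instance created above; parameter_config.Config() works as well
print(config.model.numHeads)            # 8
print(config.training.learningRate)     # 0.001
print(config.sequenceLength)            # 200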

3.2 Loading the training data: get_train_data.py


# Author:yifan
import json
from collections import Counter
import gensim
import pandas as pd
import numpy as np
import parameter_config


# 2 Data preprocessing class that builds the training and evaluation sets
class Dataset(object):
    def __init__(self, config):
        self.config = config
        self._dataSource = config.dataSource
        self._stopWordSource = config.stopWordSource
        self._sequenceLength = config.sequenceLength  # every input sequence is padded/truncated to a fixed length
        self._embeddingSize = config.model.embeddingSize
        self._batchSize = config.batchSize
        self._rate = config.rate
        self._stopWordDict = {}
        self.trainReviews = []
        self.trainLabels = []
        self.evalReviews = []
        self.evalLabels = []
        self.wordEmbedding = None
        self.labelList = []

    def _readData(self, filePath):
        """
        Read the dataset from a csv file (the notes below refer to the file used in this experiment).
        """
        df = pd.read_csv(filePath)  # three columns: review, sentiment, rate
        if self.config.numClasses == 1:
            labels = df["sentiment"].tolist()  # sentiment column: a 0/1 sequence of 25000 entries
        elif self.config.numClasses > 1:
            labels = df["rate"].tolist()  # only used for multi-class; not exercised in this binary experiment
        review = df["review"].tolist()
        reviews = [line.strip().split() for line in review]  # split each review on whitespace
        return reviews, labels

    def _labelToIndex(self, labels, label2idx):
        """
        Convert labels to their index representation.
        """
        labelIds = [label2idx[label] for label in labels]  # for this dataset labelIds equals labels
        return labelIds

    def _wordToIndex(self, reviews, word2idx):
        """Convert words to indices."""
        reviewIds = [[word2idx.get(item, word2idx["UNK"]) for item in review] for review in reviews]
        return reviewIds  # 25000 index sequences

    def _genTrainEvalData(self, x, y, word2idx, rate):
        """Generate the training and evaluation sets."""
        reviews = []
        for review in x:  # truncate long reviews to sequenceLength (200) and pad short ones with PAD
            if len(review) >= self._sequenceLength:
                reviews.append(review[:self._sequenceLength])
            else:
                reviews.append(review + [word2idx["PAD"]] * (self._sequenceLength - len(review)))

        # split into training and evaluation data according to rate
        trainIndex = int(len(x) * rate)
        trainReviews = np.asarray(reviews[:trainIndex], dtype="int64")
        trainLabels = np.array(y[:trainIndex], dtype="float32")
        evalReviews = np.asarray(reviews[trainIndex:], dtype="int64")
        evalLabels = np.array(y[trainIndex:], dtype="float32")
        return trainReviews, trainLabels, evalReviews, evalLabels

    def _getWordEmbedding(self, words):
        """Look up the pre-trained word2vec vector (200-d) for every word in our vocabulary.
        PAD is mapped to a zero vector and UNK to a random vector.
        """
        wordVec = gensim.models.KeyedVectors.load_word2vec_format("../word2vec/word2Vec.bin", binary=True)
        vocab = []
        wordEmbedding = []
        # add "PAD" and "UNK"
        vocab.append("PAD")
        vocab.append("UNK")
        wordEmbedding.append(np.zeros(self._embeddingSize))         # _embeddingSize is 200 here
        wordEmbedding.append(np.random.randn(self._embeddingSize))
        for word in words:
            try:
                vector = wordVec.wv[word]
                vocab.append(word)
                wordEmbedding.append(vector)
            except:
                print(word + " is not in the word2vec vocabulary")
        return vocab, np.array(wordEmbedding)

    def _genVocabulary(self, reviews, labels):
        """Build the embedding matrix and the word-to-index mapping; the full dataset can be used here."""
        allWords = [word for review in reviews for word in review]  # 5,738,236 tokens from 25000 reviews
        subWords = [word for word in allWords if word not in self.stopWordDict]  # drop stop words
        wordCount = Counter(subWords)  # count word frequencies
        sortWordCount = sorted(wordCount.items(), key=lambda x: x[1], reverse=True)
        # 161330 distinct words, e.g. [('movie', 41104), ('film', 36981), ('one', 24966), ('like', 19490)]
        words = [item[0] for item in sortWordCount if item[1] >= 5]  # drop words appearing fewer than 5 times
        vocab, wordEmbedding = self._getWordEmbedding(words)
        self.wordEmbedding = wordEmbedding
        word2idx = dict(zip(vocab, list(range(len(vocab)))))  # e.g. {'I': 0, 'love': 1, 'yanzi': 2}
        uniqueLabel = list(set(labels))  # deduplicated labels, here just 0 and 1
        label2idx = dict(zip(uniqueLabel, list(range(len(uniqueLabel)))))  # here {0: 0, 1: 1}
        self.labelList = list(range(len(uniqueLabel)))
        # save the mappings as json so that inference can load them directly
        with open("../data/wordJson/word2idx.json", "w", encoding="utf-8") as f:
            json.dump(word2idx, f)
        with open("../data/wordJson/label2idx.json", "w", encoding="utf-8") as f:
            json.dump(label2idx, f)
        return word2idx, label2idx

    def _readStopWord(self, stopWordPath):
        """
        Read the stop-word list.
        """
        with open(stopWordPath, "r") as f:
            stopWords = f.read()
            stopWordList = stopWords.splitlines()
            # store the stop words in a dict so that lookups are fast
            self.stopWordDict = dict(zip(stopWordList, list(range(len(stopWordList)))))

    def dataGen(self):
        """
        Initialize the training and evaluation sets.
        """
        # load stop words
        self._readStopWord(self._stopWordSource)
        # load the dataset
        reviews, labels = self._readData(self._dataSource)
        # build the word-index mapping and the embedding matrix
        word2idx, label2idx = self._genVocabulary(reviews, labels)
        # convert labels and reviews to indices
        labelIds = self._labelToIndex(labels, label2idx)
        reviewIds = self._wordToIndex(reviews, word2idx)
        # split into training and evaluation sets
        trainReviews, trainLabels, evalReviews, evalLabels = self._genTrainEvalData(reviewIds, labelIds,
                                                                                    word2idx, self._rate)
        self.trainReviews = trainReviews
        self.trainLabels = trainLabels
        self.evalReviews = evalReviews
        self.evalLabels = evalLabels


# build the data used by the later modules
# config = parameter_config.Config()
# data = Dataset(config)
# data.dataGen()
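Uncommenting the three lines above (or running the equivalent below) produces the arrays consumed by the training script. With the default 0.8 split of the 25,000 labeled reviews, the expected shapes are roughly as follows (a sketch, assuming the data and word2vec files sit in the paths used above):

import parameter_config
import get_train_data

config = parameter_config.Config()
data = get_train_data.Dataset(config)
data.dataGen()
print(data.trainReviews.shape)   # (20000, 200): 80% of 25000 reviews, padded/truncated to 200 tokens
print(data.evalReviews.shape)    # (5000, 200)
print(data.wordEmbedding.shape)  # (vocab_size, 200), with rows 0 and 1 reserved for PAD and UNK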

3.3 Model construction: mode_structure.py

Some practical observations about using the Transformer model:

1) The fixed one-hot position embedding chosen here works better than the sinusoidal position embedding proposed in the paper. A possible reason is that the paper's position embedding is fed in as a trainable value, which increases model complexity and hurts performance on a small dataset (the IMDB training set has 20,000 examples). A small NumPy sketch of the one-hot scheme follows these notes.

2) The mask may be unnecessary: adding or removing it makes essentially no difference to the result here. It may matter on other tasks or datasets, but the paper does not require a mask in the encoder; masking is mainly used in the decoder.

3) The number of transformer layers can be tuned to the size of your dataset; on a small dataset one layer is usually enough.

4) Apply dropout regularization on the sub-layers, mainly in the multi-head attention layer. Since the feed-forward layer is implemented with convolutions, leaving dropout out there should be fine; if you implement it with fully connected layers, add dropout as well.

5) On small datasets the Transformer is not necessarily better than Bi-LSTM + Attention; on IMDB it is actually worse.
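The sketch below shows in plain NumPy what the fixed one-hot scheme from note 1 amounts to: each of the 200 positions gets a 200-dimensional indicator vector that is concatenated to the 200-dimensional word vector, so every token enters the first block as a 400-dimensional vector (the numbers match the configuration above; the word vectors are random stand-ins):

import numpy as np

sequenceLen, embeddingSize = 200, 200
positionOneHot = np.eye(sequenceLen, dtype="float32")   # [sequenceLen, sequenceLen], row i marks position i
wordVectors = np.random.randn(sequenceLen, embeddingSize).astype("float32")  # stand-in for looked-up word vectors
tokenInputs = np.concatenate([wordVectors, positionOneHot], axis=-1)
print(tokenInputs.shape)  # (200, 400): embeddingSize + sequenceLength per token, as in the model code below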


# Author:yifan
import numpy as np
import tensorflow as tf
import parameter_config


# 3 Build the Transformer model
# generate the fixed (one-hot) position embedding
def fixedPositionEmbedding(batchSize, sequenceLen):
    embeddedPosition = []
    for batch in range(batchSize):
        x = []
        for step in range(sequenceLen):  # one-hot style construction
            a = np.zeros(sequenceLen)
            a[step] = 1
            x.append(a)
        embeddedPosition.append(x)
    return np.array(embeddedPosition, dtype="float32")


# model definition
class Transformer(object):
    """
    Transformer Encoder for text classification
    """
    def __init__(self, config, wordEmbedding):
        # model inputs
        self.inputX = tf.placeholder(tf.int32, [None, config.sequenceLength], name="inputX")
        self.inputY = tf.placeholder(tf.int32, [None], name="inputY")
        self.dropoutKeepProb = tf.placeholder(tf.float32, name="dropoutKeepProb")
        self.embeddedPosition = tf.placeholder(tf.float32, [None, config.sequenceLength, config.sequenceLength], name="embeddedPosition")
        self.config = config
        # l2 loss accumulator
        l2Loss = tf.constant(0.0)

        # Embedding layer. The position vectors can be defined in two ways: a fixed one-hot encoding that is
        # concatenated to the word vectors (which works better on this dataset), or the sinusoidal scheme from
        # the paper, which performed worse here, possibly because it adds complexity on a small dataset.
        with tf.name_scope("embedding"):
            # initialize the embedding matrix with the pre-trained word vectors
            self.W = tf.Variable(tf.cast(wordEmbedding, dtype=tf.float32, name="word2vec"), name="W")
            # look up the word vectors; shape [batch_size, sequence_length, embedding_size]
            self.embedded = tf.nn.embedding_lookup(self.W, self.inputX)
            self.embeddedWords = tf.concat([self.embedded, self.embeddedPosition], -1)

        with tf.name_scope("transformer"):
            for i in range(config.model.numBlocks):
                with tf.name_scope("transformer-{}".format(i + 1)):
                    # shape [batch_size, sequence_length, embedding_size]
                    multiHeadAtt = self._multiheadAttention(rawKeys=self.inputX, queries=self.embeddedWords,
                                                            keys=self.embeddedWords)
                    # shape [batch_size, sequence_length, embedding_size]
                    self.embeddedWords = self._feedForward(multiHeadAtt,
                                                           [config.model.filters, config.model.embeddingSize + config.sequenceLength])

            outputs = tf.reshape(self.embeddedWords, [-1, config.sequenceLength * (config.model.embeddingSize + config.sequenceLength)])

        outputSize = outputs.get_shape()[-1].value

        with tf.name_scope("dropout"):
            outputs = tf.nn.dropout(outputs, keep_prob=self.dropoutKeepProb)

        # fully connected output layer
        with tf.name_scope("output"):
            outputW = tf.get_variable(
                "outputW",
                shape=[outputSize, config.numClasses],
                initializer=tf.contrib.layers.xavier_initializer())
            outputB = tf.Variable(tf.constant(0.1, shape=[config.numClasses]), name="outputB")
            l2Loss += tf.nn.l2_loss(outputW)
            l2Loss += tf.nn.l2_loss(outputB)
            self.logits = tf.nn.xw_plus_b(outputs, outputW, outputB, name="logits")

            if config.numClasses == 1:
                self.predictions = tf.cast(tf.greater_equal(self.logits, 0.0), tf.float32, name="predictions")
            elif config.numClasses > 1:
                self.predictions = tf.argmax(self.logits, axis=-1, name="predictions")

        # loss: binary cross-entropy (or softmax cross-entropy for multi-class)
        with tf.name_scope("loss"):
            if config.numClasses == 1:
                losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits,
                                                                 labels=tf.cast(tf.reshape(self.inputY, [-1, 1]),
                                                                                dtype=tf.float32))
            elif config.numClasses > 1:
                losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.inputY)

            self.loss = tf.reduce_mean(losses) + config.model.l2RegLambda * l2Loss

    def _layerNormalization(self, inputs, scope="layerNorm"):
        # LayerNorm differs from BatchNorm
        epsilon = self.config.model.epsilon

        inputsShape = inputs.get_shape()  # [batch_size, sequence_length, embedding_size]
        paramsShape = inputsShape[-1:]

        # LayerNorm computes mean and variance over the last dimension only, whereas BatchNorm uses all dimensions
        # mean and variance have shape [batch_size, sequence_len, 1]
        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)

        beta = tf.Variable(tf.zeros(paramsShape))
        gamma = tf.Variable(tf.ones(paramsShape))
        normalized = (inputs - mean) / ((variance + epsilon) ** .5)

        outputs = gamma * normalized + beta
        return outputs

    def _multiheadAttention(self, rawKeys, queries, keys, numUnits=None, causality=False, scope="multiheadAttention"):
        # rawKeys is used to compute the padding mask, because keys already include the position embedding
        # and therefore no longer contain the padding value 0

        numHeads = self.config.model.numHeads
        keepProp = self.config.model.keepProp

        if numUnits is None:  # if not given, use the last dimension of the input, i.e. the embedding size
            numUnits = queries.get_shape().as_list()[-1]

        # tf.layers.dense applies a non-linear projection to a multi-dimensional tensor. When computing
        # self-attention, Q, K and V must each be projected; projecting before splitting into heads is
        # equivalent in principle to the per-head projections in the paper.
        # Q, K, V all have shape [batch_size, sequence_length, embedding_size]
        Q = tf.layers.dense(queries, numUnits, activation=tf.nn.relu)
        K = tf.layers.dense(keys, numUnits, activation=tf.nn.relu)
        V = tf.layers.dense(keys, numUnits, activation=tf.nn.relu)

        # split the last dimension into numHeads pieces and concatenate along the batch dimension
        # Q_, K_, V_ have shape [batch_size * numHeads, sequence_length, embedding_size/numHeads]
        Q_ = tf.concat(tf.split(Q, numHeads, axis=-1), axis=0)
        K_ = tf.concat(tf.split(K, numHeads, axis=-1), axis=0)
        V_ = tf.concat(tf.split(V, numHeads, axis=-1), axis=0)

        # dot product between queries and keys; shape [batch_size * numHeads, queries_len, key_len],
        # where the last two dimensions are the query and key sequence lengths
        similary = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))

        # scale the dot product by the square root of the key dimension
        scaledSimilary = similary / (K_.get_shape().as_list()[-1] ** 0.5)

        # The input sequences contain padding tokens that should not contribute to the result. In principle,
        # padding positions are 0 and would receive zero weight, but after the position embedding is added they
        # are no longer 0, so we mask them explicitly using the raw (pre-embedding) keys. Queries also contain
        # padding, but the result should only depend on the input, and since queries == keys in self-attention,
        # masking one side is enough: if either is 0 the weight becomes 0.
        # See https://github.com/Kyubyong/transformer/issues/3 for a discussion of key masking.

        # tile the raw keys across heads with tf.tile; shape [batch_size * numHeads, keys_len]
        keyMasks = tf.tile(rawKeys, [numHeads, 1])

        # add a query dimension and tile; shape [batch_size * numHeads, queries_len, keys_len]
        keyMasks = tf.tile(tf.expand_dims(keyMasks, 1), [1, tf.shape(queries)[1], 1])

        # tf.ones_like builds a tensor of ones with the same shape as scaledSimilary; scale it to a huge negative value
        paddings = tf.ones_like(scaledSimilary) * (-2 ** (32 + 1))

        # tf.where(condition, x, y): where condition is True take the element of x, otherwise take y
        # (condition, x and y all have the same shape). Positions where keyMasks is 0 are replaced by paddings.
        maskedSimilary = tf.where(tf.equal(keyMasks, 0), paddings, scaledSimilary)  # [batch_size * numHeads, queries_len, key_len]

        # Causal masking (attend only to the left context) belongs to the Transformer decoder; for text
        # classification only the encoder is needed, the decoder being a generative model used mainly for
        # language generation.
        if causality:
            diagVals = tf.ones_like(maskedSimilary[0, :, :])  # [queries_len, keys_len]
            tril = tf.contrib.linalg.LinearOperatorTriL(diagVals).to_dense()  # lower-triangular [queries_len, keys_len]
            masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(maskedSimilary)[0], 1, 1])  # [batch_size * numHeads, queries_len, keys_len]

            paddings = tf.ones_like(masks) * (-2 ** (32 + 1))
            maskedSimilary = tf.where(tf.equal(masks, 0), paddings, maskedSimilary)  # [batch_size * numHeads, queries_len, keys_len]

        # softmax over the keys gives the attention weights; shape [batch_size * numHeads, queries_len, keys_len]
        weights = tf.nn.softmax(maskedSimilary)

        # weighted sum of the values; shape [batch_size * numHeads, sequence_length, embedding_size/numHeads]
        outputs = tf.matmul(weights, V_)

        # recombine the heads back to [batch_size, sequence_length, embedding_size]
        outputs = tf.concat(tf.split(outputs, numHeads, axis=0), axis=2)

        outputs = tf.nn.dropout(outputs, keep_prob=keepProp)

        # residual connection for the sub-layer, i.e. H(x) = F(x) + x
        outputs += queries
        # layer normalization
        outputs = self._layerNormalization(outputs)
        return outputs

    def _feedForward(self, inputs, filters, scope="feedForward"):
        # the position-wise feed-forward layer is implemented with 1-D convolutions

        # inner layer
        params = {"inputs": inputs, "filters": filters[0], "kernel_size": 1,
                  "activation": tf.nn.relu, "use_bias": True}
        outputs = tf.layers.conv1d(**params)

        # outer layer
        params = {"inputs": outputs, "filters": filters[1], "kernel_size": 1,
                  "activation": None, "use_bias": True}

        # a 1-D convolution still uses a 2-D kernel; only the height is specified and the width equals the
        # embedding size. Output shape [batch_size, sequence_length, embedding_size]
        outputs = tf.layers.conv1d(**params)

        # residual connection
        outputs += inputs

        # layer normalization
        outputs = self._layerNormalization(outputs)

        return outputs

    def _positionEmbedding(self, scope="positionEmbedding"):
        # generate the sinusoidal position embedding described in the paper (unused in the final model)
        batchSize = self.config.batchSize
        sequenceLen = self.config.sequenceLength
        embeddingSize = self.config.model.embeddingSize

        # position indices, tiled over all samples in the batch
        positionIndex = tf.tile(tf.expand_dims(tf.range(sequenceLen), 0), [batchSize, 1])

        # first compute the angle for every (position, dimension) pair
        positionEmbedding = np.array([[pos / np.power(10000, (i - i % 2) / embeddingSize) for i in range(embeddingSize)]
                                      for pos in range(sequenceLen)])

        # apply sin to the even dimensions and cos to the odd dimensions
        positionEmbedding[:, 0::2] = np.sin(positionEmbedding[:, 0::2])
        positionEmbedding[:, 1::2] = np.cos(positionEmbedding[:, 1::2])

        # convert positionEmbedding to a tensor
        positionEmbedding_ = tf.cast(positionEmbedding, dtype=tf.float32)

        # look up to get a [batchSize, sequenceLen, embeddingSize] tensor
        positionEmbedded = tf.nn.embedding_lookup(positionEmbedding_, positionIndex)

        return positionEmbedded
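To make the head splitting and scaled dot-product in _multiheadAttention easier to follow, here is a plain NumPy sketch of the same reshape-score-recombine pipeline on toy dimensions (no masking or dropout; purely illustrative):

import numpy as np

batch, seqLen, numUnits, numHeads = 2, 5, 8, 2
Q = np.random.randn(batch, seqLen, numUnits)
K = np.random.randn(batch, seqLen, numUnits)
V = np.random.randn(batch, seqLen, numUnits)

# split the last dimension into heads and stack the heads onto the batch dimension
Q_ = np.concatenate(np.split(Q, numHeads, axis=-1), axis=0)  # [batch*numHeads, seqLen, numUnits/numHeads]
K_ = np.concatenate(np.split(K, numHeads, axis=-1), axis=0)
V_ = np.concatenate(np.split(V, numHeads, axis=-1), axis=0)

scores = Q_ @ K_.transpose(0, 2, 1) / np.sqrt(numUnits / numHeads)      # scaled dot-product, [batch*numHeads, seqLen, seqLen]
weights = np.exp(scores) / np.exp(scores).sum(axis=-1, keepdims=True)   # softmax over the key positions
context = weights @ V_                                                  # [batch*numHeads, seqLen, numUnits/numHeads]
output = np.concatenate(np.split(context, numHeads, axis=0), axis=2)    # recombine heads: [batch, seqLen, numUnits]
print(output.shape)  # (2, 5, 8)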

3.4 Model training: mode_trainning.py


import os
import datetime
import numpy as np
import tensorflow as tf
import parameter_config
import get_train_data
import mode_structure

# load the configuration and the processed data from the previous modules
config = parameter_config.Config()
data = get_train_data.Dataset(config)
data.dataGen()


# 4 generate batches, implemented as a generator
def nextBatch(x, y, batchSize):
    perm = np.arange(len(x))  # [0, 1, 2, ..., len(x) - 1]
    np.random.shuffle(perm)   # shuffle
    x = x[perm]
    y = y[perm]
    numBatches = len(x) // batchSize
    for i in range(numBatches):
        start = i * batchSize
        end = start + batchSize
        batchX = np.array(x[start: end], dtype="int64")
        batchY = np.array(y[start: end], dtype="float32")
        yield batchX, batchY


# 5 metric functions
"""
Performance metrics
"""
def mean(item: list) -> float:
    """
    Mean of the elements of a list
    :param item: list
    :return:
    """
    res = sum(item) / len(item) if len(item) > 0 else 0
    return res


def accuracy(pred_y, true_y):
    """
    Accuracy for binary and multi-class classification
    :param pred_y: predictions
    :param true_y: ground truth
    :return:
    """
    if isinstance(pred_y[0], list):
        pred_y = [item[0] for item in pred_y]
    corr = 0
    for i in range(len(pred_y)):
        if pred_y[i] == true_y[i]:
            corr += 1
    acc = corr / len(pred_y) if len(pred_y) > 0 else 0
    return acc


def binary_precision(pred_y, true_y, positive=1):
    """
    Precision for binary classification
    :param pred_y: predictions
    :param true_y: ground truth
    :param positive: index of the positive class
    :return:
    """
    corr = 0
    pred_corr = 0
    for i in range(len(pred_y)):
        if pred_y[i] == positive:
            pred_corr += 1
            if pred_y[i] == true_y[i]:
                corr += 1

    prec = corr / pred_corr if pred_corr > 0 else 0
    return prec


def binary_recall(pred_y, true_y, positive=1):
    """
    Recall for binary classification
    :param pred_y: predictions
    :param true_y: ground truth
    :param positive: index of the positive class
    :return:
    """
    corr = 0
    true_corr = 0
    for i in range(len(pred_y)):
        if true_y[i] == positive:
            true_corr += 1
            if pred_y[i] == true_y[i]:
                corr += 1

    rec = corr / true_corr if true_corr > 0 else 0
    return rec


def binary_f_beta(pred_y, true_y, beta=1.0, positive=1):
    """
    F-beta for binary classification
    :param pred_y: predictions
    :param true_y: ground truth
    :param beta: beta
    :param positive: index of the positive class
    :return:
    """
    precision = binary_precision(pred_y, true_y, positive)
    recall = binary_recall(pred_y, true_y, positive)
    try:
        f_b = (1 + beta * beta) * precision * recall / (beta * beta * precision + recall)
    except:
        f_b = 0
    return f_b


def multi_precision(pred_y, true_y, labels):
    """
    Precision for multi-class classification (macro average)
    :param pred_y: predictions
    :param true_y: ground truth
    :param labels: label list
    :return:
    """
    if isinstance(pred_y[0], list):
        pred_y = [item[0] for item in pred_y]

    precisions = [binary_precision(pred_y, true_y, label) for label in labels]
    prec = mean(precisions)
    return prec


def multi_recall(pred_y, true_y, labels):
    """
    Recall for multi-class classification (macro average)
    :param pred_y: predictions
    :param true_y: ground truth
    :param labels: label list
    :return:
    """
    if isinstance(pred_y[0], list):
        pred_y = [item[0] for item in pred_y]

    recalls = [binary_recall(pred_y, true_y, label) for label in labels]
    rec = mean(recalls)
    return rec


def multi_f_beta(pred_y, true_y, labels, beta=1.0):
    """
    F-beta for multi-class classification (macro average)
    :param pred_y: predictions
    :param true_y: ground truth
    :param labels: label list
    :param beta: beta
    :return:
    """
    if isinstance(pred_y[0], list):
        pred_y = [item[0] for item in pred_y]

    f_betas = [binary_f_beta(pred_y, true_y, beta, label) for label in labels]
    f_beta = mean(f_betas)
    return f_beta


def get_binary_metrics(pred_y, true_y, f_beta=1.0):
    """
    Metrics for binary classification
    :param pred_y:
    :param true_y:
    :param f_beta:
    :return:
    """
    acc = accuracy(pred_y, true_y)
    recall = binary_recall(pred_y, true_y)
    precision = binary_precision(pred_y, true_y)
    f_beta = binary_f_beta(pred_y, true_y, f_beta)
    return acc, recall, precision, f_beta


def get_multi_metrics(pred_y, true_y, labels, f_beta=1.0):
    """
    Metrics for multi-class classification
    :param pred_y:
    :param true_y:
    :param labels:
    :param f_beta:
    :return:
    """
    acc = accuracy(pred_y, true_y)
    recall = multi_recall(pred_y, true_y, labels)
    precision = multi_precision(pred_y, true_y, labels)
    f_beta = multi_f_beta(pred_y, true_y, labels, f_beta)
    return acc, recall, precision, f_beta


# 6 train the model
# training and evaluation sets
trainReviews = data.trainReviews
trainLabels = data.trainLabels
evalReviews = data.evalReviews
evalLabels = data.evalLabels

wordEmbedding = data.wordEmbedding
labelList = data.labelList
embeddedPosition = mode_structure.fixedPositionEmbedding(config.batchSize, config.sequenceLength)  # one-hot position embedding

# train the model
# define the computation graph
with tf.Graph().as_default():
    session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    session_conf.gpu_options.allow_growth = True
    session_conf.gpu_options.per_process_gpu_memory_fraction = 0.9  # limit GPU memory usage
    sess = tf.Session(config=session_conf)

    # define the session
    with sess.as_default():
        transformer = mode_structure.Transformer(config, wordEmbedding)
        globalStep = tf.Variable(0, name="globalStep", trainable=False)
        # optimizer with the configured learning rate
        optimizer = tf.train.AdamOptimizer(config.training.learningRate)
        # compute gradients, returning (gradient, variable) pairs
        gradsAndVars = optimizer.compute_gradients(transformer.loss)
        # apply the gradients to the variables to build the training op
        trainOp = optimizer.apply_gradients(gradsAndVars, global_step=globalStep)

        # summaries for TensorBoard
        gradSummaries = []
        for g, v in gradsAndVars:
            if g is not None:
                tf.summary.histogram("{}/grad/hist".format(v.name), g)
                tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))

        outDir = os.path.abspath(os.path.join(os.path.curdir, "summarys"))
        print("Writing to {} ".format(outDir))

        lossSummary = tf.summary.scalar("loss", transformer.loss)
        summaryOp = tf.summary.merge_all()

        trainSummaryDir = os.path.join(outDir, "train")
        trainSummaryWriter = tf.summary.FileWriter(trainSummaryDir, sess.graph)
        evalSummaryDir = os.path.join(outDir, "eval")
        evalSummaryWriter = tf.summary.FileWriter(evalSummaryDir, sess.graph)

        # saver for all variables
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)

        # one way to save the model: export a SavedModel (pb file)
        savedModelPath = "../model/transformer/savedModel"
        if os.path.exists(savedModelPath):
            os.rmdir(savedModelPath)
        builder = tf.saved_model.builder.SavedModelBuilder(savedModelPath)

        sess.run(tf.global_variables_initializer())

        def trainStep(batchX, batchY):
            """
            One training step
            """
            feed_dict = {
                transformer.inputX: batchX,
                transformer.inputY: batchY,
                transformer.dropoutKeepProb: config.model.dropoutKeepProb,
                transformer.embeddedPosition: embeddedPosition
            }
            _, summary, step, loss, predictions = sess.run(
                [trainOp, summaryOp, globalStep, transformer.loss, transformer.predictions],
                feed_dict)

            if config.numClasses == 1:
                acc, recall, prec, f_beta = get_binary_metrics(pred_y=predictions, true_y=batchY)
            elif config.numClasses > 1:
                acc, recall, prec, f_beta = get_multi_metrics(pred_y=predictions, true_y=batchY,
                                                              labels=labelList)

            trainSummaryWriter.add_summary(summary, step)
            return loss, acc, prec, recall, f_beta

        def devStep(batchX, batchY):
            """
            One evaluation step
            """
            feed_dict = {
                transformer.inputX: batchX,
                transformer.inputY: batchY,
                transformer.dropoutKeepProb: 1.0,
                transformer.embeddedPosition: embeddedPosition
            }
            summary, step, loss, predictions = sess.run(
                [summaryOp, globalStep, transformer.loss, transformer.predictions],
                feed_dict)

            if config.numClasses == 1:
                acc, recall, prec, f_beta = get_binary_metrics(pred_y=predictions, true_y=batchY)
            elif config.numClasses > 1:
                acc, recall, prec, f_beta = get_multi_metrics(pred_y=predictions, true_y=batchY,
                                                              labels=labelList)

            evalSummaryWriter.add_summary(summary, step)

            return loss, acc, prec, recall, f_beta

        for i in range(config.training.epoches):
            # train the model
            print("start training model")
            for batchTrain in nextBatch(trainReviews, trainLabels, config.batchSize):
                loss, acc, prec, recall, f_beta = trainStep(batchTrain[0], batchTrain[1])

                currentStep = tf.train.global_step(sess, globalStep)
                print("train: step: {}, loss: {}, acc: {}, recall: {}, precision: {}, f_beta: {}".format(
                    currentStep, loss, acc, recall, prec, f_beta))
                if currentStep % config.training.evaluateEvery == 0:
                    print("\nEvaluation:")

                    losses = []
                    accs = []
                    f_betas = []
                    precisions = []
                    recalls = []

                    for batchEval in nextBatch(evalReviews, evalLabels, config.batchSize):
                        loss, acc, precision, recall, f_beta = devStep(batchEval[0], batchEval[1])
                        losses.append(loss)
                        accs.append(acc)
                        f_betas.append(f_beta)
                        precisions.append(precision)
                        recalls.append(recall)

                    time_str = datetime.datetime.now().isoformat()
                    print("{}, step: {}, loss: {}, acc: {}, precision: {}, recall: {}, f_beta: {}".format(
                        time_str, currentStep, mean(losses), mean(accs), mean(precisions),
                        mean(recalls), mean(f_betas)))

                if currentStep % config.training.checkpointEvery == 0:
                    # the other way to save the model: checkpoint files
                    path = saver.save(sess, "../model/Transformer/model/my-model", global_step=currentStep)
                    print("Saved model checkpoint to {} ".format(path))

        inputs = {"inputX": tf.saved_model.utils.build_tensor_info(transformer.inputX),
                  "keepProb": tf.saved_model.utils.build_tensor_info(transformer.dropoutKeepProb)}

        outputs = {"predictions": tf.saved_model.utils.build_tensor_info(transformer.predictions)}

        prediction_signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs=inputs, outputs=outputs,
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
        legacy_init_op = tf.group(tf.tables_initializer(), name="legacy_init_op")
        builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING],
                                             signature_def_map={"predict": prediction_signature},
                                             legacy_init_op=legacy_init_op)

        builder.save()
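Besides the checkpoints, the script exports a SavedModel. A rough sketch of reloading it (TF 1.x API assumed; note that the signature registered above only covers inputX and keepProb, so the embeddedPosition placeholder still has to be fetched by name and fed):

import tensorflow as tf
import parameter_config
import mode_structure

config = parameter_config.Config()
onehotPosition = mode_structure.fixedPositionEmbedding(1, config.sequenceLength)  # shape [1, 200, 200]

with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], "../model/transformer/savedModel")
    graph = sess.graph
    inputX = graph.get_tensor_by_name("inputX:0")
    keepProb = graph.get_tensor_by_name("dropoutKeepProb:0")
    embeddedPosition = graph.get_tensor_by_name("embeddedPosition:0")
    predictions = graph.get_tensor_by_name("output/predictions:0")
    dummyReview = [[0] * config.sequenceLength]  # a single all-PAD review, just to exercise the graph
    print(sess.run(predictions, feed_dict={inputX: dummyReview,
                                           keepProb: 1.0,
                                           embeddedPosition: onehotPosition}))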

3.5 Prediction: predict.py


# Author:yifan
import os
import csv
import time
import datetime
import random
import json
from collections import Counter
from math import sqrt
import gensim
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score
import parameter_config
import mode_structure

config = parameter_config.Config()
embeddedPositions = mode_structure.fixedPositionEmbedding(config.batchSize, config.sequenceLength)[0]  # one-hot position embedding for a single example

# 7 prediction code
# x = "this movie is full of references like mad max ii the wild one and many others the ladybug's face it's a clear reference or tribute to peter lorre this movie is a masterpiece we'll talk much more about in the future"
# x = "his movie is the same as the third level movie. There's no place to look good"
x = "This film is not good"   # predicted as 1, which does not feel accurate
# x = "This film is bad"      # predicted as 0

# Note: the two dictionaries below must match the ones used when training the loaded model
with open("../data/wordJson/word2idx.json", "r", encoding="utf-8") as f:
    word2idx = json.load(f)
with open("../data/wordJson/label2idx.json", "r", encoding="utf-8") as f:  # label2idx.json contains {"0": 0, "1": 1}
    label2idx = json.load(f)
idx2label = {value: key for key, value in label2idx.items()}

# convert x into the index vector xIds that the model expects
xIds = [word2idx.get(item, word2idx["UNK"]) for item in x.split(" ")]
if len(xIds) >= config.sequenceLength:  # truncate if the sentence exceeds sequenceLength (200)
    xIds = xIds[:config.sequenceLength]
else:                                   # otherwise pad with PAD
    xIds = xIds + [word2idx["PAD"]] * (config.sequenceLength - len(xIds))

graph = tf.Graph()
with graph.as_default():
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_options)
    sess = tf.Session(config=session_conf)

    with sess.as_default():
        # restore the model from the latest checkpoint
        checkpoint_file = tf.train.latest_checkpoint("../model/transformer/model/")
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        saver.restore(sess, checkpoint_file)

        # fetch the input placeholders the prediction depends on
        inputX = graph.get_operation_by_name("inputX").outputs[0]
        dropoutKeepProb = graph.get_operation_by_name("dropoutKeepProb").outputs[0]
        embeddedPosition = graph.get_operation_by_name("embeddedPosition").outputs[0]
        # defining new placeholders here instead does not work:
        # inputX = tf.placeholder(tf.int32, [None, config.sequenceLength], name="inputX")
        # dropoutKeepProb = tf.placeholder(tf.float32, name="dropoutKeepProb")
        # embeddedPosition = tf.placeholder(tf.float32, [None, config.sequenceLength, config.sequenceLength],
        #                                   name="embeddedPosition")

        # fetch the prediction tensor and run it
        predictions = graph.get_tensor_by_name("output/predictions:0")
        pred = sess.run(predictions, feed_dict={inputX: [xIds], dropoutKeepProb: 1.0, embeddedPosition: [embeddedPositions]})[0]

        pred = [idx2label[item] for item in pred]
        print(pred)

Result

(screenshot of the prediction output in the original post; the example sentence above is predicted as label 1)

The full code is available at: https://github.com/yifanhunter/NLP_textClassifier

Main reference:

[1] https://home.cnblogs.com/u/jiangxinyang/
