接着上一篇文章
# ---------------------------------------------------------------------------
# Train / load word vectors.
# NOTE(review): this chunk continues a previous file part; `df`, `train`,
# `val`, `test`, `np`, `tf` and `Word2Vec` are assumed to be in scope there.
# ---------------------------------------------------------------------------
import os

file_name = './embedding/Word2Vec_word_200.model'
if not os.path.exists(file_name):
    # Train a 200-dim Word2Vec model on whitespace-tokenised documents.
    # NOTE(review): `size=` / `iter=` are gensim 3.x parameter names; gensim
    # >= 4.0 renamed them to `vector_size=` / `epochs=` — confirm the
    # installed gensim version before upgrading.
    model = Word2Vec([document.split(' ') for document in df['content'].values],
                     size=200, window=5, iter=10, workers=11,
                     seed=2018, min_count=2)
    model.save(file_name)
else:
    model = Word2Vec.load(file_name)
print("add word2vec finished....")

# ---------------------------------------------------------------------------
# Encode words as integer sequences (see the Keras Tokenizer docs for the
# exact API semantics). Only the 50 000 most frequent tokens are kept when
# converting texts; no lower-casing, no character filtering.
# ---------------------------------------------------------------------------
tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=50000,
                                                  lower=False, filters="")
tokenizer.fit_on_texts(df['content'].tolist())

# Convert each split's documents into lists of word indices.
train_ = tokenizer.texts_to_sequences(train['content'].values)
val_ = tokenizer.texts_to_sequences(val['content'].values)
test_ = tokenizer.texts_to_sequences(test['content'].values)

# Pad / truncate every sequence to a fixed length of 800 tokens.
train_ = tf.keras.preprocessing.sequence.pad_sequences(train_, maxlen=800)
val_ = tf.keras.preprocessing.sequence.pad_sequences(val_, maxlen=800)
test_ = tf.keras.preprocessing.sequence.pad_sequences(test_, maxlen=800)

# ---------------------------------------------------------------------------
# Build the pretrained embedding matrix.
# NOTE(review): `word_index` holds the FULL vocabulary, not just the
# `num_words=50000` kept by `texts_to_sequences`, so rows beyond the top 50k
# are allocated but never looked up — harmless, just memory overhead.
# ---------------------------------------------------------------------------
word_vocab = tokenizer.word_index
count = 0
# Row 0 is reserved for the padding index and stays all-zero.
embedding_matrix = np.zeros((len(word_vocab) + 1, 200))
for word, i in word_vocab.items():
    # BUGFIX: membership must be tested on `model.wv`, not on the model
    # object itself — `word in model` is deprecated in gensim 3.x and raises
    # in gensim >= 4.0.
    embedding_vector = model.wv[word] if word in model.wv else None
    if embedding_vector is not None:
        count += 1
        embedding_matrix[i] = embedding_vector
    else:
        # Out-of-vocabulary word: random zero-mean vector in [-0.25, 0.25).
        unk_vec = np.random.random(200) * 0.5
        embedding_matrix[i] = unk_vec - unk_vec.mean()

# ---------------------------------------------------------------------------
# Label encoding (integer labels; `to_categorical` kept for the next chunk).
# ---------------------------------------------------------------------------
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical

lb = LabelEncoder()
train_label = lb.fit_transform(train['label'].values)
val_label = lb.transform(val['label'].values)
test_label = lb.transform(test['label'].values)

# ---------------------------------------------------------------------------
# Model: frozen embedding -> 2x Bi-GRU -> avg+max pooling -> MLP -> softmax.
# ---------------------------------------------------------------------------
# BUGFIX: `shape=(800)` is just the int 800, not a tuple; `tf.keras.Input`
# expects a shape tuple, so use the explicit one-element tuple `(800,)`.
content = tf.keras.layers.Input(shape=(800,), dtype='int32')
embedding = tf.keras.layers.Embedding(
    name="word_embedding",
    input_dim=embedding_matrix.shape[0],
    weights=[embedding_matrix],
    output_dim=embedding_matrix.shape[1],
    trainable=False)  # keep pretrained vectors frozen
x = tf.keras.layers.SpatialDropout1D(0.2)(embedding(content))

# Encoder: two stacked bidirectional GRUs.
x = tf.keras.layers.Bidirectional(
    tf.keras.layers.GRU(200, return_sequences=True))(x)  # (batch, 800, 400)
x = tf.keras.layers.Bidirectional(
    tf.keras.layers.GRU(200, return_sequences=True))(x)

# Pooling: concatenate average- and max-pooling over the time axis.
avg_pool = tf.keras.layers.GlobalAveragePooling1D()(x)  # (batch, 400)
max_pool = tf.keras.layers.GlobalMaxPooling1D()(x)      # (batch, 400)
conc = tf.keras.layers.concatenate([avg_pool, max_pool])

# Classifier head: Dense -> BN -> ReLU (x2) with dropout, then 10-way softmax.
x = tf.keras.layers.Dense(1000)(conc)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(activation="relu")(x)
x = tf.keras.layers.Dropout(0.2)(x)
x = tf.keras.layers.Dense(500)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(activation="relu")(x)
x = tf.keras.layers.Dense(10)(x)
output = tf.nn.softmax(x)

model = tf.keras.models.Model(inputs=content, outputs=output)

# Inspect the architecture.
model.summary()
'''
Model: "model"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to
==================================================================================================
input_1 (InputLayer)            [(None, 800)]        0
__________________________________________________________________________________________________
word_embedding (Embedding)      (None, 800, 200)     82703200    input_1[0][0]
__________________________________________________________________________________________________
spatial_dropout1d (SpatialDropo (None, 800, 200)     0           word_embedding[0][0]
__________________________________________________________________________________________________
bidirectional (Bidirectional)   (None, 800, 400)     482400      spatial_dropout1d[0][0]
__________________________________________________________________________________________________
bidirectional_1 (Bidirectional) (None, 800, 400)     722400      bidirectional[0][0]
__________________________________________________________________________________________________
global_average_pooling1d (Globa (None, 400)          0           bidirectional_1[0][0]
__________________________________________________________________________________________________
global_max_pooling1d (GlobalMax (None, 400)          0           bidirectional_1[0][0]
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 800)          0           global_average_pooling1d[0][0]
                                                                 global_max_pooling1d[0][0]
__________________________________________________________________________________________________
dense (Dense)                   (None, 1000)         801000      concatenate[0][0]
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 1000)         4000        dense[0][0]
__________________________________________________________________________________________________
activation (Activation)         (None, 1000)         0           batch_normalization[0][0]
__________________________________________________________________________________________________
dropout (Dropout)               (None, 1000)         0           activation[0][0]
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 500)          500500      dropout[0][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 500)          2000        dense_1[0][0]
__________________________________________________________________________________________________
activation_1 (Activation)       (None, 500)          0           batch_normalization_1[0][0]
__________________________________________________________________________________________________
dense_2 (Dense)                 (None, 10)           5010        activation_1[0][0]
__________________________________________________________________________________________________
tf_op_layer_Softmax (TensorFlow [(None, 10)]         0           dense_2[0][0]
==================================================================================================
Total params: 85,220,510
Trainable params: 2,514,310
Non-trainable params: 82,706,200
'''