I've recently started learning deep learning, using the book Deep Learning with Python as my reference. The book explains the theory in great detail, so I won't repeat it here; instead, I record my own implementations of the book's examples to reinforce what I've learned.
1. IMDB movie review classification
import keras
import numpy as np
from keras.datasets import imdb
from keras import models
from keras import layers
from keras import optimizers
import matplotlib.pyplot as plt
# The dataset has 50,000 reviews in total: 25,000 for training and 25,000 for testing
# 1. Load the data; num_words=10000 keeps only the 10,000 most frequent words and discards the rest
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
print(train_data[0])
print(train_labels[0])
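Each review is just a list of word indices. As a quick sanity check, the indices can be mapped back to words with the dataset's word index (offset by 3, since indices 0 to 2 are reserved for padding, start-of-sequence, and unknown):
word_index = imdb.get_word_index()
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
decoded_review = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]])
print(decoded_review)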
# 2. Encode the integer sequences into a binary matrix
def vectorize_sequences(sequences, dimension=10000):
    # Create an all-zero matrix of shape (len(sequences), dimension)
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.  # Set the given indices of results[i] to 1
    return results
# Vectorize the training data
x_train = vectorize_sequences(train_data)
# Vectorize the test data
x_test = vectorize_sequences(test_data)
print(x_train[0])
# Vectorize the labels as well
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
# 3. Build the network
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
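Before compiling, model.summary() is a handy check on the architecture; it lists every layer with its output shape and parameter count (the first Dense layer alone has 10000 * 16 + 16 = 160,016 parameters):
model.summary()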
# 4. Compile the model
model.compile(
    optimizer=optimizers.RMSprop(lr=0.001),  # newer Keras versions use learning_rate instead of lr
    loss='binary_crossentropy',
    metrics=['accuracy']
)
# 5. Set aside a validation set
x_val = x_train[:10000]
partial_x_train = x_train[10000:]
y_val = y_train[:10000]
partial_y_train = y_train[10000:]
# 6. Train the model; fit returns a History object whose history attribute records the loss and accuracy per epoch
history = model.fit(
    partial_x_train,
    partial_y_train,
    batch_size=512,
    epochs=20,
    validation_data=(x_val, y_val)
)
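Before plotting, it can help to print the keys of history.history to see exactly which metrics were recorded; the key names differ across Keras versions ('acc' in older releases, 'accuracy' in newer ones):
print(history.history.keys())
# e.g. dict_keys(['val_loss', 'val_acc', 'loss', 'acc']) on older Keras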
# 7. Plot the training and validation loss with matplotlib
history_dict = history.history
loss_value = history_dict['loss']
val_loss_value = history_dict['val_loss']
epochs = range(1, len(loss_value) + 1)
plt.plot(epochs, loss_value, 'bo', label='train_loss')
plt.plot(epochs, val_loss_value, 'b', label='val_loss')
plt.title('train and validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()
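The accuracy curves can be drawn the same way; a minimal sketch, assuming the old-Keras history keys 'acc' and 'val_acc' (newer versions record 'accuracy' and 'val_accuracy'):
acc_value = history_dict['acc']  # 'accuracy' in newer Keras versions
val_acc_value = history_dict['val_acc']
plt.clf()  # clear the previous figure
plt.plot(epochs, acc_value, 'bo', label='train_acc')
plt.plot(epochs, val_acc_value, 'b', label='val_acc')
plt.title('train and validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()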
Judging from the curves, the model overfits: the validation loss starts rising after a few epochs while the training loss keeps falling. Later chapters cover techniques to mitigate this.
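Even before those techniques, the book's immediate fix is simply to stop earlier: retrain a fresh network for only four epochs (about where the validation loss bottoms out) and evaluate it on the test set:
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=4, batch_size=512)
results = model.evaluate(x_test, y_test)  # [test loss, test accuracy]
print(results)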
2. Newswire multiclass classification
import keras
import numpy as np
from keras.datasets import reuters
from keras import models
from keras import layers
from keras import optimizers
from keras.utils.np_utils import to_categorical
import matplotlib.pyplot as plt
# A multiclass classification problem on newswires: 8,982 training examples and 2,246 test examples
# 1. Load the data; num_words=10000 keeps only the 10,000 most frequent words and discards the rest
(train_data, train_labels), (test_data, test_labels) = reuters.load_data(num_words=10000)
print(train_data[0])
print(train_labels[0])
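The labels here are topic indices from 0 to 45. A quick check of my own (not from the book) confirms the 46 classes, which is what fixes the size of the softmax output layer below:
num_classes = max(train_labels) + 1
print(num_classes)  # 46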
# 2. Encode the integer sequences into a binary matrix
def vectorize_sequences(sequences, dimension=10000):
    # Create an all-zero matrix of shape (len(sequences), dimension)
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.  # Set the given indices of results[i] to 1
    return results
# Vectorize the training data
x_train = vectorize_sequences(train_data)
# Vectorize the test data
x_test = vectorize_sequences(test_data)
print(x_train[0])
# One-hot encode the labels as well
one_hot_train_labels = to_categorical(train_labels)
one_hot_test_labels = to_categorical(test_labels)
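One-hot encoding is one option; the book also notes an equivalent alternative that keeps the labels as plain integer tensors and switches the loss accordingly:
# Alternative: keep the labels as integers
y_train_int = np.array(train_labels)
y_test_int = np.array(test_labels)
# and compile with loss='sparse_categorical_crossentropy' instead of
# 'categorical_crossentropy'; the two are mathematically equivalent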
# 3. Build the network
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))
# 4. Compile the model
model.compile(
    optimizer=optimizers.RMSprop(lr=0.001),
    loss='categorical_crossentropy',
    metrics=['accuracy']
)
# 5. Set aside a validation set
x_val = x_train[:1000]
partial_x_train = x_train[1000:]
y_val = one_hot_train_labels[:1000]
partial_y_train = one_hot_train_labels[1000:]
# 6. Train the model; fit again returns a History object with the per-epoch loss and accuracy
history = model.fit(
    partial_x_train,
    partial_y_train,
    batch_size=512,
    epochs=20,
    validation_data=(x_val, y_val)
)
# 7. Plot the training and validation loss with matplotlib
history_dict = history.history
loss_value = history_dict['loss']
val_loss_value = history_dict['val_loss']
epochs = range(1, len(loss_value) + 1)
plt.plot(epochs, loss_value, 'bo', label='train_loss')
plt.plot(epochs, val_loss_value, 'b', label='val_loss')
plt.title('train and validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()
result = model.evaluate(x_test, one_hot_test_labels)
print(result)
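Accuracy alone is hard to interpret with 46 classes, so the book compares it against a random baseline obtained by shuffling the test labels, which lands around 19%:
import copy
test_labels_copy = copy.copy(test_labels)
np.random.shuffle(test_labels_copy)
hits_array = np.array(test_labels) == np.array(test_labels_copy)
print(float(np.sum(hits_array)) / len(test_labels))  # roughly 0.19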
3. Boston housing price regression
import keras
import numpy as np
from keras.datasets import boston_housing
from keras import models
from keras import layers
from keras import optimizers
import matplotlib.pyplot as plt
# A regression problem: predict the median house price. There are 404 training samples and 102 test samples
# 1. Load the data
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
print(train_data.shape)
print(test_data.shape)
# 2. Preprocess the data. The input features have very different ranges (some lie in 0-1, others in 0-100), so the data must be standardized
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
# Standardize the test data too, using only the mean and std computed on the
# training data; nothing should ever be computed from the test data itself
test_data -= mean
test_data /= std
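As a quick sanity check of my own (not from the book), the standardized training features should now have per-column mean close to 0 and standard deviation close to 1:
print(train_data.mean(axis=0))  # each entry close to 0
print(train_data.std(axis=0))   # each entry close to 1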
# 3. Build the model. A function is defined here because the model will be instantiated several times
def build_model():
    model = models.Sequential()
    model.add(layers.Dense(64, activation='relu', input_shape=(train_data.shape[1],)))
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(1))
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return model
# 4. Because the dataset is small, use k-fold cross-validation to evaluate the model
# First choose the number of folds; here k = 4
k = 4
num_val_samples = len(train_data) // k
num_epoch = 500
all_mae_histories = []
for i in range(k):
    # Prepare the validation data: the data from partition #i
    print('processing fold #', i)
    val_data = train_data[num_val_samples * i:num_val_samples * (i + 1)]
    val_targets = train_targets[num_val_samples * i:num_val_samples * (i + 1)]
    # Prepare the training data: the data from the other k-1 partitions
    partial_train_data = np.concatenate(
        [train_data[:num_val_samples * i],
         train_data[num_val_samples * (i + 1):]],
        axis=0
    )
    partial_train_targets = np.concatenate(
        [train_targets[:num_val_samples * i],
         train_targets[num_val_samples * (i + 1):]],
        axis=0
    )
    # Train the model; verbose=0 silences the per-epoch logging
    model = build_model()
    history = model.fit(partial_train_data, partial_train_targets,
                        validation_data=(val_data, val_targets),
                        batch_size=1, epochs=num_epoch, verbose=0)
    # Note: newer Keras versions store this metric under the key 'val_mae'
    mae_history = history.history['val_mean_absolute_error']
    all_mae_histories.append(mae_history)
# Average the per-epoch validation MAE over the k folds
average_mae_history = [
    np.mean([x[i] for x in all_mae_histories]) for i in range(num_epoch)]
# Plot the curve; smooth it first with an exponential moving average,
# since the raw per-epoch MAE is noisy and hard to read
def smooth_curve(points, factor=0.9):
    smoothed_points = []
    for point in points:
        if smoothed_points:
            previous = smoothed_points[-1]
            # Each point is replaced by factor * previous + (1 - factor) * current
            smoothed_points.append(previous * factor + point * (1 - factor))
        else:
            smoothed_points.append(point)
    return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])  # drop the first 10 epochs, whose MAE is on a different scale
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
# 5. Finally, train a fresh model on all the training data, using the epoch count suggested by the validation curve (around 80), and evaluate it on the test set
model = build_model()
model.fit(train_data, train_targets, batch_size=16, epochs=80, verbose=0)
test_mse, test_mae = model.evaluate(test_data, test_targets)
print(test_mae)
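With the final model trained, predicting prices for new samples is a single call; a minimal sketch:
predictions = model.predict(test_data)
print(predictions[0])  # predicted median price for the first test sample, in thousands of dollars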