Splitting, Training, and Testing a Face-Mask Dataset in Jupyter Notebook, with Real-Time Webcam Mask Detection


    Table of Contents

    - Splitting, Training, and Testing the Mask Dataset
      - Downloading the Dataset
      - Splitting the Dataset
      - Building a Small Convolutional Network
      - Data Preprocessing
      - Training
      - Data Augmentation
    - Real-Time Webcam Mask Detection
      - Detection with the Model Trained on Our Mask Dataset
      - Detection with an Open-Source Model
        - Download
        - Run

    Splitting, Training, and Testing the Mask Dataset

    The approach here is similar to the one used for smile detection, which in turn mirrors the classic cats-vs-dogs classification example.

    You can refer to the earlier post on splitting, training, and testing the smile dataset for details.

    Downloading the Dataset

    First we need to download a mask dataset. I used the one provided by my instructor, but its contents were rather disorganized, so I cleaned it up myself; for the no-mask class I reused images from the smile dataset.

    Splitting the Dataset

    import keras
    import os, shutil

    # The path to the directory where the original dataset was uncompressed
    original_dataset_dir = 'D:\\mask\\train'

    # The directory where we will store our smaller dataset
    base_dir = 'D:\\mask1'
    os.mkdir(base_dir)

    # Directories for our training, validation and test splits
    train_dir = os.path.join(base_dir, 'train')
    os.mkdir(train_dir)
    validation_dir = os.path.join(base_dir, 'validation')
    os.mkdir(validation_dir)
    test_dir = os.path.join(base_dir, 'test')
    os.mkdir(test_dir)

    # Directory with our training mask pictures
    train_masks_dir = os.path.join(train_dir, 'masks')
    os.mkdir(train_masks_dir)

    # Directory with our training unmask pictures
    train_unmasks_dir = os.path.join(train_dir, 'unmasks')
    os.mkdir(train_unmasks_dir)

    # Directory with our validation mask pictures
    validation_masks_dir = os.path.join(validation_dir, 'masks')
    os.mkdir(validation_masks_dir)

    # Directory with our validation unmask pictures
    validation_unmasks_dir = os.path.join(validation_dir, 'unmasks')
    os.mkdir(validation_unmasks_dir)

    # Directory with our test mask pictures
    test_masks_dir = os.path.join(test_dir, 'masks')
    os.mkdir(test_masks_dir)

    # Directory with our test unmask pictures
    test_unmasks_dir = os.path.join(test_dir, 'unmasks')
    os.mkdir(test_unmasks_dir)
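    Note that os.mkdir raises FileExistsError if a directory already exists, so re-running this cell will fail partway through. If you expect to re-run it, one option (a substitute for the calls above, not part of the original code) is os.makedirs with exist_ok:

    os.makedirs(os.path.join(base_dir, 'train', 'masks'), exist_ok=True)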

    After creating the folders, add the images to them by hand, following the same approach as in the smile-dataset section.
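    If you would rather script the copying than do it by hand, a minimal sketch along these lines works; note that the 'masks'/'unmasks' source subfolders and the 600/200/200 split sizes are assumptions for illustration, not the layout of the original dataset:

    def copy_split(class_name, dst_dirs, bounds):
        """Copy alphabetically sorted slices of one class into train/validation/test."""
        src_dir = os.path.join(original_dataset_dir, class_name)  # hypothetical layout
        fnames = sorted(os.listdir(src_dir))
        for dst_dir, (start, end) in zip(dst_dirs, bounds):
            for fname in fnames[start:end]:
                shutil.copyfile(os.path.join(src_dir, fname),
                                os.path.join(dst_dir, fname))

    # Placeholder split sizes; adjust to however many images you actually have
    copy_split('masks',
               [train_masks_dir, validation_masks_dir, test_masks_dir],
               [(0, 600), (600, 800), (800, 1000)])
    copy_split('unmasks',
               [train_unmasks_dir, validation_unmasks_dir, test_unmasks_dir],
               [(0, 600), (600, 800), (800, 1000)])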

    Print the counts to verify the split:

    print('total training mask images:', len(os.listdir(train_masks_dir)))
    print('total training unmask images:', len(os.listdir(train_unmasks_dir)))
    print('total validation mask images:', len(os.listdir(validation_masks_dir)))
    print('total validation unmask images:', len(os.listdir(validation_unmasks_dir)))
    print('total test mask images:', len(os.listdir(test_masks_dir)))
    print('total test unmask images:', len(os.listdir(test_unmasks_dir)))

    Building a Small Convolutional Network

    from keras import layers
    from keras import models

    model = models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu',
                            input_shape=(150, 150, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))

    Check how the dimensions of the feature maps change with every successive layer:

    model.summary()
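    As a quick sanity check on the summary: each 3×3 convolution without padding trims 2 pixels from each spatial dimension, and each 2×2 max-pooling halves it (rounding down), so the feature maps shrink as 150 → 148 → 74 → 72 → 36 → 34 → 17 → 15 → 7. Flattening the final 7×7×128 maps therefore yields a vector of 7 × 7 × 128 = 6272 values feeding the dense layers.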

    Data Preprocessing

    from keras import optimizers
    from keras.preprocessing.image import ImageDataGenerator

    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.RMSprop(lr=1e-4),
                  metrics=['acc'])

    # All images will be rescaled by 1./255
    train_datagen = ImageDataGenerator(rescale=1./255)
    test_datagen = ImageDataGenerator(rescale=1./255)

    train_generator = train_datagen.flow_from_directory(
            # This is the target directory
            train_dir,
            # All images will be resized to 150x150
            target_size=(150, 150),
            batch_size=20,
            # Since we use binary_crossentropy loss, we need binary labels
            class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
            validation_dir,
            target_size=(150, 150),
            batch_size=20,
            class_mode='binary')

    Inspect the shape of one batch:

    for data_batch, labels_batch in train_generator:
        print('data batch shape:', data_batch.shape)
        print('labels batch shape:', labels_batch.shape)
        break
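    With target_size=(150, 150) and batch_size=20, this should print a data batch shape of (20, 150, 150, 3) and a labels batch shape of (20,).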

    Training

    history = model.fit_generator(
          train_generator,
          steps_per_epoch=100,
          epochs=30,
          validation_data=validation_generator,
          validation_steps=50)
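    With a batch size of 20, steps_per_epoch=100 means each epoch draws 100 × 20 = 2000 training images, and validation_steps=50 evaluates on 50 × 20 = 1000 validation images per epoch.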

    model.save('D:/mask1/masks_and_unmasks_small_1.h5')

    Plot the model's loss and accuracy over the training and validation data:

    import matplotlib.pyplot as plt

    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']

    epochs = range(len(acc))

    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()

    plt.figure()

    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()

    plt.show()

    Data Augmentation

    datagen = ImageDataGenerator(
          rotation_range=40,
          width_shift_range=0.2,
          height_shift_range=0.2,
          shear_range=0.2,
          zoom_range=0.2,
          horizontal_flip=True,
          fill_mode='nearest')
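    To see what this configuration actually does to a picture, a small preview sketch (assuming the directories created earlier) generates a few augmented variants of one training image:

    from keras.preprocessing import image

    fnames = [os.path.join(train_masks_dir, fname)
              for fname in os.listdir(train_masks_dir)]
    img = image.load_img(fnames[0], target_size=(150, 150))  # any training image
    x = image.img_to_array(img)
    x = x.reshape((1,) + x.shape)  # shape (1, 150, 150, 3) for .flow()

    # .flow() loops forever, so break after a few augmented samples
    i = 0
    for batch in datagen.flow(x, batch_size=1):
        plt.figure(i)
        plt.imshow(image.array_to_img(batch[0]))
        i += 1
        if i % 4 == 0:
            break
    plt.show()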

    If we train a new network with this data-augmentation configuration, the network will never see the exact same input twice. However, the inputs it sees are still highly intercorrelated, because they come from a small number of original images; we cannot produce new information, only remix existing information. As such, this may not be enough to get rid of overfitting completely. To fight overfitting further, we also add a Dropout layer to our model, right before the densely connected classifier:

    model = models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu',
                            input_shape=(150, 150, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.RMSprop(lr=1e-4),
                  metrics=['acc'])

    Train the network using data augmentation and dropout:

    train_datagen = ImageDataGenerator(
        rescale=1./255,
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,)

    # Note that the validation data should not be augmented!
    test_datagen = ImageDataGenerator(rescale=1./255)

    train_generator = train_datagen.flow_from_directory(
            # This is the target directory
            train_dir,
            # All images will be resized to 150x150
            target_size=(150, 150),
            batch_size=32,
            # Since we use binary_crossentropy loss, we need binary labels
            class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
            validation_dir,
            target_size=(150, 150),
            batch_size=32,
            class_mode='binary')

    history = model.fit_generator(
          train_generator,
          steps_per_epoch=100,
          epochs=50,
          validation_data=validation_generator,
          validation_steps=50)

    Because training takes so long, I changed the epoch count here to 50, i.e. 50 passes over the data, but training still takes a very long time.
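    If training time is the constraint, one option not used in this post is to let Keras stop early and keep only the best weights via callbacks; a minimal sketch (the patience value and checkpoint path are arbitrary choices):

    from keras.callbacks import EarlyStopping, ModelCheckpoint

    callbacks = [
        # Stop once val_loss has not improved for 5 consecutive epochs
        EarlyStopping(monitor='val_loss', patience=5),
        # Keep the best-performing weights on disk as training runs
        ModelCheckpoint('D:/mask1/masks_best.h5',
                        monitor='val_loss', save_best_only=True),
    ]
    history = model.fit_generator(
          train_generator,
          steps_per_epoch=100,
          epochs=50,
          validation_data=validation_generator,
          validation_steps=50,
          callbacks=callbacks)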

    Save the model:

    model.save('D:/mask1/masks_and_unmasks_small_2.h5')

    Plot the model's loss and accuracy over the training and validation data:

    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']

    epochs = range(len(acc))

    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()

    plt.figure()

    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()

    plt.show()

    However, since training takes so long, I did not train any further.

    Real-Time Webcam Mask Detection

    Detection with the Model Trained on Our Mask Dataset

    # Detect faces in a video file or webcam stream and classify mask / no mask
    import cv2
    import dlib
    import numpy as np
    from keras.models import load_model

    model = load_model('D:/mask1/masks_and_unmasks_small_2.h5')
    detector = dlib.get_frontal_face_detector()
    video = cv2.VideoCapture(0)
    font = cv2.FONT_HERSHEY_SIMPLEX

    def rec(img):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        dets = detector(gray, 1)
        for face in dets:
            left = face.left()
            top = face.top()
            right = face.right()
            bottom = face.bottom()
            cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
            # Crop the face, resize to the network's input size and rescale
            img1 = cv2.resize(img[top:bottom, left:right], dsize=(150, 150))
            img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
            img1 = np.array(img1) / 255.
            img_tensor = img1.reshape(-1, 150, 150, 3)
            prediction = model.predict(img_tensor)
            # Sigmoid output: values above 0.5 map to the 'unmasks' class
            if prediction[0][0] > 0.5:
                result = 'unmask'
            else:
                result = 'mask'
            cv2.putText(img, result, (left, top), font, 2, (0, 255, 0), 2, cv2.LINE_AA)
        # Show the frame even when no face was detected
        cv2.imshow('Video', img)

    while video.isOpened():
        res, img_rd = video.read()
        if not res:
            break
        rec(img_rd)
        if cv2.waitKey(5) & 0xFF == ord('q'):
            break

    video.release()
    cv2.destroyAllWindows()
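    One detail worth verifying: flow_from_directory assigns class indices in alphabetical order of the folder names, so 'masks' becomes 0 and 'unmasks' becomes 1, which is why a sigmoid output above 0.5 is read as 'unmask' above. You can confirm the mapping from the training generator:

    print(train_generator.class_indices)  # expected: {'masks': 0, 'unmasks': 1}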

    The test looks like this:

    Detection with an Open-Source Model

    Download

    Open-source face mask detection model and data

    This model is built on TensorFlow and Keras, so those need to be set up first.

    Run

    Then simply run the file.

    I ran it in Spyder, also under an Anaconda environment; for a project organized into folders like this, I find that easier to work with.
