Facial Expression Detection and Mask Recognition with Python + OpenCV


    Contents

    1. Installing dlib, face_recognition and opencv-python
       Installing dlib: CMake, Boost, dlib
       Installing face_recognition
       Installing opencv-python
    2. dlib's 68-point model
    3. Face recognition & expression detection in Python
    4. CNN-based expression recognition
    5. Mask detection, including training your own model: importing Keras and splitting the dataset, building the network, data preprocessing, data augmentation, training, saving, and using the model
    6. References

    1. Installing dlib, face_recognition and opencv-python

    Since I work in Jupyter Notebook, I installed dlib and opencv-python from the Anaconda Prompt command line.

    Installing dlib:

    1. If possible, enter the following commands directly at that command line:

    pip install cmake
    pip install boost
    pip install dlib

    If Visual Studio 2019 is already installed, pip install dlib should work on its own; at least it did for me.

    Because the third command often fails for many people, here is a second method.

    2. Download the source archive from the dlib site: http://dlib.net/ or https://github.com/davisking/dlib

    After downloading, extract the archive.

    Before installing dlib you need to install Boost and CMake; dlib 19 and later also requires Visual Studio 2015 or newer as the IDE. I used VS 2019. (It is best to install VS first, then CMake and Boost.)

    Installing CMake

    Download the installer from the official site: https://cmake.org/download/


    Run the installer, then add CMake to the PATH environment variable.

    Installing Boost

    Download Boost from: http://www.boost.org/

    If your Visual Studio is 2015 or newer, you can go straight to the next step. Install the latest Boost release if possible; otherwise the b2 command may not be found.

    Extract the archive and open the boost_1_73_0 folder. Find the bootstrap.bat batch file and double-click it; when it finishes (the console window closes by itself) it produces two files, b2.exe and bjam.exe.

    Copy these two files into the root of the boost_1_73_0 folder, open a command prompt in that folder, and run:

    b2 install

    This step takes a while; be patient.

    Build the library files with b2:

    b2 -a --with-python address-model=64 toolset=msvc runtime-link=static

    If the CMake you downloaded earlier is 64-bit, write 64 for address-model as above; if it is 32-bit, change the 64 to 32.

    After the build finishes, add Boost to the environment variables.

    Installing dlib

    Change into the folder where you extracted dlib and run:

    python setup.py install

    On success you will see dlib and dlib.egg-info in the folder. Copy these two folders into the Lib\site-packages directory of your Python installation:

    For example, my Python environment is Python 2.7, so the two folders go into Python2-7\Lib\site-packages. With that, the dlib setup is complete.

    Installing face_recognition

    Enter the following command in the Anaconda Prompt:

    pip install face_recognition

    Installing opencv-python

    Enter the following command in the Anaconda Prompt:

    pip install opencv-python

    If it keeps failing, switch to a mirror, e.g. the Tsinghua PyPI mirror:

    pip install -i https://pypi.tuna.tsinghua.edu.cn/simple opencv-python

    2. dlib's 68-Point Model

    dlib's 68-point model is a shape predictor trained by others and shared online; the Python face-recognition code below uses it to predict the facial landmark positions.
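    As a minimal sketch of how the predictor is used on a single image (the model file is the publicly distributed shape_predictor_68_face_landmarks.dat from http://dlib.net/files/, and test.jpg is a placeholder name):

    import dlib
    import cv2

    # Placeholder paths: download the .dat from http://dlib.net/files/ and
    # point to any test image of your own.
    predictor_path = "shape_predictor_68_face_landmarks.dat"
    img = cv2.imread("test.jpg")

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    for face in detector(gray, 0):
        shape = predictor(img, face)
        # Points 0-16 trace the jaw line, 17-26 the eyebrows, 27-35 the nose,
        # 36-47 the eyes and 48-67 the mouth; each exposes pixel x/y coordinates.
        print("nose tip:", shape.part(30).x, shape.part(30).y)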

    3. Face Recognition & Expression Detection in Python

    import dlib          # face detection / landmark library
    import numpy as np   # numerical processing
    import cv2           # image processing (OpenCV)


    class face_emotion():

        def __init__(self):
            # Frontal face detector from dlib
            self.detector = dlib.get_frontal_face_detector()

            # dlib's 68-point model: the pre-trained shape predictor
            self.predictor = dlib.shape_predictor("F:/face.dat")

            # Open the built-in camera; an external camera takes over automatically if attached
            self.cap = cv2.VideoCapture(0)

            # Set video parameters: propId 3 is the frame width
            self.cap.set(3, 480)

            # Screenshot counter
            self.cnt = 0

        def learning_face(self):
            # Buffers for fitting a straight line to the eyebrow points
            line_brow_x = []
            line_brow_y = []

            # cap.isOpened() returns True/False: did the camera initialize?
            while self.cap.isOpened():
                # cap.read() returns two values:
                #   a success flag (False on failure or at end of stream)
                #   the frame itself as a 3-D image array
                flag, im_rd = self.cap.read()

                # Wait 1 ms per frame; a delay of 0 would freeze on a static frame
                k = cv2.waitKey(1)

                # Convert to grayscale (OpenCV frames are BGR)
                img_gray = cv2.cvtColor(im_rd, cv2.COLOR_BGR2GRAY)

                # Detect the faces in this frame
                faces = self.detector(img_gray, 0)

                # Font for the on-screen text
                font = cv2.FONT_HERSHEY_SIMPLEX

                # If at least one face was detected, mark 68 landmarks on each one
                if len(faces) != 0:
                    for d in faces:
                        # Draw a red rectangle around the face
                        cv2.rectangle(im_rd, (d.left(), d.top()), (d.right(), d.bottom()), (0, 0, 255))

                        # Face width, used below to normalize all the measurements
                        self.face_width = d.right() - d.left()

                        # Predict the 68 landmark coordinates
                        shape = self.predictor(im_rd, d)

                        # Draw a small circle on each landmark
                        for i in range(68):
                            cv2.circle(im_rd, (shape.part(i).x, shape.part(i).y), 2, (0, 255, 0), -1, 8)

                        # Relative positions of a few landmarks serve as expression cues
                        mouth_width = (shape.part(54).x - shape.part(48).x) / self.face_width  # mouth width
                        mouth_higth = (shape.part(66).y - shape.part(62).y) / self.face_width  # mouth opening

                        # Use the 10 eyebrow landmarks to measure brow raising and frowning
                        line_brow_x.clear()   # reset the buffers for this face
                        line_brow_y.clear()
                        brow_sum = 0    # sum of brow heights
                        frown_sum = 0   # sum of distances between the two brows
                        for j in range(17, 21):
                            brow_sum += (shape.part(j).y - d.top()) + (shape.part(j + 5).y - d.top())
                            frown_sum += shape.part(j + 5).x - shape.part(j).x
                            line_brow_x.append(shape.part(j).x)
                            line_brow_y.append(shape.part(j).y)

                        # Estimate the brow tilt by fitting a straight line
                        tempx = np.array(line_brow_x)
                        tempy = np.array(line_brow_y)
                        z1 = np.polyfit(tempx, tempy, 1)
                        # Negate: image y grows downward, so the fitted slope is
                        # opposite to the brow's actual tilt
                        self.brow_k = -round(z1[0], 3)

                        brow_hight = (brow_sum / 10) / self.face_width  # brow height, normalized
                        brow_width = (frown_sum / 5) / self.face_width  # brow separation, normalized

                        # How open the eyes are
                        eye_sum = (shape.part(41).y - shape.part(37).y + shape.part(40).y - shape.part(38).y +
                                   shape.part(47).y - shape.part(43).y + shape.part(46).y - shape.part(44).y)
                        eye_hight = (eye_sum / 4) / self.face_width

                        # Case analysis
                        # Mouth open: happy or amazed
                        if mouth_higth >= 0.03:
                            if eye_hight >= 0.056:
                                cv2.putText(im_rd, "amazing", (d.left(), d.bottom() + 20),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2, 4)
                            else:
                                cv2.putText(im_rd, "happy", (d.left(), d.bottom() + 20),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2, 4)
                        # Mouth closed: neutral or angry
                        else:
                            if self.brow_k <= -0.3:
                                cv2.putText(im_rd, "angry", (d.left(), d.bottom() + 20),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2, 4)
                            else:
                                cv2.putText(im_rd, "nature", (d.left(), d.bottom() + 20),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2, 4)

                    # Show the number of faces
                    cv2.putText(im_rd, "Faces: " + str(len(faces)), (20, 50), font, 1, (0, 0, 255), 1, cv2.LINE_AA)
                else:
                    # No face detected
                    cv2.putText(im_rd, "No Face", (20, 50), font, 1, (0, 0, 255), 1, cv2.LINE_AA)

                # On-screen instructions
                im_rd = cv2.putText(im_rd, "S: screenshot", (20, 400), font, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
                im_rd = cv2.putText(im_rd, "Q: quit", (20, 450), font, 0.8, (0, 0, 255), 1, cv2.LINE_AA)

                # Press 's' to save a screenshot
                if k == ord('s'):
                    self.cnt += 1
                    cv2.imwrite("screenshoot" + str(self.cnt) + ".jpg", im_rd)

                # Press 'q' to quit
                if k == ord('q'):
                    break

                # Show the window
                cv2.imshow("camera", im_rd)

            # Release the camera and destroy the window
            self.cap.release()
            cv2.destroyAllWindows()


    if __name__ == "__main__":
        my_face = face_emotion()
        my_face.learning_face()
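    The thresholds in the code (mouth opening >= 0.03, eye opening >= 0.056, brow slope <= -0.3) are empirical. Because every measurement is divided by the detected face width, they are roughly scale-invariant, but you may still need to tune them for your camera and face.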

    4. CNN-Based Expression Recognition

    First, locate the lips and display the lip landmarks:

    # Draw the positions of someone's lip
    import dlib   # face detection library Dlib
    import cv2    # image processing library OpenCV
    from get_features import get_features   # returns the positions of feature points

    path_test_img = "F:/data/data_imgs/test_imgs/i064qa-mn.jpg"

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('F:/shape_predictor_68_face_landmarks.dat')

    # Get the positions of the lip feature points
    positions_lip = get_features(path_test_img)

    img_rd = cv2.imread(path_test_img)

    # Draw the lip points
    for i in range(0, len(positions_lip), 2):
        print(positions_lip[i], positions_lip[i + 1])
        cv2.circle(img_rd, tuple([positions_lip[i], positions_lip[i + 1]]),
                   radius=1, color=(0, 255, 0))

    cv2.namedWindow("img_read", 2)
    cv2.imshow("img_read", img_rd)
    cv2.waitKey(0)

    Then detect whether the person in the picture is smiling, using the saved models:

    # Use the saved models
    from sklearn.externals import joblib
    from get_features import get_features
    import ML_ways_sklearn
    import cv2

    # Path of the test image
    path_test_img = "F:/data/data_imgs/test_imgs/i064qa-mn.jpg"

    # Extract the 40-dimensional feature vector for a single image
    positions_lip_test = get_features(path_test_img)

    # Path of the saved models
    path_models = "F:/data/data_models/"

    print("The result of " + path_test_img + ":")
    print('\n')

    # ######### LR ###########
    LR = joblib.load(path_models + "model_LR.m")
    ss_LR = ML_ways_sklearn.model_LR()
    X_test_LR = ss_LR.transform([positions_lip_test])
    y_predict_LR = str(LR.predict(X_test_LR)[0]).replace('0', "no smile").replace('1', "with smile")
    print("LR:", y_predict_LR)

    # ######### LSVC ###########
    LSVC = joblib.load(path_models + "model_LSVC.m")
    ss_LSVC = ML_ways_sklearn.model_LSVC()
    X_test_LSVC = ss_LSVC.transform([positions_lip_test])
    y_predict_LSVC = str(LSVC.predict(X_test_LSVC)[0]).replace('0', "no smile").replace('1', "with smile")
    print("LSVC:", y_predict_LSVC)

    # ######### MLPC ###########
    MLPC = joblib.load(path_models + "model_MLPC.m")
    ss_MLPC = ML_ways_sklearn.model_MLPC()
    X_test_MLPC = ss_MLPC.transform([positions_lip_test])
    y_predict_MLPC = str(MLPC.predict(X_test_MLPC)[0]).replace('0', "no smile").replace('1', "with smile")
    print("MLPC:", y_predict_MLPC)

    # ######### SGDC ###########
    SGDC = joblib.load(path_models + "model_SGDC.m")
    ss_SGDC = ML_ways_sklearn.model_SGDC()
    X_test_SGDC = ss_SGDC.transform([positions_lip_test])
    y_predict_SGDC = str(SGDC.predict(X_test_SGDC)[0]).replace('0', "no smile").replace('1', "with smile")
    print("SGDC:", y_predict_SGDC)

    img_test = cv2.imread(path_test_img)
    img_height = int(img_test.shape[0])
    img_width = int(img_test.shape[1])

    # Show the results on the image
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img_test, "LR: " + y_predict_LR, (int(img_height / 10), int(img_width / 10)),
                font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)
    cv2.putText(img_test, "LSVC: " + y_predict_LSVC, (int(img_height / 10), int(img_width / 10) * 2),
                font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)
    cv2.putText(img_test, "MLPC: " + y_predict_MLPC, (int(img_height / 10), int(img_width / 10) * 3),
                font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)
    cv2.putText(img_test, "SGDC: " + y_predict_SGDC, (int(img_height / 10), int(img_width / 10) * 4),
                font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)

    cv2.namedWindow("img", 2)
    cv2.imshow("img", img_test)
    cv2.waitKey(0)
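    A compatibility note: sklearn.externals.joblib was removed in scikit-learn 0.23, so on newer versions replace that import with a plain import joblib (pip install joblib); the load calls stay the same. The helper modules get_features and ML_ways_sklearn appear to come from the referenced articles listed at the end of this post.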

    5. Mask Detection

    import cv2

    # OpenCV's bundled frontal-face Haar cascade
    detector = cv2.CascadeClassifier('E:/opencv/opencv/build/etc/haarcascades/haarcascade_frontalface_default.xml')
    # A separately trained mask cascade
    mask_detector = cv2.CascadeClassifier('F:/cascade.xml')

    cap = cv2.VideoCapture(0)
    while True:
        ret, img = cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = detector.detectMultiScale(gray, 1.1, 3)
        for (x, y, w, h) in faces:
            # rectangle arguments: image, top-left corner, bottom-right corner, color, thickness
            face = img[y:y + h, x:x + w]   # crop coordinates are [y0:y1, x0:x1]
            mask_face = mask_detector.detectMultiScale(gray, 1.1, 5)
            for (x2, y2, w2, h2) in mask_face:
                cv2.rectangle(img, (x2, y2), (x2 + w2, y2 + h2), (0, 0, 255), 2)
        cv2.imshow('mask', img)
        # Press 'q' to quit
        if cv2.waitKey(3) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()

    E:/opencv/opencv/build/etc/haarcascades/haarcascade_frontalface_default.xml is the face-detection XML file that ships with OpenCV. OpenCV bundles other cascades too, such as smile, cat-face and eye detectors; have a look in the same directory of your own installation.
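    If you installed OpenCV with pip rather than the Windows build, the same XML files ship inside the wheel and can be located through cv2.data, so no hard-coded install path is needed (a short sketch):

    import os
    import cv2

    # cv2.data.haarcascades points at the directory holding the bundled XMLs
    print(os.listdir(cv2.data.haarcascades))  # face, smile, eye, cat-face cascades, etc.

    detector = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')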

    cascade.xml is the separately trained mask classifier (the download resource accompanied the original post).

    Training Your Own Model

    Importing Keras and Splitting the Dataset

    import keras
    keras.__version__

    '2.3.1'
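    These listings target the standalone Keras 2.3.1 API shown above. On newer TensorFlow-bundled Keras, fit_generator is deprecated in favor of fit, and RMSprop takes learning_rate rather than lr, so the code below would need those two renames.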

    import os, shutil

    # The directory where the uncompressed dataset lives
    original_dataset_dir = 'F:/人工智能与机器学习/mask/test'
    # original_dataset_dir2 = 'F:/人工智能与机器学习/mask/nomask'

    # The directory where we will store our smaller dataset
    base_dir = 'F:/人工智能与机器学习/mask/maskout'
    os.mkdir(base_dir)

    # Directories for our training, validation and test splits
    train_dir = os.path.join(base_dir, 'train')
    os.mkdir(train_dir)
    validation_dir = os.path.join(base_dir, 'validation')
    os.mkdir(validation_dir)
    test_dir = os.path.join(base_dir, 'test')
    os.mkdir(test_dir)

    # Directories for the mask / unmask pictures in each split
    train_mask_dir = os.path.join(train_dir, 'mask')
    os.mkdir(train_mask_dir)
    train_unmask_dir = os.path.join(train_dir, 'unmask')
    os.mkdir(train_unmask_dir)
    validation_mask_dir = os.path.join(validation_dir, 'mask')
    os.mkdir(validation_mask_dir)
    validation_unmask_dir = os.path.join(validation_dir, 'unmask')
    os.mkdir(validation_unmask_dir)
    test_mask_dir = os.path.join(test_dir, 'mask')
    os.mkdir(test_mask_dir)
    test_unmask_dir = os.path.join(test_dir, 'unmask')
    os.mkdir(test_unmask_dir)

    # Copy the 561 mask images (0.jpg .. 560.jpg) to train_mask_dir.
    # NOTE: the same file ranges are reused for validation and test below,
    # so the three splits are copies of each other, not independent sets.
    fnames = ['{}.jpg'.format(i) for i in range(561)]
    for fname in fnames:
        src = os.path.join(original_dataset_dir, fname)
        dst = os.path.join(train_mask_dir, fname)
        shutil.copyfile(src, dst)

    # Copy the same mask images to validation_mask_dir
    fnames = ['{}.jpg'.format(i) for i in range(561)]
    for fname in fnames:
        src = os.path.join(original_dataset_dir, fname)
        dst = os.path.join(validation_mask_dir, fname)
        shutil.copyfile(src, dst)

    # Copy the same mask images to test_mask_dir
    fnames = ['{}.jpg'.format(i) for i in range(561)]
    for fname in fnames:
        src = os.path.join(original_dataset_dir, fname)
        dst = os.path.join(test_mask_dir, fname)
        shutil.copyfile(src, dst)

    # Copy the 1030 unmask images (561.jpg .. 1590.jpg) to train_unmask_dir
    fnames = ['{}.jpg'.format(i) for i in range(561, 1591)]
    for fname in fnames:
        src = os.path.join(original_dataset_dir, fname)
        dst = os.path.join(train_unmask_dir, fname)
        shutil.copyfile(src, dst)

    # Copy the same unmask images to validation_unmask_dir
    fnames = ['{}.jpg'.format(i) for i in range(561, 1591)]
    for fname in fnames:
        src = os.path.join(original_dataset_dir, fname)
        dst = os.path.join(validation_unmask_dir, fname)
        shutil.copyfile(src, dst)

    # Copy the same unmask images to test_unmask_dir
    fnames = ['{}.jpg'.format(i) for i in range(561, 1591)]
    for fname in fnames:
        src = os.path.join(original_dataset_dir, fname)
        dst = os.path.join(test_unmask_dir, fname)
        shutil.copyfile(src, dst)

    print('total training mask images:', len(os.listdir(train_mask_dir)))
    print('total training unmask images:', len(os.listdir(train_unmask_dir)))
    print('total validation mask images:', len(os.listdir(validation_mask_dir)))
    print('total validation unmask images:', len(os.listdir(validation_unmask_dir)))
    print('total test mask images:', len(os.listdir(test_mask_dir)))
    print('total test unmask images:', len(os.listdir(test_unmask_dir)))

    total training mask images: 561
    total training unmask images: 1030
    total validation mask images: 561
    total validation unmask images: 1030
    total test mask images: 561
    total test unmask images: 1030
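    Because the same files were copied into all three splits, the validation and test scores below measure memorization as much as generalization. If you want disjoint splits, something like the following sketch works (the 60/20/20 ratios and the helper name are my assumption, not part of the original recipe):

    import os, shutil, random

    def split_disjoint(fnames, train_dst, val_dst, test_dst, src_dir, seed=42):
        # Shuffle once, then send 60% to train, 20% to validation, 20% to test
        random.Random(seed).shuffle(fnames)
        n = len(fnames)
        splits = [(fnames[:int(0.6 * n)], train_dst),
                  (fnames[int(0.6 * n):int(0.8 * n)], val_dst),
                  (fnames[int(0.8 * n):], test_dst)]
        for names, dst in splits:
            for fname in names:
                shutil.copyfile(os.path.join(src_dir, fname), os.path.join(dst, fname))

    # e.g. for the mask class:
    mask_names = ['{}.jpg'.format(i) for i in range(561)]
    split_disjoint(mask_names, train_mask_dir, validation_mask_dir, test_mask_dir,
                   original_dataset_dir)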

    Building the Network

    from keras import layers
    from keras import models

    model = models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))

    model.summary()
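    For reference, the feature-map sizes fall out of this stack as follows: each 3x3 valid convolution trims 2 pixels and each 2x2 pooling halves the size, so model.summary() should report 150x150x3 -> 148x148x32 -> 74x74x32 -> 72x72x64 -> 36x36x64 -> 34x34x128 -> 17x17x128 -> 15x15x128 -> 7x7x128, which flattens to 7*7*128 = 6272 features feeding the 512-unit dense layer.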

    Data Preprocessing

    from keras import optimizers

    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.RMSprop(lr=1e-4),
                  metrics=['acc'])

    from keras.preprocessing.image import ImageDataGenerator

    # All images will be rescaled by 1./255
    train_datagen = ImageDataGenerator(rescale=1./255)
    test_datagen = ImageDataGenerator(rescale=1./255)

    train_generator = train_datagen.flow_from_directory(
            # This is the target directory
            train_dir,
            # All images will be resized to 150x150
            target_size=(150, 150),
            batch_size=20,
            # Since we use binary_crossentropy loss, we need binary labels
            class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
            validation_dir,
            target_size=(150, 150),
            batch_size=20,
            class_mode='binary')

    Found 1591 images belonging to 2 classes.
    Found 1591 images belonging to 2 classes.

    for data_batch, labels_batch in train_generator:
        print('data batch shape:', data_batch.shape)
        print('labels batch shape:', labels_batch.shape)
        break

    data batch shape: (20, 150, 150, 3)
    labels batch shape: (20,)

    history = model.fit_generator(
          train_generator,
          steps_per_epoch=100,
          epochs=30,
          validation_data=validation_generator,
          validation_steps=50)
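    One detail worth knowing: steps_per_epoch=100 with a batch size of 20 asks for 2000 images per epoch, but the training directory holds only 1591. Keras generators simply wrap around to the start of the data, so this runs fine; some images are just seen twice per epoch.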

    model.save('mask_and_unmask_small_1.h5')

    import matplotlib.pyplot as plt

    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']

    epochs = range(len(acc))

    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()

    plt.figure()

    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()

    plt.show()

    Data Augmentation

    datagen = ImageDataGenerator(
          rotation_range=40,
          width_shift_range=0.2,
          height_shift_range=0.2,
          shear_range=0.2,
          zoom_range=0.2,
          horizontal_flip=True,
          fill_mode='nearest')

    # This is the module with image preprocessing utilities
    from keras.preprocessing import image

    fnames = [os.path.join(train_mask_dir, fname) for fname in os.listdir(train_mask_dir)]

    # We pick one image to "augment"
    img_path = fnames[5]

    # Read the image and resize it
    img = image.load_img(img_path, target_size=(150, 150))

    # Convert it to a Numpy array with shape (150, 150, 3)
    x = image.img_to_array(img)

    # Reshape it to (1, 150, 150, 3)
    x = x.reshape((1,) + x.shape)

    # The .flow() command below generates batches of randomly transformed images.
    # It will loop indefinitely, so we need to `break` the loop at some point!
    i = 0
    for batch in datagen.flow(x, batch_size=1):
        plt.figure(i)
        imgplot = plt.imshow(image.array_to_img(batch[0]))
        i += 1
        if i % 4 == 0:
            break

    plt.show()

    model = models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))   # dropout added to fight overfitting
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.RMSprop(lr=1e-4),
                  metrics=['acc'])

    Training the Model

    train_datagen = ImageDataGenerator(
        rescale=1./255,
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,)

    # Note that the validation data should not be augmented!
    test_datagen = ImageDataGenerator(rescale=1./255)

    train_generator = train_datagen.flow_from_directory(
            # This is the target directory
            train_dir,
            # All images will be resized to 150x150
            target_size=(150, 150),
            batch_size=32,
            # Since we use binary_crossentropy loss, we need binary labels
            class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
            validation_dir,
            target_size=(150, 150),
            batch_size=32,
            class_mode='binary')

    history = model.fit_generator(
          train_generator,
          steps_per_epoch=100,
          epochs=30,
          validation_data=validation_generator,
          validation_steps=5)

    Saving the Model

    model.save('mask_and_unmask_small_2.h5')

    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']

    epochs = range(len(acc))

    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()

    plt.figure()

    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()

    plt.show()

    Using the Model for Detection

    import cv2
    import numpy as np
    import dlib
    from keras.models import load_model

    model = load_model('mask_and_unmask_small_2.h5')
    detector = dlib.get_frontal_face_detector()
    video = cv2.VideoCapture(0)
    font = cv2.FONT_HERSHEY_SIMPLEX

    def rec(img):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        dets = detector(gray, 1)
        for face in dets:
            left = face.left()
            top = face.top()
            right = face.right()
            bottom = face.bottom()
            cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
            # Crop the face, resize to the network's input size and normalize
            img1 = cv2.resize(img[top:bottom, left:right], dsize=(150, 150))
            img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
            img1 = np.array(img1) / 255.
            img_tensor = img1.reshape(-1, 150, 150, 3)
            prediction = model.predict(img_tensor)
            if prediction[0][0] > 0.5:
                result = 'unmask'
            else:
                result = 'mask'
            cv2.putText(img, result, (left, top), font, 2, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.imshow('mask', img)

    while video.isOpened():
        res, img_rd = video.read()
        if not res:
            break
        rec(img_rd)
        if cv2.waitKey(5) & 0xFF == ord('q'):
            break

    video.release()
    cv2.destroyAllWindows()
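    Why prediction > 0.5 means "unmask": flow_from_directory assigns class indices to the subdirectory names in alphabetical order, so 'mask' becomes 0 and 'unmask' becomes 1, and the sigmoid output is the probability of class 1.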

    6. References

    https://blog.csdn.net/qq_35723619/article/details/83042202
    https://blog.csdn.net/cungudafa/article/details/93613135
