bmp (bitmap): stores rich image information and applies little to no compression.
png: a bitmap format that uses lossless compression; it has four channels, the extra one being an alpha (transparency) channel.
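A minimal Python sketch (my addition; 'logo.png' is a hypothetical file that actually contains an alpha channel) showing how that fourth channel appears when the image is loaded unchanged:

import cv2

# IMREAD_UNCHANGED keeps the alpha channel; the default imread flag drops it
img = cv2.imread('logo.png', cv2.IMREAD_UNCHANGED)
print(img.shape)             # (height, width, 4) for a PNG with alpha, (height, width, 3) otherwise
b, g, r, a = cv2.split(img)  # the fourth plane is the transparency mask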
C++ code:
#include <opencv2/opencv.hpp>

int main() {
    cv::Mat img = cv::Mat(200, 200, CV_8UC3, cv::Scalar(255, 0, 0));
    std::vector<cv::Mat> ms;
    cv::split(img, ms);        // split into separate channels
    ms[1] = cv::Scalar(255);
    cv::merge(ms, img);        // merge the channels back together
    cv::imwrite("write_pic.jpg", img);
}

In the type codes, 8U means 8-bit unsigned integer, 16S means 16-bit signed integer, and 64F means 64-bit floating point (i.e. double); the number after C is the channel count, so C3 denotes a three-channel image.
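As a side note (my addition), on the Python side these Mat types simply map to NumPy dtypes and array shapes:

import numpy as np

# CV_8UC3 corresponds to a (rows, cols, 3) uint8 array; this builds a blue image like the C++ example
img = np.full((200, 200, 3), (255, 0, 0), dtype=np.uint8)
print(img.dtype, img.shape)        # uint8 (200, 200, 3)

# CV_64FC1 corresponds to a single-channel float64 array
gray64 = np.zeros((200, 200), dtype=np.float64)
print(gray64.dtype, gray64.shape)  # float64 (200, 200)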
Python code:
import cv2

cap = cv2.VideoCapture('')  # path to a video file, or a camera index
while True:
    ret, frame = cap.read()  # ret: whether the read succeeded; frame: the image that was read
    if not ret:
        break
    cv2.imshow('frame', frame)
    # press q to stop the loop
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

Grab one frame every 25 frames from a video or camera:
import cv2

cap = cv2.VideoCapture('')  # a video file path, or a camera index
i = 0
while True:
    was_read, img = cap.read()
    if not was_read:
        break
    i += 1
    if i % 25 == 0:  # 25 can be changed; the smaller the value, the more frames are sampled
        cv2.imshow('sampled frame', img)
        cv2.waitKey(1)
cap.release()

C++ code:
#include <opencv2/opencv.hpp>

int main() {
    cv::VideoCapture cap;
    cap = cv::VideoCapture(0);
    while (true) {
        cv::Mat frame;
        cap >> frame;
        cv::imshow("frame", frame);
        cv::waitKey(30);
    }
}

Color spaces: RGB / RGBA / GRAY / HSV / YUV; convert with cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).
Adjusting contrast: cv2.convertScaleAbs(img, alpha=3, beta=1) computes pixel * alpha + beta (then takes the absolute value and converts to 8-bit).
HSV: H is hue, in [0, 179]; S is saturation, in [0, 255]; V is value (brightness), in [0, 255]. HSV is often used to segment objects of a specific color.
Example: extracting the text from an image.
import cv2
import numpy as np

img = cv2.imread('11.jpg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_blue = np.array([100, 200, 100])
high_blue = np.array([200, 255, 200])
mask = cv2.inRange(hsv, lower_blue, high_blue)
# AND the image with itself under the mask
res = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow('', mask)
cv2.waitKey(0)

(Figures: original image and the result after running the code.)
C++ code:
#include <opencv2/opencv.hpp>

int main() {
    cv::Mat img = cv::imread("1.jpg");
    cv::line(img, cv::Point(100, 100), cv::Point(200, 200), cv::Scalar(255, 0, 0), 2);
    cv::circle(img, cv::Point(100, 100), 30, cv::Scalar(255, 0, 0), 2);
    cv::rectangle(img, cv::Point(100, 100), cv::Point(200, 200), cv::Scalar(255, 0, 0), 2);
    // draw an ellipse; when thickness (line width) is -1, the shape is filled
    cv::ellipse(img, cv::Point(150, 130), cv::Size(170, 180), 0, 0, 360, cv::Scalar(255, 0, 0), 2);
    // draw a polygon
    std::vector<cv::Point> contour;
    contour.push_back(cv::Point(20, 10));
    contour.push_back(cv::Point(40, 20));
    contour.push_back(cv::Point(80, 40));
    contour.push_back(cv::Point(120, 80));
    cv::polylines(img, contour, true, cv::Scalar(255, 0, 0), 2);
    // draw text; LINE_AA produces anti-aliased, smoother-looking glyphs
    cv::putText(img, "beautiful girl", cv::Point(40, 40), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(255, 0, 0), 1, cv::LINE_AA);
    cv::imshow("", img);
    cv::waitKey(0);
}

Python code:
import cv2
import numpy as np

img = cv2.imread(r"1.jpg")
# cv2.line(img, (100, 30), (210, 180), color=(0, 0, 255), thickness=2)
# cv2.circle(img, (50, 50), 30, (0, 0, 255), 2)
# cv2.rectangle(img, (100, 30), (210, 180), color=(0, 0, 255), thickness=2)
# cv2.ellipse(img, (100, 100), (100, 50), 0, 0, 360, (255, 0, 0), -1)
# pts = np.array([[10, 5], [50, 10], [70, 20], [20, 30]], np.int32)
# pts = pts.reshape((-1, 1, 2))
# cv2.polylines(img, [pts], True, (0, 0, 255), 2)
cv2.putText(img, 'beautiful girl', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, lineType=cv2.LINE_AA)
cv2.imshow("pic show", img)
cv2.waitKey(0)

Python code:
import cv2

img = cv2.imread('1.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# THRESH_OTSU finds a suitable threshold automatically
ret, binary = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
cv2.imshow('', binary)
cv2.waitKey(0)

C++ code:
#include <opencv2/opencv.hpp>

int main() {
    cv::Mat img = cv::imread("1.jpg");
    cv::Mat img1;
    cv::cvtColor(img, img1, cv::COLOR_BGR2GRAY);
    cv::Mat img2;
    cv::threshold(img1, img2, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
    cv::imshow("1", img2);
    cv::waitKey(0);
}

Adaptive thresholding:
Python code:
import cv2
import matplotlib.pyplot as plt

img = cv2.imread('2.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
th2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
title = ['Original Image', 'Global Thresholding(v=127)', 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
images = [img, th1, th2, th3]
for i in range(4):
    plt.subplot(2, 2, i + 1), plt.imshow(images[i], 'gray')
    plt.title(title[i])
    plt.xticks([]), plt.yticks([])
plt.show()

Addition and subtraction: results below 0 are clamped to 0 and results above 255 are clamped to 255 (a short saturation demo follows the overlay example below).
Bitwise operations:
1. and: the result is 1 only when both bits are 1, otherwise 0;
2. or: the result is 1 when at least one bit is 1, otherwise 0;
3. not: a bit of 1 becomes 0, and a bit of 0 becomes 1;
4. xor: the result is 1 when the two bits differ, 0 when they are the same.
Placing one image on top of another:
import cv2

img1 = cv2.imread('1.jpg')
img2 = cv2.imread('6.jpg')
# img = cv2.add(img1, img2)
# weighted addition
# dst = cv2.addWeighted(img1, 0.7, img2, 0.3, 0)
# cv2.imshow("...", dst)
# cv2.waitKey(0)
rows, cols, channels = img2.shape
roi = img1[0:rows, 0:cols]
img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
img2_fg = cv2.bitwise_and(img2, img2, mask=mask)
dst = cv2.add(img1_bg, img2_fg)
img1[0:rows, 0:cols] = dst
cv2.imshow('res', img1)
cv2.waitKey(0)
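As a quick aside on the saturating add/subtract described above, here is a minimal sketch (my addition, not from the original post) comparing cv2.add with plain NumPy addition, which wraps around instead of clamping:

import cv2
import numpy as np

a = np.uint8([250])
b = np.uint8([10])
print(cv2.add(a, b))  # [[255]] -> 250 + 10 saturates at 255
print(a + b)          # [4]     -> NumPy uint8 addition wraps around (260 % 256)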
1. resize / transpose / flip

#include <opencv2/opencv.hpp>

int main() {
    cv::Mat img = cv::imread("1.jpg");
    cv::Mat dst;
    // cv::resize(img, dst, cv::Size(300, 300));
    // cv::transpose(img, dst);
    cv::flip(img, dst, 2);  // flip code > 0: horizontal flip; 0: vertical flip; < 0: both
    cv::imshow(".", dst);
    cv::waitKey(0);
}

2. Affine transformation: a linear transformation combined with a translation. Multiplying any 2D image by an affine matrix produces the transformed image; this covers scaling, rotation, translation, shearing and mirroring.
The affine matrix:
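As a clarifying sketch of the general form (the notation below is my addition, chosen to match the code that follows), a 2x3 affine matrix M acts on a pixel coordinate (x, y) as:

\begin{bmatrix} x' \\ y' \end{bmatrix}
= \begin{bmatrix} a & b \\ c & d \end{bmatrix}
\begin{bmatrix} x \\ y \end{bmatrix}
+ \begin{bmatrix} t_x \\ t_y \end{bmatrix},
\qquad
M = \begin{bmatrix} a & b & t_x \\ c & d & t_y \end{bmatrix}

For example, M = [[1, 0, 50], [0, 1, 50]] in the code below translates the image by 50 pixels in each direction, while cv2.getRotationMatrix2D builds the matrix for a rotation plus scaling about a given center.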
import cv2
import numpy as np

src = cv2.imread('1.jpg')
rows, cols, channel = src.shape
# M = np.float32([[1, 0, 50], [0, 1, 50]])
# M = np.float32([[0.5, 0, 0], [0, 0.5, 0]])
# M = np.float32([[-0.5, 0, cols // 2], [0, 0.5, 0]])
# M = np.float32([[1, 0.5, 0], [0, 1, 0]])
# (cols / 2, rows / 2): rotation center, 45: rotation angle, 0.7: scale factor
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 45, 0.7)
dst = cv2.warpAffine(src, M, (cols, rows))
cv2.imshow('src pic', src)
cv2.imshow('dst pic', dst)
cv2.waitKey(0)

3. Perspective transformation
Python code:
import cv2
import numpy as np

img = cv2.imread("2.jpg")
# coordinates in the source image
p1 = np.float32([[25, 30], [179, 25], [12, 188], [189, 190]])
p2 = np.float32([[0, 0], [200, 0], [0, 200], [200, 200]])
m = cv2.getPerspectiveTransform(p1, p2)
dst = cv2.warpPerspective(img, m, (200, 201))
cv2.imshow('dst', dst)
cv2.waitKey(0)

(Figure: result of running the code.)
C++ code:
#include <opencv2/opencv.hpp>

int main() {
    cv::Point2f ps1[] = {cv::Point2f(25, 30), cv::Point2f(179, 25), cv::Point2f(12, 188), cv::Point2f(189, 190)};
    cv::Point2f ps2[] = {cv::Point2f(0, 0), cv::Point2f(200, 0), cv::Point2f(0, 200), cv::Point2f(200, 200)};
    cv::Mat img = cv::imread("2.jpg");
    cv::Mat M = cv::getPerspectiveTransform(ps1, ps2);
    cv::Mat dst;
    cv::warpPerspective(img, dst, M, img.size());
    cv::imshow("dst", dst);
    cv::waitKey(0);
}

4. Morphological operations
Dilation: the white regions grow.
Erosion: the white regions shrink.
Opening: erosion followed by dilation; used to remove noise (white specks).
Closing: dilation followed by erosion; used to fill holes (black holes).
Gradient: used to extract contours.
Top-hat: original image minus the opened image; extracts the noise.
Black-hat: original image minus the closed image; extracts the holes.
import cv2

img = cv2.imread('4.jpg')
# (3, 3): size of the structuring element in the height and width directions
k = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
# dst = cv2.dilate(img, k)                             # dilation
# dst = cv2.erode(img, k)                              # erosion
# dst = cv2.morphologyEx(img, cv2.MORPH_OPEN, k)       # opening
# dst = cv2.morphologyEx(img, cv2.MORPH_CLOSE, k)      # closing
# dst = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, k)   # morphological gradient
# dst = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, k)     # top-hat
dst = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, k)     # black-hat
cv2.imshow('', dst)
cv2.waitKey(0)

1. Filtering
Filtering removes unwanted signal frequencies. It is usually implemented as a convolution, and the convolution kernel is called the filter.
Types of filters:
(1) Low-pass: blurs the image; used for denoising.
(2) High-pass: extracts edges, contours, or gradients.
(3) Band-pass: keeps signals within a known frequency range.
(4) Band-stop: removes signals within a known frequency range.
Ways of analyzing a signal:
(1) Time-domain analysis: analyzing the signal itself directly.
(2) Frequency-domain analysis: analyzing how quickly the signal changes.
2. Filtering in code
import cv2

img = cv2.imread('1.jpg')
# dst = cv2.medianBlur(img, 3)                                   # median filter, good for salt-and-pepper noise
# dst = cv2.blur(img, (3, 3))                                    # mean filter
src = cv2.GaussianBlur(img, (3, 3), sigmaX=1, sigmaY=1)          # Gaussian filter; sigma is the standard deviation of the kernel
# dst = cv2.bilateralFilter(img, 5, sigmaColor=2, sigmaSpace=2)  # bilateral filter
# dst = cv2.Laplacian(img, -1, ksize=1)                          # Laplacian filter
dst = cv2.addWeighted(img, 2, src, -1, 0)                        # USM sharpening
cv2.imshow('dst', dst)
cv2.waitKey(0)

3. Gradient operators
Sobel kernels:
G_x = \begin{bmatrix} -1 & 0 & 1 \\ -2 & 0 & 2 \\ -1 & 0 & 1 \end{bmatrix}, \qquad
G_y = \begin{bmatrix} -1 & -2 & -1 \\ 0 & 0 & 0 \\ 1 & 2 & 1 \end{bmatrix}
Scharr operator: an improved version of the Sobel operator.
G_x = \begin{bmatrix} -3 & 0 & 3 \\ -10 & 0 & 10 \\ -3 & 0 & 3 \end{bmatrix}, \qquad
G_y = \begin{bmatrix} -3 & -10 & -3 \\ 0 & 0 & 0 \\ 3 & 10 & 3 \end{bmatrix}
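For completeness, a short Python sketch (my addition, mirroring the C++ gradient example below; the file name '2.jpg' follows the original examples):

import cv2

img = cv2.imread('2.jpg')
# sobel_x = cv2.Sobel(img, -1, 1, 0)    # gradient in the x direction (G_x)
sobel_y = cv2.Sobel(img, -1, 0, 1)      # gradient in the y direction (G_y)
# scharr_x = cv2.Scharr(img, -1, 1, 0)  # Scharr kernel, x direction
cv2.imshow('src', img)
cv2.imshow('dst', sobel_y)
cv2.waitKey(0)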
C++ code:
#include <opencv2/opencv.hpp>
using namespace std;

int main() {
    cv::Mat img = cv::imread("2.jpg");
    cv::Mat dst;
    // cv::Sobel(img, dst, -1, 1, 0);
    cv::Sobel(img, dst, -1, 0, 1);
    // cv::Scharr(img, dst, -1, 1, 0);
    cv::imshow("src", img);
    cv::imshow("dst", dst);
    cv::waitKey(0);
}

(1) Steps of the Canny algorithm: convert the color image to grayscale; denoise with a Gaussian filter; compute the image gradient; apply non-maximum suppression; connect edges with double (hysteresis) thresholding; output the binarized image.
(2) Double-threshold edge linking: edges with gradient above maxVal are kept, edges below minVal are discarded, and values in between are kept only if they are connected to a genuine edge, otherwise they are discarded.
Python code:
import cv2

img = cv2.imread('25.jpg', 0)
dst = cv2.convertScaleAbs(img, alpha=6, beta=0)
k = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
dst = cv2.morphologyEx(dst, cv2.MORPH_CLOSE, k)
dst = cv2.GaussianBlur(dst, (5, 5), sigmaX=1)
dst = cv2.Canny(dst, 50, 150)
dst = cv2.resize(dst, dsize=(500, 500))
cv2.imshow('dst', dst)
cv2.waitKey(0)

(1) Finding and drawing contours
Python code:
import cv2

img = cv2.imread('14.jpg')
imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, dst = cv2.threshold(imggray, 127, 255, 0)
contour, _ = cv2.findContours(dst, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# print(len(contour[0]))
img_ = cv2.drawContours(img, contour, -1, (0, 255, 0), 2)  # -1 means draw all contours
cv2.imshow('1', img_)
cv2.waitKey(0)

C++ code:
#include <opencv2/opencv.hpp>
using namespace std;

int main() {
    cv::Mat img = cv::imread("14.jpg");
    cv::Mat gray_img, bin_img;
    cv::cvtColor(img, gray_img, cv::COLOR_BGR2GRAY);
    cv::threshold(gray_img, bin_img, 127, 255, 0);
    vector<vector<cv::Point>> contours;
    vector<cv::Vec4i> vec_4f;
    cv::findContours(bin_img, contours, vec_4f, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
    cv::drawContours(img, contours, -1, cv::Scalar(0, 0, 255), 2);
    cv::imshow("img", img);
    cv::waitKey(0);
}

(2) Area, perimeter and centroid
Python code:
import cv2

img = cv2.imread('26.jpg', 0)
ret, dst = cv2.threshold(img, 127, 255, 0)
contour, _ = cv2.findContours(dst, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
M = cv2.moments(contour[0])  # image moments
cx, cy = int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])
print('centroid', cx, cy)
area = cv2.contourArea(contour[0])
print('area', area)
perimeter = cv2.arcLength(contour[0], True)
print('perimeter', perimeter)

C++ code:
#include <opencv2/opencv.hpp>
using namespace std;

int main() {
    cv::Mat img = cv::imread("26.jpg");
    cv::Mat gray_img, bin_img;
    cv::cvtColor(img, gray_img, cv::COLOR_BGR2GRAY);
    cv::threshold(gray_img, bin_img, 127, 255, 0);
    vector<vector<cv::Point>> contours;
    vector<cv::Vec4i> vec_4f;
    cv::findContours(bin_img, contours, vec_4f, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
    cv::Moments M = cv::moments(contours[0]);        // image moments
    int cx = M.m10 / M.m00;
    int cy = M.m01 / M.m00;
    cout << "centroid " << cx << " " << cy << endl;  // centroid
    double area = cv::contourArea(contours[0]);
    cout << "area " << area << endl;                 // area
    double arc_len = cv::arcLength(contours[0], true);
    cout << "arc_len " << arc_len << endl;           // perimeter
    cv::drawContours(img, contours, -1, cv::Scalar(0, 0, 255), 2);
    cv::imshow("img", img);
    cv::waitKey(0);
}

(3) Contour approximation
Python code:
import cv2

img = cv2.imread('26.jpg')
imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, threh = cv2.threshold(imggray, 127, 255, 0)
counters, _ = cv2.findContours(threh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
epsilon = 20  # precision; the smaller the value, the higher the precision
approx = cv2.approxPolyDP(counters[0], epsilon, True)
img_contour = cv2.drawContours(img, [approx], -1, (0, 0, 255), 2)
cv2.imshow('img', img_contour)
cv2.waitKey(0)

(4) Convex hull and convexity test
Finding the convex hull of a non-convex object.
Python code:
import cv2

img = cv2.imread('26.jpg')
imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imggray, 127, 255, 0)
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
hull = cv2.convexHull(contours[0])
print(cv2.isContourConvex(contours[0]), cv2.isContourConvex(hull))
# False True: the contour itself is not convex, while its convex hull is
img_contour = cv2.drawContours(img, [hull], -1, (0, 0, 255), 3)
cv2.imshow('img_contour', img_contour)
cv2.waitKey(0)

C++ code:
#include <opencv2/opencv.hpp>
using namespace std;

int main() {
    cv::Mat img = cv::imread("26.jpg");
    cv::Mat imggray, bin_img;
    cv::cvtColor(img, imggray, cv::COLOR_BGR2GRAY);
    cv::threshold(imggray, bin_img, 127, 255, 0);
    vector<vector<cv::Point>> contours;
    vector<cv::Vec4i> vec_4f;
    cv::findContours(bin_img, contours, vec_4f, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
    vector<vector<cv::Point>> hull(contours.size());
    cv::convexHull(contours.at(0), hull.at(0));
    cout << cv::isContourConvex(contours.at(0)) << " " << cv::isContourConvex(hull.at(0)) << endl;
    cv::drawContours(img, hull, -1, cv::Scalar(0, 0, 255), 2);
    cv::imshow("img", img);
    cv::waitKey(0);
}

(5) Bounding shapes
The minimum bounding rectangle can be used, for example, to test whether two objects collide.
Python code:
import cv2
import numpy as np

img = cv2.imread('16.jpg')
imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imggray, 127, 255, 0)
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# axis-aligned bounding rectangle
x, y, w, h = cv2.boundingRect(contours[0])
img_contour = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
# minimum-area (rotated) rectangle
rect = cv2.minAreaRect(contours[0])
angle = rect[2]    # rotation angle (between the box and the positive x axis), in degrees
cx, cy = rect[0]   # center of the rectangle
w = rect[1][0]     # width of the rectangle
h = rect[1][1]     # height of the rectangle
box = cv2.boxPoints(rect)
box = np.int32(box)
img_contour = cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
# minimum enclosing circle
(x, y), r = cv2.minEnclosingCircle(contours[0])
center = (int(x), int(y))
r = int(r)
img_contour = cv2.circle(img, center, r, (255, 0, 0), 2)
cv2.imshow('img', img_contour)
cv2.waitKey(0)

C++ code:
#include <opencv2/opencv.hpp>
using namespace std;

int main() {
    cv::Mat img = cv::imread("16.jpg");
    cv::Mat imggray, bin_img;
    cv::cvtColor(img, imggray, cv::COLOR_BGR2GRAY);
    cv::threshold(imggray, bin_img, 127, 255, 0);
    vector<vector<cv::Point>> contours;
    vector<cv::Vec4i> vec_4f;
    cv::findContours(bin_img, contours, vec_4f, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
    // axis-aligned bounding rectangle
    {
        cv::Rect rect = cv::boundingRect(contours[0]);  // Rect stores the top-left corner plus width and height
        cv::rectangle(img, cv::Point(rect.x, rect.y), cv::Point(rect.x + rect.width, rect.y + rect.height), cv::Scalar(0, 255, 0), 2);
    }
    // minimum-area (rotated) rectangle
    {
        cv::RotatedRect minrect = cv::minAreaRect(contours[0]);
        cv::Point2f vs[4];
        minrect.points(vs);
        std::vector<cv::Point> contour;
        contour.push_back(vs[0]);
        contour.push_back(vs[1]);
        contour.push_back(vs[2]);
        contour.push_back(vs[3]);
        cv::polylines(img, contour, true, cv::Scalar(255, 0, 0), 2, cv::LINE_AA);
    }
    // minimum enclosing circle
    {
        cv::Point2f center;
        float r;
        cv::minEnclosingCircle(contours[0], center, r);
        cv::circle(img, center, r, cv::Scalar(0, 0, 255), 2);
    }
    cv::imshow("img", img);
    cv::waitKey(0);
}

(6) Contour properties
Aspect ratio of the bounding rectangle:
x, y, w, h = cv2.boundingRect(cnt)  # cnt is a contour returned by findContours
aspect_ratio = float(w) / h

Ratio of the contour area to the bounding-rectangle area (extent):
area = cv2.contourArea(cnt)
x, y, w, h = cv2.boundingRect(cnt)
rect_area = w * h
extent = float(area) / rect_area

Ratio of the contour area to the convex-hull area (solidity):
area = cv2.contourArea(cnt)
hull = cv2.convexHull(cnt)
hull_area = cv2.contourArea(hull)
solidity = float(area) / hull_area

Orientation fitting:
import cv2
import numpy as np

img = cv2.imread('16.jpg')
imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imggray, 127, 255, 0)
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# ellipse fitting
ellipse = cv2.fitEllipse(contours[0])
cv2.ellipse(img, ellipse, (255, 0, 0), 2)
# line fitting
h, w, _ = img.shape
[vx, vy, x, y] = cv2.fitLine(contours[0], cv2.DIST_L2, 0, 0.01, 0.01)
lefty = int((-x * vy / vx) + y)
righty = int(((w - x) * vy / vx) + y)
cv2.line(img, (w - 1, righty), (0, lefty), (0, 0, 255), 2)
cv2.imshow('img', img)
cv2.waitKey(0)

(7) Shape matching
Only suitable for simple matching; the results are not very good.
import cv2

img1 = cv2.imread('16.jpg')
img2 = cv2.imread('17.jpg')
imggray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
imggray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
ret1, thresh1 = cv2.threshold(imggray1, 127, 255, 0)
ret2, thresh2 = cv2.threshold(imggray2, 127, 255, 0)
contour1, _1 = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contour2, _2 = cv2.findContours(thresh2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
ret = cv2.matchShapes(contour1[0], contour2[0], cv2.CONTOURS_MATCH_I2, 0)
print(ret)

Goal: practice the OpenCV operations covered above (a simple license-plate localization exercise).
Python code:
import cv2
from PIL import Image

img1 = Image.open('23.jpg')
img2 = cv2.imread('23.jpg')
# Gaussian blur to remove noise
img = cv2.GaussianBlur(img2, (3, 3), 1)
# convert to grayscale
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# compute the gradient
sobel_x = cv2.Sobel(img, cv2.CV_16S, 1, 0)
# sobel_x = cv2.Canny(img, 100, 150)
# increase the contrast
abs_img = cv2.convertScaleAbs(sobel_x)
# binarize
ret, img = cv2.threshold(abs_img, 0, 255, cv2.THRESH_OTSU)
# closing, to fill holes
k = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 5))
img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, k)
# a series of morphological operations
kx = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 1))
ky = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 19))
img = cv2.dilate(img, kx)
img = cv2.erode(img, kx)
img = cv2.erode(img, ky)
img = cv2.dilate(img, ky)
# cv2.imshow('1', img)
# cv2.waitKey(0)
# exit()
# smoothing with a median filter
img = cv2.medianBlur(img, 15)
# find contours
contour, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for i in contour:
    rect = cv2.boundingRect(i)
    x = rect[0]
    y = rect[1]
    w = rect[2]
    h = rect[3]
    if w > (h * 2):
        # crop the candidate region
        chepai = img1.crop([int(x), int(y), int(x + w), int(y + h)])
        chepai.show()
# draw the contours
img = cv2.drawContours(img2, contour, -1, (0, 0, 255), 3)
cv2.imshow('x', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

C++ code:
#include <opencv2/opencv.hpp>
using namespace std;

int main() {
    cv::Mat img = cv::imread("23.jpg");
    cv::Mat img1;
    cv::GaussianBlur(img, img1, cv::Size(3, 3), 1);
    cv::Mat imggray;
    cv::cvtColor(img1, imggray, cv::COLOR_BGR2GRAY);
    cv::Mat img_sobel;
    cv::Sobel(imggray, img_sobel, CV_16S, 1, 0);
    cv::Mat img2;
    cv::convertScaleAbs(img_sobel, img2, 3, 0);
    cv::Mat bin_img;
    cv::threshold(img2, bin_img, 127, 255, 0);
    cv::Mat k = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(17, 5));
    cv::morphologyEx(bin_img, bin_img, cv::MORPH_CLOSE, k);
    cv::Mat kx = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(20, 1));
    cv::Mat ky = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(1, 19));
    cv::Mat image;
    cv::dilate(bin_img, image, kx);
    cv::erode(image, image, kx);
    cv::erode(image, image, ky);
    cv::dilate(image, image, ky);
    cv::medianBlur(image, image, 15);
    vector<vector<cv::Point>> contour;
    vector<cv::Vec4i> vec_4f;
    cv::findContours(image, contour, vec_4f, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
    for (size_t i = 0; i < contour.size(); ++i) {
        cv::Rect rect = cv::boundingRect(contour[i]);
        int x = rect.x;
        int y = rect.y;
        int w = rect.width;
        int h = rect.height;
        if (w > h * 2) {
            cv::Mat chepai = img(cv::Range(y, y + h), cv::Range(x, x + w));
            cv::imshow("chepai" + std::to_string(i), chepai);
        }
    }
    cv::drawContours(img, contour, -1, cv::Scalar(0, 0, 255), 3);
    cv::imshow("", img);
    cv::waitKey(0);
    cv::destroyAllWindows();
}

Gaussian pyramid: mainly used for downsampling.
import cv2

img = cv2.imread('13.jpg')
for i in range(3):
    cv2.imshow(f'img{i}', img)
    # img = cv2.pyrDown(img)  # downsample
    img = cv2.pyrUp(img)      # upsample
cv2.waitKey(0)

Laplacian pyramid: computed from the Gaussian pyramid:
L_i = G_i - \mathrm{PyrUp}(G_{i+1})
After building the Gaussian pyramid, each Laplacian level is obtained by subtracting, from the current Gaussian level, the upsampled version of the next (smaller) Gaussian level.
import cv2

img = cv2.imread('12.jpg')
img_down = cv2.pyrDown(img)
img_up = cv2.pyrUp(img_down)
img_new = cv2.subtract(img, img_up)
# boost the contrast so the result is easier to see
img = cv2.convertScaleAbs(img_new, alpha=5, beta=0)
cv2.imshow('', img)
cv2.waitKey(0)

Image blending: pyramids can be used to blend two images seamlessly.
import cv2
import numpy as np

a = cv2.imread('21.jpg')
b = cv2.imread('22.jpg')
# Gaussian pyramid for a
g = a.copy()
gpa = [g]
for i in range(6):
    g = cv2.pyrDown(g)
    gpa.append(g)
# Gaussian pyramid for b
g = b.copy()
gpb = [g]
for i in range(6):
    g = cv2.pyrDown(g)
    gpb.append(g)
# Laplacian pyramid for a
lpa = [gpa[5]]
for i in range(5, 0, -1):
    ge = cv2.pyrUp(gpa[i])
    l = cv2.subtract(gpa[i - 1], ge)
    lpa.append(l)
# Laplacian pyramid for b
lpb = [gpb[5]]
for i in range(5, 0, -1):
    ge = cv2.pyrUp(gpb[i])
    l = cv2.subtract(gpb[i - 1], ge)
    lpb.append(l)
# blend: join the left half of a with the right half of b at every level
LS = []
for i, (la, lb) in enumerate(zip(lpa, lpb)):
    row, col, dpt = la.shape
    ls = np.hstack((la[:, 0:col // 2], lb[:, col // 2:]))
    LS.append(ls)
# collapse the blended pyramid back into a full-resolution image
ls_ = LS[0]
for i in range(1, 6):
    ls_ = cv2.pyrUp(ls_)
    ls_ = cv2.add(ls_, LS[i])
    cv2.imshow(f'xxx{i}', ls_)
cv2.waitKey(0)

(1) Histogram: counts how many times each pixel value occurs in an image.
import cv2
import matplotlib.pyplot as plt

img = cv2.imread('1.jpg')
img_b = cv2.calcHist([img], [0], None, [255], [0, 255])
plt.plot(img_b, label='b', color='b')
plt.show()
img_g = cv2.calcHist([img], [1], None, [255], [0, 255])
plt.plot(img_g, label='g', color='g')
plt.show()
img_r = cv2.calcHist([img], [2], None, [255], [0, 255])
plt.plot(img_r, label='r', color='r')
plt.show()

(2) Histogram equalization: stretches the distribution of pixel intensities so that they are spread more evenly over 0-255, which increases the contrast and improves the subjective visual quality of the image. Low-contrast images are good candidates for histogram equalization to bring out detail.
import cv2
import matplotlib.pyplot as plt

img = cv2.imread('7.jpg', 0)
his = cv2.calcHist([img], [0], None, [255], [0, 255])
plt.plot(his, label='his', color='r')
dst = cv2.equalizeHist(img)
cv2.imshow('dst', dst)
cv2.imwrite('15.jpg', dst)
his = cv2.calcHist([dst], [0], None, [255], [0, 255])
plt.plot(his, label='his', color='b')
plt.show()

(3) Adaptive histogram equalization (CLAHE):
# Similar in spirit to adaptive thresholding: the image is split into tiles and each tile is equalized separately
import cv2

img = cv2.imread('8.jpg', 0)
dst = cv2.equalizeHist(img)
clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(8, 8))
dst1 = clahe.apply(img)
cv2.imshow('dst', dst1)
cv2.waitKey(0)

(4) Histogram back projection: back projection shows how well each pixel of a target image fits the distribution described by a histogram model; in other words, it uses a histogram model to search a target image for similar regions. The H and S channels of the HSV color space are usually used as the histogram model.
# Use a single-colored patch image to extract, from another image, the regions that have the same color
import cv2
import numpy as np

rol = cv2.imread('10.jpg')
hsv = cv2.cvtColor(rol, cv2.COLOR_BGR2HSV)
target = cv2.imread('9.jpg')
hsv1 = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
roihist = cv2.calcHist([hsv], [0, 1], None, [180, 255], [0, 180, 0, 255])
cv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)
dst = cv2.calcBackProject([hsv1], [0, 1], roihist, [0, 180, 0, 255], 1)
# filter the back-projection result
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
dst = cv2.filter2D(dst, -1, disc)
# binarize
ret, thresh = cv2.threshold(dst, 50, 255, 0)
thresh = cv2.merge((thresh, thresh, thresh))
res = cv2.bitwise_and(target, thresh)
res = np.hstack((target, thresh, res))
cv2.imshow('img', res)
cv2.waitKey(0)

Taking the Fourier transform of an image decomposes it into sine and cosine components, i.e. it converts the image from the spatial domain into the frequency domain. The underlying idea is that any function can be approximated arbitrarily well by a superposition of infinitely many sine and cosine functions.
Fourier transform with NumPy:
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread('9.jpg', 0)
f = np.fft.fft2(img)         # Fourier transform
fshift = np.fft.fftshift(f)  # shift the zero-frequency component to the center
# fshift is complex; its absolute value is the magnitude
magnitude_spectrum = 20 * np.log(np.abs(fshift))
plt.figure(figsize=(10, 10))
plt.subplot(221), plt.imshow(img, cmap='gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(222), plt.imshow(magnitude_spectrum, cmap='gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
# remove the low-frequency components, keeping only the high frequencies
rows, cols = img.shape
crow, ccol = rows // 2, cols // 2
fshift[crow - 30:crow + 30, ccol - 30:ccol + 30] = 0
# inverse Fourier transform
f_ishift = np.fft.ifftshift(fshift)
img_back = np.fft.ifft2(f_ishift)
img_back = np.abs(img_back)
plt.subplot(223), plt.imshow(img_back, cmap='gray')
plt.title('Image after HPF'), plt.xticks([]), plt.yticks([])
plt.subplot(224), plt.imshow(img_back)
plt.title('Result in JET'), plt.xticks([]), plt.yticks([])
plt.show()

The second subplot is the magnitude spectrum: the center holds the low-frequency components and the edges hold the high frequencies; the brighter the center, the more low-frequency content, and the darker the edges, the less high-frequency content.
Fourier transform with OpenCV:
import numpy as np
import cv2
from matplotlib import pyplot as plt

img = cv2.imread('9.jpg', 0)
dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)
magnitude_spectrum = 20 * np.log(cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1]))
plt.figure(figsize=(10, 10))
plt.subplot(221), plt.imshow(img, cmap='gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(222), plt.imshow(magnitude_spectrum, cmap='gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
rows, cols = img.shape
crow, ccol = rows // 2, cols // 2
# create a mask: the center square is 1, everything else 0 (a low-pass filter)
mask = np.zeros((rows, cols, 2), np.uint8)
mask[crow - 30:crow + 30, ccol - 30:ccol + 30] = 1
fshift = dft_shift * mask
# apply the mask and take the inverse DFT
f_ishift = np.fft.ifftshift(fshift)
img_back = cv2.idft(f_ishift)
img_back = cv2.magnitude(img_back[:, :, 0], img_back[:, :, 1])
plt.subplot(223), plt.imshow(img_back, cmap='gray')
plt.title('Image after LPF'), plt.xticks([]), plt.yticks([])
plt.subplot(224), plt.imshow(img_back)
plt.title('Result in JET'), plt.xticks([]), plt.yticks([])
plt.show()

The Hough transform detects any shape that can be expressed mathematically, even if the shape is broken or slightly distorted.
Line detection: a line y = kx + b in Cartesian coordinates maps to a single point (k, b) in Hough space, and conversely a line in Hough space maps to a point in Cartesian coordinates. (OpenCV's HoughLines uses the polar parameterization \rho = x\cos\theta + y\sin\theta and returns (\rho, \theta) pairs, which is why the code below converts each pair back into two end points.)
import cv2
import numpy as np

img = cv2.imread('27.jpg')
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(img_gray, 100, 150)
lines = cv2.HoughLines(edges, 1, np.pi / 180, 100)
for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))  # x of the line's start point
    y1 = int(y0 + 1000 * (a))   # y of the line's start point
    x2 = int(x0 - 1000 * (-b))  # x of the line's end point
    y2 = int(y0 - 1000 * (a))   # y of the line's end point
    cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 1)
cv2.imshow('img', img)
# cv2.imwrite("35.jpg", img)
cv2.waitKey(0)

Circle detection:
import cv2
import numpy as np

img = cv2.imread('29.jpg')
dst = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
circle = cv2.HoughCircles(dst, cv2.HOUGH_GRADIENT, 1, 80, param1=40, param2=20, minRadius=20, maxRadius=300)
if circle is not None:
    circle = np.uint16(np.around(circle))
    for i in circle[0, :]:
        cv2.circle(img, (i[0], i[1]), i[2], (0, 0, 255), 2)
cv2.imshow('circle', img)
cv2.waitKey(0)

Watershed segmentation is used for simple image segmentation. The core idea: treat the image as a landscape of peaks, drill a hole at every regional minimum, and let water rise from the holes at a uniform rate; as the water floods the terrain from low to high, watershed lines form wherever water from different basins would merge.
A detailed explanation of the watershed principle can be found in the referenced article.
import numpy as np
import cv2

img = cv2.imread('30.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
# remove noise
k = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, k, iterations=2)  # iterations: how many times erosion/dilation are applied
sure_bg = cv2.dilate(opening, k, iterations=3)                       # dilate to obtain the sure background
# distance from each foreground pixel to the nearest background pixel
dist_transform = cv2.distanceTransform(opening, 1, 5)
ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
# label the sure-foreground regions (the markers / seeds)
ret, masker1 = cv2.connectedComponents(sure_fg)
# add 1 to every label so that the background is 1 rather than 0
maskers = masker1 + 1
# mark the unknown region with 0
maskers[unknown == 255] = 0
# in maskers3, a value of -1 marks the watershed boundary
maskers3 = cv2.watershed(img, maskers)
img[maskers3 == -1] = [0, 0, 255]
cv2.imshow('..', img)
cv2.waitKey(0)