精华内容
下载资源
问答
  • 基于OpenCV车道线检测

    2018-04-16 11:17:25
    基于VC++6.0的车道线检测代码,对视频进行实时处理,并对车道线进行标注
  • 基于opencv车道线检测
  • 利用opencv开发的车道检测和车辆识别代码,包含源代码、目的代码、演示视频。
  • 基于OPENCV车道线检测,可以用来识别车道线。适合视觉开发及ADAS开发参考。
  • 基于Opencv车道线检测

    千次阅读 2018-02-21 11:29:42
    形态学滤波:对二值化图像进行腐蚀,去除噪点,然后对图像进行膨胀,弥补对车道线的腐蚀。ROI提取:提取感兴趣的ROI边缘线检测:canny变化、sobel变化和laplacian变化中选择了效果比较好的canny变化,三者在代码中均...
    1. 二值化:先变化为灰度图,然后设定阈值直接变成二值化图像。
    2. 形态学滤波:对二值化图像进行腐蚀,去除噪点,然后对图像进行膨胀,弥补对车道线的腐蚀。
    3. ROI提取:提取感兴趣的ROI
    4. 边缘线检测:canny变化、sobel变化和laplacian变化中选择了效果比较好的canny变化,三者在代码中均可以使用,canny变化效果稍微好一点。
    5. 直线检测:Hough直线检测
       
    #include<cv.h>
    #include<cxcore.h>
    #include<highgui.h>
    #include<opencv2\opencv.hpp>
    #include<iostream>
    #include <opencv2\opencv.hpp>
    #include <iostream>
    #include <stdio.h>
    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/opencv.hpp>
    
    #include <opencv2/imgproc/types_c.h>  
    #include <opencv2/videoio/videoio_c.h>  
    #include <opencv2/highgui/highgui_c.h>
    #include <opencv2/face.hpp>
    #include <opencv2/face/facerec.hpp>
    
    using namespace std;
    using namespace cv;
    using namespace face;
    
    int main()
    {
    	Mat frame, frame_gray;   // current frame and its grayscale version
    	Mat edges;               // Canny edge map

    	bool stop = false;

    	// Open the source video (use VideoCapture cap(0) for the default camera)
    	VideoCapture cap("D:\\pics\\video_1.mp4");
    	if (!cap.isOpened())
    	{
    		return -1;
    	}

    	// Frame width, height and frame rate of the source video
    	int w = static_cast<int>(cap.get(CV_CAP_PROP_FRAME_WIDTH));
    	int h = static_cast<int>(cap.get(CV_CAP_PROP_FRAME_HEIGHT));
    	double r = cap.get(CV_CAP_PROP_FPS);
    	cout << "帧宽:" << w << "\n帧高:" << h << "\n速率" << r;

    	while (!stop)
    	{
    		cap >> frame;
    		if (frame.empty()) break;

    		// [1] Grayscale conversion, Gaussian smoothing and Canny edge detection
    		cvtColor(frame, frame_gray, CV_BGR2GRAY);
    		GaussianBlur(frame_gray, frame_gray, Size(5, 5), 0, 0);
    		Canny(frame_gray, edges, 50, 150);
    		imshow("canny线检测图", edges);
    		threshold(edges, edges, 128, 255, THRESH_BINARY);

    		// [2] Region of interest: a trapezoid covering the road ahead
    		Mat mask = Mat::zeros(edges.size(), CV_8UC1);

    		Point rook_points[1][4];
    		rook_points[0][0] = Point(0, edges.rows);
    		rook_points[0][1] = Point(460, 325);
    		rook_points[0][2] = Point(520, 325);
    		rook_points[0][3] = Point(edges.cols, edges.rows);

    		const Point* ppt[1] = { rook_points[0] };
    		int npt[] = { 4 };

    		fillPoly(mask, ppt, npt, 1, Scalar(255, 255, 255), 8);
    		imshow("mask", mask);

    		Mat dstImg;
    		edges.copyTo(dstImg, mask);
    		imshow("dstImg", dstImg);

    		// [3] Probabilistic Hough transform: line segments inside the ROI
    		vector<Vec4i> lines;   // detected segments as (x1, y1, x2, y2)
    		HoughLinesP(dstImg, lines, 1, CV_PI / 180, 15, 15, 100);

    		vector<Point2f> left_points_b;
    		vector<Point2f> right_points_b;

    		// [4] Draw each segment, split into left/right lane by slope sign.
    		// BUG FIX: the original iterated a hard-coded 10 segments (reading out
    		// of bounds when fewer were found) and the missing braces made every
    		// push_back execute unconditionally; both are fixed here.
    		for (size_t i = 0; i < lines.size(); i++)
    		{
    			Vec4i l = lines[i];
    			double x1 = l[0];
    			double y1 = l[1];
    			double x2 = l[2];
    			double y2 = l[3];
    			// Vertical segments give +/-inf here, which still lands in one branch
    			double slope = (y2 - y1) / (x2 - x1);
    			if (slope > 0.2)
    			{
    				line(frame, Point(x1, y1), Point(x2, y2), Scalar(0, 0, 255), 3, CV_AA);
    				left_points_b.push_back(Point(x1, y1));
    				left_points_b.push_back(Point(x2, y2));
    			}
    			if (slope < -0.2)
    			{
    				line(frame, Point(x1, y1), Point(x2, y2), Scalar(0, 255, 0), 3, CV_AA);
    				right_points_b.push_back(Point(x1, y1));
    				right_points_b.push_back(Point(x2, y2));
    			}
    		}
    		cout << endl << "left_point" << left_points_b;
    		cout << endl << "right_point" << right_points_b;

    		imshow("detected lines [CPU]", frame);

    		// Any key press stops the playback
    		if (waitKey(10) >= 0)
    			stop = true;
    	}

    	return 0;
    }
    
    
    /***
    Mat roi_mask(Mat img, Mat vertices) 
    {
    
    	Rect rect;
    	rect.x = 300;
    	rect.y = 300;
    	rect.width = 500;
    	rect.height = 200;
    
    
    	//初始化掩模矩阵
    	Mat mask = Mat::zeros(img.size(), CV_8UC1);
    	    
    	fillPoly(mask,vertices,255);
    
    	Mat masked_img = bitwise_and(img, mask);
    	return masked_img
    
    }
    ****/
    

    
    

    展开全文
  • 基于OpenCV车道线检测方法

    千次阅读 2019-10-25 18:36:11
    车道线检测是图像处理运用到无人驾驶的一项技术,目前也...基于霍夫变换和kalman滤波车道线检测 ***A***中处理步骤如下: 1.使用提供的一组棋盘格图片计算相机校正矩阵(camera calibration matrix)和失真系数(...

    车道线检测是图像处理运用到无人驾驶的一项技术,目前也过渡到了部分汽车上,高速公路的自动车道保持就是一个应用。
最近研究了两个基于opencv的车道线检测的代码,先放链接:
    A.Udacity车道线检测代码

    高速车道线检测

    B.基于霍夫变换和kalman滤波车道线检测

    夜视情况下车道线检测

     *****A***中处理步骤如下:**
     
 1.使用提供的一组棋盘格图片计算相机校正矩阵(camera calibration matrix)和失真系数(distortion coefficients).这里主要是考虑到一些鱼眼相机。可以通过棋盘图片来获取校正矩阵进而完成相机拍摄图片的校正。
     2.使用梯度阈值(gradient threshold),颜色阈值(color threshold)等处理图片得到清晰捕捉车道线的二进制图(binary image).
     3.使用透视变换(perspective transform)得到二进制图(binary image)的鸟瞰图(birds-eye view).
 进行透视变换主要是为了找到左右车道线的起点(这里将透视变换后的图片中的像素点投影到x轴,像素多的点对应为左右车道线的起点)
     4.检测属于车道线的像素并用它来测出车道边界.
     5.计算车道曲率及车辆相对车道中央的位置.
     6.处理图片展示车道区域,及车道的曲率和车辆位置.
    

A中各个步骤的处理细节、效果对比,在Github的README.md中介绍得很详细;A中主要缺点就是实时性还不够(每一帧需要1.05秒),对于弯道较多的路段还是很难适用。

    ***B***中对于夜视情况下检测效果非常好,且实时性较好。
         步骤如下:1.对视频中的图片进行gamma校正,gamma校正的作用类似于直方图均衡化,可以将夜视下图片的亮度和对比度提高。代码如下:`
    
    def gamma_correction_auto(RGBimage, equalizeHist=False):  # 0.35
        """Automatically gamma-correct a BGR image based on its average luminance.

        Dark (e.g. night-time) frames get brightened: the correction exponent is
        derived from the mean of the Y (luma) channel of the YUV representation.

        Inputs:
        - RGBimage - BGR image (as loaded by cv2) to correct
        - equalizeHist - if True, also histogram-equalize each output channel
        Returns:
        - corrected BGR image (uint8)
        """
        def _correct_channel(channel):
            # out = (in / 255) ** correct_param, rescaled back to uint8,
            # with optional histogram equalization. Factors out the code the
            # original repeated verbatim for red, green and blue.
            scaled = channel / 255.0
            scaled = cv2.pow(scaled, correct_param)
            out = np.uint8(scaled * 255)
            if equalizeHist:
                out = cv2.equalizeHist(out)
            return out

        originalFile = RGBimage.copy()
        # Mean luminance comes from the Y channel of the YUV conversion
        forLuminance = cv2.cvtColor(originalFile, cv2.COLOR_BGR2YUV)
        Y = forLuminance[:, :, 0]
        # BUG FIX: the pixel count is taken from the image itself instead of the
        # undefined module-level global `vidsize` the original relied on.
        totalPix = Y.shape[0] * Y.shape[1]
        summ = np.sum(Y[:, :])
        # NOTE(review): despite the name this is totalPix / summ, i.e. the
        # reciprocal of the mean luminance — kept as-is because the correction
        # constants below appear tuned for it.
        Yaverage = np.divide(totalPix, summ)
        epsilon = 1.19209e-007  # guards against log10(0)
        correct_param = np.divide(-0.3, np.log10([Yaverage + epsilon]))
        correct_param = 0.7 - correct_param

        blue = _correct_channel(RGBimage[:, :, 0])
        green = _correct_channel(RGBimage[:, :, 1])
        red = _correct_channel(RGBimage[:, :, 2])
        return cv2.merge((blue, green, red))
    
     2.将图片转化到hsv空间主要筛选出白色和黄色区域:
           hsv空间中黄色所在区间(yellow)
           min_val_y = np.array([15, 80, 190])
           max_val_y = np.array([30, 255, 255])
           hsv空间中白色色所在区间(white)
           min_val_w = np.array([0, 0, 195])
           max_val_w = np.array([255, 80, 255])
    

    代码如下:

    def hsv_filter(image, min_val_y, max_val_y, min_val_w, max_val_w):
        """Keep only the pixels whose HSV values fall in the yellow or white range.

        Lane markings are either yellow or white, so everything else is masked out.
        Inputs:
        - image - BGR image to filter
        - min_val_y / max_val_y - arrays of shape (3,): HSV bounds for yellow
        - min_val_w / max_val_w - arrays of shape (3,): HSV bounds for white
        Returns:
        - img_filtered - the input image with out-of-range pixels zeroed
        """
        hsv_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        yellow_mask = cv2.inRange(hsv_img, min_val_y, max_val_y)
        white_mask = cv2.inRange(hsv_img, min_val_w, max_val_w)
        lane_mask = cv2.bitwise_or(yellow_mask, white_mask)
        img_filtered = cv2.bitwise_and(image, image, mask=lane_mask)
        return img_filtered
    
 3.霍夫变换检测出直线。霍夫变换可以检测直线和圆,原理可以查阅相关资料,运算量较小,所以实时性不错;但对于一些弯道较多的路段挑战较大。代码如下:代码中进行了关于角度的滤波,因为霍夫变换检测出来的直线较多,所以可以去掉一些角度不在范围内的,挑选出左右车道线。
    
    def hough_transform(original, gray_img, threshold, discard_horizontal=0.4):
        """Fit straight lines through the white pixels of an edge image.

        Runs the standard Hough transform, throws away near-horizontal lines
        (|slope| < discard_horizontal) and draws the survivors on `original`.

        Input:
        - original - image the lines are drawn onto (modified in place)
        - gray_img - binary edge image, e.g. the result of Canny edge detection
        - threshold - minimum number of white pixels a line must intersect
        - discard_horizontal - smallest |slope| a line may have to be kept
        Return:
        - image_lines - `original` with the accepted lines drawn on it
        - lines_ok - numpy array of (rho, theta) pairs of the accepted lines
        """
        detected = cv2.HoughLines(gray_img, 0.5, np.pi / 360, threshold)
        image_lines = original
        kept = []  # (rho, theta) of non-horizontal lines
        if detected is not None:
            for entry in detected:
                rho = entry[0][0]
                theta = entry[0][1]
                # slope of the line; 1e-10 avoids dividing by zero when theta == 0
                slope = -math.cos(theta) / (math.sin(theta) + 1e-10)
                if abs(slope) < discard_horizontal:
                    continue  # near-horizontal: cannot be a lane line
                a = math.cos(theta)
                b = math.sin(theta)
                x0 = a * rho
                y0 = b * rho
                pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a)))
                pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a)))
                cv2.line(image_lines, pt1, pt2, (0, 0, 255), 2, cv2.LINE_AA)
                kept.append([rho, theta])
        lines_ok = np.array(kept)
        return image_lines, lines_ok
    
  4.检测出直线后,为了防止车道线抖动,这里采用了kalman滤波对车道线进行跟踪。kalman运算量较小,广泛运用于目标跟踪;此外还有粒子滤波,有兴趣可以了解下。这里最关键的是怎样对kalman的初始矩阵进行初始化。kalman滤波主要运用了5个方程:2个用于预测,3个用于更新参数;直接贴代码吧
    
    class LaneTracker:
        # Tracks lane lines across frames with an OpenCV Kalman filter to
        # suppress frame-to-frame jitter of the detected lines.
        def __init__(self, n_lanes, proc_noise_scale, meas_noise_scale, process_cov_parallel=0, proc_noise_type='white'):
            '''
            Kalman tracker for lane lines.

            n_lanes              -- number of lanes tracked (e.g. 2)
            proc_noise_scale     -- scale of the process-noise covariance Q (e.g. 0.1)
            meas_noise_scale     -- scale of the measurement-noise covariance R (e.g. 15)
            process_cov_parallel -- cross covariance coupling corresponding coordinates
                                    of different lanes (0 = lanes evolve independently)
            proc_noise_type      -- 'white': white-acceleration block model for Q;
                                    'identity': plain scaled identity Q

            Standard Kalman equations as implemented by cv2.KalmanFilter
            (sizes shown for n_lanes = 2, i.e. state 16x1, measurement 8x1):
            predict:  x^k- = A x^(k-1)            (a-priori state estimate)
                      P^k- = A P^(k-1) A^T + Q    (a-priori covariance)
            correct:  K_k  = P^k- H^T (H P^k- H^T + R)^-1
                      x^k  = x^k- + K_k (z_k - H x^k-)   (a-posteriori state)
                      P^k  = (I - K_k H) P^k-            (a-posteriori covariance)
            where z_k is the measurement vector, H the measurement matrix,
            Q the process-noise covariance and R the measurement-noise
            covariance (smaller R -> faster convergence to the measurements).
            '''
            self.n_lanes = n_lanes  # number of tracked lanes, e.g. 2
            self.meas_size = 4 * self.n_lanes     # measurement dim: 4 coords per lane (x1,y1,x2,y2)
            self.state_size = self.meas_size * 2  # state dim: each coordinate plus its velocity
            self.contr_size = 0
            self.kf = cv2.KalmanFilter(self.state_size, self.meas_size, self.contr_size)        # (16, 8, 0) for 2 lanes
            self.kf.transitionMatrix = np.eye(self.state_size, dtype=np.float32)                # A initialised to identity
            self.kf.measurementMatrix = np.zeros((self.meas_size, self.state_size), np.float32) # H (meas_size x state_size)
            for i in range(self.meas_size):
                self.kf.measurementMatrix[i, i*2] = 1
            if proc_noise_type == 'white':  # white-acceleration process-noise model
                block = np.matrix([[0.25, 0.5],
                                   [0.5, 1.]], dtype=np.float32)
                self.kf.processNoiseCov = block_diag(*([block] * self.meas_size)) * proc_noise_scale  # Q
            if proc_noise_type == 'identity':
                self.kf.processNoiseCov = np.eye(self.state_size, dtype=np.float32) * proc_noise_scale  # Q as scaled identity

            for i in range(0, self.meas_size, 2):
                for j in range(1, self.n_lanes):  # couple corresponding coords of different lanes
                    self.kf.processNoiseCov[i, i+(j*8)] = process_cov_parallel  # 0 by default (no coupling)
                    self.kf.processNoiseCov[i+(j*8), i] = process_cov_parallel
            self.kf.measurementNoiseCov = np.eye(self.meas_size, dtype=np.float32) * meas_noise_scale  # R
            self.kf.errorCovPre = np.eye(self.state_size)  # P^k- initialised to identity
            self.meas = np.zeros((self.meas_size, 1), np.float32)  # measurement vector z_k
            self.state = np.zeros((self.state_size, 1), np.float32)  # state vector x^k
            self.first_detected = False  # becomes True after the first full detection

        def _update_dt(self, dt):  # refresh A with the elapsed time
            for i in range(0, self.state_size, 2):
                self.kf.transitionMatrix[i, i+1] = dt  # position += velocity * dt; dt presumably the frame interval -- TODO confirm

        def _first_detect(self, lanes):  # initialise self.state from the first complete detection
            for l, i in zip(lanes, range(0, self.state_size, 8)):
                self.state[i:i+8:2, 0] = l  # coordinates go to even slots; velocities stay 0
            self.kf.statePost = self.state
            self.first_detected = True

        def update(self, lanes):
            # Feed the lanes detected in the current frame into the filter.
            if self.first_detected:
                for l, i in zip(lanes, range(0, self.meas_size, 4)):
                    if l is not None:
                        self.meas[i:i+4, 0] = l  # missing lanes keep the previous measurement
                self.kf.correct(self.meas)  # correct with z_k: updates K_k, x^k, P^k for the next predict

            else:
                if lanes.count(None) == 0:
                    self._first_detect(lanes)

        def predict(self, dt):
            # Predict the lane positions dt ahead.
            # Returns a list of (x1, y1, x2, y2) per lane, or None before the first detection.
            if self.first_detected:
                self._update_dt(dt)
                state = self.kf.predict()  # a-priori state x^k-
                lanes = []
                for i in range(0, len(state), 8):
                    lanes.append((state[i], state[i+2], state[i+4], state[i+6]))  # pick the coords, skip the velocities
                return lanes
            else:
                return None
    

    整体效果大家可以运行Github代码查看。夜视下以及实时性很nice。
    A,B主要采用opencv方式完成的车道线检测方法。接下来作者将继续去研究基于特征的深度学习方法完成车道检测。

    展开全文
  • 基于opencv车道线检测(c++)

    万次阅读 多人点赞 2019-06-11 17:37:24
    基于opencv车道线检测 原理: 算法基本思想说明: 传统的车道线检测,多数是基于霍夫直线检测,其实这个里面有个很大的误区,霍夫直线拟合容易受到各种噪声干扰,直接运用有时候效果不好,更多的时候通过霍夫直线...

    基于opencv的车道线检测

    原理:

    算法基本思想说明:
    传统的车道线检测,多数是基于霍夫直线检测,其实这个里面有个很大的误区,霍夫直线拟合容易受到各种噪声干扰,直接运用有时候效果不好,更多的时候通过霍夫直线检测进行初步的筛选,然后再有针对性的进行直线拟合,根据拟合的直线四个点坐标,绘制出车道线,这种方式可以有效避免霍夫直线拟合不良后果,是一种更加稳定的车道线检测方法,在实际项目中,可以选择两种方法并行,在计算出结果后进行叠加或者对比提取,今天分享的案例主要是绕开了霍夫直线检测,通过对二值图像进行轮廓分析与几何分析,提取到相关的车道线信息、然后进行特定区域的像素扫描,拟合生成直线方程,确定四个点绘制出车道线,对连续的视频来说,如果某一帧无法正常检测,就可以通过缓存来替代绘制,从而实现在视频车道线检测中实时可靠。

    原理图:

    在这里插入图片描述

    代码:
    #include <opencv2/opencv.hpp>
    #include <iostream>
    #include <cmath>
    
    using namespace cv;
    using namespace std;
    
    /**
    **1、读取视频  
    **2、二值化
    **3、轮廓发现
    **4、轮廓分析、面积计算,角度分析
    **5、直线拟合
    **6、画出直线
    **
    */
    
    // Cached endpoints of the last successfully fitted lane lines; fitLines()
    // redraws these when the current frame yields no usable detection.
    Point left_line[2];
    Point right_line[2];

    // Per-frame pipeline: binarize, filter contours, fit and draw the lane lines.
    void process(Mat &frame, Point *left_line, Point *right_line);
    // Fits/draws both lane lines on a black canvas and returns that canvas.
    Mat fitLines(Mat &image, Point *left_line, Point *right_line);
    
    int main(int argc, char** argv) {
    	//读取视频
    	VideoCapture capture("E:/opencv/road_line.mp4");
    
    	int height = capture.get(CAP_PROP_FRAME_HEIGHT);
    	int width = capture.get(CAP_PROP_FRAME_WIDTH);
    	int count = capture.get(CAP_PROP_FRAME_COUNT);
    	int fps = capture.get(CAP_PROP_FPS);
    	//初始化
    
    	left_line[0] = Point(0,0);
    
    	left_line[1] = Point(0, 0);
    	
    	right_line[0] = Point(0, 0);
    	
    	right_line[1] = Point(0, 0);
    
    	cout << height<<"       "<< width<< "       " <<count<< "       " <<fps << endl;
    
    	//循环读取视频
    	Mat frame;
    	while (true) {
    		int ret = capture.read(frame);
    		if (!ret) {
    			break;
    		}
    		imshow("input", frame);
    		process(frame, left_line, right_line);
    
    		char c = waitKey(5);
    		if (c == 27) {
    			break;
    		}
    		
    		
    	}
    
    }
    
    void process(Mat &frame, Point *left_line, Point *right_line ){
    	// Turn one BGR frame into a binary lane mask, discard contours that
    	// cannot be lane markings, then fit and overlay the two lane lines.
    	Mat gray, binary;
    	/* grayscale conversion */
    	cvtColor(frame, gray, COLOR_BGR2GRAY);

    	// Edge detection
    	Canny(gray, binary, 150, 300);
    	//imshow("Canny", binary);

    	// Blank out the upper part of the frame (sky / far background): only
    	// the lower half plus 40 rows of margin can contain lane markings.
    	// Replaces the original O(rows*cols) per-pixel loops with setTo().
    	int top = gray.rows / 2 + 40;
    	if (top > binary.rows) top = binary.rows;  // guard for very small frames
    	binary.rowRange(0, top).setTo(0);
    	imshow("binary", binary);

    	// Outer contours of the remaining edges
    	vector<vector<Point>> contours;
    	findContours(binary, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

    	Mat out_image = Mat::zeros(gray.size(), gray.type());

    	for (size_t i = 0; i < contours.size(); i++)  // size_t fixes the signed/unsigned mismatch
    	{
    		// Perimeter and area of the contour
    		double length = arcLength(contours[i], true);
    		double area = contourArea(contours[i]);

    		// Bounding box; contours starting too close to the bottom are skipped
    		Rect rect = boundingRect(contours[i]);
    		int h = gray.rows - 50;

    		// Too small to be a lane marking
    		if (length < 5.0 || area < 10.0) {
    			continue;
    		}
    		if (rect.y > h) {
    			continue;
    		}

    		// Minimum-area rotated rectangle: its angle rejects blobs that are
    		// too horizontal or too vertical to be a lane line
    		// (see https://blog.csdn.net/weixin_41887615/article/details/91411086
    		// for the minAreaRect angle convention)
    		RotatedRect rrt = minAreaRect(contours[i]);
    		double angle = abs(rrt.angle);
    		if (angle < 20.0 || angle > 84.0) {
    			continue;
    		}

    		if (contours[i].size() > 5) {
    			// Ellipse fit as a second angle check.
    			// NOTE(review): the inner 80..100 test can never be true when the
    			// outer <5 / >160 test holds — kept as in the original (dead code).
    			RotatedRect errt = fitEllipse(contours[i]);
    			if ((errt.angle < 5.0) || (errt.angle > 160.0))
    			{
    				if (80.0 < errt.angle && errt.angle < 100.0) {
    					continue;
    				}
    			}
    		}

    		// Surviving contours are drawn into the mask used for line fitting
    		drawContours(out_image, contours, static_cast<int>(i), Scalar(255), 2, 8);
    		imshow("out_image", out_image);
    	}
    	// Fit the left/right lane lines and blend them over the original frame
    	Mat result = fitLines(out_image, left_line, right_line);
    	imshow("result", result);

    	Mat dst;
    	addWeighted(frame, 0.8, result, 0.5, 0, dst);
    	imshow("lane-lines", dst);
    }
    
    //直线拟合
    // Line fitting: fit the left lane with fitLine() over the white pixels of
    // the lower-left region; approximate the right lane by connecting the first
    // and last white pixel of the lower-right region. When a side has too few
    // pixels the previously cached line (left_line/right_line) is redrawn
    // instead, keeping the overlay stable on frames where detection fails.
    Mat fitLines(Mat &image, Point *left_line, Point *right_line) {
    	int height = image.rows;
    	int width = image.cols;

    	Mat out = Mat::zeros(image.size(), CV_8UC3);

    	int cx = width / 2;
    	int cy = height / 2;

    	vector<Point> left_pts;
    	vector<Point> right_pts;
    	Vec4f left;

    	// Collect white pixels of the lower-left region (x in [100, cx-10))
    	for (int i = 100; i < (cx - 10); i++)
    	{
    		for (int j = cy; j < height; j++)
    		{
    			if (image.at<uchar>(j, i) == 255)
    			{
    				left_pts.push_back(Point(i, j));
    			}
    		}
    	}

    	// Collect white pixels of the lower-right region (x in [cx, width-20))
    	for (int i = cx; i < (width - 20); i++)
    	{
    		for (int j = cy; j < height; j++)
    		{
    			if (image.at<uchar>(j, i) == 255)
    			{
    				right_pts.push_back(Point(i, j));
    			}
    		}
    	}

    	if (left_pts.size() > 2)
    	{
    		// Robust L1 fit: left = (vx, vy, x0, y0)
    		fitLine(left_pts, left, DIST_L1, 0, 0.01, 0.01);

    		double k1 = left[1] / left[0];         // slope
    		double step = left[3] - k1 * left[2];  // intercept

    		// BUG FIX: guard against a (near-)horizontal fit, which made the
    		// original divide by zero below; fall back to the cached line.
    		if (fabs(k1) > 1e-6)
    		{
    			int x1 = int((height - step) / k1);
    			int y2 = int((cx - 25) * k1 + step);

    			Point left_spot_1 = Point(x1, height);       // bottom end of the lane
    			Point left_spot_end = Point((cx - 25), y2);  // end towards the centre

    			line(out, left_spot_1, left_spot_end, Scalar(0, 0, 255), 8, 8, 0);
    			left_line[0] = left_spot_1;   // cache for frames without detection
    			left_line[1] = left_spot_end;
    		}
    		else
    		{
    			line(out, left_line[0], left_line[1], Scalar(0, 0, 255), 8, 8, 0);
    		}
    	}
    	else
    	{
    		// Not enough pixels: redraw the cached left line
    		line(out, left_line[0], left_line[1], Scalar(0, 0, 255), 8, 8, 0);
    	}

    	if (right_pts.size() > 2)
    	{
    		// First and last collected pixel span the right lane
    		// (the unused x1/y1/x2/y2 locals of the original were dropped)
    		Point spot_1 = right_pts[0];
    		Point spot_end = right_pts[right_pts.size() - 1];

    		line(out, spot_1, spot_end, Scalar(0, 0, 255), 8, 8, 0);
    		right_line[0] = spot_1;   // cache for frames without detection
    		right_line[1] = spot_end;
    	}
    	else
    	{
    		// Not enough pixels: redraw the cached right line
    		line(out, right_line[0], right_line[1], Scalar(0, 0, 255), 8, 8, 0);
    	}

    	return out;
    }
    
    
    结果图片:

    在这里插入图片描述

    展开全文
  • 基于opencv的道路车道线检测

    千次下载 热门讨论 2014-01-10 16:05:18
    基于opencv的道路车道线检测。 采用了边缘检测法先检测出绘图图像的边缘,再hough直线拟合,拟合出图中的直线。由于这样查找到的直线非常多,所以先筛选掉角度明显有误的直线,在剩下的直线中保留最长的一组。 ...
  • 基于OpenCV3( Python / C++ ) 的车道检测代码和测试视频
  • opencv车道线检测

    2020-11-23 10:35:01
    基于Visual Studio 2015,并进行Qt配置Opencv,实现对视频中基于道路特征的 车道线检测方法。
  • # 步骤1:边缘检测 def canyEdgeDetector(image): edged = cv2.Canny(image, 50, 150) return edged # 步骤2:定义ROI(感兴趣区域) def getROI(image): height = image.shape[0] width = image.shape[1] # ...

    在这里插入图片描述

    import cv2
    import numpy as np
    
    # Step 1: edge detection
    def canyEdgeDetector(image):
        """Return the Canny edge map of `image` (thresholds 50/150)."""
        return cv2.Canny(image, 50, 150)
    
    
    # Step 2: define the region of interest (ROI)
    def getROI(image):
        """Mask everything outside a triangular road region.

        The triangle spans the bottom of the frame up to roughly mid-height;
        its vertices depend on the camera mount and may need retuning.
        """
        height = image.shape[0]
        width = image.shape[1]
        # Triangular ROI — the values will change as per your camera mount
        roi_triangle = np.array([[(100, height), (width, height), (width-300, int(height/1.9))]])
        # Black canvas of the same shape as the input
        canvas = np.zeros_like(image)
        # White triangle drawn on the black canvas forms the mask
        roi_mask = cv2.fillPoly(canvas, roi_triangle, 255)
        # Keep only the pixels inside the triangle
        return cv2.bitwise_and(image, roi_mask)
    
    
    # Step 3: collect every straight line in the image
    def getLines(image):
        """Run probabilistic Hough on an edge image and return the raw segments.

        Parameters passed to cv2.HoughLinesP:
        - rho = 1 pixel, theta = 1 degree: accumulator resolution
        - threshold = 100: votes a bin needs for its line to be accepted
        - minLineLength = 70: shortest segment (in pixels) that is kept
        - maxLineGap = 20: largest gap bridged between collinear segments
        """
        return cv2.HoughLinesP(image, 1, np.pi / 180, 100, np.array([]),
                               minLineLength=70, maxLineGap=20)
    
    
    # display lines over an image
    def displayLines(image, lines):
        """Draw every [x1, y1, x2, y2] segment in `lines` onto `image` (in place)."""
        if lines is None:
            return image
        for segment in lines:
            # each entry is a 2-d array like [[x1, y1, x2, y2]]
            x1, y1, x2, y2 = segment.reshape(4)
            # blue line (BGR 255,0,0), 10 px thick
            cv2.line(image, (x1, y1), (x2, y2), (255, 0, 0), 10)
        return image
    
    
    
    def getLineCoordinatesFromParameters(image, line_parameters):
        """Convert a (slope, intercept) pair into pixel endpoints [x1, y1, x2, y2].

        The segment runs from the bottom edge of `image` up to 3.4/5 of its
        height; the x coordinates come from inverting y = slope * x + intercept.
        """
        slope = line_parameters[0]
        intercept = line_parameters[1]
        bottom_y = image.shape[0]  # lines always start at the bottom of the frame
        top_y = int(bottom_y * (3.4 / 5))  # stop a little above the middle
        bottom_x = int((bottom_y - intercept) / slope)
        top_x = int((top_y - intercept) / slope)
        return np.array([bottom_x, bottom_y, top_x, top_y])
    
    
    
    # Averages all left/right segments of a lane and returns one line per side
    def getSmoothLines(image, lines):
        """Collapse the Hough segments into a single left and right lane line.

        Segments with negative slope belong to the left lane, positive slope to
        the right; each side's (slope, intercept) pairs are averaged and then
        converted back into pixel endpoints.
        """
        left_fit = []   # (slope, intercept) pairs of left-side segments
        right_fit = []  # (slope, intercept) pairs of right-side segments

        for segment in lines:
            x1, y1, x2, y2 = segment.reshape(4)
            # degree-1 polyfit yields the segment's slope and intercept
            slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
            if slope < 0:
                left_fit.append((slope, intercept))
            else:
                right_fit.append((slope, intercept))

        # axis=0 averages the slopes and the intercepts column-wise, giving a
        # single (slope, intercept) per side
        left_fit_average = np.average(left_fit, axis=0)
        right_fit_average = np.average(right_fit, axis=0)

        # turn the averaged parameters back into x1,y1,x2,y2 endpoints
        left_line = getLineCoordinatesFromParameters(image, left_fit_average)
        right_line = getLineCoordinatesFromParameters(image, right_fit_average)
        return np.array([left_line, right_line])
    
    
    
    def show_image(name, image):
        # Display `image` in a window titled `name`; non-blocking because the
        # waitKey call is commented out (the script waits once at the end).
        cv2.imshow(name, image)
        # cv2.waitKey(0)
    
    
    
    image = cv2.imread("lane.jpg")  # load the test image
    print(image.shape)
    show_image('image', image)
    
    edged_image = canyEdgeDetector(image)   # Step 1: Canny edge map
    print(edged_image.shape)
    show_image('edged_image', edged_image)
    
    roi_image = getROI(edged_image)         # Step 2: mask to the road triangle
    print(roi_image.shape)
    show_image('roi_image', roi_image)
    
    lines = getLines(roi_image)             # Step 3: raw Hough segments
    print(lines)
    #image_with_lines = displayLines(image, lines)
    
    
    smooth_lines = getSmoothLines(image, lines)    # Step 4: one averaged line per side
    print(smooth_lines)
    image_with_smooth_lines = displayLines(image, smooth_lines)  # Step 5: draw the lane lines
    
    cv2.imshow("Output", image_with_smooth_lines)
    cv2.waitKey(0)
    
    
    

    在这里插入图片描述
    在这里插入图片描述

    import cv2
    import numpy as np
    
    def canyEdgeDetector(image):
        """Canny edge detection with thresholds 50/150."""
        return cv2.Canny(image, 50, 150)
    
    
    def getROI(image):
        """Black out everything outside a fixed triangular road region.

        The triangle vertices (200, h), (1100, h), (550, 250) assume a
        720x1280 frame and depend on the camera mount.
        """
        height = image.shape[0]
        width = image.shape[1]
        print(height, width)  # 720 1280
        # Triangular ROI — adjust the vertices for a different camera mount
        roi_triangle = np.array([[(200, height), (1100, height), (550, 250)]])
        canvas = np.zeros_like(image)  # black image, same shape as the input
        # White triangle drawn on the black canvas forms the mask
        roi_mask = cv2.fillPoly(canvas, roi_triangle, 255)
        # Keep only the pixels inside the triangle
        return cv2.bitwise_and(image, roi_mask)
    
    
    
    def getLines(image):
        """Probabilistic Hough transform tuned for video frames.

        cv2.HoughLinesP parameters: rho = 2 px and theta = 1 degree set the
        accumulator resolution; threshold = 100 is the vote count a bin needs;
        minLineLength = 40 is the shortest accepted segment; maxLineGap = 5 is
        the largest gap bridged between collinear segments.
        """
        return cv2.HoughLinesP(image, 2, np.pi / 180, 100, np.array([]),
                               minLineLength=40, maxLineGap=5)
    
    
    # display lines over an image
    def displayLines(image, lines):
        """Draw each [x1, y1, x2, y2] segment onto `image` in blue (in place)."""
        if lines is None:
            return image
        for segment in lines:
            # each entry is a 2-d array like [[x1, y1, x2, y2]]
            x1, y1, x2, y2 = segment.reshape(4)
            # blue line (BGR 255,0,0), 10 px thick
            cv2.line(image, (x1, y1), (x2, y2), (255, 0, 0), 10)
        return image
    
    
    
    def getLineCoordinatesFromParameters(image, line_parameters):
        """Convert a (slope, intercept) pair into pixel endpoints [x1, y1, x2, y2].

        The segment runs from the bottom edge of `image` up to 3.4/5 of its
        height; the x coordinates come from inverting y = slope * x + intercept.
        """
        slope = line_parameters[0]
        intercept = line_parameters[1]
        bottom_y = image.shape[0]  # lines always start at the bottom of the frame
        top_y = int(bottom_y * (3.4 / 5))  # stop a little above the middle
        bottom_x = int((bottom_y - intercept) / slope)
        top_x = int((top_y - intercept) / slope)
        return np.array([bottom_x, bottom_y, top_x, top_y])
    
    
    
    # Averages all left/right segments of a lane and returns one line per side
    def getSmoothLines(image, lines):
        """Collapse the Hough segments into a single left and right lane line.

        Segments with negative slope belong to the left lane, positive slope to
        the right; each side's (slope, intercept) pairs are averaged and then
        converted back into pixel endpoints.
        """
        left_fit = []   # (slope, intercept) pairs of left-side segments
        right_fit = []  # (slope, intercept) pairs of right-side segments

        for segment in lines:
            x1, y1, x2, y2 = segment.reshape(4)
            # degree-1 polyfit yields the segment's slope and intercept
            slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
            if slope < 0:
                left_fit.append((slope, intercept))
            else:
                right_fit.append((slope, intercept))

        # axis=0 averages the slopes and the intercepts column-wise, giving a
        # single (slope, intercept) per side
        left_fit_average = np.average(left_fit, axis=0)
        right_fit_average = np.average(right_fit, axis=0)

        # turn the averaged parameters back into x1,y1,x2,y2 endpoints
        left_line = getLineCoordinatesFromParameters(image, left_fit_average)
        right_line = getLineCoordinatesFromParameters(image, right_fit_average)
        return np.array([left_line, right_line])
    
    
    
    
    
    
    videoFeed = cv2.VideoCapture("test_video.mp4")
    
    # BUG FIX: the original wrapped the whole loop in a bare `try/except: pass`,
    # so ANY error — including the None frame returned at end-of-video — silently
    # ended playback and masked real bugs. The read status is now checked
    # explicitly, and only individual frames on which no lane line can be
    # fitted are skipped.
    while videoFeed.isOpened():
        status, image = videoFeed.read()
        if not status:
            break  # end of stream or read error
    
        try:
            edged_image = canyEdgeDetector(image)   # Step 1: Canny edge map
            roi_image = getROI(edged_image)         # Step 2: mask to the road triangle
            lines = getLines(roi_image)             # Step 3: raw Hough segments
            smooth_lines = getSmoothLines(image, lines)    # Step 4: one line per side
            image_with_smooth_lines = displayLines(image, smooth_lines)  # Step 5: draw
        except Exception:
            continue  # best effort: skip frames where no lane could be fitted
    
        cv2.imshow("Output", image_with_smooth_lines)
        cv2.waitKey(1)
    
    videoFeed.release()  # free the capture and close the windows when done
    cv2.destroyAllWindows()
    

    在这里插入图片描述

    展开全文
  • 基于opencv车道线检测,供大家参考,具体内容如下 原理: 算法基本思想说明: 传统的车道线检测,多数是基于霍夫直线检测,其实这个里面有个很大的误区,霍夫直线拟合容易受到各种噪声干扰,直接运用有时候效果...
  • 主要介绍了opencv车道线检测的实现方法,文中通过示例代码介绍的非常详细,对大家的学习或者工作具有一定的参考学习价值,需要的朋友们下面随着小编来一起学习学习吧
  • 运行环境:python3.8, numpy=1.19.5,opencv-python=4.5.1 ...9、视频车道线检测 二、实操 注意:后一节代码是在上一节代码的基础上进行修改(排版比较繁琐,可以选择章节进行运行) 1、相机标定 impo
  • 1. 图像加载;2.图像预处理:图片灰度化,高斯滤波;3.Cany边缘检测;4.感兴趣区域检测;5.霍夫直线检测 ;6.直线拟合;7.车道线叠加;8.图片和视频测试;9.可视化界面pyqt5
  • 基于OpenCV车道线检测

    千次阅读 2019-04-09 19:03:11
    最近终于在Android下成功实现车道线检测了 ,但是还是不够稳定,需要进一步优化 等有空详细写一下文档~
  • opencv-车道线检测

    2019-11-24 16:24:13
    opencv-车道线检测0.导入数据1.读取图片和视频2.灰度处理3.高斯滤波4.边缘检测5.感兴趣区域检测6.霍夫变换7.图片混合显示视频 今天记录一下简单的车道线检测,为一下几个步骤 0.导入数据 import matplotlib.pyplot ...
  • opencv 车道线检测(一)

    千次阅读 2018-07-01 10:40:49
    opencv 车道线检测(一)
  • 使用OpenCV实现车道线检测

    千次阅读 2020-11-10 16:44:57
    本文介绍了使用计算机视觉技术进行车道检测的过程,并引导我们完成识别车道区域、计算道路RoC和估计车道中心距离的步骤。 摄像机校准 几乎所有摄像机使用的镜头在聚焦光线以捕捉图像时都存在一定的误差,因为这些...
  • 本程序基于OpenCv给出了一种车道线检测的算法,首先通过OSTU进行二值化处理,随后通过改进的霍夫变化进行车道线检测,具有比较好的效果。
  • python+opencv车道线检测(简易实现)

    千次阅读 多人点赞 2020-04-10 09:56:50
    python+opencv车道线检测(简易实现) 技术栈:python+opencv 实现思路: canny边缘检测获取图中的边缘信息; 霍夫变换寻找图中直线; 绘制梯形感兴趣区域获得车前范围; 得到并绘制车道线; 效果展示: 代码实现...
  • opencv车道线检测实现

    千次阅读 2018-08-26 20:36:27
    主要opencv函数介绍: CvSeq* cvHoughLines2( CvArr* image, void* line_storage, int method, double rho, double theta, int threshold, double param1=0, double param2=0 ); image 输入 8-比特、单通道 (二值...
  • 基于OpenCV C++ 的视频车道检测

    千次阅读 多人点赞 2018-10-27 10:31:17
    OpenCV Python 车道检测 ; 对应的python中的cv2.fillPoly和 C++ 中的cv::fillPoly不对应,在C++中应该对应的是cv::fillConvexPoly。 以下是代码, p.s 代码(Python & C++)及测试视频下载地址: Lane ...
  • 基于视觉的车道线检测完整代码

    热门讨论 2017-12-06 17:52:50
    基于视觉的曲线车道线检测完整代码,采用滑动窗口,详情见博客:http://blog.csdn.net/adamshan/article/details/78733302
  • OpenCV车道线检测 输入 一张摄像机拍摄到的道路图片,图片中需要包含车道线。如下图所示(可=可以直接将图片另存成jpg格式来使用) 输出 图像坐标系下的左右车道线的直线方程和有效距离。将左右车道线的方程...
  • 基于Python的openCV实现车道线检测算法原理

    万次阅读 多人点赞 2019-07-07 17:57:41
    车道线检测,就是在汽车行驶时通过汽车前方的摄像头捕捉到地面的图像(视频),通过Opencv对图片(视频)进行一系列处理后,可以在图片(视频)中标注出车道线,然后以此作为汽车自动驾驶时的安全行驶范围。...
  • opencv 车道线检测

    热门讨论 2013-12-13 16:47:44
    opencv 基于改进的霍夫变换车道线检测代码 及相关文献

空空如也

空空如也

1 2 3 4 5 ... 20
收藏数 1,945
精华内容 778
关键字:

基于opencv的车道线检测