OpenCV Camera Calibration


    Code 1

    #include "opencv2/core/core.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "opencv2/calib3d/calib3d.hpp"
    #include "opencv2/highgui/highgui.hpp"
    
    #include <cctype>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>
    
    using namespace cv;
    using namespace std;
    
    const char * usage =
    " \nexample command line for calibration from a live feed.\n"
    "   calibration  -w 4 -h 5 -s 0.025 -o camera.yml -op -oe\n"
    " \n"
    " example command line for calibration from a list of stored images:\n"
    "   imagelist_creator image_list.xml *.png\n"
    "   calibration -w 4 -h 5 -s 0.025 -o camera.yml -op -oe image_list.xml\n"
    " where image_list.xml is the standard OpenCV XML/YAML\n"
    " use imagelist_creator to create the xml or yaml list\n"
    " file consisting of the list of strings, e.g.:\n"
    " \n"
    "<?xml version=\"1.0\"?>\n"
    "<opencv_storage>\n"
    "<images>\n"
    "view000.png\n"
    "view001.png\n"
    "view002.png\n"
    "view003.png\n"
    "view010.png\n"
    "one_extra_view.jpg\n"
    "</images>\n"
    "</opencv_storage>\n";
    
    
    
    
    const char* liveCaptureHelp =
        "When the live video from camera is used as input, the following hot-keys may be used:\n"
            "  <ESC>, 'q' - quit the program\n"
            "  'g' - start capturing images\n"
            "  'u' - switch undistortion on/off\n";
    
    static void help()
    {
        printf( "This is a camera calibration sample.\n"
            "Usage: calibration\n"
            "     -w <board_width>         # the number of inner corners per one of board dimension\n"
            "     -h <board_height>        # the number of inner corners per another board dimension\n"
            "     [-pt <pattern>]          # the type of pattern: chessboard or circles' grid\n"
            "     [-n <number_of_frames>]  # the number of frames to use for calibration\n"
            "                              # (if not specified, it will be set to the number\n"
            "                              #  of board views actually available)\n"
            "     [-d <delay>]             # a minimum delay in ms between subsequent attempts to capture a next view\n"
            "                              # (used only for video capturing)\n"
            "     [-s <squareSize>]       # square size in some user-defined units (1 by default)\n"
            "     [-o <out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
            "     [-op]                    # write detected feature points\n"
            "     [-oe]                    # write extrinsic parameters\n"
            "     [-zt]                    # assume zero tangential distortion\n"
            "     [-a <aspectRatio>]      # fix aspect ratio (fx/fy)\n"
            "     [-p]                     # fix the principal point at the center\n"
            "     [-v]                     # flip the captured images around the horizontal axis\n"
            "     [-V]                     # use a video file, and not an image list, uses\n"
            "                              # [input_data] string for the video file name\n"
            "     [-su]                    # show undistorted images after calibration\n"
            "     [input_data]             # input data, one of the following:\n"
            "                              #  - text file with a list of the images of the board\n"
            "                              #    the text file can be generated with imagelist_creator\n"
            "                              #  - name of video file with a video of the board\n"
            "                              # if input_data not specified, a live view from the camera is used\n"
            "\n" );
        printf("\n%s",usage);
        printf( "\n%s", liveCaptureHelp );
    }
    
    enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
    enum Pattern { CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };
    
    static double computeReprojectionErrors(
            const vector<vector<Point3f> >& objectPoints,
            const vector<vector<Point2f> >& imagePoints,
            const vector<Mat>& rvecs, const vector<Mat>& tvecs,
            const Mat& cameraMatrix, const Mat& distCoeffs,
            vector<float>& perViewErrors )
    {
        vector<Point2f> imagePoints2;
        int i, totalPoints = 0;
        double totalErr = 0, err;
        perViewErrors.resize(objectPoints.size());
    
        for( i = 0; i < (int)objectPoints.size(); i++ )
        {
            projectPoints(Mat(objectPoints[i]), rvecs[i], tvecs[i],
                          cameraMatrix, distCoeffs, imagePoints2);
            err = norm(Mat(imagePoints[i]), Mat(imagePoints2), CV_L2);
            int n = (int)objectPoints[i].size();
            perViewErrors[i] = (float)std::sqrt(err*err/n);
            totalErr += err*err;
            totalPoints += n;
        }
    
        return std::sqrt(totalErr/totalPoints);
    }
    
    static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners, Pattern patternType = CHESSBOARD)
    {
        corners.resize(0);
    
        switch(patternType)
        {
          case CHESSBOARD:
          case CIRCLES_GRID:
            for( int i = 0; i < boardSize.height; i++ )
                for( int j = 0; j < boardSize.width; j++ )
                    corners.push_back(Point3f(float(j*squareSize),
                                              float(i*squareSize), 0));
            break;
    
          case ASYMMETRIC_CIRCLES_GRID:
            for( int i = 0; i < boardSize.height; i++ )
                for( int j = 0; j < boardSize.width; j++ )
                    corners.push_back(Point3f(float((2*j + i % 2)*squareSize),
                                              float(i*squareSize), 0));
            break;
    
          default:
            CV_Error(CV_StsBadArg, "Unknown pattern type\n");
        }
    }
    
    static bool runCalibration( vector<vector<Point2f> > imagePoints,
                        Size imageSize, Size boardSize, Pattern patternType,
                        float squareSize, float aspectRatio,
                        int flags, Mat& cameraMatrix, Mat& distCoeffs,
                        vector<Mat>& rvecs, vector<Mat>& tvecs,
                        vector<float>& reprojErrs,
                        double& totalAvgErr)
    {
        cameraMatrix = Mat::eye(3, 3, CV_64F);
        if( flags & CV_CALIB_FIX_ASPECT_RATIO )
            cameraMatrix.at<double>(0,0) = aspectRatio;
    
        distCoeffs = Mat::zeros(8, 1, CV_64F);
    
        vector<vector<Point3f> > objectPoints(1);
        calcChessboardCorners(boardSize, squareSize, objectPoints[0], patternType);
    
        objectPoints.resize(imagePoints.size(),objectPoints[0]);
    
        double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
                        distCoeffs, rvecs, tvecs, flags|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
                        ///*|CV_CALIB_FIX_K3*/|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
        printf("RMS error reported by calibrateCamera: %g\n", rms);
    
        bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
    
        totalAvgErr = computeReprojectionErrors(objectPoints, imagePoints,
                    rvecs, tvecs, cameraMatrix, distCoeffs, reprojErrs);
    
        return ok;
    }
    
    
    static void saveCameraParams( const string& filename,
                           Size imageSize, Size boardSize,
                           float squareSize, float aspectRatio, int flags,
                           const Mat& cameraMatrix, const Mat& distCoeffs,
                           const vector<Mat>& rvecs, const vector<Mat>& tvecs,
                           const vector<float>& reprojErrs,
                           const vector<vector<Point2f> >& imagePoints,
                           double totalAvgErr )
    {
        FileStorage fs( filename, FileStorage::WRITE );
    
        time_t tt;
        time( &tt );
        struct tm *t2 = localtime( &tt );
        char buf[1024];
        strftime( buf, sizeof(buf)-1, "%c", t2 );
    
        fs << "calibration_time" << buf;
    
        if( !rvecs.empty() || !reprojErrs.empty() )
            fs << "nframes" << (int)std::max(rvecs.size(), reprojErrs.size());
        fs << "image_width" << imageSize.width;
        fs << "image_height" << imageSize.height;
        fs << "board_width" << boardSize.width;
        fs << "board_height" << boardSize.height;
        fs << "square_size" << squareSize;
    
        if( flags & CV_CALIB_FIX_ASPECT_RATIO )
            fs << "aspectRatio" << aspectRatio;
    
        if( flags != 0 )
        {
            sprintf( buf, "flags: %s%s%s%s",
                flags & CV_CALIB_USE_INTRINSIC_GUESS ? "+use_intrinsic_guess" : "",
                flags & CV_CALIB_FIX_ASPECT_RATIO ? "+fix_aspectRatio" : "",
                flags & CV_CALIB_FIX_PRINCIPAL_POINT ? "+fix_principal_point" : "",
                flags & CV_CALIB_ZERO_TANGENT_DIST ? "+zero_tangent_dist" : "" );
            cvWriteComment( *fs, buf, 0 );
        }
    
        fs << "flags" << flags;
    
        fs << "camera_matrix" << cameraMatrix;
        fs << "distortion_coefficients" << distCoeffs;
    
        fs << "avg_reprojection_error" << totalAvgErr;
        if( !reprojErrs.empty() )
            fs << "per_view_reprojection_errors" << Mat(reprojErrs);
    
        if( !rvecs.empty() && !tvecs.empty() )
        {
            CV_Assert(rvecs[0].type() == tvecs[0].type());
            Mat bigmat((int)rvecs.size(), 6, rvecs[0].type());
            for( int i = 0; i < (int)rvecs.size(); i++ )
            {
                Mat r = bigmat(Range(i, i+1), Range(0,3));
                Mat t = bigmat(Range(i, i+1), Range(3,6));
    
                CV_Assert(rvecs[i].rows == 3 && rvecs[i].cols == 1);
                CV_Assert(tvecs[i].rows == 3 && tvecs[i].cols == 1);
                //*.t() is MatExpr (not Mat) so we can use assignment operator
                r = rvecs[i].t();
                t = tvecs[i].t();
            }
            cvWriteComment( *fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0 );
            fs << "extrinsic_parameters" << bigmat;
        }
    
        if( !imagePoints.empty() )
        {
            Mat imagePtMat((int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2);
            for( int i = 0; i < (int)imagePoints.size(); i++ )
            {
                Mat r = imagePtMat.row(i).reshape(2, imagePtMat.cols);
                Mat imgpti(imagePoints[i]);
                imgpti.copyTo(r);
            }
            fs << "image_points" << imagePtMat;
        }
    }
    
    static bool readStringList( const string& filename, vector<string>& l )
    {
        l.resize(0);
        FileStorage fs(filename, FileStorage::READ);
        if( !fs.isOpened() )
            return false;
        FileNode n = fs.getFirstTopLevelNode();
        if( n.type() != FileNode::SEQ )
            return false;
        FileNodeIterator it = n.begin(), it_end = n.end();
        for( ; it != it_end; ++it )
            l.push_back((string)*it);
        return true;
    }
    
    
    static bool runAndSave(const string& outputFilename,
                    const vector<vector<Point2f> >& imagePoints,
                    Size imageSize, Size boardSize, Pattern patternType, float squareSize,
                    float aspectRatio, int flags, Mat& cameraMatrix,
                    Mat& distCoeffs, bool writeExtrinsics, bool writePoints )
    {
        vector<Mat> rvecs, tvecs;
        vector<float> reprojErrs;
        double totalAvgErr = 0;
    
        bool ok = runCalibration(imagePoints, imageSize, boardSize, patternType, squareSize,
                       aspectRatio, flags, cameraMatrix, distCoeffs,
                       rvecs, tvecs, reprojErrs, totalAvgErr);
        printf("%s. avg reprojection error = %.2f\n",
               ok ? "Calibration succeeded" : "Calibration failed",
               totalAvgErr);
    
        if( ok )
            saveCameraParams( outputFilename, imageSize,
                             boardSize, squareSize, aspectRatio,
                             flags, cameraMatrix, distCoeffs,
                             writeExtrinsics ? rvecs : vector<Mat>(),
                             writeExtrinsics ? tvecs : vector<Mat>(),
                             writeExtrinsics ? reprojErrs : vector<float>(),
                             writePoints ? imagePoints : vector<vector<Point2f> >(),
                             totalAvgErr );
        return ok;
    }
    
    
    int main( int argc, char** argv )
    {
        Size boardSize, imageSize;
        float squareSize = 1.f, aspectRatio = 1.f;
        Mat cameraMatrix, distCoeffs;
        const char* outputFilename = "out_camera_data.yml";
        const char* inputFilename = 0;
    
        int i, nframes = 10;
        bool writeExtrinsics = false, writePoints = false;
        bool undistortImage = false;
        int flags = 0;
        VideoCapture capture;
        bool flipVertical = false;
        bool showUndistorted = false;
        bool videofile = false;
        int delay = 1000;
        clock_t prevTimestamp = 0;
        int mode = DETECTION;
        int cameraId = 0;
        vector<vector<Point2f> > imagePoints;
        vector<string> imageList;
        Pattern pattern = CHESSBOARD;
    
        if( argc < 2 )
        {
            help();
            return 0;
        }
    
        for( i = 1; i < argc; i++ )
        {
            const char* s = argv[i];
            if( strcmp( s, "-w" ) == 0 )
            {
                if( sscanf( argv[++i], "%u", &boardSize.width ) != 1 || boardSize.width <= 0 )
                    return fprintf( stderr, "Invalid board width\n" ), -1;
            }
            else if( strcmp( s, "-h" ) == 0 )
            {
                if( sscanf( argv[++i], "%u", &boardSize.height ) != 1 || boardSize.height <= 0 )
                    return fprintf( stderr, "Invalid board height\n" ), -1;
            }
            else if( strcmp( s, "-pt" ) == 0 )
            {
                i++;
                if( !strcmp( argv[i], "circles" ) )
                    pattern = CIRCLES_GRID;
                else if( !strcmp( argv[i], "acircles" ) )
                    pattern = ASYMMETRIC_CIRCLES_GRID;
                else if( !strcmp( argv[i], "chessboard" ) )
                    pattern = CHESSBOARD;
                else
                    return fprintf( stderr, "Invalid pattern type: must be chessboard or circles\n" ), -1;
            }
            else if( strcmp( s, "-s" ) == 0 )
            {
                if( sscanf( argv[++i], "%f", &squareSize ) != 1 || squareSize <= 0 )
                    return fprintf( stderr, "Invalid board square width\n" ), -1;
            }
            else if( strcmp( s, "-n" ) == 0 )
            {
                if( sscanf( argv[++i], "%u", &nframes ) != 1 || nframes <= 3 )
                    return printf("Invalid number of images\n" ), -1;
            }
            else if( strcmp( s, "-a" ) == 0 )
            {
                if( sscanf( argv[++i], "%f", &aspectRatio ) != 1 || aspectRatio <= 0 )
                    return printf("Invalid aspect ratio\n" ), -1;
                flags |= CV_CALIB_FIX_ASPECT_RATIO;
            }
            else if( strcmp( s, "-d" ) == 0 )
            {
                if( sscanf( argv[++i], "%u", &delay ) != 1 || delay <= 0 )
                    return printf("Invalid delay\n" ), -1;
            }
            else if( strcmp( s, "-op" ) == 0 )
            {
                writePoints = true;
            }
            else if( strcmp( s, "-oe" ) == 0 )
            {
                writeExtrinsics = true;
            }
            else if( strcmp( s, "-zt" ) == 0 )
            {
                flags |= CV_CALIB_ZERO_TANGENT_DIST;
            }
            else if( strcmp( s, "-p" ) == 0 )
            {
                flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
            }
            else if( strcmp( s, "-v" ) == 0 )
            {
                flipVertical = true;
            }
            else if( strcmp( s, "-V" ) == 0 )
            {
                videofile = true;
            }
            else if( strcmp( s, "-o" ) == 0 )
            {
                outputFilename = argv[++i];
            }
            else if( strcmp( s, "-su" ) == 0 )
            {
                showUndistorted = true;
            }
            else if( s[0] != '-' )
            {
                if( isdigit(s[0]) )
                    sscanf(s, "%d", &cameraId);
                else
                    inputFilename = s;
            }
            else
                return fprintf( stderr, "Unknown option %s", s ), -1;
        }
    
        if( inputFilename )
        {
            if( !videofile && readStringList(inputFilename, imageList) )
                mode = CAPTURING;
            else
                capture.open(inputFilename);
        }
        else
            capture.open(cameraId);
    
        if( !capture.isOpened() && imageList.empty() )
            return fprintf( stderr, "Could not initialize video (%d) capture\n",cameraId ), -2;
    
        if( !imageList.empty() )
            nframes = (int)imageList.size();
    
        if( capture.isOpened() )
            printf( "%s", liveCaptureHelp );
    
        namedWindow( "Image View", 1 );
    
        for(i = 0;;i++)
        {
            Mat view, viewGray;
            bool blink = false;
    
            if( capture.isOpened() )
            {
                Mat view0;
                capture >> view0;
                view0.copyTo(view);
            }
            else if( i < (int)imageList.size() )
                view = imread(imageList[i], 1);
    
            if(!view.data)
            {
                if( imagePoints.size() > 0 )
                    runAndSave(outputFilename, imagePoints, imageSize,
                               boardSize, pattern, squareSize, aspectRatio,
                               flags, cameraMatrix, distCoeffs,
                               writeExtrinsics, writePoints);
                break;
            }
    
            imageSize = view.size();
    
            if( flipVertical )
                flip( view, view, 0 );
    
            vector<Point2f> pointbuf;
            cvtColor(view, viewGray, COLOR_BGR2GRAY);
    
            bool found;
            switch( pattern )
            {
                case CHESSBOARD:
                    found = findChessboardCorners( view, boardSize, pointbuf,
                        CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
                    break;
                case CIRCLES_GRID:
                    found = findCirclesGrid( view, boardSize, pointbuf );
                    break;
                case ASYMMETRIC_CIRCLES_GRID:
                    found = findCirclesGrid( view, boardSize, pointbuf, CALIB_CB_ASYMMETRIC_GRID );
                    break;
                default:
                    return fprintf( stderr, "Unknown pattern type\n" ), -1;
            }
    
           // improve the found corners' coordinate accuracy
            if( pattern == CHESSBOARD && found) cornerSubPix( viewGray, pointbuf, Size(11,11),
                Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
    
            if( mode == CAPTURING && found &&
               (!capture.isOpened() || clock() - prevTimestamp > delay*1e-3*CLOCKS_PER_SEC) )
            {
                imagePoints.push_back(pointbuf);
                prevTimestamp = clock();
                blink = capture.isOpened();
            }
    
            if(found)
                drawChessboardCorners( view, boardSize, Mat(pointbuf), found );
    
            string msg = mode == CAPTURING ? "100/100" :
                mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
            int baseLine = 0;
            Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
            Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10);
    
            if( mode == CAPTURING )
            {
                if(undistortImage)
                    msg = format( "%d/%d Undist", (int)imagePoints.size(), nframes );
                else
                    msg = format( "%d/%d", (int)imagePoints.size(), nframes );
            }
    
            putText( view, msg, textOrigin, 1, 1,
                     mode != CALIBRATED ? Scalar(0,0,255) : Scalar(0,255,0));
    
            if( blink )
                bitwise_not(view, view);
    
            if( mode == CALIBRATED && undistortImage )
            {
                Mat temp = view.clone();
                undistort(temp, view, cameraMatrix, distCoeffs);
            }
    
            imshow("Image View", view);
            int key = 0xff & waitKey(capture.isOpened() ? 50 : 500);
    
            if( (key & 255) == 27 )
                break;
    
            if( key == 'u' && mode == CALIBRATED )
                undistortImage = !undistortImage;
    
            if( capture.isOpened() && key == 'g' )
            {
                mode = CAPTURING;
                imagePoints.clear();
            }
    
            if( mode == CAPTURING && imagePoints.size() >= (unsigned)nframes )
            {
                if( runAndSave(outputFilename, imagePoints, imageSize,
                           boardSize, pattern, squareSize, aspectRatio,
                           flags, cameraMatrix, distCoeffs,
                           writeExtrinsics, writePoints))
                    mode = CALIBRATED;
                else
                    mode = DETECTION;
                if( !capture.isOpened() )
                    break;
            }
        }
    
        if( !capture.isOpened() && showUndistorted )
        {
            Mat view, rview, map1, map2;
            initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
                                    getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
                                    imageSize, CV_16SC2, map1, map2);
    
            for( i = 0; i < (int)imageList.size(); i++ )
            {
                view = imread(imageList[i], 1);
                if(!view.data)
                    continue;
                //undistort( view, rview, cameraMatrix, distCoeffs, cameraMatrix );
                remap(view, rview, map1, map2, INTER_LINEAR);
                imshow("Image View", rview);
                int c = 0xff & waitKey();
                if( (c & 255) == 27 || c == 'q' || c == 'Q' )
                    break;
            }
        }
    
        return 0;
    }
    

    Introduction

    You can directly use the camera calibration sample program that ships with OpenCV; it is calibration.cpp in the \OpenCV\samples\c directory. The program can calibrate from images read live from a USB camera, from an .avi file, or from images already stored on disk.

    Usage

    Compile and run the program. If no command-line arguments are given, it prints a usage message telling you which arguments the compiled program needs. For example, if your executable is calibration.exe (taking Windows as an example), you can invoke it with a command line such as the following:

    calibration -w 6 -h 8 -s 2 -n 10 -o camera.yml -op -oe [<list_of_views.txt>]


    Command line and parameter description

    Usage: calibration

        -w <board_width>         # the number of inner corners along one board dimension
        -h <board_height>        # the number of inner corners along the other board dimension
        [-n <number_of_frames>]  # the number of frames to use for calibration
                                 # (if not specified, it will be set to the number
                                 #  of board views actually available)
        [-d <delay>]             # a minimum delay in ms between subsequent attempts to capture a next view
                                 # (used only for video capturing)
        [-s <square_size>]       # square size in some user-defined units (1 by default)
        [-o <out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters
        [-op]                    # write detected feature points
        [-oe]                    # write extrinsic parameters
        [-zt]                    # assume zero tangential distortion
        [-a <aspect_ratio>]      # fix aspect ratio (fx/fy)
        [-p]                     # fix the principal point at the center
        [-v]                     # flip the captured images around the horizontal axis
        [input_data]             # input data, one of the following:
                                 #  - a text file with the list of board images
                                 #  - name of video file with a video of the board
                                 # if input_data not specified, a live view from the camera is used
    
    [Figure: sample calibration image]

    In the image above there are 9 inner corners horizontally and 6 vertically, so the corresponding command-line parameters are: -w 9 -h 6

    • In repeated use, omitting the -p option tends to give noticeably larger errors, mainly in the estimates of u0 and v0 (the principal point), so it is recommended to pass -p (the snippet below shows the flag it sets).
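    For reference, the -p option simply adds the CV_CALIB_FIX_PRINCIPAL_POINT flag before calibrateCamera is called (see runCalibration in Code 1). Below is a minimal sketch, assuming objectPoints, imagePoints and imageSize have already been collected as in Code 1:

    // Sketch only: fix the principal point at the image centre, as the -p option does.
    // objectPoints, imagePoints and imageSize are assumed to be filled in already.
    Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
    Mat distCoeffs = Mat::zeros(8, 1, CV_64F);
    vector<Mat> rvecs, tvecs;
    int flags = CV_CALIB_FIX_PRINCIPAL_POINT;          // the flag set by -p
    double rms = calibrateCamera(objectPoints, imagePoints, imageSize,
                                 cameraMatrix, distCoeffs, rvecs, tvecs, flags);
    printf("RMS reprojection error with fixed principal point: %g\n", rms);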


    list_of_views.txt

    This text file lists the images on your computer to be used for calibration.

    view000.png
    view001.png
    #view002.png
    view003.png
    view010.png
    one_extra_view.jpg
    

    In the example above, the image prefixed with a '#' is ignored.

    • On the Windows command line there is a convenient way to generate this text file. In a CMD window, run the following command (assuming all .jpg files in the current directory are to be used for calibration and the generated file is a.txt):
    dir *.jpg /B >> a.txt
    



    When the input is a camera or an .avi file

            When live video from the camera is used as input, the following hot-keys may be used:
                <ESC>, 'q' - quit the program
                'g' - start capturing images
                'u' - switch undistortion on/off
    
    

    The command line used and its result are shown in the figure below:



    During calibration, images with the detected corners drawn on them are displayed continuously.





    Code 2

    #include "opencv2/calib3d/calib3d.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "opencv2/highgui/highgui.hpp"
    
    #include <iostream>
    #include <vector>
    #include <algorithm>
    #include <iterator>
    #include <stdio.h>
    
    using namespace cv;
    using namespace std;
    
    static void help()
    {
        printf( "\nThis code generates an artificial camera and artificial chessboard images,\n"
                "and then calibrates. It is basically test code for calibration that shows\n"
                "how to package calibration points and then calibrate the camera.\n"
                "Usage:\n"
                "./calibration_artificial\n\n");
    }
    namespace cv
    {
    
    /* copy of the class defined in tests/cv/chessboardgenerator.h */
    class ChessBoardGenerator
    {
    public:
        double sensorWidth;
        double sensorHeight;
        size_t squareEdgePointsNum;
        double min_cos;
        mutable double cov;
        Size patternSize;
        int rendererResolutionMultiplier;
    
        ChessBoardGenerator(const Size& patternSize = Size(8, 6));
        Mat operator()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const;
        Size cornersSize() const;
    private:
        void generateEdge(const Point3f& p1, const Point3f& p2, vector<Point3f>& out) const;
        Mat generageChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
            const Point3f& zero, const Point3f& pb1, const Point3f& pb2,
            float sqWidth, float sqHeight, const vector<Point3f>& whole, vector<Point2f>& corners) const;
        void generateBasis(Point3f& pb1, Point3f& pb2) const;
        Point3f generateChessBoardCenter(const Mat& camMat, const Size& imgSize) const;
        Mat rvec, tvec;
    };
    }
    
    
    
    const Size imgSize(800, 600);
    const Size brdSize(8, 7);
    const size_t brds_num = 20;
    
    template<class T> ostream& operator<<(ostream& out, const Mat_<T>& mat)
    {
        for(int j = 0; j < mat.rows; ++j)
            for(int i = 0; i < mat.cols; ++i)
                out << mat(j, i) << " ";
        return out;
    }
    
    
    
    int main()
    {
        help();
        cout << "Initializing background...";
        Mat background(imgSize, CV_8UC3);
        randu(background, Scalar::all(32), Scalar::all(255));
        GaussianBlur(background, background, Size(5, 5), 2);
        cout << "Done" << endl;
    
        cout << "Initializing chess board generator...";
        ChessBoardGenerator cbg(brdSize);
        cbg.rendererResolutionMultiplier = 4;
        cout << "Done" << endl;
    
        /* camera params */
        Mat_<double> camMat(3, 3);
        camMat << 300., 0., background.cols/2., 0, 300., background.rows/2., 0., 0., 1.;
    
        Mat_<double> distCoeffs(1, 5);
        distCoeffs << 1.2, 0.2, 0., 0., 0.;
    
        cout << "Generating chessboards...";
        vector<Mat> boards(brds_num);
        vector<Point2f> tmp;
        for(size_t i = 0; i < brds_num; ++i)
            cout << (boards[i] = cbg(background, camMat, distCoeffs, tmp), i) << " ";
        cout << "Done" << endl;
    
        vector<Point3f> chessboard3D;
        for(int j = 0; j < cbg.cornersSize().height; ++j)
            for(int i = 0; i < cbg.cornersSize().width; ++i)
                chessboard3D.push_back(Point3i(i, j, 0));
    
        /* init points */
        vector< vector<Point3f> > objectPoints;
        vector< vector<Point2f> > imagePoints;
    
        cout << endl << "Finding chessboards' corners...";
        for(size_t i = 0; i < brds_num; ++i)
        {
            cout << i;
            namedWindow("Current chessboard"); imshow("Current chessboard", boards[i]); waitKey(100);
            bool found = findChessboardCorners(boards[i], cbg.cornersSize(), tmp);
            if (found)
            {
                imagePoints.push_back(tmp);
                objectPoints.push_back(chessboard3D);
                cout<< "-found ";
            }
            else
                cout<< "-not-found ";
    
            drawChessboardCorners(boards[i], cbg.cornersSize(), Mat(tmp), found);
            imshow("Current chessboard", boards[i]); waitKey(1000);
        }
        cout << "Done" << endl;
        cvDestroyAllWindows();
    
        Mat camMat_est;
        Mat distCoeffs_est;
        vector<Mat> rvecs, tvecs;
    
        cout << "Calibrating...";
        double rep_err = calibrateCamera(objectPoints, imagePoints, imgSize, camMat_est, distCoeffs_est, rvecs, tvecs);
        cout << "Done" << endl;
    
        cout << endl << "Average Reprojection error: " << rep_err/brds_num/cbg.cornersSize().area() << endl;
        cout << "==================================" << endl;
        cout << "Original camera matrix:\n" << camMat << endl;
        cout << "Original distCoeffs:\n" << distCoeffs << endl;
        cout << "==================================" << endl;
        cout << "Estimated camera matrix:\n" << (Mat_<double>&)camMat_est << endl;
        cout << "Estimated distCoeffs:\n" << (Mat_<double>&)distCoeffs_est << endl;
    
        return 0;
    }
    
    
    //////////////////////////////////////////////////////////////////////////
    
    // Copy of the tests/cv/src/chessboardgenerator code; duplicated here to avoid adding a dependency.
    
    
    ChessBoardGenerator::ChessBoardGenerator(const Size& _patternSize) : sensorWidth(32), sensorHeight(24),
        squareEdgePointsNum(200), min_cos(sqrt(2.f)*0.5f), cov(0.5),
        patternSize(_patternSize), rendererResolutionMultiplier(4), tvec(Mat::zeros(1, 3, CV_32F))
    {
        Rodrigues(Mat::eye(3, 3, CV_32F), rvec);
    }
    
    void cv::ChessBoardGenerator::generateEdge(const Point3f& p1, const Point3f& p2, vector<Point3f>& out) const
    {
        Point3f step = (p2 - p1) * (1.f/squareEdgePointsNum);
        for(size_t n = 0; n < squareEdgePointsNum; ++n)
            out.push_back( p1 + step * (float)n);
    }
    
    Size cv::ChessBoardGenerator::cornersSize() const
    {
        return Size(patternSize.width-1, patternSize.height-1);
    }
    
    struct Mult
    {
        float m;
        Mult(int mult) : m((float)mult) {}
        Point2f operator()(const Point2f& p)const { return p * m; }
    };
    
    void cv::ChessBoardGenerator::generateBasis(Point3f& pb1, Point3f& pb2) const
    {
        RNG& rng = theRNG();
    
        Vec3f n;
        for(;;)
        {
            n[0] = rng.uniform(-1.f, 1.f);
            n[1] = rng.uniform(-1.f, 1.f);
            n[2] = rng.uniform(-1.f, 1.f);
            float len = (float)norm(n);
            n[0]/=len;
            n[1]/=len;
            n[2]/=len;
    
            if (fabs(n[2]) > min_cos)
                break;
        }
    
        Vec3f n_temp = n; n_temp[0] += 100;
        Vec3f b1 = n.cross(n_temp);
        Vec3f b2 = n.cross(b1);
        float len_b1 = (float)norm(b1);
        float len_b2 = (float)norm(b2);
    
        pb1 = Point3f(b1[0]/len_b1, b1[1]/len_b1, b1[2]/len_b1);
        // normalize b2 by its own length (len_b1 == len_b2 here, since n is a unit vector perpendicular to b1)
        pb2 = Point3f(b2[0]/len_b2, b2[1]/len_b2, b2[2]/len_b2);
    }
    
    Mat cv::ChessBoardGenerator::generageChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
                                                    const Point3f& zero, const Point3f& pb1, const Point3f& pb2,
                                                    float sqWidth, float sqHeight, const vector<Point3f>& whole,
                                                    vector<Point2f>& corners) const
    {
        vector< vector<Point> > squares_black;
        for(int i = 0; i < patternSize.width; ++i)
            for(int j = 0; j < patternSize.height; ++j)
                if ( (i % 2 == 0 && j % 2 == 0) || (i % 2 != 0 && j % 2 != 0) )
                {
                    vector<Point3f> pts_square3d;
                    vector<Point2f> pts_square2d;
    
                    Point3f p1 = zero + (i + 0) * sqWidth * pb1 + (j + 0) * sqHeight * pb2;
                    Point3f p2 = zero + (i + 1) * sqWidth * pb1 + (j + 0) * sqHeight * pb2;
                    Point3f p3 = zero + (i + 1) * sqWidth * pb1 + (j + 1) * sqHeight * pb2;
                    Point3f p4 = zero + (i + 0) * sqWidth * pb1 + (j + 1) * sqHeight * pb2;
                    generateEdge(p1, p2, pts_square3d);
                    generateEdge(p2, p3, pts_square3d);
                    generateEdge(p3, p4, pts_square3d);
                    generateEdge(p4, p1, pts_square3d);
    
                    projectPoints( Mat(pts_square3d), rvec, tvec, camMat, distCoeffs, pts_square2d);
                    squares_black.resize(squares_black.size() + 1);
                    vector<Point2f> temp;
                    approxPolyDP(Mat(pts_square2d), temp, 1.0, true);
                    transform(temp.begin(), temp.end(), back_inserter(squares_black.back()), Mult(rendererResolutionMultiplier));
                }
    
        /* calculate corners */
        vector<Point3f> corners3d;
        for(int j = 0; j < patternSize.height - 1; ++j)
            for(int i = 0; i < patternSize.width - 1; ++i)
                corners3d.push_back(zero + (i + 1) * sqWidth * pb1 + (j + 1) * sqHeight * pb2);
        corners.clear();
        projectPoints( Mat(corners3d), rvec, tvec, camMat, distCoeffs, corners);
    
        vector<Point3f> whole3d;
        vector<Point2f> whole2d;
        generateEdge(whole[0], whole[1], whole3d);
        generateEdge(whole[1], whole[2], whole3d);
        generateEdge(whole[2], whole[3], whole3d);
        generateEdge(whole[3], whole[0], whole3d);
        projectPoints( Mat(whole3d), rvec, tvec, camMat, distCoeffs, whole2d);
        vector<Point2f> temp_whole2d;
        approxPolyDP(Mat(whole2d), temp_whole2d, 1.0, true);
    
        vector< vector<Point > > whole_contour(1);
        transform(temp_whole2d.begin(), temp_whole2d.end(),
            back_inserter(whole_contour.front()), Mult(rendererResolutionMultiplier));
    
        Mat result;
        if (rendererResolutionMultiplier == 1)
        {
            result = bg.clone();
            drawContours(result, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
            drawContours(result, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
        }
        else
        {
            Mat tmp;
            resize(bg, tmp, bg.size() * rendererResolutionMultiplier);
            drawContours(tmp, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
            drawContours(tmp, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
            resize(tmp, result, bg.size(), 0, 0, INTER_AREA);
        }
        return result;
    }
    
    Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const
    {
        cov = min(cov, 0.8);
        double fovx, fovy, focalLen;
        Point2d principalPoint;
        double aspect;
        calibrationMatrixValues( camMat, bg.size(), sensorWidth, sensorHeight,
            fovx, fovy, focalLen, principalPoint, aspect);
    
        RNG& rng = theRNG();
    
        float d1 = static_cast<float>(rng.uniform(0.1, 10.0));
        float ah = static_cast<float>(rng.uniform(-fovx/2 * cov, fovx/2 * cov) * CV_PI / 180);
        float av = static_cast<float>(rng.uniform(-fovy/2 * cov, fovy/2 * cov) * CV_PI / 180);
    
        Point3f p;
        p.z = cos(ah) * d1;
        p.x = sin(ah) * d1;
        p.y = p.z * tan(av);
    
        Point3f pb1, pb2;
        generateBasis(pb1, pb2);
    
        float cbHalfWidth = static_cast<float>(norm(p) * sin( min(fovx, fovy) * 0.5 * CV_PI / 180));
        float cbHalfHeight = cbHalfWidth * patternSize.height / patternSize.width;
    
        vector<Point3f> pts3d(4);
        vector<Point2f> pts2d(4);
        for(;;)
        {
            pts3d[0] = p + pb1 * cbHalfWidth + cbHalfHeight * pb2;
            pts3d[1] = p + pb1 * cbHalfWidth - cbHalfHeight * pb2;
            pts3d[2] = p - pb1 * cbHalfWidth - cbHalfHeight * pb2;
            pts3d[3] = p - pb1 * cbHalfWidth + cbHalfHeight * pb2;
    
            /* can remake with better perf */
            projectPoints( Mat(pts3d), rvec, tvec, camMat, distCoeffs, pts2d);
    
            bool inrect1 = pts2d[0].x < bg.cols && pts2d[0].y < bg.rows && pts2d[0].x > 0 && pts2d[0].y > 0;
            bool inrect2 = pts2d[1].x < bg.cols && pts2d[1].y < bg.rows && pts2d[1].x > 0 && pts2d[1].y > 0;
            bool inrect3 = pts2d[2].x < bg.cols && pts2d[2].y < bg.rows && pts2d[2].x > 0 && pts2d[2].y > 0;
            bool inrect4 = pts2d[3].x < bg.cols && pts2d[3].y < bg.rows && pts2d[3].x > 0 && pts2d[3].y > 0;
    
            if ( inrect1 && inrect2 && inrect3 && inrect4)
                break;
    
            cbHalfWidth*=0.8f;
            cbHalfHeight = cbHalfWidth * patternSize.height / patternSize.width;
        }
    
        cbHalfWidth  *= static_cast<float>(patternSize.width)/(patternSize.width + 1);
        cbHalfHeight *= static_cast<float>(patternSize.height)/(patternSize.height + 1);
    
        Point3f zero = p - pb1 * cbHalfWidth - cbHalfHeight * pb2;
        float sqWidth  = 2 * cbHalfWidth/patternSize.width;
        float sqHeight = 2 * cbHalfHeight/patternSize.height;
    
        return generageChessBoard(bg, camMat, distCoeffs, zero, pb1, pb2, sqWidth, sqHeight,  pts3d, corners);
    }
    



    Code 3

    1. First, make a calibration pattern and print it on A4 paper; choose the square size and the number of squares on the chessboard, e.g. 8×6. The pattern I made, shown below, is 8×8.
    2. Then use cvFindChessboardCorners to find the 2D positions of the board corners in the camera image. cvFindChessboardCorners is not very robust and sometimes fails; some image enhancement may be needed.
    3. Compute the real-world (3D) square size. I set it to 21.6 mm, i.e. about two centimetres on the A4 printout.
    4. Then use cvCalibrateCamera2 to compute the intrinsic parameters.
    5. Finally, use cvUndistort2 to correct the image distortion. (A sketch of the equivalent steps with the C++ API follows this list.)
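
    Before the full listing, here is a minimal sketch of the same steps using the C++ API; the listing itself uses the older C API (cvFindChessboardCorners, cvCalibrateCamera2, cvUndistort2). The helper name calibrateFromImages and its file-list argument are hypothetical; the 7×7 inner-corner board and 21.6 mm square size match the constants used in Code 3.

    // Sketch only: find corners, build 3D board points, calibrate, then undistort.
    #include "opencv2/calib3d/calib3d.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include <vector>
    #include <string>
    using namespace cv;

    void calibrateFromImages(const std::vector<std::string>& files)   // hypothetical helper
    {
        Size boardSize(7, 7);        // inner corners, as in Code 3
        float squareSize = 21.6f;    // millimetres, as chosen in step 3
        std::vector<std::vector<Point2f> > imagePoints;
        std::vector<std::vector<Point3f> > objectPoints;
        Size imageSize;

        for (size_t k = 0; k < files.size(); k++)
        {
            Mat gray = imread(files[k], 0);                       // load as grayscale
            if (gray.empty()) continue;
            imageSize = gray.size();

            std::vector<Point2f> corners;
            if (!findChessboardCorners(gray, boardSize, corners)) // step 2: may fail on some frames
                continue;
            cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
                         TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
            imagePoints.push_back(corners);

            std::vector<Point3f> obj;                             // step 3: 3D board points
            for (int i = 0; i < boardSize.height; i++)
                for (int j = 0; j < boardSize.width; j++)
                    obj.push_back(Point3f(j * squareSize, i * squareSize, 0));
            objectPoints.push_back(obj);
        }

        Mat cameraMatrix, distCoeffs;
        std::vector<Mat> rvecs, tvecs;
        calibrateCamera(objectPoints, imagePoints, imageSize,     // step 4: intrinsics + distortion
                        cameraMatrix, distCoeffs, rvecs, tvecs);

        Mat color = imread(files[0]), undistorted;
        undistort(color, undistorted, cameraMatrix, distCoeffs);  // step 5: correct distortion
    }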

    The result is as follows:



    Code:
    #include "stdafx.h"
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    // OpenCV
    #include <cxcore.h>
    #include <cv.h>
    #include <highgui.h>
    #include <cvaux.h>
     
     
    void InitCorners3D(CvMat *Corners3D, CvSize ChessBoardSize, int Nimages, float SquareSize);
    void makeChessBoard();
    int myFindChessboardCorners( const void* image, CvSize pattern_size,
                                 CvPoint2D32f* corners, int* corner_count=NULL,
                                 int flags=CV_CALIB_CB_ADAPTIVE_THRESH );
     
     
    inline int drawCorssMark(IplImage *dst,CvPoint pt)
    /*************************************************
      Function:        drawCorssMark
      Description:     Draw a cross mark at the given point
      Calls:          
      Called By:      
      Input:           RGB image,  pt               
      Output:         
      Return:         
      Others:          TODO: check whether the coordinates go out of bounds
    *************************************************/
    {
     
    	const int cross_len = 4;
    	CvPoint pt1,pt2,pt3,pt4;
    	pt1.x = pt.x;
    	pt1.y = pt.y - cross_len;
    	pt2.x = pt.x;
    	pt2.y = pt.y + cross_len;
    	pt3.x = pt.x - cross_len;
    	pt3.y = pt.y;
    	pt4.x = pt.x + cross_len;
    	pt4.y = pt.y;
     
    	cvLine(dst,pt1,pt2,CV_RGB(0,255,0),2,CV_AA, 0 );	
    	cvLine(dst,pt3,pt4,CV_RGB(0,255,0),2,CV_AA, 0 );
     
    	return 0;
    }
     
    /* declarations for OpenCV */
    IplImage                 *current_frame_rgb,grid;
    IplImage                 *current_frame_gray;
    IplImage                 *chessBoard_Img;
     
    int                       Thresholdness = 120;
     
    int image_width = 320;
    int image_height = 240;
     
    bool verbose = false;
     
    const int ChessBoardSize_w = 7;
    const int ChessBoardSize_h = 7;
    // Calibration stuff
    bool			calibration_done = false;
    const CvSize 	ChessBoardSize = cvSize(ChessBoardSize_w,ChessBoardSize_h);
    //float 			SquareWidth = 21.6f; // actual distance in millimetres; two centimetres on the A4 printout
    float 			SquareWidth = 17; // actual distance for the projected pattern, in millimetres  200
     
    const   int NPoints = ChessBoardSize_w*ChessBoardSize_h;
    const   int NImages = 20; //Number of images to collect 
     
    CvPoint2D32f corners[NPoints*NImages];
    int corner_count[NImages] = {0};
    int captured_frames = 0;
     
    CvMat *intrinsics;
    CvMat *distortion_coeff;
    CvMat *rotation_vectors;
    CvMat *translation_vectors;
    CvMat *object_points;
    CvMat *point_counts;
    CvMat *image_points;
    int find_corners_result =0 ;
     
     
    void on_mouse( int event, int x, int y, int flags, void* param )
    {
     
        if( event == CV_EVENT_LBUTTONDOWN )
        {
    		//calibration_done = true; 
        }
    }
     
     
    int main(int argc, char *argv[])
    {
     
     
      CvFont font;
      cvInitFont( &font, CV_FONT_VECTOR0,5, 5, 0, 7, 8);
     
      intrinsics 		= cvCreateMat(3,3,CV_32FC1);
      distortion_coeff 	= cvCreateMat(1,4,CV_32FC1);
      rotation_vectors 	= cvCreateMat(NImages,3,CV_32FC1);
      translation_vectors 	= cvCreateMat(NImages,3,CV_32FC1);
     
      point_counts 		= cvCreateMat(NImages,1,CV_32SC1);
     
      object_points 	= cvCreateMat(NImages*NPoints,3,CV_32FC1);
      image_points 		= cvCreateMat(NImages*NPoints,2,CV_32FC1);
     
     
      // Function to fill in the real-world points of the checkerboard
      InitCorners3D(object_points, ChessBoardSize, NImages, SquareWidth);
     
     
      CvCapture* capture = 0;
     
     
      if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
    	  capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
      else if( argc == 2 )
    	  capture = cvCaptureFromAVI( argv[1] );
     
      if( !capture )
      {
    	  fprintf(stderr,"Could not initialize capturing...\n");
    	  return -1;
      }
     
     
      // Initialize all of the IplImage structures
      current_frame_rgb = cvCreateImage(cvSize(image_width, image_height), IPL_DEPTH_8U, 3);
     
      IplImage *current_frame_rgb2 = cvCreateImage(cvSize(image_width, image_height), IPL_DEPTH_8U, 3);
      current_frame_gray = cvCreateImage(cvSize(image_width, image_height), IPL_DEPTH_8U, 1);
     
      chessBoard_Img   = cvCreateImage(cvSize(image_width, image_height), IPL_DEPTH_8U, 3);  
      current_frame_rgb2->origin = chessBoard_Img->origin  = current_frame_gray->origin = current_frame_rgb->origin = 1;
     
      makeChessBoard();
     
      cvNamedWindow( "result", 0);
      cvNamedWindow( "Window 0", 0);
      cvNamedWindow( "grid", 0);
      cvMoveWindow( "grid", 100,100);
      cvSetMouseCallback( "Window 0", on_mouse, 0 );  
      cvCreateTrackbar("Thresholdness","Window 0",&Thresholdness, 255,0);
     
      while (!calibration_done)
      {
     
    	while (captured_frames < NImages)
        {
    	  current_frame_rgb = cvQueryFrame( capture );
    	  //current_frame_rgb = cvLoadImage( "c:\\BoardStereoL3.jpg" );
    	  //cvCopy(chessBoard_Img,current_frame_rgb);
     
    	  if( !current_frame_rgb )
    		  break;
     
    	  cvCopy(current_frame_rgb,current_frame_rgb2);
    	  cvCvtColor(current_frame_rgb, current_frame_gray, CV_BGR2GRAY);
    	  //cvThreshold(current_frame_gray,current_frame_gray,Thresholdness,255,CV_THRESH_BINARY);
    	  //cvThreshold(current_frame_gray,current_frame_gray,150,255,CV_THRESH_BINARY_INV);
     
    /*
    	int pos = 1;
    	IplConvKernel* element = 0;
    	const int element_shape = CV_SHAPE_ELLIPSE;
    	element = cvCreateStructuringElementEx( pos*2+1, pos*2+1, pos, pos, element_shape, 0 );
    	cvDilate(current_frame_gray,current_frame_gray,element,1);
    	cvErode(current_frame_gray,current_frame_gray,element,1);
    	cvReleaseStructuringElement(&element);
    */
     
    	find_corners_result = cvFindChessboardCorners(current_frame_gray,
                                              ChessBoardSize,
                                              &corners[captured_frames*NPoints],
                                              &corner_count[captured_frames],
                                              0);
     
     
     
    	cvDrawChessboardCorners(current_frame_rgb2, ChessBoardSize, &corners[captured_frames*NPoints], NPoints, find_corners_result);
     
     
    	cvShowImage("Window 0",current_frame_rgb2);
    	cvShowImage("grid",chessBoard_Img);
     
    	if(find_corners_result==1)
    	{
    		cvWaitKey(2000);
    		cvSaveImage("c:\\hardyinCV.jpg",current_frame_rgb2);
    		captured_frames++;
    	}
    	//cvShowImage("result",current_frame_gray);
     
    	intrinsics->data.fl[0] = 256.8093262;   //fx		
    	intrinsics->data.fl[2] = 160.2826538;   //cx
    	intrinsics->data.fl[4] = 254.7511139;   //fy
    	intrinsics->data.fl[5] = 127.6264572;   //cy
     
    	intrinsics->data.fl[1] = 0;   
    	intrinsics->data.fl[3] = 0;   
    	intrinsics->data.fl[6] = 0;   
    	intrinsics->data.fl[7] = 0;   
    	intrinsics->data.fl[8] = 1;   	
     
    	distortion_coeff->data.fl[0] = -0.193740;  //k1
    	distortion_coeff->data.fl[1] = -0.378588;  //k2
    	distortion_coeff->data.fl[2] = 0.028980;   //p1
    	distortion_coeff->data.fl[3] = 0.008136;   //p2
     
    	cvWaitKey(40);
    	find_corners_result = 0;
        }   
    	//if (find_corners_result !=0)
    	{
     
    		printf("\n");
     
    		cvSetData( image_points, corners, sizeof(CvPoint2D32f));
    		cvSetData( point_counts, &corner_count, sizeof(int));
     
     
    		cvCalibrateCamera2( object_points,
    			image_points,
    			point_counts,
    			cvSize(image_width,image_height),
    			intrinsics,
    			distortion_coeff,
    			rotation_vectors,
    			translation_vectors,
    			0);
     
     
    		// [fx 0 cx; 0 fy cy; 0 0 1].
    		cvUndistort2(current_frame_rgb,current_frame_rgb,intrinsics,distortion_coeff);
    		cvShowImage("result",current_frame_rgb);
     
     
    		float intr[3][3] = {0.0};
    		float dist[4] = {0.0};
    		float tranv[3] = {0.0};
    		float rotv[3] = {0.0};
     
    		for ( int i = 0; i < 3; i++)
    		{
    			for ( int j = 0; j < 3; j++)
    			{
    				intr[i][j] = ((float*)(intrinsics->data.ptr + intrinsics->step*i))[j];
    			}
    			dist[i] = ((float*)(distortion_coeff->data.ptr))[i];
    			tranv[i] = ((float*)(translation_vectors->data.ptr))[i];
    			rotv[i] = ((float*)(rotation_vectors->data.ptr))[i];
    		}
    		dist[3] = ((float*)(distortion_coeff->data.ptr))[3];
     
    		printf("-----------------------------------------\n");
    		printf("INTRINSIC MATRIX: \n");
    		printf("[ %6.4f %6.4f %6.4f ] \n", intr[0][0], intr[0][1], intr[0][2]);
    		printf("[ %6.4f %6.4f %6.4f ] \n", intr[1][0], intr[1][1], intr[1][2]);
    		printf("[ %6.4f %6.4f %6.4f ] \n", intr[2][0], intr[2][1], intr[2][2]);
    		printf("-----------------------------------------\n");
    		printf("DISTORTION VECTOR: \n");
    		printf("[ %6.4f %6.4f %6.4f %6.4f ] \n", dist[0], dist[1], dist[2], dist[3]);
    		printf("-----------------------------------------\n");
    		printf("ROTATION VECTOR: \n");
    		printf("[ %6.4f %6.4f %6.4f ] \n", rotv[0], rotv[1], rotv[2]);
    		printf("TRANSLATION VECTOR: \n");
    		printf("[ %6.4f %6.4f %6.4f ] \n", tranv[0], tranv[1], tranv[2]);
    		printf("-----------------------------------------\n");
     
    		cvWaitKey(0);
     
    		calibration_done = true;      
    	}
     
      }
     
      cvDestroyAllWindows();   // close windows before exiting (unreachable after exit(0) in the original)
      return 0;
    }
     
    void InitCorners3D(CvMat *Corners3D, CvSize ChessBoardSize, int NImages, float SquareSize)
    {
      int CurrentImage = 0;
      int CurrentRow = 0;
      int CurrentColumn = 0;
      int NPoints = ChessBoardSize.height*ChessBoardSize.width;
      float * temppoints = new float[NImages*NPoints*3];
     
      // for now, assuming we're row-scanning
      for (CurrentImage = 0 ; CurrentImage < NImages ; CurrentImage++)
      {
        for (CurrentRow = 0; CurrentRow < ChessBoardSize.height; CurrentRow++)
        {
          for (CurrentColumn = 0; CurrentColumn < ChessBoardSize.width; CurrentColumn++)
          {
    		  temppoints[(CurrentImage*NPoints*3)+(CurrentRow*ChessBoardSize.width + CurrentColumn)*3]=(float)CurrentRow*SquareSize;
    		  temppoints[(CurrentImage*NPoints*3)+(CurrentRow*ChessBoardSize.width + CurrentColumn)*3+1]=(float)CurrentColumn*SquareSize;
    		  temppoints[(CurrentImage*NPoints*3)+(CurrentRow*ChessBoardSize.width + CurrentColumn)*3+2]=0.f;
          }
        }
      }
      (*Corners3D) = cvMat(NImages*NPoints,3,CV_32FC1, temppoints);
    }
     
    int myFindChessboardCorners( const void* image, CvSize pattern_size,
                                 CvPoint2D32f* corners, int* corner_count,
                                 int flags )
     
    {
     
     
    	IplImage* eig = cvCreateImage( cvGetSize(image), 32, 1 );
    	IplImage* temp = cvCreateImage( cvGetSize(image), 32, 1 );
    	double quality = 0.01;
    	double min_distance = 5;
    	int win_size =10;
     
    	int count = pattern_size.width * pattern_size.height;
    	cvGoodFeaturesToTrack( image, eig, temp, corners, &count,
    		quality, min_distance, 0, 3, 0, 0.04 );
    	cvFindCornerSubPix( image, corners, count,
    		cvSize(win_size,win_size), cvSize(-1,-1),
    		cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
     
    	cvReleaseImage( &eig );
    	cvReleaseImage( &temp );
     
    	return 1;
    }
     
    void makeChessBoard()
    {
     
      CvScalar e; 
      e.val[0] =255;
      e.val[1] =255;
      e.val[2] =255;
      cvSet(chessBoard_Img,e,0);
      for(int i = 0;i<ChessBoardSize.width+1;i++)
    	  for(int j = 0;j<ChessBoardSize.height+1;j++)
    	  {
    		  int w =(image_width)/2/(ChessBoardSize.width);
    		  int h = w; //(image_height)/2/(ChessBoardSize.height);
     
    		  int ii = i+1;
    		  int iii = ii+1;
    		  int jj =j+1;
    		  int jjj =jj+1;
    		  int s_x = image_width/6;		  
     
    		if((i+j)%2==1)
    		   cvRectangle( chessBoard_Img, cvPoint(w*i+s_x,h*j+s_x),cvPoint(w*ii-1+s_x,h*jj-1+s_x), CV_RGB(0,0,0),CV_FILLED, 8, 0 );
    	  }
    }


    Code 4

    /*
     * 3calibration.cpp -- Calibrate 3 cameras in a horizontal line together.
     */
    
    #include "opencv2/calib3d/calib3d.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "opencv2/highgui/highgui.hpp"
    
    #include <stdio.h>
    #include <string.h>
    #include <time.h>
    
    using namespace cv;
    using namespace std;
    
    enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
    
    static void help()
    {
            printf( "\nThis is a camera calibration sample that calibrates 3 horizontally placed cameras together.\n"
                   "Usage: 3calibration\n"
                   "     -w <board_width>         # the number of inner corners per one of board dimension\n"
                   "     -h <board_height>        # the number of inner corners per another board dimension\n"
                   "     [-s <squareSize>]       # square size in some user-defined units (1 by default)\n"
                   "     [-o <out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
                   "     [-zt]                    # assume zero tangential distortion\n"
                   "     [-a <aspectRatio>]      # fix aspect ratio (fx/fy)\n"
                   "     [-p]                     # fix the principal point at the center\n"
                   "     [input_data]             # input data - text file with a list of the images of the board\n"
                   "\n" );
    
    }
    
    static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners)
    {
        corners.resize(0);
    
        for( int i = 0; i < boardSize.height; i++ )
            for( int j = 0; j < boardSize.width; j++ )
                corners.push_back(Point3f(float(j*squareSize),
                                          float(i*squareSize), 0));
    }
    
    static bool run3Calibration( vector<vector<Point2f> > imagePoints1,
                                vector<vector<Point2f> > imagePoints2,
                                vector<vector<Point2f> > imagePoints3,
                                Size imageSize, Size boardSize,
                                float squareSize, float aspectRatio,
                                int flags,
                                Mat& cameraMatrix1, Mat& distCoeffs1,
                                Mat& cameraMatrix2, Mat& distCoeffs2,
                                Mat& cameraMatrix3, Mat& distCoeffs3,
                                Mat& R12, Mat& T12, Mat& R13, Mat& T13)
    {
        int c, i;
    
        // step 1: calibrate each camera individually
        vector<vector<Point3f> > objpt(1);
        vector<vector<Point2f> > imgpt;
        calcChessboardCorners(boardSize, squareSize, objpt[0]);
        vector<Mat> rvecs, tvecs;
    
        for( c = 1; c <= 3; c++ )
        {
            const vector<vector<Point2f> >& imgpt0 = c == 1 ? imagePoints1 : c == 2 ? imagePoints2 : imagePoints3;
            imgpt.clear();
            int N = 0;
            for( i = 0; i < (int)imgpt0.size(); i++ )
                if( !imgpt0[i].empty() )
                {
                    imgpt.push_back(imgpt0[i]);
                    N += (int)imgpt0[i].size();
                }
    
            if( imgpt.size() < 3 )
            {
                printf("Error: not enough views for camera %d\n", c);
                return false;
            }
    
            objpt.resize(imgpt.size(),objpt[0]);
    
            Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
            if( flags & CV_CALIB_FIX_ASPECT_RATIO )
                cameraMatrix.at<double>(0,0) = aspectRatio;
    
            Mat distCoeffs = Mat::zeros(5, 1, CV_64F);
    
            double err = calibrateCamera(objpt, imgpt, imageSize, cameraMatrix,
                            distCoeffs, rvecs, tvecs,
                            flags|CV_CALIB_FIX_K3/*|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5|CV_CALIB_FIX_K6*/);
            bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
            if(!ok)
            {
                printf("Error: camera %d was not calibrated\n", c);
                return false;
            }
            printf("Camera %d calibration reprojection error = %g\n", c, sqrt(err/N));
    
            if( c == 1 )
                cameraMatrix1 = cameraMatrix, distCoeffs1 = distCoeffs;
            else if( c == 2 )
                cameraMatrix2 = cameraMatrix, distCoeffs2 = distCoeffs;
            else
                cameraMatrix3 = cameraMatrix, distCoeffs3 = distCoeffs;
        }
    
        vector<vector<Point2f> > imgpt_right;
    
        // step 2: calibrate (1,2) and (3,2) pairs
        for( c = 2; c <= 3; c++ )
        {
            const vector<vector<Point2f> >& imgpt0 = c == 2 ? imagePoints2 : imagePoints3;
    
            imgpt.clear();
            imgpt_right.clear();
            int N = 0;
    
            for( i = 0; i < (int)std::min(imagePoints1.size(), imgpt0.size()); i++ )
                if( !imagePoints1[i].empty() && !imgpt0[i].empty() )
                {
                    imgpt.push_back(imagePoints1[i]);
                    imgpt_right.push_back(imgpt0[i]);
                    N += (int)imgpt0[i].size();
                }
    
            if( imgpt.size() < 3 )
            {
                printf("Error: not enough shared views for cameras 1 and %d\n", c);
                return false;
            }
    
            objpt.resize(imgpt.size(),objpt[0]);
            Mat cameraMatrix = c == 2 ? cameraMatrix2 : cameraMatrix3;
            Mat distCoeffs = c == 2 ? distCoeffs2 : distCoeffs3;
            Mat R, T, E, F;
            double err = stereoCalibrate(objpt, imgpt, imgpt_right, cameraMatrix1, distCoeffs1,
                                         cameraMatrix, distCoeffs,
                                         imageSize, R, T, E, F,
                                         TermCriteria(TermCriteria::COUNT, 30, 0),
                                         CV_CALIB_FIX_INTRINSIC);
            printf("Pair (1,%d) calibration reprojection error = %g\n", c, sqrt(err/(N*2)));
            if( c == 2 )
            {
                cameraMatrix2 = cameraMatrix;
                distCoeffs2 = distCoeffs;
                R12 = R; T12 = T;
            }
            else
            {
                R13 = R; T13 = T;
            }
        }
    
        return true;
    }
    
    static bool readStringList( const string& filename, vector<string>& l )
    {
        l.resize(0);
        FileStorage fs(filename, FileStorage::READ);
        if( !fs.isOpened() )
            return false;
        FileNode n = fs.getFirstTopLevelNode();
        if( n.type() != FileNode::SEQ )
            return false;
        FileNodeIterator it = n.begin(), it_end = n.end();
        for( ; it != it_end; ++it )
            l.push_back((string)*it);
        return true;
    }
    
    
    int main( int argc, char** argv )
    {
        int i, k;
        int flags = 0;
        Size boardSize, imageSize;
        float squareSize = 1.f, aspectRatio = 1.f;
        const char* outputFilename = "out_camera_data.yml";
        const char* inputFilename = 0;
    
        vector<vector<Point2f> > imgpt[3];
        vector<string> imageList;
    
        if(argc < 2)
        {
            help();
            return 1;
        }
    
    
        for( i = 1; i < argc; i++ )
        {
            const char* s = argv[i];
            if( strcmp( s, "-w" ) == 0 )
            {
                if( sscanf( argv[++i], "%u", &boardSize.width ) != 1 || boardSize.width <= 0 )
                    return fprintf( stderr, "Invalid board width\n" ), -1;
            }
            else if( strcmp( s, "-h" ) == 0 )
            {
                if( sscanf( argv[++i], "%u", &boardSize.height ) != 1 || boardSize.height <= 0 )
                    return fprintf( stderr, "Invalid board height\n" ), -1;
            }
            else if( strcmp( s, "-s" ) == 0 )
            {
                if( sscanf( argv[++i], "%f", &squareSize ) != 1 || squareSize <= 0 )
                    return fprintf( stderr, "Invalid board square width\n" ), -1;
            }
            else if( strcmp( s, "-a" ) == 0 )
            {
                if( sscanf( argv[++i], "%f", &aspectRatio ) != 1 || aspectRatio <= 0 )
                    return printf("Invalid aspect ratio\n" ), -1;
                flags |= CV_CALIB_FIX_ASPECT_RATIO;
            }
            else if( strcmp( s, "-zt" ) == 0 )
            {
                flags |= CV_CALIB_ZERO_TANGENT_DIST;
            }
            else if( strcmp( s, "-p" ) == 0 )
            {
                flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
            }
            else if( strcmp( s, "-o" ) == 0 )
            {
                outputFilename = argv[++i];
            }
            else if( s[0] != '-' )
            {
                inputFilename = s;
            }
            else
                return fprintf( stderr, "Unknown option %s", s ), -1;
        }
    
        if( !inputFilename ||
           !readStringList(inputFilename, imageList) ||
           imageList.size() == 0 || imageList.size() % 3 != 0 )
        {
            printf("Error: the input image list is not specified, or can not be read, or the number of files is not divisible by 3\n");
            return -1;
        }
    
        Mat view, viewGray;
        Mat cameraMatrix[3], distCoeffs[3], R[3], P[3], R12, T12;
        for( k = 0; k < 3; k++ )
        {
            cameraMatrix[k] = Mat_<double>::eye(3,3);
            cameraMatrix[k].at<double>(0,0) = aspectRatio;
            cameraMatrix[k].at<double>(1,1) = 1;
            distCoeffs[k] = Mat_<double>::zeros(5,1);
        }
        Mat R13=Mat_<double>::eye(3,3), T13=Mat_<double>::zeros(3,1);
    
        FileStorage fs;
        namedWindow( "Image View", 0 );
    
        for( k = 0; k < 3; k++ )
            imgpt[k].resize(imageList.size()/3);
    
        for( i = 0; i < (int)(imageList.size()/3); i++ )
        {
            for( k = 0; k < 3; k++ )
            {
                // map the position of the image within each triple to the camera index used internally
                int k1 = k == 0 ? 2 : k == 1 ? 0 : 1;
                printf("%s\n", imageList[i*3+k].c_str());
                view = imread(imageList[i*3+k], 1);
    
                if(view.data)
                {
                    vector<Point2f> ptvec;
                    imageSize = view.size();
                    cvtColor(view, viewGray, COLOR_BGR2GRAY);
                    bool found = findChessboardCorners( view, boardSize, ptvec, CV_CALIB_CB_ADAPTIVE_THRESH );
    
                    drawChessboardCorners( view, boardSize, Mat(ptvec), found );
                    if( found )
                    {
                        imgpt[k1][i].resize(ptvec.size());
                        std::copy(ptvec.begin(), ptvec.end(), imgpt[k1][i].begin());
                    }
                    //imshow("view", view);
                    //int c = waitKey(0) & 255;
                    //if( c == 27 || c == 'q' || c == 'Q' )
                    //    return -1;
                }
            }
        }
    
        printf("Running calibration ...\n");
    
        run3Calibration(imgpt[0], imgpt[1], imgpt[2], imageSize,
                        boardSize, squareSize, aspectRatio, flags|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5,
                        cameraMatrix[0], distCoeffs[0],
                        cameraMatrix[1], distCoeffs[1],
                        cameraMatrix[2], distCoeffs[2],
                        R12, T12, R13, T13);
    
        fs.open(outputFilename, CV_STORAGE_WRITE);
    
        fs << "cameraMatrix1" << cameraMatrix[0];
        fs << "cameraMatrix2" << cameraMatrix[1];
        fs << "cameraMatrix3" << cameraMatrix[2];
    
        fs << "distCoeffs1" << distCoeffs[0];
        fs << "distCoeffs2" << distCoeffs[1];
        fs << "distCoeffs3" << distCoeffs[2];
    
        fs << "R12" << R12;
        fs << "T12" << T12;
        fs << "R13" << R13;
        fs << "T13" << T13;
    
        fs << "imageWidth" << imageSize.width;
        fs << "imageHeight" << imageSize.height;
    
        Mat Q;
    
        // step 3: find rectification transforms
        double ratio = rectify3Collinear(cameraMatrix[0], distCoeffs[0], cameraMatrix[1],
                 distCoeffs[1], cameraMatrix[2], distCoeffs[2],
                 imgpt[0], imgpt[2],
                 imageSize, R12, T12, R13, T13,
                 R[0], R[1], R[2], P[0], P[1], P[2], Q, -1.,
                 imageSize, 0, 0, CV_CALIB_ZERO_DISPARITY);
        Mat map1[3], map2[3];
    
        fs << "R1" << R[0];
        fs << "R2" << R[1];
        fs << "R3" << R[2];
    
        fs << "P1" << P[0];
        fs << "P2" << P[1];
        fs << "P3" << P[2];
    
        fs << "disparityRatio" << ratio;
        fs.release();
    
        printf("Disparity ratio = %g\n", ratio);
    
        for( k = 0; k < 3; k++ )
            initUndistortRectifyMap(cameraMatrix[k], distCoeffs[k], R[k], P[k], imageSize, CV_16SC2, map1[k], map2[k]);
    
        Mat canvas(imageSize.height, imageSize.width*3, CV_8UC3), small_canvas;
        destroyWindow("Image View");
        canvas = Scalar::all(0);
    
        for( i = 0; i < (int)(imageList.size()/3); i++ )
        {
            canvas = Scalar::all(0);
            for( k = 0; k < 3; k++ )
            {
                // k1 maps the position within the triple to the camera index (and its rectification maps);
                // k2 selects which third of the canvas this view is drawn into
                int k1 = k == 0 ? 2 : k == 1 ? 0 : 1;
                int k2 = k == 0 ? 1 : k == 1 ? 0 : 2;
                view = imread(imageList[i*3+k], 1);
    
                if(!view.data)
                    continue;
    
                Mat rview = canvas.colRange(k2*imageSize.width, (k2+1)*imageSize.width);
                remap(view, rview, map1[k1], map2[k1], CV_INTER_LINEAR);
            }
            printf("%s %s %s\n", imageList[i*3].c_str(), imageList[i*3+1].c_str(), imageList[i*3+2].c_str());
            resize( canvas, small_canvas, Size(1500, 1500/3) );
            for( k = 0; k < small_canvas.rows; k += 16 )
                line(small_canvas, Point(0, k), Point(small_canvas.cols, k), Scalar(0,255,0), 1);
            imshow("rectified", small_canvas);
            int c = waitKey(0);
            if( c == 27 || c == 'q' || c == 'Q' )
                break;
        }
    
        return 0;
    }
    




  • An OpenCV camera calibration program

    2018-05-04 22:05:47
  • OpenCV camera calibration

    2021-05-21 22:16:15
    OpenCV camera calibration: the camera and the pinhole camera model, camera parameters, calibration code and a summary (the full article follows below).


    I. The camera and the pinhole camera model

    1. Camera model

    Cameras built with modern technology are precisely engineered, cleverly designed consumer products, and their optical structure is far more complex than when the camera was first invented.
    Typical optical layout of a single-lens reflex camera:
    (figure: SLR camera structure)
    Among the many camera models, the pinhole camera model, also called the projective camera model, is comparatively simple and widely used. Put simply, it reduces the camera to plain pinhole imaging; as one would expect, this simplification is not suitable when high accuracy is required or when the camera uses a special lens.
    The pinhole imaging principle:
    (figure: pinhole imaging)
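    In the ideal pinhole model the projection reduces to similar triangles. A minimal statement of it (the standard textbook form, not anything specific to this article): a point with camera-frame coordinates (X, Y, Z) maps to image-plane coordinates

    x = f * X / Z,    y = f * Y / Z

    where f is the focal length. Everything that follows (the calibration matrix, distortion) is layered on top of this relation.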

    2. Introducing a lens

    The bare pinhole model does not account for a lens. In practice, a lens made of one or more glass elements is what allows a camera based on the pinhole principle to form a sharp image while keeping the image bright, so we need to introduce a lens into the model.
    Thin-lens imaging principle:
    (figure: thin-lens imaging)
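    For reference, the thin-lens relation behind the figure above (the standard Gaussian form; sign conventions vary between texts) is

    1/f = 1/u + 1/v

    where u is the object distance, v the image distance, and f the focal length of the lens.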
    However, the lens also brings new problems: defocus and distortion.
    The dominant effect is usually radial distortion, i.e. rays passing far from the center of the lens are bent more strongly than rays passing near the center. Radial distortion is further divided into barrel distortion, which tends to appear at short-to-medium focal lengths and close range, and pincushion distortion, which tends to appear at long focal lengths and long range.
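    Concretely, the model behind OpenCV's default set of five distortion coefficients (k1, k2, p1, p2, k3) is the following: with (x, y) the ideal normalized image coordinates and r^2 = x^2 + y^2, the distorted coordinates are

    x_distorted = x * (1 + k1*r^2 + k2*r^4 + k3*r^6) + 2*p1*x*y + p2*(r^2 + 2*x^2)
    y_distorted = y * (1 + k1*r^2 + k2*r^4 + k3*r^6) + p1*(r^2 + 2*y^2) + 2*p2*x*y

    The k terms are the radial part discussed above; the p terms model tangential distortion caused by the lens not being perfectly parallel to the sensor.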


    II. Camera parameters

    1. Coordinate-system conventions

    We adopt three coordinate systems, plus the camera matrix that links them:
    1. world coordinates: X

    2. camera coordinates: Xc

    3. image (pixel) coordinates: x

    4. camera matrix: P

    2. Projection from the image plane to the pixel plane

    Take a point in 3D space and pass a plane through it parallel to the pixel plane; that plane is the image plane. Let the 3D point be P with homogeneous coordinates X, and let its projection be the image point P' with image coordinates x.
    Pinhole camera model:
    (figure: pinhole camera model)
    In the pinhole camera model, the relation between the image (pixel) coordinates x and the homogeneous coordinates X of the 3D point is:
    λx = PX
    where λ is the inverse depth of the 3D point and P is the camera matrix, which can be decomposed as:
    P = K[R|t]
    R is a rotation matrix describing the camera's orientation, t is a 3D translation vector describing the position of the camera center, and the intrinsic calibration matrix K describes the camera's projection properties. The calibration matrix depends only on the camera itself and can usually be written as:
    (formula image: general calibration matrix K)
    The focal length f is the distance between the image plane and the camera center; s is the skew parameter and α is the aspect-ratio parameter.
    When the pixel array on the sensor is not skewed and the pixels are square, we can set s = 0 and α = 1, and the calibration matrix simplifies to:
    (formula image: simplified calibration matrix K)
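    Written out explicitly (one common convention; texts differ on which diagonal entry carries the aspect ratio α), the two matrices referred to above are

    K = \begin{bmatrix} \alpha f & s & c_x \\ 0 & f & c_y \\ 0 & 0 & 1 \end{bmatrix}
    and, with s = 0 and α = 1,
    K = \begin{bmatrix} f & 0 & c_x \\ 0 & f & c_y \\ 0 & 0 & 1 \end{bmatrix}

    where (c_x, c_y) is the principal point, i.e. the optical center expressed in pixel coordinates.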

    III. Camera calibration

    The calibration images used in the experiment are shown below:
    (figure: calibration images)

    The code is as follows:

    import cv2
    import numpy as np
    import glob
    
    # Find the chessboard corners
    # Termination criteria for the sub-pixel corner refinement
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # Chessboard pattern size
    w = 7   # number of inner corners per row; inner corners are the ones shared with neighbouring squares
    h = 7
    
    # Chessboard points in world coordinates, e.g. (0,0,0), (1,0,0), (2,0,0) ...,
    # all with Z = 0, so only the first two columns are filled in
    objp = np.zeros((w*h,3), np.float32)
    objp[:,:2] = np.mgrid[0:w,0:h].T.reshape(-1,2)
    # Pairs of world coordinates and image coordinates of the chessboard corners
    objpoints = [] # 3D points in the world coordinate system
    imgpoints = [] # 2D points in the image plane
    
    images = glob.glob('picture/*.jpg')
    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners
        # arguments: chessboard image (8-bit grayscale or color), pattern size, output corner buffer
        ret, corners = cv2.findChessboardCorners(gray, (w,h), None)
        # If enough corners were found, store them
        if ret == True:
            # Refine the corners to sub-pixel accuracy
            # arguments: image, initial corner positions, search window of size 2*winSize+1, dead zone, termination criteria
            corners = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
            objpoints.append(objp)
            imgpoints.append(corners)
            # Draw the detected corners on the image
            cv2.drawChessboardCorners(img, (w,h), corners, ret)
            cv2.imshow('findCorners', img)
            cv2.waitKey(1000)
    cv2.destroyAllWindows()
    # Calibration and undistortion
    # inputs: world coordinates, pixel coordinates, image size, optional initial 3x3 camera matrix and distortion coefficients
    # outputs: overall reprojection error, camera matrix, distortion coefficients, rotation vectors, translation vectors
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    # mtx:   intrinsic camera matrix
    # dist:  distortion coefficients
    # rvecs: rotation vectors (extrinsic)
    # tvecs: translation vectors (extrinsic)
    print("ret:", ret)
    print("mtx:\n", mtx)      # intrinsic matrix
    print("dist:\n", dist)    # distortion coefficients = (k_1, k_2, p_1, p_2, k_3)
    print("rvecs:\n", rvecs)  # rotation vectors (extrinsic)
    print("tvecs:\n", tvecs)  # translation vectors (extrinsic)
    # Undistortion
    img2 = cv2.imread('picture/6.jpg')
    h,w = img2.shape[:2]
    # With the intrinsics and distortion coefficients known, we can refine the camera matrix
    # with cv2.getOptimalNewCameraMatrix() before undistorting, controlled by the free scaling
    # parameter alpha. With alpha = 0 it returns a camera matrix for a cropped image with the
    # unwanted pixels removed; with alpha = 1 all source pixels are retained (with extra black
    # pixels), and a ROI is returned that can be used to crop them away.
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 0, (w,h)) # free scaling parameter
    
    dst = cv2.undistort(img2, mtx, dist, None, newcameramtx)
    # Crop the image using the ROI obtained above
    x,y,w,h = roi
    dst = dst[y:y+h, x:x+w]
    cv2.imwrite('calibresult.jpg', dst)
    
    # Reprojection error
    # The reprojection error lets us judge how good the result is: the closer to 0, the better.
    # Using the camera matrix, distortion coefficients, rotation and translation vectors computed above,
    # cv2.projectPoints() projects the 3D object points back into the image; we then measure the distance
    # between the reprojected points and the detected ones, and average over all calibration images.
    total_error = 0
    for i in range(len(objpoints)):
        imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
        error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2)/len(imgpoints2)
        total_error += error
    print("total error: ", total_error/len(objpoints))
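    For reference, the quantity printed as "total error" above is (under the conventions of this snippet, not the RMS value reported by calibrateCamera itself)

    E_total = (1/M) * \sum_i ( ||p_i - \hat{p}_i||_2 / N_i )

    where, for view i, p_i are the detected corners, \hat{p}_i their reprojections, N_i the number of corners in that view, and M the number of calibration images.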
    

  • A chat about OpenCV camera calibration

    2020-08-28 07:25:14
    A detailed introduction to OpenCV camera calibration, i.e. the process of obtaining the camera parameters; it has some reference value for interested readers.
  • OpenCV camera calibration: calibration with a circle-grid target

    2019-03-27 14:56:55
    This article describes calibration with a circle-grid target in OpenCV and compares the results against a Halcon calibration, concluding that the OpenCV calibration algorithm is fairly robust to image quality and achieves high accuracy, which in turn reflects the demands that Halcon places on...

    OpenCV camera calibration: calibration with a circle-grid target

    0. Preface

      The calibration patterns used for camera calibration in OpenCV include chessboards, symmetric and asymmetric circle grids, ArUco boards and ChArUco boards. The official OpenCV examples use the chessboard pattern because it is simple and fast to use and its accuracy meets the needs of typical applications. For scenarios that demand higher calibration accuracy, a circle-grid pattern is generally used instead. This article explains how to calibrate a camera with circle-grid patterns (symmetric and asymmetric) and compares the OpenCV calibration results against Halcon calibration results.

    1. The calibration pattern

      The circle-grid calibration patterns used in OpenCV are shown in Figure 1:
    (figure 1: symmetric and asymmetric circle-grid patterns)
    In OpenCV, the function used with circle-grid patterns is cv::findCirclesGrid(). Its prototype is as follows:
     bool cv::findCirclesGrid(              // returns true if the circle centers were found
         cv::InputArray image,              // input calibration image, 8-bit single-channel or 3-channel
         cv::Size patternSize,              // size of the calibration pattern (circles per row and column)
         cv::OutputArray centers,           // output array of detected circle centers
         int flags,                         // cv::CALIB_CB_SYMMETRIC_GRID for the symmetric pattern, cv::CALIB_CB_ASYMMETRIC_GRID for the asymmetric one
         const cv::Ptr<cv::FeatureDetector>& blobDetector = SimpleBlobDetector::create()
     );
      For the asymmetric circle-grid pattern shown in Figure 1, width = 11 and height = 6. When computing the world coordinates of the circle centers on the pattern, the squareSize parameter is the center-to-center distance marked in Figure 1. The circle radius can be chosen freely, because extracting the circle centers does not depend on it (unlike Halcon calibration, where the circle radius is a known parameter of the calibration description file). The center-to-center distance is usually taken as roughly 4 times the circle diameter. A code sketch of the detection call and of the object-point convention follows below.
      Figure 2 shows the calibration board used in this article: a high-precision aluminium board with an accuracy of ±0.01 mm, a 200x200 mm standard Halcon calibration board whose circles have a diameter of 12.5 mm and a center-to-center distance of 25 mm.
    (figure 2: the 200x200 mm Halcon calibration board used in this article)
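    To make the detection call and the object-point convention described above concrete, here is a minimal sketch (the file name, the return codes and the helper-function name are placeholders of mine, not part of this article's program; the full program follows in the next section):

    #include <opencv2/opencv.hpp>
    #include <vector>
    
    using namespace cv;
    using namespace std;
    
    // World coordinates of the circle centers on the Z = 0 plane.
    // For the symmetric grid the centers form a regular lattice; for the
    // asymmetric grid every other row is shifted by half a period, which is
    // why the OpenCV samples use (2*j + i%2)*squareSize for the X coordinate.
    static vector<Point3f> circleGridObjectPoints(Size boardSize, float squareSize, bool asymmetric)
    {
        vector<Point3f> pts;
        for (int i = 0; i < boardSize.height; i++)
            for (int j = 0; j < boardSize.width; j++)
                pts.push_back(asymmetric
                    ? Point3f((2*j + i % 2)*squareSize, i*squareSize, 0.f)
                    : Point3f(j*squareSize, i*squareSize, 0.f));
        return pts;
    }
    
    int main()
    {
        Size boardSize(7, 7);            // symmetric 7x7 grid, as used later in this article
        float squareSize = 0.025f;       // center-to-center distance, in meters
        Mat img = imread("board.png");   // placeholder file name
        if (img.empty())
            return -1;
    
        vector<Point2f> centers;
        bool found = findCirclesGrid(img, boardSize, centers, CALIB_CB_SYMMETRIC_GRID);
        if (found)
        {
            // visualize the detected centers; despite its name the function also works for circle grids
            drawChessboardCorners(img, boardSize, Mat(centers), found);
            vector<Point3f> objectPts = circleGridObjectPoints(boardSize, squareSize, false);
            // objectPts and centers now form one view's 3D-2D correspondence pair,
            // to be accumulated over all views and passed to calibrateCamera().
        }
        return found ? 0 : 1;
    }

    For the asymmetric 11x6 pattern of Figure 1 one would pass CALIB_CB_ASYMMETRIC_GRID to findCirclesGrid and asymmetric = true to the helper instead.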

    2. Calibration with OpenCV

      The calibration in this article is done offline: the camera first captures N images, and the calibration program then reads them from disk. To ensure calibration accuracy, it is recommended to capture 10 or more views, moving the board so that it covers as much of the camera's field of view as possible.
      The code below for calibrating with a circle-grid pattern was obtained by modifying the official OpenCV camera calibration sample. Since the full code is close to 500 lines, some header files, explanatory text and function bodies are omitted to keep it short. For the omitted parts, see OpenCv/sources/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp.

    #include "stdafx.h"
    //the various header files are omitted here
    using namespace cv;
    using namespace std;
    //the help() function is omitted here
    enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
    enum Pattern { CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };
    
    //function that computes the reprojection errors
    static double computeReprojectionErrors(
    	const vector<vector<Point3f> >& objectPoints,
    	const vector<vector<Point2f> >& imagePoints,
    	const vector<Mat>& rvecs, const vector<Mat>& tvecs,
    	const Mat& cameraMatrix, const Mat& distCoeffs,
    	vector<float>& perViewErrors)
    {
    	//body omitted here...
    }
    
    static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners, Pattern patternType = CIRCLES_GRID)
    {
    	//omitted...
    	//for the board used in this article, the parameters of this function are: boardSize.width=7, boardSize.height=7, squareSize=0.025 (in meters here)
    }
    //run the calibration, including the reprojection-error computation
    static bool runCalibration(vector<vector<Point2f> > imagePoints,
    	Size imageSize, Size boardSize, Pattern patternType,
    	float squareSize, float aspectRatio,
    	int flags, Mat& cameraMatrix, Mat& distCoeffs,
    	vector<Mat>& rvecs, vector<Mat>& tvecs,
    	vector<float>& reprojErrs,
    	double& totalAvgErr)
    {
    	//omitted...
    }
    
    //save the camera parameters
    static void saveCameraParams(const string& filename,
    	Size imageSize, Size boardSize,
    	float squareSize, float aspectRatio, int flags,
    	const Mat& cameraMatrix, const Mat& distCoeffs,
    	const vector<Mat>& rvecs, const vector<Mat>& tvecs,
    	const vector<float>& reprojErrs,
    	const vector<vector<Point2f> >& imagePoints,
    	double totalAvgErr)
    {
      //omitted...
    }
    
    //read the image list from file
    static bool readStringList(const string& filename, vector<string>& l)
    {
    	l.resize(0);
    	FileStorage fs(filename, FileStorage::READ);
    	if (!fs.isOpened())
    		return false;
    	FileNode n = fs["images"];
    	if (n.type() != FileNode::SEQ)
    		return false;
    	FileNodeIterator it = n.begin(), it_end = n.end();
    	for (; it != it_end; ++it)
    		l.push_back((string)*it);
    	return true;
    }
    
    //run the calibration and save the results
    static bool runAndSave(const string& outputFilename,
    	const vector<vector<Point2f> >& imagePoints,
    	Size imageSize, Size boardSize, Pattern patternType, float squareSize,
    	float aspectRatio, int flags, Mat& cameraMatrix,
    	Mat& distCoeffs, bool writeExtrinsics, bool writePoints)
    {
    	//omitted...
    }
    
    
    int main(int argc, char** argv)
    {
    	cout << argc << endl;
    	for (size_t i = 0; i < argc; i++)
    	{
    		cout << argv[i] << endl;
    	}
    	
    	Size boardSize, imageSize;
    	float squareSize, aspectRatio;
    	Mat cameraMatrix, distCoeffs;
    	string outputFilename;
    	string inputFilename = "";
    
    	int i, nframes;
    	bool writeExtrinsics, writePoints;
    	bool undistortImage = false;
    	int flags = 0;
    	VideoCapture capture;
    	bool flipVertical;
    	bool showUndistorted;
    	bool videofile;
    	int delay;
    	clock_t prevTimestamp = 0;
    	int mode = DETECTION;
    	int cameraId = 0;
    	vector<vector<Point2f> > imagePoints;
    	vector<string> imageList;
    	Pattern pattern = CIRCLES_GRID;//calibration pattern type: symmetric circle grid
    
    	cv::CommandLineParser parser(argc, argv,
    		"{help ||}{w|7|}{h|7|}{pt|circles|}{n|30|}{d|1000|}{s|0.025|}{o|D:/opencv/cameracalibration/out_camera_params_25x25_circleboard.yml|}"
    		"{op|D:/opencv/cameracalibration/Detected_feature_points.yml|}{oe|D:/opencv/cameracalibration/Extrinsic_parameters_circleboard.yml|}{zt||}{a|1|}{p||}{v||}{V||}{su||}"
    		"{input_data|D:/opencv/cameracalibration/VID25x25_CircleGrid.xml|}");
    		//command-line defaults; parameter meanings: w, h are the board width and height; pt is the pattern type; n is the number of images to read;
    		//d is the interval (ms) between live captures (this code calibrates offline, so it can be left unset); o is the output file for the camera
    		//intrinsics and extrinsics (user-defined); op is the output file for the detected feature points (user-defined); oe is the output file for the
    		//extrinsics (optional here, since the extrinsics are already written to o; after calibration this file stays empty); a is the aspect ratio, 1 by default;
    		//input_data is the xml file holding the image paths; the contents of the VID25x25_CircleGrid.xml file read by this code are shown in Figure 3.
    	if (parser.has("help"))
    	{
    		help();
    		return 0;
    	}
    	boardSize.width = parser.get<int>("w");
    	boardSize.height = parser.get<int>("h");
    	if (parser.has("pt"))
    	{
    		string val = parser.get<string>("pt");
    		if (val == "circles")
    			pattern = CIRCLES_GRID;
    		else if (val == "acircles")
    			pattern = ASYMMETRIC_CIRCLES_GRID;
    		else if (val == "chessboard")
    			pattern = CHESSBOARD;
    		else
    			return fprintf(stderr, "Invalid pattern type: must be chessboard or circles\n"), -1;
    	}
    	squareSize = parser.get<float>("s");
    	nframes = parser.get<int>("n");
    	aspectRatio = parser.get<float>("a");
    	delay = parser.get<int>("d");
    	writePoints = parser.has("op");
    	writeExtrinsics = parser.has("oe");
    	if (parser.has("a"))
    		flags |= CALIB_FIX_ASPECT_RATIO;
    	if (parser.has("zt"))
    		flags |= CALIB_ZERO_TANGENT_DIST;
    	if (parser.has("p"))
    		flags |= CALIB_FIX_PRINCIPAL_POINT;
    	flipVertical = parser.has("v");
    	videofile = parser.has("V");
    	if (parser.has("o"))
    		outputFilename = parser.get<string>("o");
    	showUndistorted = parser.has("su");
    	if (isdigit(parser.get<string>("input_data")[0]))
    		cameraId = parser.get<int>("input_data");
    	else
    		inputFilename = parser.get<string>("input_data");
    		
    	if (!parser.check())
    	{
    		help();
    		parser.printErrors();
    		return -1;
    	}
    	if (squareSize <= 0)
    		return fprintf(stderr, "Invalid board square width\n"), -1;
    	if (nframes <= 3)
    		return printf("Invalid number of images\n"), -1;
    	if (aspectRatio <= 0)
    		return printf("Invalid aspect ratio\n"), -1;
    	if (delay <= 0)
    		return printf("Invalid delay\n"), -1;
    	if (boardSize.width <= 0)
    		return fprintf(stderr, "Invalid board width\n"), -1;
    	if (boardSize.height <= 0)
    		return fprintf(stderr, "Invalid board height\n"), -1;
    
    	if (!inputFilename.empty())
    	{
    		if (!videofile && readStringList(inputFilename, imageList))
    			mode = CAPTURING;
    		else
    			capture.open(inputFilename);
    	}
    	else
    		capture.open(cameraId);
    
    	if (!capture.isOpened() && imageList.empty())
    		return fprintf(stderr, "Could not initialize video (%d) capture\n", cameraId), -2;
    
    	if (!imageList.empty())
    		nframes = (int)imageList.size();
    
    	if (capture.isOpened())
    		printf("%s", liveCaptureHelp);
    
    	namedWindow("Image View", 1);
    
    	for (i = 0;; i++)
    	{
    		Mat view, viewGray;
    		bool blink = false;
    
    		if (capture.isOpened())
    		{
    			Mat view0;
    			capture >> view0;
    			view0.copyTo(view);
    		}
    		else if (i < (int)imageList.size())
    			view = imread(imageList[i], 1);
    
    		if (view.empty())
    		{
    			if (imagePoints.size() > 0)
    				runAndSave(outputFilename, imagePoints, imageSize,
    					boardSize, pattern, squareSize, aspectRatio,
    					flags, cameraMatrix, distCoeffs,
    					writeExtrinsics, writePoints);
    			break;
    		}
    
    		imageSize = view.size();
    
    		if (flipVertical)
    			flip(view, view, 0);
    
    		vector<Point2f> pointbuf;
    		cvtColor(view, viewGray, COLOR_BGR2GRAY);
    
    		bool found;
    		switch (pattern)
    		{
    		case CHESSBOARD:
    			found = findChessboardCorners(view, boardSize, pointbuf,
    				CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK | CALIB_CB_NORMALIZE_IMAGE);
    			break;
    		case CIRCLES_GRID:
    			found = findCirclesGrid(view, boardSize, pointbuf,CALIB_CB_SYMMETRIC_GRID);
    			break;
    		case ASYMMETRIC_CIRCLES_GRID:
    			found = findCirclesGrid(view, boardSize, pointbuf, CALIB_CB_ASYMMETRIC_GRID);
    			break;
    		default:
    			return fprintf(stderr, "Unknown pattern type\n"), -1;
    		}
    		if (found)
    		if (found)
    			drawChessboardCorners(view, boardSize, Mat(pointbuf), found);//draw the detected circle centers on the original image; Figure 4 shows one of the views
    
    		string msg = mode == CAPTURING ? "100/100" :
    			mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
    		int baseLine = 0;
    		Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
    		Point textOrigin(view.cols - 2 * textSize.width - 10, view.rows - 2 * baseLine - 10);
    
    		if (mode == CAPTURING)
    		{
    			if (undistortImage)
    				msg = format("%d/%d Undist", (int)imagePoints.size(), nframes);
    			else
    				msg = format("%d/%d", (int)imagePoints.size(), nframes);
    		}
    
    		putText(view, msg, textOrigin, 1, 1,
    			mode != CALIBRATED ? Scalar(0, 0, 255) : Scalar(0, 255, 0));
    
    		if (blink)
    			bitwise_not(view, view);
    
    		if (mode == CALIBRATED && undistortImage)
    		{
    			Mat temp = view.clone();
    			undistort(temp, view, cameraMatrix, distCoeffs);
    		}
    
    		imshow("Image View", view);
    		char key = (char)waitKey(capture.isOpened() ? 50 : 500);
    
    		if (key == 27)
    			break;
    
    		if (key == 'u' && mode == CALIBRATED)
    			undistortImage = !undistortImage;
    
    		if (capture.isOpened() && key == 'g')
    		{
    			mode = CAPTURING;
    			imagePoints.clear();
    		}
    
    		if (mode == CAPTURING && imagePoints.size() >= (unsigned)nframes)
    		{
    			if (runAndSave(outputFilename, imagePoints, imageSize,
    				boardSize, pattern, squareSize, aspectRatio,
    				flags, cameraMatrix, distCoeffs,
    				writeExtrinsics, writePoints))
    				mode = CALIBRATED;
    			else
    				mode = DETECTION;
    			if (!capture.isOpened())
    				break;
    		}
    	}
    
    	if (!capture.isOpened() && showUndistorted)
    	{
    		Mat view, rview, map1, map2;
    		initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
    			getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
    			imageSize, CV_16SC2, map1, map2);
    
    		for (i = 0; i < (int)imageList.size(); i++)
    		{
    			view = imread(imageList[i], 1);
    			if (view.empty())
    				continue;
    			//undistort( view, rview, cameraMatrix, distCoeffs, cameraMatrix );
    			remap(view, rview, map1, map2, INTER_LINEAR);
    			imshow("Image View", rview);
    			char c = (char)waitKey();
    			if (c == 27 || c == 'q' || c == 'Q')
    				break;
    		}
    	}
    
    	return 0;
    }
    

    (figure 3: contents of the VID25x25_CircleGrid.xml image-list file)
    (figure 4: detected circle centers drawn on one of the calibration images)

    3. Analysis of the calibration results

      The camera matrix obtained by the OpenCV calibration is:
    (formula image: camera matrix from the OpenCV calibration)
      The lens used for this calibration has a focal length of f = 8 mm, the pixel size is 3.45 μm, and the image size is 2040x1200.
      The intrinsics obtained by the Halcon calibration are (k, sx, sy, cx, cy); they are converted into the matrix form of equation (1). Table 1 compares the OpenCV and Halcon calibration data.
    (table 1: comparison of the OpenCV and Halcon calibration results)
      In this experiment, the lens is about 112 cm from the z = 0 plane of the world coordinate system. The table shows that the reprojection error of the OpenCV calibration is 0.01759, which is quite accurate and smaller than the 0.069 of the Halcon calibration. (The OpenCV calibration used the five distortion coefficients k1, k2, p1, p2, k3; the Halcon calibration only considers the radial distortion k, which is not listed in the table.)
      It should be pointed out that both results come from calibrating the same set of images. Halcon calibrates the camera with Tsai's two-step method, which requires initial values for the camera intrinsics and in theory achieves high accuracy. In this Halcon calibration, however, because the images were captured offline, the calibration process reported image-quality problems such as over-exposure, incomplete coverage of rotation angles, a calibration pattern that appears too small, and uneven illumination, so the resulting accuracy was not high. Calibrating with images grabbed live in Halcon would avoid these image-quality problems and greatly improve the accuracy, presumably to a level comparable with or better than the OpenCV calibration. The results show that the OpenCV calibration algorithm is more robust, while the Halcon algorithm is more demanding about image quality, which can also be read as the price of high-accuracy calibration.
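      As a rough sanity check on the comparison above (a back-of-the-envelope calculation based on the stated lens and sensor, not a value taken from the original experiment): Halcon reports its focal length in metric units together with the pixel sizes sx and sy, so converting to the pixel-based OpenCV convention amounts to fx = f/sx and fy = f/sy, with (cx, cy) already in pixels. With f = 8 mm and a 3.45 μm pixel this gives fx ≈ 0.008 / 0.00000345 ≈ 2319 pixels, which is the order of magnitude one should expect on the diagonal of the camera matrices being compared in Table 1.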


    P.S. If you find any mistakes, please point them out. If you repost this article, please credit the source.

  • A monocular camera calibration program written on the OpenCV platform using Zhang Zhengyou's method; with just twelve chessboard images taken by the camera it can calibrate the focal length and other parameters and establish the camera coordinate system. It can be used in machine-vision measurement and also for stereo camera calibration...
  • A detailed explanation of the principles and steps of camera calibration with Python and OpenCV; the article walks through example code in detail and has reference value for study or work.
