OpenCV 图像处理示例代码合集

    显示图像

    #include "StdAfx.h"
    #include <string>
    #include <iostream>
    #include <opencv2\core\core.hpp>
    #include <opencv2\highgui\highgui.hpp>
     
    using namespace cv;
    using namespace std;
     
    int main()
    {
             string imageName = “lena.jpg”;
     
             //读入图像
             Mat img = imread(imageName, CV_LOAD_IMAGE_COLOR);
     
             //如果读入图像失败
             if (img.empty())
             {
                       cout<<”Could not open or find the image!”<<endl;
                       return -1;
             }
     
             //创建窗口
             namedWindow(“lena”, CV_WINDOW_AUTOSIZE);
    
             //显示图像
             imshow(“lena”, img);
     
             //等待按键,按键盘任意键返回
             waitKey();
    
             return 0;
    }

    加载-RGB转灰度图-保存

    #include "StdAfx.h"
    #include <cv.h>
    #include <highgui.h>
    #include <string>
     
    using namespace cv;
    using namespace std;
     
    int main()
    {
             // File name; also reused as the window title below.
             // (Curly quotes from the original replaced with valid ones.)
             const char* imageName = "lena.jpg";
             Mat image = imread(imageName, 1);  // 1 = force 3-channel color
     
             if (!image.data)
             {
                       cout<<"Could not open or find the image!"<<endl;
                       return -1;
             }
     
             Mat gray_image;
             String grayImageName = "lena_gray";
     
             cvtColor(image,gray_image,CV_RGB2GRAY);//convert to grayscale
             imwrite("../../lena_gray.jpg",gray_image);//save to disk
     
             namedWindow(imageName, CV_WINDOW_AUTOSIZE);//window for the original
             namedWindow(grayImageName,CV_WINDOW_AUTOSIZE);//window for the gray image
     
             imshow(imageName,image);
             // Show the gray image in the window created above; the original
             // passed the literal "grayImageName", opening a third window and
             // leaving the "lena_gray" window empty.
             imshow(grayImageName, gray_image);
     
            waitKey(0);
            return 0;
    }
    
    
    膨胀操作示例

    #include <opencv2/core/core.hpp>
    #include<opencv2/highgui/highgui.hpp>
    #include<opencv2/imgproc/imgproc.hpp>
    #include <iostream>
    
    using namespace std;
    using namespace cv;
     
    int main(  )
    {
      // Load the source image.
      Mat image = imread("1.jpg");
      // Guard: imshow/dilate on an empty Mat throws at runtime.
      if (image.empty())
      {
        return -1;
      }
     
      // Windows for the original and the processed image.
      namedWindow("原图-膨胀操作");
      namedWindow("效果图-膨胀操作");
     
      // Show the original.
      imshow("原图-膨胀操作", image);
     
      // 15x15 rectangular structuring element.
      Mat element = getStructuringElement(MORPH_RECT, Size(15, 15));
      Mat out;
      // Dilation: each output pixel is the local maximum over the kernel.
      dilate(image,out, element);
     
      // Show the result.
      imshow("效果图-膨胀操作", out);
     
      waitKey(0);
     
      return 0;
    }
    

    
    
    
    

    腐蚀操作示例

    #include <opencv2/core/core.hpp>

    #include<opencv2/highgui/highgui.hpp>
    #include<opencv2/imgproc/imgproc.hpp>
    #include <iostream>
    
    using namespace std;
    using namespace cv;
    
    int main(  )
    {
      //载入原图 
      Matimage = imread("1.jpg");
     
       //创建窗口 
      namedWindow("原图-腐蚀操作");
      namedWindow("效果图-腐蚀操作");
     
      //显示原图
      imshow("原图-腐蚀操作", image);
     
       
      //获取自定义核
      Mat element = getStructuringElement(MORPH_RECT, Size(15, 15));
      Mat out;
     
      //进行腐蚀操作
      erode(image,out, element);
     
      //显示效果图
      imshow("效果图-腐蚀操作", out);
     
      waitKey(0);
     
      return 0;
    }

    膨胀与腐蚀综合示例

    #include <opencv2/opencv.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include<opencv2/imgproc/imgproc.hpp>
    #include <iostream>
     
    using namespace std;
    using namespace cv;
     
    Mat g_srcImage, g_dstImage;//source image and processed result
    int g_nTrackbarNumer = 0;//0 = erode, 1 = dilate
    int g_nStructElementSize = 3; //structuring-element (kernel) half-size
     
    void Process();//apply the currently selected erode/dilate operation
    void on_TrackbarNumChange(int, void *);//trackbar callback: operation switch
    void on_ElementSizeChange(int, void *);//trackbar callback: kernel size
     
    int main( )
    {
      //改变console字体颜色
      system("color5E"); 
     
      //载入原图
      g_srcImage= imread("1.jpg");
      if(!g_srcImage.data ) { printf("Oh,no,读取srcImage错误~!\n"); return false; }
          
      //显示原始图
      namedWindow("原始图");
      imshow("原始图", g_srcImage);
          
      //进行初次腐蚀操作并显示效果图
      namedWindow("效果图");
      //获取自定义核
      Matelement = getStructuringElement(MORPH_RECT, Size(2*g_nStructElementSize
    
    +1,2*g_nStructElementSize+1),Point( g_nStructElementSize, g_nStructElementSize ));
      erode(g_srcImage,g_dstImage, element);
      imshow("效果图", g_dstImage);
     
      //创建轨迹条
      createTrackbar("腐蚀/膨胀", "效果图", &g_nTrackbarNumer, 1, on_TrackbarNumChange);
      createTrackbar("内核尺寸", "效果图",&g_nStructElementSize, 21, on_ElementSizeChange);
     
      //输出一些帮助信息
      cout<<endl<<"\t嗯。运行成功,请调整滚动条观察图像效果~\n\n"
        <<"\t按下“q”键时,程序退出~!\n"
        <<"\n\n\t\t\t\tby毛毛";
     
      //轮询获取按键信息,若下q键,程序退出
      while(char(waitKey(1))!= 'q') {}
     
      return 0;
    }
     
    
    //进行自定义的腐蚀和膨胀操作
    void Process()
    {
      //获取自定义核
      Mat element = getStructuringElement(MORPH_RECT, Size(2*g_nStructElementSize
    
    +1,2*g_nStructElementSize+1),Point( g_nStructElementSize, g_nStructElementSize ));
     
      //进行腐蚀或膨胀操作
      if(g_nTrackbarNumer== 0) {   
        erode(g_srcImage,g_dstImage, element);
      }
      else{
        dilate(g_srcImage,g_dstImage, element);
      }
     
      //显示效果图
      imshow("效果图", g_dstImage);
    }
     
     
    //腐蚀和膨胀之间切换开关的回调函数
    void on_TrackbarNumChange(int, void *)
    {
      //腐蚀和膨胀之间效果已经切换,回调函数体内需调用一次Process函数,使改变后的效果立即生效并
    
    显示出来
      Process();
    }
     
    // Callback for the kernel-size trackbar: re-run the processing so the
    // new size is applied right away.
    void on_ElementSizeChange(int, void *)
    {
      Process();
    }

    膨胀与腐蚀综合示例2

    #include "cv.h" 
    #include "highgui.h"
    #include "opencv2/imgproc/imgproc.hpp"
    
    using namespace std;
    using namespace cv;
    
    #define TYPE_MORPH_RECT      (0)
    #define TYPE_MORPH_CROSS     (1)
    #define TYPE_MORPH_ELLIPSE   (2)
    
    #define MAX_ELE_TYPE         (2)
    #define MAX_ELE_SIZE         (20)
    
    Mat src, erode_dst, dilate_dst;
    
    const char *erode_wn  = "eroding demo";
    const char *dilate_wn = "dilating demo";
    
    int erode_ele_type;
    int dilate_ele_type;
    int erode_ele_size;
    int dilate_ele_size;
    
    static void Erosion(int, void *);
    static void Dilation(int, void *);
    
    /*
     * @brief   Entry point: load the image named on the command line and set
     *          up two interactive windows, one for erosion, one for dilation.
     * @inputs  argv[1] — path of the image to process
     * @retval  0 on success, -1 on bad arguments or load failure
     */
    int main(int argc, char *argv[])
    {
        // Exactly one argument (the image path) is required.
        if (argc < 2) {
            cout<<"Usage: ./eroding_and_dilating [file name]"<<endl;
            return -1;
        }
    
        src = imread(argv[1]);
        if (!src.data) {
            cout<<"Read image failure."<<endl;
            return -1;
        }
    
        // One auto-sized window per operation.
        namedWindow(erode_wn, WINDOW_AUTOSIZE);
        namedWindow(dilate_wn, WINDOW_AUTOSIZE);
    
        // Shared trackbar labels.
        const char *type_label = "Element Type\n0:Rect\n1:Cross\n2:Ellipse";
        const char *size_label = "Element Size: 2n+1";
    
        // Erosion controls (callback @Erosion).
        createTrackbar(type_label, erode_wn, &erode_ele_type, MAX_ELE_TYPE, Erosion);
        createTrackbar(size_label, erode_wn, &erode_ele_size, MAX_ELE_SIZE, Erosion);
    
        // Dilation controls (callback @Dilation).
        createTrackbar(type_label, dilate_wn, &dilate_ele_type, MAX_ELE_TYPE, Dilation);
        createTrackbar(size_label, dilate_wn, &dilate_ele_size, MAX_ELE_SIZE, Dilation);
    
        // Render both windows once with the default settings.
        Erosion(0, 0);
        Dilation(0, 0);
    
        waitKey(0);
        return 0;
    }
    
    
    /*
     * @brief   腐蚀操作的回调函数
     * @inputs  
     * @outputs 
     * @retval  
     */
    static void Erosion(int, void *)
    {
        int erode_type;
    
        switch (erode_ele_type) {
        case TYPE_MORPH_RECT:
           erode_type = MORPH_RECT; 
           break;
        case TYPE_MORPH_CROSS:
           erode_type = MORPH_CROSS;
           break;
        case TYPE_MORPH_ELLIPSE:
           erode_type = MORPH_ELLIPSE;
           break;
        default:
           erode_type = MORPH_RECT;
           break;
        }
    
        Mat ele = getStructuringElement(erode_type, Size(2*erode_ele_size+1, 2*erode_ele_size
    
    +1), 
                Point(erode_ele_size, erode_ele_size));
    
        erode(src, erode_dst, ele);
    
        imshow(erode_wn, erode_dst);
    }
    
    /*
     * @brief   膨胀操作的回调函数
     * @inputs  
     * @outputs 
     * @retval  
     */
    static void Dilation(int, void *)
    {
        int dilate_type;
    
        switch (dilate_ele_type) {
        case TYPE_MORPH_RECT:
           dilate_type = MORPH_RECT; 
           break;
        case TYPE_MORPH_CROSS:
           dilate_type = MORPH_CROSS;
           break;
        case TYPE_MORPH_ELLIPSE:
           dilate_type = MORPH_ELLIPSE;
           break;
        default:
           dilate_type = MORPH_RECT;
           break;
        }
    
        Mat ele = getStructuringElement(dilate_type, Size(2*dilate_ele_size+1, 
    
    2*dilate_ele_size+1), 
                Point(dilate_ele_size, dilate_ele_size));
    
        dilate(src, dilate_dst, ele);
    
        imshow(dilate_wn, dilate_dst);
    }

    Qt图像的缩放显示

    #include "widget.h"
    #include "ui_widget.h"
    #include <QDebug>
    // Construct the widget and build the Designer-generated UI.
    Widget::Widget(QWidget *parent) :
        QWidget(parent),
        ui(new Ui::Widget)
    {
        ui->setupUi(this);
    }
    
    
    // Release the Designer-generated UI object.
    Widget::~Widget()
    {
        delete ui;
    }
    
    // Slot: let the user pick an image file, load it with OpenCV, keep an
    // untouched copy, and display it in the label.
    // NOTE(review): if the load fails, `image` is empty and Mat2QImage/display
    // still run before the `image.data` check below — confirm intended.
    void Widget::on_openButton_clicked()
    {
        QString fileName = QFileDialog::getOpenFileName(this,tr("Open Image"),
                                    ".",tr("Image Files (*.png *.jpg *.bmp)"));
        qDebug()<<"filenames:"<<fileName;
        // toAscii() is Qt4-era API; presumably this project targets Qt4.
        image = cv::imread(fileName.toAscii().data());
        ui->imgfilelabel->setText(fileName);
        //here use 2 ways to make a copy
    //    image.copyTo(originalimg);          //make a copy
        originalimg = image.clone();        //clone the img
        qimg = Widget::Mat2QImage(image);
        display(qimg);                      //display by the label
        // Enable the filter buttons only once an image is loaded.
        if(image.data)
        {
            ui->saltButton->setEnabled(true);
            ui->originalButton->setEnabled(true);
            ui->reduceButton->setEnabled(true);
        }
    }
    
    // Convert a cv::Mat to a QImage for display.
    // 3-channel input is assumed to be BGR (OpenCV's default) and converted
    // to RGB; 1-channel input is expanded to RGB. The pixel data is kept in
    // the `rgb` member so the QImage's buffer stays alive.
    QImage Widget::Mat2QImage(const cv::Mat &mat)
    {
        QImage img;
        if(mat.channels()==3)
        {
            //cvt Mat BGR 2 QImage RGB
            cvtColor(mat,rgb,CV_BGR2RGB);
            img =QImage((const unsigned char*)(rgb.data),
                        rgb.cols,rgb.rows,
                        rgb.cols*rgb.channels(),
                        QImage::Format_RGB888);
        }
        else if(mat.channels()==1)
        {
            // Grayscale: expand to three identical channels so Format_RGB888
            // is valid. The original reinterpreted 1-channel data as RGB888,
            // which garbles the image (wrong stride and pixel layout).
            cvtColor(mat,rgb,CV_GRAY2RGB);
            img =QImage((const unsigned char*)(rgb.data),
                        rgb.cols,rgb.rows,
                        rgb.cols*rgb.channels(),
                        QImage::Format_RGB888);
        }
        else
        {
            // Fallback (e.g. 4-channel): original behavior preserved.
            img =QImage((const unsigned char*)(mat.data),
                        mat.cols,mat.rows,
                        mat.cols*mat.channels(),
                        QImage::Format_RGB888);
        }
        return img;
    }
    
    // Scale the image to fit the display label (keeping aspect ratio) and
    // hand it to the label as a pixmap.
    void Widget::display(QImage img)
    {
        QImage scaled = img.scaled(ui->imagelabel->size(), Qt::KeepAspectRatio);
        ui->imagelabel->setPixmap(QPixmap::fromImage(scaled));
    }
    
    // Slot: restore the view to the untouched copy kept at load time.
    void Widget::on_originalButton_clicked()
    {
        qimg = Widget::Mat2QImage(originalimg);
        display(qimg);
    }
    
    // Slot: add 3000 salt-noise pixels to the working image and refresh the view.
    void Widget::on_saltButton_clicked()
    {
        salt(image,3000);
        qimg = Widget::Mat2QImage(image);
        display(qimg);
    }
    // Slot: quantize the working image's colors (bin size 64) and refresh the view.
    void Widget::on_reduceButton_clicked()
    {
        colorReduce0(image,64);
        qimg = Widget::Mat2QImage(image);
        display(qimg);
    }
    // Add n white "salt" noise pixels at random positions in the image.
    void Widget::salt(cv::Mat &image, int n)
    {
        for (int count = 0; count < n; count++)
        {
            // Pick a random column, then a random row (same draw order as before).
            int col = qrand() % image.cols;
            int row = qrand() % image.rows;
    
            if (image.channels() == 1)
            {
                // Gray-level image: one byte per pixel.
                image.at<uchar>(row, col) = 255;
            }
            else if (image.channels() == 3)
            {
                // Color image: set all three channels to white.
                cv::Vec3b &px = image.at<cv::Vec3b>(row, col);
                px[0] = 255;
                px[1] = 255;
                px[2] = 255;
            }
        }
    }
    
    // using .ptr and []
    // Quantize every channel value to its bin's center (bin size = div),
    // reducing the number of distinct colors in the image.
    void Widget::colorReduce0(cv::Mat &image, int div)
    {
        const int rows = image.rows;
        const int elemsPerRow = image.cols * image.channels();
    
        for (int r = 0; r < rows; r++)
        {
            uchar *p = image.ptr<uchar>(r);
            for (int c = 0; c < elemsPerRow; c++)
            {
                // floor to a multiple of div, then shift to the bin center
                p[c] = p[c] / div * div + div / 2;
            }
        }
    }

    #ifndef WIDGET_H
    #define WIDGET_H
    
    #include <QWidget>
    #include <QImage>
    #include <QFileDialog>
    #include <QTimer>
    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    
    using namespace cv;
    
    namespace Ui {
    class Widget;
    }
    
    // Demo widget: loads an image with OpenCV, shows it in a QLabel, and
    // applies salt-noise / color-reduction filters on button clicks.
    class Widget : public QWidget
    {
        Q_OBJECT
        
    public:
        explicit Widget(QWidget *parent = 0);
        ~Widget();
    private slots:
        void on_openButton_clicked();
        // Convert a BGR/other cv::Mat to a QImage for display.
        QImage Mat2QImage(const cv::Mat &mat);
        // Scale image to the label size and show it.
        void display(QImage image);
        // Add n white noise pixels at random positions.
        void salt(cv::Mat &image, int n);
    
        void on_saltButton_clicked();
        void on_reduceButton_clicked();
        // Quantize channel values to multiples of div.
        void colorReduce0(cv::Mat &image, int div);
        void on_originalButton_clicked();
    
    private:
        Ui::Widget *ui;
        cv::Mat image;       // working image (filters modify this)
        cv::Mat originalimg; //store the original img for the "original" button
        QImage qimg;         // last converted frame
        QImage imgScaled;    // NOTE(review): unused here; display() uses a local
        cv::Mat rgb;         // scratch buffer for BGR->RGB conversion
    };
    
    #endif // WIDGET_H
    

    #include <iostream>
    
    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    
    // using .ptr and []
    // Quantize each channel value to the center of its div-sized bin.
    void colorReduce0(cv::Mat &image, int div=64) {
        const int lines = image.rows;
        const int elems = image.cols * image.channels(); // elements per row
    
        for (int row = 0; row < lines; row++) {
            uchar* p = image.ptr<uchar>(row);
            for (int k = 0; k < elems; k++)
                p[k] = p[k] / div * div + div / 2; // floor to bin, re-center
        }
    }
    
    // using .ptr and * ++
    // Quantize each channel value to the center of its div-sized bin.
    void colorReduce1(cv::Mat &image, int div=64) {
          int nl= image.rows; // number of lines
          int nc= image.cols * image.channels(); // total number of elements per line
                  
          for (int j=0; j<nl; j++) {
              uchar* data= image.ptr<uchar>(j);
              for (int i=0; i<nc; i++) {
                // Read before writing: the original "*data++ = *data/div*div..."
                // reads and increments the same pointer in one expression,
                // which is unsequenced (UB) before C++17.
                uchar v = *data;
                *data++ = v/div*div + div/2;
                } // end of line                   
          }
    }
    
    // using .ptr and * ++ and modulo
    // Quantize each channel value to the center of its div-sized bin.
    void colorReduce2(cv::Mat &image, int div=64) {
        const int lines = image.rows;
        const int elems = image.cols * image.channels(); // elements per row
    
        for (int row = 0; row < lines; row++) {
            uchar* p = image.ptr<uchar>(row);
            for (int k = 0; k < elems; k++) {
                // subtract the remainder to floor to a bin, then re-center
                int v = *p;
                *p++ = v - v % div + div / 2;
            }
        }
    }
    
    // using .ptr and * ++ and bitwise
    // Quantize each channel value by masking off the low bits (div must be
    // a power of 2), then re-centering within the bin.
    void colorReduce3(cv::Mat &image, int div=64) {
          int nl= image.rows; // number of lines
          int nc= image.cols * image.channels(); // total number of elements per line
          int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
          // mask used to round the pixel value
          uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0
                  
          for (int j=0; j<nl; j++) {
              uchar* data= image.ptr<uchar>(j);
              for (int i=0; i<nc; i++) {
                // Parentheses are essential: '&' has lower precedence than
                // '+', so the original "*data & mask + div/2" computed
                // *data & (mask + div/2) — the wrong mask.
                *data = (*data & mask) + div/2;
                ++data;
                } // end of line                   
          }
    }
    
    // direct pointer arithmetic
    // Quantize each channel value by masking off the low bits (div must be
    // a power of 2), walking the raw buffer row by row via image.step.
    void colorReduce4(cv::Mat &image, int div=64) {
          int nl= image.rows; // number of lines
          int nc= image.cols * image.channels(); // total number of elements per line
          int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
          int step= image.step; // effective width in bytes (may include padding)
          // mask used to round the pixel value
          uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0
                  
          // get the pointer to the image buffer
          uchar *data= image.data;
    
          for (int j=0; j<nl; j++) {
              for (int i=0; i<nc; i++) {
                // Fix 1: read *(data+i), not *data — the original re-read the
                // first byte of the row for every element.
                // Fix 2: parenthesize the mask ('&' binds looser than '+').
                *(data+i) = (*(data+i) & mask) + div/2;
                } // end of line                   
                data+= step;  // next line
          }
    }
    
    // using .ptr and * ++ and bitwise with image.cols * image.channels()
    // Same bitwise quantization as colorReduce3; this variant deliberately
    // re-evaluates the loop bound every iteration to measure that cost.
    void colorReduce5(cv::Mat &image, int div=64) {
          int nl= image.rows; // number of lines
          int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
          // mask used to round the pixel value
          uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0
                  
          for (int j=0; j<nl; j++) {
              uchar* data= image.ptr<uchar>(j);
              for (int i=0; i<image.cols * image.channels(); i++) {
                // Parenthesized: '&' binds looser than '+', so the original
                // "*data & mask + div/2" masked with the wrong value.
                *data = (*data & mask) + div/2;
                ++data;
                } // end of line                   
          }
    }
    
    // using .ptr and * ++ and bitwise (continuous)
    // Bitwise quantization that collapses the image to one long row when the
    // pixel data has no inter-row padding.
    void colorReduce6(cv::Mat &image, int div=64) {
    
          int nl= image.rows; // number of lines
          int nc= image.cols * image.channels(); // total number of elements per line
    
          if (image.isContinuous())  {
              // then no padded pixels: treat the data as a single 1-D array
              nc= nc*nl; 
              nl= 1;
           }
    
          // div must be a power of 2
          int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
          // mask used to round the pixel value
          uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0
                  
          for (int j=0; j<nl; j++) {
              uchar* data= image.ptr<uchar>(j);
    
              for (int i=0; i<nc; i++) {
                // Parenthesized: '&' binds looser than '+', so the original
                // "*data & mask + div/2" masked with the wrong value.
                *data = (*data & mask) + div/2;
                ++data;
                } // end of line                   
          }
    }
    
    // using .ptr and * ++ and bitwise (continuous+channels)
    // Bitwise quantization processing three channels per loop iteration;
    // collapses to one long row when the data is continuous.
    void colorReduce7(cv::Mat &image, int div=64) {
          int nl= image.rows; // number of lines
          int nc= image.cols ; // number of columns
    
          if (image.isContinuous())  {
              // then no padded pixels: treat the data as a single 1-D array
              nc= nc*nl; 
              nl= 1;
           }
    
          int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
          // mask used to round the pixel value
          uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0
                  
          for (int j=0; j<nl; j++) {
              uchar* data= image.ptr<uchar>(j);
    
              for (int i=0; i<nc; i++) {
                // One pixel = three channel bytes. Parentheses fix the
                // original "x & mask + div/2", parsed as x & (mask + div/2).
                *data = (*data & mask) + div/2; ++data;
                *data = (*data & mask) + div/2; ++data;
                *data = (*data & mask) + div/2; ++data;
                } // end of line                   
          }
    }
    
    // using Mat_ iterator 
    // Quantize each BGR triple to the center of its div-sized bin.
    void colorReduce8(cv::Mat &image, int div=64) {
        cv::Mat_<cv::Vec3b>::iterator cur  = image.begin<cv::Vec3b>();
        cv::Mat_<cv::Vec3b>::iterator last = image.end<cv::Vec3b>();
    
        // visit every pixel
        for (; cur != last; ++cur) {
            cv::Vec3b &px = *cur;
            px[0] = px[0] / div * div + div / 2;
            px[1] = px[1] / div * div + div / 2;
            px[2] = px[2] / div * div + div / 2;
        }
    }
    
    // using Mat_ iterator and bitwise
    // Quantize each BGR triple by masking off the low bits (div must be a
    // power of 2), then re-centering within the bin.
    void colorReduce9(cv::Mat &image, int div=64) {
    
          // div must be a power of 2
          int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
          // mask used to round the pixel value
          uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0
    
          // get iterators
          cv::Mat_<cv::Vec3b>::iterator it= image.begin<cv::Vec3b>();
          cv::Mat_<cv::Vec3b>::iterator itend= image.end<cv::Vec3b>();
    
          // scan all pixels
          for ( ; it!= itend; ++it) {
            // Parentheses are essential: '&' has lower precedence than '+',
            // so the original "x & mask + div/2" computed x & (mask + div/2).
            (*it)[0]= ((*it)[0] & mask) + div/2;
            (*it)[1]= ((*it)[1] & mask) + div/2;
            (*it)[2]= ((*it)[2] & mask) + div/2;
          }
    }
    
    // using MatIterator_ 
    // Quantize each BGR triple to the center of its div-sized bin, iterating
    // over a typed Mat_ view of the image.
    void colorReduce10(cv::Mat &image, int div=64) {
        cv::Mat_<cv::Vec3b> view = image;                 // typed view, shares data
        cv::Mat_<cv::Vec3b>::iterator cur  = view.begin();
        cv::Mat_<cv::Vec3b>::iterator last = view.end();
    
        for (; cur != last; ++cur) {
            cv::Vec3b &px = *cur;
            px[0] = px[0] / div * div + div / 2;
            px[1] = px[1] / div * div + div / 2;
            px[2] = px[2] / div * div + div / 2;
        }
    }
    
    // Quantize each BGR triple using per-pixel at<> access — the simplest
    // (and slowest) variant in this family.
    void colorReduce11(cv::Mat &image, int div=64) {
        const int rows = image.rows;
        const int cols = image.cols;
    
        for (int r = 0; r < rows; r++) {
            for (int c = 0; c < cols; c++) {
                cv::Vec3b &px = image.at<cv::Vec3b>(r, c);
                px[0] = px[0] / div * div + div / 2;
                px[1] = px[1] / div * div + div / 2;
                px[2] = px[2] / div * div + div / 2;
            }
        }
    }
    
    // with input/output images
    // Bitwise quantization writing into a separate result image; div must
    // be a power of 2.
    void colorReduce12(const cv::Mat &image, // input image 
                     cv::Mat &result,      // output image
                     int div=64) {
          int nl= image.rows; // number of lines
          int nc= image.cols ; // number of columns
    
          // allocate output image if necessary
          result.create(image.rows,image.cols,image.type());
    
          // created images have no padded pixels
          // NOTE(review): the input is scanned the same way, so this assumes
          // `image` is continuous too (not a ROI) — confirm with callers.
          nc= nc*nl; 
          nl= 1;  // it is now a 1D array
    
          int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
          // mask used to round the pixel value
          uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0
                  
          for (int j=0; j<nl; j++) {
              uchar* data= result.ptr<uchar>(j);
              const uchar* idata= image.ptr<uchar>(j);
    
              for (int i=0; i<nc; i++) { 
                // Per channel: keep the high bits, then re-center the bin.
                // Parentheses fix the original "x & mask + div/2", which is
                // parsed as x & (mask + div/2).
                *data++ = (*idata++ & mask) + div/2;
                *data++ = (*idata++ & mask) + div/2;
                *data++ = (*idata++ & mask) + div/2;
              } // end of line                   
          }
    }
    
    // using overloaded operators
    // Whole-image expression: mask off the low bits of every channel (div
    // must be a power of 2), then re-center each bin.
    void colorReduce13(cv::Mat &image, int div=64) {    
        const int shift = static_cast<int>(log(static_cast<double>(div)) / log(2.0));
        const uchar mask = 0xFF << shift; // e.g. div=16 -> mask 0xF0
    
        image = (image & cv::Scalar(mask, mask, mask)) + cv::Scalar(div/2, div/2, div/2);
    }

    图像锐化1

    sharp.h

    #pragma once
    #include <opencv\cv.h>
    using namespace cv;
    // Image-sharpening helpers.
    namespace ggicci
    {
        // Sharpen img into result (result is (re)allocated to img's size/type).
        void sharpen(const Mat& img, Mat& result);
    }

    sharp.cpp
    #include "sharp.h"
    // Sharpen img into result with the 5-point Laplacian kernel
    //    0 -1  0
    //   -1  5 -1
    //    0 -1  0
    // applied per channel; the one-pixel border is set to 0.
    void ggicci::sharpen(const Mat& img, Mat& result)
    {    
        result.create(img.size(), img.type());
        // Process interior pixels only; the border is handled afterwards.
        for (int row = 1; row < img.rows-1; row++)
        {
            // row above
            const uchar* previous = img.ptr<const uchar>(row-1);
            // row being filtered
            const uchar* current = img.ptr<const uchar>(row);
            // row below
            const uchar* next = img.ptr<const uchar>(row+1);
            uchar *output = result.ptr<uchar>(row);
            int ch = img.channels();
            int starts = ch;
            int ends = (img.cols - 1) * ch;
            // Walk channel-by-channel; col±ch addresses the same channel of
            // the horizontally adjacent pixels.
            for (int col = starts; col < ends; col++)
            {
                // Write to output[col], not *output++: the original advanced
                // the output pointer from element 0 while reading from
                // element `starts`, shifting the result one pixel left.
                output[col] = saturate_cast<uchar>(
                    5 * current[col] - current[col-ch] - current[col+ch]
                    - previous[col] - next[col]);
            }
        } //end loop
        // Border pixels are set to 0.
        result.row(0).setTo(Scalar::all(0));
        result.row(result.rows-1).setTo(Scalar::all(0));
        result.col(0).setTo(Scalar::all(0));
        result.col(result.cols-1).setTo(Scalar::all(0));
    }

    main.cpp
    #include <opencv\highgui.h>
    #pragma comment(lib, "opencv_core231d.lib")
    #pragma comment(lib, "opencv_highgui231d.lib")
    #pragma comment(lib, "opencv_imgproc231d.lib")
    
    using namespace cv;
     
    #include "sharp.h"
     
    int main()
    {    
        Mat lena = imread("lena.jpg");
        // Guard: sharpen() and imshow() would fail on an empty Mat.
        if (lena.empty())
        {
            return -1;
        }
        Mat sharpenedLena;
        ggicci::sharpen(lena, sharpenedLena);
     
        imshow("lena", lena);
        imshow("sharpened lena", sharpenedLena);
        cvWaitKey();
        return 0;
    }

    图像锐化2

    int main()
    {    
        Mat lena = imread("lena.jpg");
        // Guard: filter2D/imshow would fail on an empty Mat.
        if (lena.empty())
        {
            return -1;
        }
        Mat sharpenedLena;
        // 3x3 sharpening kernel (negative Laplacian plus identity).
        Mat kernel = (Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
        cv::filter2D(lena, sharpenedLena, lena.depth(), kernel);
     
        imshow("lena", lena);
        imshow("sharpened lena", sharpenedLena);
        cvWaitKey();
        return 0;
    }

    简单的灰度图像的直方图计算

       int main()
       {    
           // Load lena as a single-channel grayscale image.
           Mat img = imread("lena.jpg", CV_LOAD_IMAGE_GRAYSCALE);
           if (img.empty())
           {
               return -1; // calcHist would assert on an empty image
           }
       
           Mat* arrays = &img;          // one source image
           int narrays = 1;
           int channels[] = { 0 };      // histogram over channel 0
           InputArray mask = noArray(); // no mask: count every pixel
           Mat hist;
           int dims = 1;
           int histSize[] = { 256 };    // one bin per gray level
           // calcHist uniform ranges are half-open [low, high): the upper
           // bound must be 256 so the value 255 is counted (the original
           // {0.0, 255.0} silently dropped it).
           float hranges[] = { 0.0, 256.0 };
           const float *ranges[] = { hranges };
           // Compute the histogram into hist.
           calcHist(arrays, narrays, channels, mask, hist, dims, histSize, ranges);
           
           // Render the histogram into an image and display it.
           Mat histImg = ggicci::getHistogram1DImage(hist, Size(600, 420));
           imshow("lena gray image histogram", histImg);
           waitKey();
           return 0;
       }
       
       // Render a 1-D float histogram as a bar-chart image of the given size.
       Mat ggicci::getHistogram1DImage(const Mat& hist, Size imgSize)
       {
           // Start from a white canvas. The original constructor left the
           // buffer uninitialized, so the background was random memory.
           Mat histImg(imgSize, CV_8UC3, Scalar::all(255));
           int Padding = 10;
           int W = imgSize.width - 2 * Padding;
           int H = imgSize.height - 2 * Padding;
           double _max;
           minMaxLoc(hist, NULL, &_max);   // tallest bin, for vertical scaling
           double Per = (double)H / _max;  // pixels per count
           const Point Orig(Padding, imgSize.height-Padding); // bottom-left chart origin
           int bin = W / (hist.rows + 2);  // bar width in pixels
       
           // Draw one bar per bin.
           for (int i = 1; i <= hist.rows; i++)
           {
               Point pBottom(Orig.x + i * bin, Orig.y);
               Point pTop(pBottom.x, pBottom.y - Per * hist.at<float>(i-1));
               line(histImg, pBottom, pTop, Scalar(255, 0, 0), bin);
           }
       
           // Three red guide lines marking the data region.
           line(histImg, Point(Orig.x + bin, Orig.y - H),
                Point(Orig.x + hist.rows * bin, Orig.y - H), Scalar(0, 0, 255), 1);
           line(histImg, Point(Orig.x + bin, Orig.y),
                Point(Orig.x + bin, Orig.y - H), Scalar(0, 0, 255), 1);
           line(histImg, Point(Orig.x + hist.rows * bin, Orig.y),
                Point(Orig.x + hist.rows * bin, Orig.y - H), Scalar(0, 0, 255), 1);
           // Axes with arrow heads. drawArrow is a project-local helper —
           // assumed available in this namespace; TODO confirm its signature.
           drawArrow(histImg, Orig, Orig+Point(W, 0), 10, 30, Scalar::all(0), 2);
           drawArrow(histImg, Orig, Orig-Point(0, H), 10, 30, Scalar::all(0), 2);
           
           return histImg;
       }

    图像缩放-最近邻插值-双线性插值

    #include "stdafx.h"
    #include <cv.h>
    #include <cxcore.h>
    #include <highgui.h>
    #include <cmath>
    
    using namespace std;
    using namespace cv;
    
    int main(int argc ,char ** argv)
    {
        IplImage *scr=0;
        IplImage *dst=0;
        double scale=4;
        CvSize dst_cvsize;
        if (argc==2&&(scr=cvLoadImage(argv[1],-1))!=0)
        {
            dst_cvsize.width=(int)(scr->width*scale);
            dst_cvsize.height=(int)(scr->height*scale);
            dst=cvCreateImage(dst_cvsize,scr->depth,scr->nChannels);
    
            cvResize(scr,dst,CV_INTER_NN);//
    //             CV_INTER_NN - 最近邻插值,
    //             CV_INTER_LINEAR - 双线性插值 (缺省使用)
    //             CV_INTER_AREA - 使用象素关系重采样。当图像缩小时候,该方法可以避免波纹出现。
             /*当图像放大时,类似于 CV_INTER_NN 方法..*/
    //             CV_INTER_CUBIC - 立方插值.
    
            cvNamedWindow("scr",CV_WINDOW_AUTOSIZE);
            cvNamedWindow("dst",CV_WINDOW_AUTOSIZE);
            cvShowImage("scr",scr);
            cvShowImage("dst",dst);
            cvWaitKey();
            cvReleaseImage(&scr);
            cvReleaseImage(&dst);
            cvDestroyWindow("scr");
            cvDestroyWindow("dst");
        }
        return 0;
    }

    图片加“怀旧色”滤镜保存输出

    #include <opencv/cv.h>
    #include <opencv/highgui.h>
    
    using namespace cv;
    using namespace std;
    
    // Apply a sepia ("怀旧色") color matrix to the input image and write the
    // result to the output path.
    int main(int argc, char ** argv)
    {
        // input args check
        if(argc < 3){
            printf("please input args.\n");
            printf("e.g. : ./test infilepath outfilepath \n");
            return 0;
        }
        
        char * input = argv[1];
        char * output = argv[2];
        
        printf("input: %s, output: %s\n", input, output);
    
        Mat src = imread(input, 1);
        // Guard: the pixel loop below dereferences row pointers and would
        // crash on an empty image (bad path / unreadable file).
        if (src.empty()) {
            printf("failed to read input image.\n");
            return 1;
        }
    
        int width=src.cols;
        int heigh=src.rows;
        Mat img(src.size(),CV_8UC3);
        // Per-pixel sepia transform; pixels are stored B,G,R.
        for (int y=0; y<heigh; y++)
        {
            uchar* P0 = src.ptr<uchar>(y);
            uchar* P1 = img.ptr<uchar>(y);
            for (int x=0; x<width; x++)
            {
                float B=P0[3*x];
                float G=P0[3*x+1];
                float R=P0[3*x+2];
                float newB=0.272*R+0.534*G+0.131*B;
                float newG=0.349*R+0.686*G+0.168*B;
                float newR=0.393*R+0.769*G+0.189*B;
                // Clamp to [0,255] before narrowing back to uchar.
                if(newB<0)newB=0;
                if(newB>255)newB=255;
                if(newG<0)newG=0;
                if(newG>255)newG=255;
                if(newR<0)newR=0;
                if(newR>255)newR=255;
                P1[3*x] = (uchar)newB;
                P1[3*x+1] = (uchar)newG;
                P1[3*x+2] = (uchar)newR;
            }
        }
        //imshow("out",img);
        waitKey();  // no-op without a visible window; kept from the original
        imwrite(output,img);
        return 0;
    }

    浮雕和雕刻效果

    #include <cv.h>  
    #include <highgui.h>  
    
    #pragma comment( lib, "cv.lib" )  
    #pragma comment( lib, "cxcore.lib" )  
    #pragma comment( lib, "highgui.lib" )  
    
    int main()  
    {  
        // Emboss effect: each pixel becomes (next diagonal neighbour - pixel) + 128.
        // Swapping the operands (commented line below) gives the engraving effect.
        IplImage *org=cvLoadImage("1.jpg",1);  
        if(!org)  
        {  
            return -1;  // fail fast: cvCloneImage(NULL) would crash  
        }  
        IplImage *image=cvCloneImage(org);  
        int width=image->width;  
        int height=image->height;  
        int step=image->widthStep;  
        int channel=image->nChannels;  
        uchar* data=(uchar *)image->imageData;  
        for(int i=0;i<width-1;i++)  
        {  
            for(int j=0;j<height-1;j++)  
            {  
                for(int k=0;k<channel;k++)  
                {  
                    // emboss (was garbled across a line break in the original paste)
                    int temp = data[(j+1)*step+(i+1)*channel+k]-data[j*step+i*channel+k]+128;  
                    //int temp = data[j*step+i*channel+k]-data[(j+1)*step+(i+1)*channel+k]+128; // engrave  
                    // clamp to the valid 8-bit range
                    if(temp>255)  
                    {  
                        data[j*step+i*channel+k]=255;  
                    }  
                    else if(temp<0)  
                    {  
                        data[j*step+i*channel+k]=0;  
                    }  
                    else  
                    {  
                        data[j*step+i*channel+k]=temp;  
                    }  
                }  
            }  
        }  
        cvNamedWindow("original",1);  
        cvShowImage("original",org);  
        cvNamedWindow("image",1);  
        cvShowImage("image",image);  
        cvWaitKey(0);   
        cvDestroyAllWindows();  
        cvReleaseImage(&image);  
        cvReleaseImage(&org);  
        return 0;  
    }

    图像褶皱效果

    #include <cv.h>  
    #include <highgui.h>  
    
    #pragma comment( lib, "cv.lib" )  
    #pragma comment( lib, "cxcore.lib" )  
    #pragma comment( lib, "highgui.lib" )  
    
    int main()  
    {  
        // "Wrinkle" effect: every row is shifted left by an amount that ramps
        // down then up with a period of `cycle` rows.
        IplImage *org=cvLoadImage("lena.jpg",1);  
        if(!org)  
        {  
            return -1;  // fail fast: cvCloneImage(NULL) would crash  
        }  
        IplImage *image=cvCloneImage(org);  
        int width=image->width;  
        int height=image->height;  
        int step=image->widthStep;  
        int channel=image->nChannels;  
        uchar* data=(uchar *)image->imageData;  
        for(int i=0;i<height;i++)  
        {     
            int cycle=10;  
            int margin=(i%cycle);  
            // Alternate the ramp direction every `cycle` rows.
            if((i/cycle)%2==0)  
            {  
                margin=cycle-margin;    // descending shift on even cycles  
            }  
            // Shift the row left by `margin` pixels. (The original had two
            // identical copies of this loop, one per direction — merged.)
            for(int j=0;j<width-margin;j++)  
            {             
                for(int k=0;k<channel;k++)  
                {  
                    data[i*step+j*channel+k]=data[i*step+(j+margin)*channel+k];  
                }  
            }  
        }  
        cvNamedWindow("original",1);  
        cvShowImage("original",org);  
        cvNamedWindow("image",1);  
        cvShowImage("image",image);  
        cvSaveImage("image.jpg",image);  
        cvWaitKey(0);   
        cvDestroyAllWindows();  
        cvReleaseImage(&image);  
        cvReleaseImage(&org);  
        return 0;  
    }

    Grabcut算法

    #include "stdafx.h"  
      
    #include "opencv2/highgui/highgui.hpp"  
    #include "opencv2/imgproc/imgproc.hpp"  
      
    #include <iostream>  
      
    #include "ComputeTime.h"  
    #include "windows.h"  
      
    using namespace std;  
    using namespace cv;  
      
    static void help()  
    {  
        cout << "\nThis program demonstrates GrabCut segmentation -- select an object in a 
    
    region\n"  
            "and then grabcut will attempt to segment it out.\n"  
            "Call:\n"  
            "./grabcut <image_name>\n"  
            "\nSelect a rectangular area around the object you want to segment\n" <<  
            "\nHot keys: \n"  
            "\tESC - quit the program\n"  
            "\tr - restore the original image\n"  
            "\tn - next iteration\n"  
            "\n"  
            "\tleft mouse button - set rectangle\n"  
            "\n"  
            "\tCTRL+left mouse button - set GC_BGD pixels\n"  
            "\tSHIFT+left mouse button - set CG_FGD pixels\n"  
            "\n"  
            "\tCTRL+right mouse button - set GC_PR_BGD pixels\n"  
            "\tSHIFT+right mouse button - set CG_PR_FGD pixels\n" << endl;  
    }  
      
    const Scalar RED = Scalar(0,0,255);             // BGR order: definite foreground dots  
    const Scalar PINK = Scalar(230,130,255);        // probable foreground dots  
    const Scalar BLUE = Scalar(255,0,0);            // definite background dots  
    const Scalar LIGHTBLUE = Scalar(255,255,160);   // probable background dots  
    const Scalar GREEN = Scalar(0,255,0);           // selection rectangle  
      
    const int BGD_KEY = CV_EVENT_FLAG_CTRLKEY;  // Ctrl key marks background strokes  
    const int FGD_KEY = CV_EVENT_FLAG_SHIFTKEY; // Shift key marks foreground strokes  
      
    /* Reduce a GrabCut mask (values GC_BGD..GC_PR_FGD, 0..3) to a binary mask. */  
    static void getBinMask( const Mat& comMask, Mat& binMask )  
    {  
        if( comMask.empty() || comMask.type()!=CV_8UC1 )  
            CV_Error( CV_StsBadArg, "comMask is empty or has incorrect type (not CV_8UC1)" );  
        if( binMask.empty() || binMask.rows!=comMask.rows || binMask.cols!=comMask.cols )  
            binMask.create( comMask.size(), CV_8UC1 );  
        // Keep only the lowest bit: GC_FGD(1) and GC_PR_FGD(3) survive as 1,
        // i.e. definite + probable foreground form the mask.
        // (fixed: this comment was garbled across a blank line, breaking the build)
        binMask = comMask & 1;  
    }
      
    // Interactive GrabCut front-end: owns the mask/models, records user
    // strokes and the selection rectangle, and drives grabCut() iterations.
    class GCApplication  
    {  
    public:  
        // State machine values shared by the rectangle and both stroke kinds.
        enum{ NOT_SET = 0, IN_PROCESS = 1, SET = 2 };  
        static const int radius = 2;        // radius of painted seed dots  
        static const int thickness = -1;    // -1 = filled circles  
      
        void reset();  
        void setImageAndWinName( const Mat& _image, const string& _winName );  
        void showImage() const;  
        void mouseClick( int event, int x, int y, int flags, void* param );  
        int nextIter();  
        int getIterCount() const { return iterCount; }  
    private:  
        void setRectInMask();  
        void setLblsInMask( int flags, Point p, bool isPr );  
      
        const string* winName;      // display window name (not owned)  
        const Mat* image;           // source image (not owned)  
        Mat mask;                   // per-pixel GC_* labels; in/out for grabCut  
        Mat bgdModel, fgdModel;     // GMM models carried across iterations  
      
        uchar rectState, lblsState, prLblsState;    // NOT_SET/IN_PROCESS/SET  
        bool isInitialized;         // true once grabCut has run at least once  
      
        Rect rect;                  // user-drawn bounding rectangle  
        vector<Point> fgdPxls, bgdPxls, prFgdPxls, prBgdPxls;   // seed dots for display  
        int iterCount;  
    };
      
    /* Restore the pristine state: clear the mask, drop all recorded
       seed points and reset every state flag. */
    void GCApplication::reset()
    {
        if( !mask.empty() )
            mask.setTo(Scalar::all(GC_BGD));
    
        bgdPxls.clear();
        fgdPxls.clear();
        prBgdPxls.clear();
        prFgdPxls.clear();
    
        isInitialized = false;
        iterCount = 0;
        rectState = lblsState = prLblsState = NOT_SET;  // NOT_SET == 0
    }
      
    /* Store non-owning pointers to the image and window name, size the
       mask to match the image, then reset all interaction state. */
    void GCApplication::setImageAndWinName( const Mat& _image, const string& _winName  )
    {
        if( _image.empty() || _winName.empty() )
            return;
    
        winName = &_winName;
        image = &_image;
        mask.create( image->size(), CV_8UC1);
        reset();
    }
      
    /*显示4个点,一个矩形和图像内容,因为后面的步骤很多地方都要用到这个函数,所以单独拿出来*/  
    void GCApplication::showImage() const  
    {  
        if( image->empty() || winName->empty() )  
            return;  
      
        Mat res;  
        Mat binMask;  
        if( !isInitialized )  
            image->copyTo( res );  
        else  
        {  
            getBinMask( mask, binMask );  
            image->copyTo( res, binMask );  //按照最低位是0还是1来复制,只保留跟前景有关的图像
    
    ,比如说可能的前景,可能的背景  
        }  
      
        vector<Point>::const_iterator it;  
        /*下面4句代码是将选中的4个点用不同的颜色显示出来*/  
        for( it = bgdPxls.begin(); it != bgdPxls.end(); ++it )  //迭代器可以看成是一个指针  
            circle( res, *it, radius, BLUE, thickness );  
        for( it = fgdPxls.begin(); it != fgdPxls.end(); ++it )  //确定的前景用红色表示  
            circle( res, *it, radius, RED, thickness );  
        for( it = prBgdPxls.begin(); it != prBgdPxls.end(); ++it )  
            circle( res, *it, radius, LIGHTBLUE, thickness );  
        for( it = prFgdPxls.begin(); it != prFgdPxls.end(); ++it )  
            circle( res, *it, radius, PINK, thickness );  
      
        /*画矩形*/  
        if( rectState == IN_PROCESS || rectState == SET )  
            rectangle( res, Point( rect.x, rect.y ), Point(rect.x + rect.width, rect.y + 
    
    rect.height ), GREEN, 2);  
      
        imshow( *winName, res );  
    }  
      
    /* After this call the mask is GC_PR_FGD (3) inside the clipped
       rectangle and GC_BGD (0) everywhere else. */
    void GCApplication::setRectInMask()
    {
        assert( !mask.empty() );
        mask.setTo( GC_BGD );   // GC_BGD == 0
    
        // Clip the user rectangle to the image bounds.
        rect.x = max(0, rect.x);
        rect.y = max(0, rect.y);
        rect.width = min(rect.width, image->cols-rect.x);
        rect.height = min(rect.height, image->rows-rect.y);
    
        // Everything inside the rectangle is a *probable* foreground pixel.
        mask(rect).setTo( Scalar(GC_PR_FGD) );
    }
      
    void GCApplication::setLblsInMask( int flags, Point p, bool isPr )  
    {  
        vector<Point> *bpxls, *fpxls;  
        uchar bvalue, fvalue;  
        if( !isPr ) //确定的点  
        {  
            bpxls = &bgdPxls;  
            fpxls = &fgdPxls;  
            bvalue = GC_BGD;    //0  
            fvalue = GC_FGD;    //1  
        }  
        else    //概率点  
        {  
            bpxls = &prBgdPxls;  
            fpxls = &prFgdPxls;  
            bvalue = GC_PR_BGD; //2  
            fvalue = GC_PR_FGD; //3  
        }  
        if( flags & BGD_KEY )  
        {  
            bpxls->push_back(p);  
            circle( mask, p, radius, bvalue, thickness );   //该点处为2  
        }  
        if( flags & FGD_KEY )  
        {  
            fpxls->push_back(p);  
            circle( mask, p, radius, fvalue, thickness );   //该点处为3  
        }  
    }  
      
    /* Mouse callback dispatcher; `flags` is a combination of CV_EVENT_FLAG_*. */  
    void GCApplication::mouseClick( int event, int x, int y, int flags, void* )  
    {  
        // TODO add bad args check  
        switch( event )  
        {  
        case CV_EVENT_LBUTTONDOWN: // set rect or GC_BGD(GC_FGD) labels  
            {  
                bool isb = (flags & BGD_KEY) != 0,  
                    isf = (flags & FGD_KEY) != 0;  
                if( rectState == NOT_SET && !isb && !isf )  // plain left click: start the rectangle  
                {  
                    rectState = IN_PROCESS;  
                    rect = Rect( x, y, 1, 1 );  
                }  
                // Ctrl/Shift + left click after the rectangle is set: start a
                // definite background/foreground stroke. (fixed: this comment
                // was garbled across a blank line and wrongly said "alt key")
                if ( (isb || isf) && rectState == SET )  
                    lblsState = IN_PROCESS;  
            }  
            break;  
        case CV_EVENT_RBUTTONDOWN: // set GC_PR_BGD(GC_PR_FGD) labels  
            {  
                bool isb = (flags & BGD_KEY) != 0,  
                    isf = (flags & FGD_KEY) != 0;  
                if ( (isb || isf) && rectState == SET )  // start a *probable* stroke  
                    prLblsState = IN_PROCESS;  
            }  
            break;  
        case CV_EVENT_LBUTTONUP:  
            if( rectState == IN_PROCESS )  
            {  
                rect = Rect( Point(rect.x, rect.y), Point(x,y) );   // rectangle finished  
                rectState = SET;  
                setRectInMask();  
                assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() );  
                showImage();  
            }  
            if( lblsState == IN_PROCESS )   // finish a definite-label stroke  
            {  
                setLblsInMask(flags, Point(x,y), false);  
                lblsState = SET;  
                showImage();  
            }  
            break;  
        case CV_EVENT_RBUTTONUP:  
            if( prLblsState == IN_PROCESS )  
            {  
                setLblsInMask(flags, Point(x,y), true);     // finish a probable-label stroke  
                prLblsState = SET;  
                showImage();  
            }  
            break;  
        case CV_EVENT_MOUSEMOVE:  
            if( rectState == IN_PROCESS )  
            {  
                rect = Rect( Point(rect.x, rect.y), Point(x,y) );  
                assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() );  
                showImage();    // live rubber-band feedback  
            }  
            else if( lblsState == IN_PROCESS )  
            {  
                setLblsInMask(flags, Point(x,y), false);  
                showImage();  
            }  
            else if( prLblsState == IN_PROCESS )  
            {  
                setLblsInMask(flags, Point(x,y), true);  
                showImage();  
            }  
            break;  
        }  
    }
      
    /* Run one GrabCut iteration and return the total iteration count.
       `mask` is both input (seed labels) and output (segmentation). */  
    int GCApplication::nextIter()  
    {  
        if( isInitialized )  
            // Subsequent runs refine the existing models/mask.
            // (fixed: the original comment here was garbled across a blank
            // line, which broke compilation)
            grabCut( *image, mask, rect, bgdModel, fgdModel, 1 );  
        else  
        {  
            if( rectState != SET )      // nothing to do until a rectangle exists  
                return iterCount;  
      
            // First run: initialize from user strokes when present,
            // otherwise from the rectangle alone.
            if( lblsState == SET || prLblsState == SET )  
                grabCut( *image, mask, rect, bgdModel, fgdModel, 1, GC_INIT_WITH_MASK );  
            else  
                grabCut( *image, mask, rect, bgdModel, fgdModel, 1, GC_INIT_WITH_RECT );  
      
            isInitialized = true;  
        }  
        iterCount++;  
      
        // The seed dots have been consumed by this round's mask.
        bgdPxls.clear(); fgdPxls.clear();  
        prBgdPxls.clear(); prFgdPxls.clear();  
      
        return iterCount;  
    }
      
    GCApplication gcapp;    // single global instance shared with the mouse callback  
      
    /* Trampoline: forward HighGUI mouse events to the global application. */  
    static void on_mouse( int event, int x, int y, int flags, void* param )  
    {  
        gcapp.mouseClick( event, x, y, flags, param );  
    }  
      
    /* GrabCut demo driver: load the image, wire up the mouse callback and
       run the ESC / 'r' (reset) / 'n' (next iteration) key loop. */  
    int main( int argc, char** argv )  
    {  
        string filename;  
        cout<<" Grabcuts ! \n";  
        cout<<"input image name:  "<<endl;  
        cin>>filename;  
      
        Mat image = imread( filename, 1 );  
        if( image.empty() )  
        {  
            cout << "\n Durn, couldn't read image filename " << filename << endl;  
            return 1;  
        }  
      
        help();  
      
        const string winName = "image";  
        cvNamedWindow( winName.c_str(), CV_WINDOW_AUTOSIZE );  
        cvSetMouseCallback( winName.c_str(), on_mouse, 0 );  
      
        gcapp.setImageAndWinName( image, winName );  
        gcapp.showImage();  
      
        for(;;)  
        {  
            int c = cvWaitKey(0);  
            switch( (char) c )  
            {  
            case '\x1b':  
                cout << "Exiting ..." << endl;  
                goto exit_main;  
            case 'r':  
                cout << endl;  
                gcapp.reset();  
                gcapp.showImage();  
                break;  
            case 'n':  
                {   // braces give this case its own scope: without them,
                    // jumping from other labels past the initialization of
                    // ct/iterCount is ill-formed C++
                    ComputeTime ct;  
                    ct.Begin();  
      
                    int iterCount = gcapp.getIterCount();  
                    cout << "<" << iterCount << "... ";  
                    int newIterCount = gcapp.nextIter();  
                    if( newIterCount > iterCount )  
                    {  
                        gcapp.showImage();  
                        cout << iterCount << ">" << endl;  
                        cout<<"运行时间:  "<<ct.End()<<endl;  
                    }  
                    else  
                        cout << "rect must be determined>" << endl;  
                }  
                break;  
            }  
        }  
      
    exit_main:  
        cvDestroyWindow( winName.c_str() );  
        return 0;  
    }

    lazy snapping

    lazySnapping.cpp

    LazySnapping.cpp
     
    #include "stdafx.h"  
    #include <cv.h>  
    #include <highgui.h>  
    #include "graph.h"  
    #include <vector>  
    #include <iostream>  
    #include <cmath>  
    #include <string>  
      
    using namespace std;  
      
    typedef Graph<float,float,float> GraphType;  // maxflow graph from graph.h — presumably BK maxflow; cap/tcap/flow types all float  
      
    // Interactive graph-cut segmentation ("lazy snapping"): user strokes mark
    // foreground/background seeds; a maxflow cut over all pixels yields a mask.
    class LasySnapping  
    {  
          
    public :  
        LasySnapping();  
      
        // Frees the maxflow graph. The attached image is not owned.
        ~LasySnapping()  
        {   
            if(graph)  
            {  
                delete graph;  
            }  
        };  
    private :  
        vector<CvPoint> forePts;    // deduplicated foreground stroke pixels  
        vector<CvPoint> backPts;    // deduplicated background stroke pixels  
        IplImage* image;            // working image, not owned  
        // average color of foreground points  
        unsigned char avgForeColor[3];  
        // average color of background points  
        unsigned char avgBackColor[3];  
    public :  
        // Attach the working image and allocate the graph: one node per pixel,
        // capacity hint of two n-links per pixel.
        // NOTE(review): a graph from an earlier setImage call is not freed
        // here — repeated calls would leak; confirm callers invoke it once.
        void setImage(IplImage* image)  
        {  
            this->image = image;  
            graph = new GraphType(image->width*image->height,image->width*image->height*2);  
        }  
        // include-pen locus: record unique foreground stroke points and
        // compute their average BGR colour.
        // assumes a 3-channel 8-bit image — TODO confirm at the call site.
        void setForegroundPoints(vector<CvPoint> pts)  
        {  
            forePts.clear();  
            for(int i =0; i< pts.size(); i++)  
            {  
                if(!isPtInVector(pts[i],forePts))  
                {  
                    forePts.push_back(pts[i]);  
                }  
            }  
            if(forePts.size() == 0)  
            {  
                return;  
            }  
            int sum[3] = {0};  
            for(int i =0; i < forePts.size(); i++)  
            {  
                // pixel address: row offset via widthStep, 3 bytes per column
                unsigned char* p = (unsigned char*)image->imageData + forePts[i].x * 3   
                    + forePts[i].y*image->widthStep;  
                sum[0] += p[0];  
                sum[1] += p[1];  
                sum[2] += p[2];              
            }  
            cout<<sum[0]<<" " <<forePts.size()<<endl;  
            avgForeColor[0] = sum[0]/forePts.size();  
            avgForeColor[1] = sum[1]/forePts.size();  
            avgForeColor[2] = sum[2]/forePts.size();  
        }  
        // exclude-pen locus: same as above for background strokes.
        void setBackgroundPoints(vector<CvPoint> pts)  
        {  
            backPts.clear();  
            for(int i =0; i< pts.size(); i++)  
            {  
                if(!isPtInVector(pts[i],backPts))  
                {  
                    backPts.push_back(pts[i]);  
                }  
            }  
            if(backPts.size() == 0)  
            {  
                return;  
            }  
            int sum[3] = {0};  
            for(int i =0; i < backPts.size(); i++)  
            {  
                unsigned char* p = (unsigned char*)image->imageData + backPts[i].x * 3 +   
                    backPts[i].y*image->widthStep;  
                sum[0] += p[0];  
                sum[1] += p[1];  
                sum[2] += p[2];              
            }  
            avgBackColor[0] = sum[0]/backPts.size();  
            avgBackColor[1] = sum[1]/backPts.size();  
            avgBackColor[2] = sum[2]/backPts.size();  
        }  
      
        // return maxflow of graph  
        int runMaxflow();  
        // get result, a grayscale mask image indicating foreground by 255 and background by 0  
        IplImage* getImageMask();  
      
    private :  
      
        float colorDistance(unsigned char* color1, unsigned char* color2);  
        float minDistance(unsigned char* color, vector<CvPoint> points);  
        bool isPtInVector(CvPoint pt, vector<CvPoint> points);  
        void getE1(unsigned char* color,float* energy);  
        float getE2(unsigned char* color1,unsigned char* color2);  
          
        GraphType *graph;       // maxflow graph, owned; populated in runMaxflow  
    };
      
    /* Start with no graph and zeroed average colours. */
    LasySnapping::LasySnapping()
    {
        graph = NULL;
        for (int c = 0; c < 3; ++c)
        {
            avgForeColor[c] = 0;
            avgBackColor[c] = 0;
        }
    }
     
    /* Euclidean distance between two BGR colours. */
    float LasySnapping::colorDistance(unsigned char* color1, unsigned char* color2)
    {
        float sum = 0;
        for (int c = 0; c < 3; ++c)
        {
            float d = (float)color1[c] - (float)color2[c];
            sum += d * d;
        }
        return sqrt(sum);
    }
      
    /* Smallest colour distance from `color` to any of the given stroke
       pixels; returns -1 when the point list is empty. */  
    float LasySnapping::minDistance(unsigned char* color, vector<CvPoint> points)  
    {  
        float distance = -1;  
        for(size_t i = 0; i < points.size(); i++)  
        {  
            // fixed: "image->widthStep" was garbled across a blank line,
            // which broke compilation
            unsigned char* p = (unsigned char*)image->imageData  
                + points[i].y * image->widthStep  
                + points[i].x * image->nChannels;  
            float d = colorDistance(p,color);  
            // keep the first distance, then track the minimum
            if(distance < 0 || d < distance)  
            {  
                distance = d;  
            }  
        }  
        return distance;  
    }
      
    /* True when pt already occurs (by coordinates) in points. */
    bool LasySnapping::isPtInVector(CvPoint pt, vector<CvPoint> points)
    {
        for (size_t i = 0; i < points.size(); ++i)
        {
            const CvPoint& q = points[i];
            if (q.x == pt.x && q.y == pt.y)
                return true;
        }
        return false;
    }
    /* Likelihood (data) energy: energy[0]/energy[1] measure how strongly a
       colour resembles the average foreground / background colour. */
    void LasySnapping::getE1(unsigned char* color,float* energy)
    {
        // distance to the average stroke colours
        // (minDistance against the raw stroke points is an alternative)
        float df = colorDistance(color,avgForeColor);
        float db = colorDistance(color,avgBackColor);
        float total = db + df;
        energy[0] = df / total;
        energy[1] = db / total;
    }
      
    /* Smoothness energy: large for similar neighbouring colours, so the
       cut prefers to run along strong colour edges. */
    float LasySnapping::getE2(unsigned char* color1,unsigned char* color2)
    {
        const float EPSILON = 0.01;
        const float lambda = 100;
    
        float d2 = 0;
        for (int c = 0; c < 3; ++c)
        {
            float d = color1[c] - color2[c];
            d2 += d * d;
        }
        return lambda / (EPSILON + d2);
    }
      
    /* Build the pixel graph — data term E1 as t-links, smoothness term E2
       as n-links to the left/upper neighbours — and return its maxflow. */  
    int LasySnapping::runMaxflow()  
    {     
        const float INFINNITE_MAX = 1e10;  
        int indexPt = 0;  // node index == pixel index in row-major order  
        for(int h = 0; h < image->height; h ++)  
        {  
            unsigned char* p = (unsigned char*)image->imageData + h *image->widthStep;  
            for(int w = 0; w < image->width; w ++)  
            {  
                // calculate energy E1: hard-constrain stroke pixels, otherwise
                // use the distance to the average stroke colours
                float e1[2]={0};  
                if(isPtInVector(cvPoint(w,h),forePts))  
                {  
                    e1[0] =0;  
                    e1[1] = INFINNITE_MAX;  
                }  
                else if  
                    (isPtInVector(cvPoint(w,h),backPts))  
                {  
                    e1[0] = INFINNITE_MAX;  
                    e1[1] = 0;  
                }  
                else   
                {  
                    getE1(p,e1);  
                }  
      
                // add node  
                graph->add_node();  
                graph->add_tweights(indexPt, e1[0],e1[1]);  
      
                // add edges, 4-connect: to the left and upper neighbours.
                // NOTE(review): the combined `h > 0 && w > 0` guard means row 0
                // gets no horizontal links and column 0 no vertical ones —
                // confirm this is intended rather than `h>0`/`w>0` separately.
                if(h > 0 && w > 0)  
                {  
                    float e2 = getE2(p,p-3);                    // left neighbour (3 bytes back)  
                    graph->add_edge(indexPt,indexPt-1,e2,e2);  
                    e2 = getE2(p,p-image->widthStep);           // upper neighbour (one row back)  
                    graph->add_edge(indexPt,indexPt-image->width,e2,e2);  
                }  
                  
                p+= 3;  // advance one 3-channel pixel  
                indexPt ++;              
            }  
        }  
          
        return graph->maxflow();  
    }
      
    /* Build an 8-bit mask from the computed cut: SOURCE-side pixels become 0
       (background), all others 255 (foreground). Caller owns the result. */
    IplImage* LasySnapping::getImageMask()
    {
        IplImage* gray = cvCreateImage(cvGetSize(image),8,1);
        int indexPt = 0;
        for(int h = 0; h < image->height; ++h)
        {
            unsigned char* row = (unsigned char*)gray->imageData + h*gray->widthStep;
            for(int w = 0; w < image->width; ++w, ++indexPt)
            {
                bool isSource = (graph->what_segment(indexPt) == GraphType::SOURCE);
                row[w] = isSource ? 0 : 255;
            }
        }
        return gray;
    }
      
    // ---- global UI state shared between main() and the mouse callback ----  
    vector<CvPoint> forePts;                // collected foreground stroke points  
    vector<CvPoint> backPts;                // collected background stroke points  
    int currentMode = 0;                    // 0 = foreground pen, 1 = background pen  
    CvScalar paintColor[2] = {CV_RGB(0,0,255),CV_RGB(255,0,0)};     // pen colours per mode  
      
    IplImage* image = NULL;                 // loaded source image  
    // fixed: string literals are const — binding one to `char*` is invalid C++11
    const char* winName = "lazySnapping";  
    IplImage* imageDraw = NULL;             // copy of image that strokes are painted onto  
    const int SCALE = 4;                    // segmentation runs on a 1/SCALE downscale  
      
    /* Left-drag records stroke points and paints them; releasing the button
       runs lazy snapping on a 1/SCALE image and overlays the cut contour. */  
    void on_mouse( int event, int x, int y, int flags, void* )  
    {      
        if( event == CV_EVENT_LBUTTONUP )  
        {  
            if(backPts.size() == 0 && forePts.size() == 0)  
            {  
                return;     // nothing marked yet  
            }  
            LasySnapping ls;  
            // run segmentation on a downscaled copy for speed
            IplImage* imageLS = cvCreateImage(cvSize(image->width/SCALE,image->height/SCALE),  
                8,3);  
            cvResize(image,imageLS);  
            ls.setImage(imageLS);  
            ls.setBackgroundPoints(backPts);  
            ls.setForegroundPoints(forePts);  
            ls.runMaxflow();  
            IplImage* mask = ls.getImageMask();  
            IplImage* gray = cvCreateImage(cvGetSize(image),8,1);  
            cvResize(mask,gray);  
            // trace the mask boundary  
            cvCanny(gray,gray,50,150,3);  
              
            IplImage* showImg = cvCloneImage(imageDraw);  
            for(int h =0; h < image->height; h ++)  
            {  
                unsigned char* pgray = (unsigned char*)gray->imageData + gray->widthStep*h;  
                // fixed: "showImg->widthStep" was garbled across a blank line,
                // which broke compilation
                unsigned char* pimage = (unsigned char*)showImg->imageData + showImg->widthStep*h;  
                for(int width  =0; width < image->width; width++)  
                {  
                    if(*pgray++ != 0 )  
                    {  
                        // paint the contour green (BGR)  
                        pimage[0] = 0;  
                        pimage[1] = 255;  
                        pimage[2] = 0;  
                    }  
                    pimage+=3;                  
                }  
            }  
            cvSaveImage("t.bmp",showImg);  
            cvShowImage(winName,showImg);  
            cvReleaseImage(&imageLS);  
            cvReleaseImage(&mask);  
            cvReleaseImage(&showImg);  
            cvReleaseImage(&gray);  
        }  
        else if( event == CV_EVENT_LBUTTONDOWN )  
        {  
            // stroke recording begins on the first MOUSEMOVE with the button held  
        }  
        else if( event == CV_EVENT_MOUSEMOVE && (flags & CV_EVENT_FLAG_LBUTTON))  
        {  
            CvPoint pt = cvPoint(x,y);  
            if(currentMode == 0)  
            {//foreground  
                forePts.push_back(cvPoint(x/SCALE,y/SCALE));  
            }  
            else  
            {//background  
                backPts.push_back(cvPoint(x/SCALE,y/SCALE));  
            }  
            cvCircle(imageDraw,pt,2,paintColor[currentMode]);  
            cvShowImage(winName,imageDraw);  
        }  
    }
    /* Lazy-snapping demo driver: ESC quits, 'r' resets, 'b'/'f' switch pens. */  
    int main(int argc, char** argv)  
    {     
        string image_name;  
        cout<<"input image name: "<<endl;  
        cin>>image_name;  
      
        cvNamedWindow(winName,1);  
        cvSetMouseCallback( winName, on_mouse, 0);  
          
        image = cvLoadImage(image_name.c_str(),CV_LOAD_IMAGE_COLOR);  
        // fail fast on a bad path: cvCloneImage(NULL) would crash
        if(!image)  
        {  
            cout<<"could not load image: "<<image_name<<endl;  
            return 1;  
        }  
        imageDraw = cvCloneImage(image);  
        cvShowImage(winName, image);  
        for(;;)  
        {  
            int c = cvWaitKey(0);  
            c = (char)c;  
            if(c == 27)  
            {//exit  
                break;  
            }  
            else if(c == 'r')  
            {//reset  
                // release the previous frames before reloading (was leaking)
                cvReleaseImage(&image);  
                cvReleaseImage(&imageDraw);  
                image = cvLoadImage(image_name.c_str(),CV_LOAD_IMAGE_COLOR);  
                imageDraw = cvCloneImage(image);  
                forePts.clear();  
                backPts.clear();  
                currentMode = 0;  
                cvShowImage(winName, image);  
            }  
            else if(c == 'b')  
            {//change to background selection  
                currentMode = 1;  
            }else if(c == 'f')  
            {//change to foreground selection  
                currentMode = 0;  
            }  
        }  
        cvReleaseImage(&image);  
        cvReleaseImage(&imageDraw);  
        return 0;  
    }

    由汉字生成图片

    AddChinese.cpp

    #include "stdafx.h"    
      
    #include <opencv2/core/core.hpp>    
    #include <opencv2/highgui/highgui.hpp>  
    #include "CvxText.h"  
      
    #pragma comment(lib,"freetype255d.lib")  
    #pragma comment(lib,"opencv_core2410d.lib")                  
    #pragma comment(lib,"opencv_highgui2410d.lib")                  
    #pragma comment(lib,"opencv_imgproc2410d.lib")     
      
    using namespace std;  
    using namespace cv;  
      
    #define ROW_BLOCK 2  
    #define COLUMN_Block 2  
      
    // writePng.cpp : defines the entry point for the console application.  
    /* Save `mat` as a maximally-compressed PNG named image_name.
       Returns 0 on success, 1 on failure. */  
    int run_test_png(Mat &mat,string image_name)  
    {  
        vector<int> compression_params;  
        compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);  
        compression_params.push_back(9);    // 0..9; OpenCV's default PNG level is 3  
        try   
        {  
            imwrite(image_name, mat, compression_params);  
        }  
        catch (const runtime_error& ex)     // catch by const reference  
        {  
            fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what());  
            return 1;  
        }  
        fprintf(stdout, "Saved PNG file with alpha data.\n");  
      
        waitKey(0);  
        return 0;  
    }
      
    /* Colour the glyph: wherever template_src is black (0), write `color`
       fully opaque into mat_png; elsewhere make the pixel transparent.
       Always returns 0. */
    int coloured(Mat &template_src, Mat &mat_png, CvScalar color)
    {
        for (int i = 0; i < template_src.rows; ++i)
        {
            for (int j = 0; j < template_src.cols; ++j)
            {
                Vec4b& bgra = mat_png.at<Vec4b>(i, j);
                const bool onGlyph = (template_src.at<uchar>(i, j) == 0);
                if (onGlyph)
                {
                    bgra[0] = color.val[0];   // B
                    bgra[1] = color.val[1];   // G
                    bgra[2] = color.val[2];   // R
                    bgra[3] = 255;            // fully opaque
                }
                else
                {
                    bgra[3] = 0;              // fully transparent
                }
            }
        }
        return 0;
    }
      
    /* Binarize an 8-bit image in place with an adaptive (Otsu-style)
       threshold derived from its normalized histogram. */  
    void ImageBinarization(IplImage *src)  
    {  
        int i,j,width,height,step,chanel,threshold;  
        /* size = pixel count, avg = histogram mean, va = spread measure */  
        float size,avg,va,maxVa,p,a,s;  
        unsigned char *dataSrc;  
        float histogram[256];  
      
        width = src->width;  
        height = src->height;  
        dataSrc = (unsigned char *)src->imageData;  
        step = src->widthStep/sizeof(char);  
        chanel = src->nChannels;  
      
        /* Build the histogram. (The original index "x-'0'+48" was a no-op
           since '0' == 48 — simplified.) */  
        for(i=0; i<256; i++)  
            histogram[i] = 0;  
        for(i=0; i<height; i++)  
            for(j=0; j<width*chanel; j++)  
                histogram[dataSrc[i*step+j]]++;  
        // NOTE(review): for multi-channel input the normalized bins sum to
        // `chanel`, not 1; threshold selection still works relatively.
        size = width * height;  
        for(i=0; i<256; i++)  
            histogram[i] /= size;  
      
        /* Mean and (absolute) variance of the grey distribution. */  
        avg = 0;  
        for(i=0; i<256; i++)  
            avg += i*histogram[i];  
        va = 0;  
        for(i=0; i<256; i++)  
            va += fabs(i*i*histogram[i]-avg*avg);  
      
        /* Pick the threshold maximizing the between-class variance. */  
        threshold = 20;  
        maxVa = 0;  
        p = a = s = 0;  
        for(i=0; i<256; i++)  
        {  
            p += histogram[i];  
            a += i*histogram[i];  
            if(p <= 0 || p >= 1)    /* guard the p/(1-p) division (was inf/NaN) */  
                continue;  
            s = (avg*p-a)*(avg*p-a)/p/(1-p);  
            if(s > maxVa)  
            {  
                threshold = i;  
                maxVa = s;  
            }  
        }  
      
        /* Apply the threshold in place. */  
        for(i=0; i<height; i++)  
            for(j=0; j<width*chanel; j++)  
                dataSrc[i*step+j] = (dataSrc[i*step+j] > threshold) ? 255 : 0;  
    }
      
    Mat binaryzation(Mat &src)  
    {  
        // Convert a BGR image to grayscale, binarize it with the adaptive
        // threshold routine, display the result, and return it.
        Mat grayMat(src.size(), CV_8UC1);  
        cvtColor(src, grayMat, CV_BGR2GRAY);  
  
        // ImageBinarization operates on the legacy IplImage view. The header
        // shares pixel data with grayMat, so grayMat is modified in place.
        IplImage legacyHeader(grayMat);  
        ImageBinarization(&legacyHeader);  
  
        imshow("二值图像", grayMat);  
        return grayMat;  
    }  
      
    int generate_chinese(const int size_zi, const char *msg ,int number,CvScalar color)  
    {  
        //int size_zi = 50;//字体大小  
        CvSize czSize;  //目标图像尺寸  
        float p = 0.5;  
        CvScalar fsize;  
      
      
        //读取TTF字体文件  
        CvxText text("simhei.ttf");       
      
        //设置字体属性 字体大小/空白比例/间隔比例/旋转角度  
        fsize = cvScalar(size_zi, 1, 0.1, 0);  
        text.setFont(NULL, &fsize, NULL, &p);        
      
        czSize.width = size_zi*number;  
        czSize.height = size_zi;  
        //加载原图像  
        IplImage* ImageSrc = cvCreateImage(czSize,IPL_DEPTH_8U,3);//cvLoadImage(Imagename, 
    
    CV_LOAD_IMAGE_UNCHANGED);  
        //Mat image(ImageSrc);  
        //createAlphaMat(image);  
        //ImageSrc = ℑ  
      
        //IplImage temp(image);   
        //ImageSrc = &temp;  
      
        //设置原图像文字  
        text.putText(ImageSrc, msg, cvPoint(1, size_zi), color);   
      
        //显示原图像  
        cvShowImage("原图", ImageSrc);  
      
      
        string hanzi = msg;  
        hanzi = hanzi + ".png";  
      
        Mat chinese(ImageSrc,true);  
        Mat gray = binaryzation(chinese);  
      
        imwrite("chinese_gray.jpg",gray);  
      
        Mat mat_png(chinese.size(),CV_8UC4);  
        coloured(gray,mat_png,color);  
        run_test_png(mat_png,hanzi);  
        //  
        ////cvSaveImage("hanzi.jpg",reDstImage);  
        //run_test_png(chinese,hanzi);  
        //等待按键事件  
        cvWaitKey();  
        return 0;  
    }  
      
    int main()  
    {  
        CvScalar color = CV_RGB(0,0,0);  
        int size = 200;  
        const char* msg = "你好a";//暂时一行字不要太长  
      
        int number = 3;//字符个数  
      
        generate_chinese(size,msg,number,color);  
          
      
        return 0;  
    }


    展开全文
  • OpenCV图像处理编程实例》以OpenCV开源库为基础实现图像处理领域的很多通用算法,并结合当今图像处理领域前沿技术,对多个典型工程实例进行讲解及实现。全书内容覆盖面广,由基础到进阶,各个技术点均提供详细的...
  • OpenCV图像处理编程实例》以OpenCV开源库为基础实现图像处理领域的很多通用算法,并结合当今图像处理领域前沿技术,对多个典型工程实例进行讲解及实现。全书内容覆盖面广,由基础到进阶,各个技术点均提供详细的...
  • OpenCV图像处理编程实例》-源码,最新更新20160801,支持OpenCV3.1+VS2015,修改若干程序中错误,如遇问题欢迎反馈到zhu1988wei@163.com
  • OpenCV图像处理编程实例》以OpenCV开源库为基础实现图像处理领域的很多通用算法,并结合当今图像处理领域前沿技术,对多个典型工程实例进行讲解及实现。全书内容覆盖面广,由基础到进阶,各个技术点均提供详细的...
  • 高清版的《opencv图像处理编程实例》,作者:朱伟,开发环境vs2015+opencv3.1,里面有详细的安装教程
  • 详细描述了OpenCV图像处理编程案例,代码及书籍。工程可以运行。
  • 说明:1、 OpenCV图像处理编程实例所有代码已更新支持版本 OpenCV3.1.0+vs2015;2、修复了若干cpp中的错误及BUG;3、如若对代码存有疑问或发现其中的错误,敬请批评指正,谢谢,联系邮箱:zw301289@163.com;4、为...

    说明:

    1、 OpenCV图像处理编程实例所有代码已更新支持版本 OpenCV3.1.0+vs2015;

    2、修复了若干cpp中的错误及BUG;

    3、如若对代码存有疑问或发现其中的错误,敬请批评指正,谢谢,联系邮箱:zhu1988wei@163.com;

    4、为方便读者进一步了解OpenCV相关知识,该博客将增加对相关内容的介绍;

    5、读者可到以下网址下载本书相关程序

         http://download.csdn.net/detail/zhuwei1988/9596692
    

    6、版权所有,引用或摘录本书内容或代码时,请联系作者,并注明引用于《OpenCV图像处理编程实例》,知识宝贵,重在分享!

    展开全文
  • OpenCV图像处理编程实例-朱伟等编著-PDF电子版-附书中源码
  • OpenCV图像处理编程实例》例程复现 随书代码下载:http://www.broadview.com.cn/28573 总结+遇到的issue解决: 第一章 初识OpenCV 1.VS2015安装OpenCV2.4.11 四步骤: 1)步骤一:下载Windows安装包OpenCV...

    《OpenCV图像处理编程实例》例程复现

    随书代码下载:http://www.broadview.com.cn/28573

     

    总结+遇到的issue解决:

    第一章 初识OpenCV

    1.VS2015安装OpenCV2.4.11

    四步骤:

    1)步骤一:下载Windows安装包OpenCV2.4.11并解压,本人为:D:\ProgramFiles\opencv2

    2)步骤二:配置OpenCV2.4.11的环境变量,本人为:D:\ProgramFiles\opencv2\opencv\build\x86\vc12\bin

    注意:此处实际此处与系统32位还是64位无关,而与在VS2015工程项目中配置的平台相关,所以,本人电脑64位系统,若添加D:\ProgramFiles\opencv2\opencv\build\x86\vc12\bin,在VS2015工程项目中配置的平台中选择x86,而若若添加D:\ProgramFiles\opencv2\opencv\build\x86\vc12\bin D:\ProgramFiles\opencv2\opencv\build\x64\vc12\bin在VS2015工程项目中配置的平台选择x86和x64均可但是步骤三配置VS工程目录要对应。

    3)步骤三:配置VS工程目录

    包括:新建空白项目Win32控制台应用程序,属性页面:配置:选择“Debug”平台选择“Win32”需要配置3处:VC++目录(包含目录、库目录)和链接器——“输入”(附加依赖项)

    选择“Release”平台选择“Win32”需要配置3处:同上。

    本人配置的为:配置:选择“Debug”平台选择“Win32”需要配置3处:VC++目录(包含目录:D:\ProgramFiles\opencv2\opencv\build\include、D:\ProgramFiles\opencv2\opencv\build\include\opencv、D:\ProgramFiles\opencv2\opencv\build\include\opencv2;库目录:D:\ProgramFiles\opencv2\opencv\build\x86\vc12\lib)和链接器——“输入”(附加依赖项:opencv_calib3d2411d.lib
    opencv_contrib2411d.lib
    opencv_core2411d.lib
    opencv_features2d2411d.lib
    opencv_flann2411d.lib
    opencv_gpu2411d.lib
    opencv_highgui2411d.lib
    opencv_imgproc2411d.lib
    opencv_legacy2411d.lib
    opencv_ml2411d.lib
    opencv_nonfree2411d.lib
    opencv_objdetect2411d.lib
    opencv_ocl2411d.lib
    opencv_photo2411d.lib
    opencv_stitching2411d.lib
    opencv_superres2411d.lib
    opencv_ts2411d.lib
    opencv_video2411d.lib
    opencv_videostab2411d.lib

    配置:选择“Release”平台选择“Win32”需要配置3处:VC++目录(包含目录:D:\ProgramFiles\opencv2\opencv\build\include、D:\ProgramFiles\opencv2\opencv\build\include\opencv、D:\ProgramFiles\opencv2\opencv\build\include\opencv2;库目录:D:\ProgramFiles\opencv2\opencv\build\x86\vc12\lib)和链接器——“输入”(附加依赖项:opencv_calib3d2411.lib
    opencv_contrib2411.lib
    opencv_core2411.lib
    opencv_features2d2411.lib
    opencv_flann2411.lib
    opencv_gpu2411.lib
    opencv_highgui2411.lib
    opencv_imgproc2411.lib
    opencv_legacy2411.lib
    opencv_ml2411.lib
    opencv_nonfree2411.lib
    opencv_objdetect2411.lib
    opencv_ocl2411.lib
    opencv_photo2411.lib
    opencv_stitching2411.lib
    opencv_superres2411.lib
    opencv_ts2411.lib
    opencv_video2411.lib
    opencv_videostab2411.lib

    注意:1)以本人配置的位列,若要配置64位系统库目录应为:D:\ProgramFiles\opencv2\opencv\build\x64\vc12\lib

       (2)若库目录配置为staticlib,这样开发出来的软件将包含opencv库,可以运行于没有安装配置OpenCV开发环境的系统中。如若配置,库目录应为:D:\ProgramFiles\opencv2\opencv\build\x64\vc12\staticlib

    4)步骤四:测试OpenCV开发环境是否正确配置。

    注意:此处的图像地址一定要正确,本人的是:D:\\Working\\opencvimg\\images\\flower.jpg

     

    2.Sublime下配置OpenCV

    1)Sublime下载及汉化:http://www.onlinedown.net/soft/68602.htm

     详细配置建议参考Lucida的博文:http://zh.lucida.me/blog/sublime-text-complete-guide/

     安装后要配置环境变量,本人的为:C:\Program Files (x86)\Sublime Text 3

    2)下载安装MinGW

    下载:https://sourceforge.net/projects/mingw/

     本人网络特慢,所以给出其他下载链接:https://jingyan.baidu.com/article/0320e2c11564ca1b87507b8f.html

     安装后要配置环境变量,本人的为:D:\ProgramFiles\MinGW-V5.1.6\MinGW\bin

    测试:测试是否安装及配置成功,windows + r 组合键调出命令提示符,然后输入cmd,然后输入gcc -v,没有报错的话,说明安装配置成功。

    3)下载安装OpenCV,制作MinGW版本库。

    Sublime下需要利用MinGW进行编译链接OpenCV2.4.x,因此需要配置相应的环境变量,而2.4.4配置为D:\ProgramFiles\opencv2.4.4\opencv\build\x86\mingw\bin但是2.4.11没有此文件夹

    说明:opencv2.4.4官网的后续OpenCV2.4.x软件开发包没有提供相关的MinGW版本库,若使用新版本,需要通过CMake来自己制作。

    具体方法为:Windows平台安装最新OpenCV-2.4.9,利用Eclipse、MinGW构建C++调用OpenCV开发环境——https://blog.csdn.net/yanzi1225627/article/details/26408579

    还要参看本人另一篇博客:OpenCV+Qt+CMake安装+十种踩坑

    这篇博客是QT的MinGW编译OpenCV,里边附带CMake的下载,安装及编译过程。

     

    CMake来自己制作MinGW库要到的issue汇总(1)OpenCV 2.4.11 Compile Issues in “operations.hpp”4101

    error: got 2 template parameters but 1 required

    解决:打开operations.hpp将4098-4105此函数注释

    参考:https://stackoverflow.com/questions/11777958/opencv-2-4-2-compile-issues-in-operations-hpp

    https://blog.csdn.net/jacke121/article/details/56038046

    (2)D:\ProgramFiles\opencv2\opencv\sources\modules\highgui\src\window_w32.cpp:1853: error: `BTNS_AUTOSIZE' was not declared in this scope
    D:\ProgramFiles\opencv2\opencv\sources\modules\highgui\src\window_w32.cpp:1853: error: `BTNS_BUTTON' was not declared in this scope

    解决:打开window_w32.cpp在1850-1851行加入:

               const int BTNS_AUTOSIZE = 0;
                const int BTNS_BUTTON = 0;

    (3)出现[100%] Built target opencv_annotation 表明编译好了

     安装后要配置环境变量,本人的为:D:\ProgramFiles\opencv2\opencv\build\x86\mingw\bin

     

    4)新建编译系统OpenCV

    打开Sublime界面,选择工具——>编译系统——>新编译系统——>新建my_opencv.sublime-build文件,输入代码:

    {
      "cmd": ["g++", "${file}", "-o",
       "${file_path}/${file_base_name}","-I",
       "D:/ProgramFiles/opencv2/opencv/build/include",
      "-I","D:/ProgramFiles/opencv2/opencv/build/include/opencv","-I",
      "D:/ProgramFiles/opencv2/opencv/build/include/opencv2","-L",
       "D:/ProgramFiles/opencv2/opencv/build/x86/mingw/lib",

      "-l", "opencv_core2411", "-l", "opencv_imgproc2411",
       "-l", "opencv_calib3d2411","-l", "opencv_video2411",
       "-l", "opencv_features2d2411", "-l", "opencv_ml2411", "-l",
       "opencv_highgui2411","-l", "opencv_objdetect2411", "-l",
       "opencv_legacy2411",  "-l", "opencv_flann2411"],

      "file_regex": "^(..[^:]*):([0-9]+):?([0-9]+)?:? (.*)$",
      "working_dir": "${file_path}",
      "selector": "source.c, source.c++, source.cpp",
      "encoding": "gbk",
      "variants":
      [
        {
          "name": "Run",
          "cmd" : ["${file_path}/${file_base_name}"]
        }
      ]
    }
    注意:上述代码的3到6行需要更改为自己的实际路径8到11行为编译连接需要用到的dll,在Opencv\build\x64\mingw\bin目录下可以看到这些dll,将它们按照文件夹中的实际名称更改到代码中,名称中的数字是对应的版本号,名称前缀可以用opencv,也可以用libopencv,注意不要遗漏,否则无法通过编译。完成之后,在工具-编译系统中选择刚才新建的my_opencv,之后就可以编译运行了。

    详细参看:将Sublime Text 3打造成OpenCV3.2的IDE

     

    如何编译运行:使用的时候,在选中了一个*.cpp文件,在键盘上输入**ctrl+b**(如果是第一次的话,会弹出下面的框,让你来选;否则就会需要输入ctrl+shift+b 这里关于输入法的必须选英文,否则会有表情包出来的。。)

    本人操作:工具——>用什么编译(ctrl+shift+b)

    第一个 my_opencv : 这个会编译当前文件。
    第二个 my_opencv-Run就是只运行编译后的文件,如果不编译,无法运行。

    详细参看:https://blog.csdn.net/a19990412/article/details/82658981

     

    问题:代码1-7运行总是报错 *.exe无法工作。

    说明:Sublime测试代码1-3没有问题,把1-7逐行注释测试,发现运行到cv::warpAffine(srcImage, rotateImg, rotateImage,srcImage.size());无法运行,可知其无法运行经过复杂运算的图像,可能是太小的缘故。

     

    第二章 图像及视频基本操作

    1.Mat基本操作

    1)convertTo函数?

    在使用Opencv中,常常会出现读取一个图片内容后要把图片内容的像素信息转为浮点并把当前的mat作为矩形进行矩阵计算,那么这里就有一个类型转换问你,在新的opencv中mat有一个函数可以用于类型的相互转换

    oclMat::convertTo(oclMat& m, int rtype, double alpha = 1, double beta = 0)

    m:转为目标数据类型的矩阵;
    rtype:指定目标数据类型,或者是depth(通道数),如果rtype:是负值,那么目标矩阵的数据类型和源矩形的数据类型是一致的;
    alpha:基于尺度的变化值;
    beta:在尺度上的加和;

    举例:
    cv::Mat matTemp = cv::Mat::zeros(100,100,CV_32F);//得到一个浮点型的100*100的矩阵
    cv::Mat MatTemp2;
    matTemp.convertTo(MatTemp2,CV_8U);//把矩阵matTemp转为unsigned char类型的矩阵,注意在转换过程中有可能数值上会出现一些变化,这个要注意

    参看链接:http://www.360doc.com/content/16/1202/09/35269117_611224795.shtml

    2)blur函数?什么是锚点?

    锚点(即被平滑的那个点)

    参看:openCV blur函数

    opencv之blur()函数

    3)

     

     

     

    2

     

    展开全文
  • OpenCV图像处理编程实例》(朱伟著)+源代码;《OpenCV图像处理编程实例》(朱伟著)+源代码
  • OpenCV图像处理编程实例-朱伟等编著-PDF电子版,来源于
  • OpenCV图像处理编程实例
  • 本书以 OpenCV 开源库为基础实现图像处理领域的很多通用算法,并结合当今图像处理领域前沿技术, 对多个典型工程实例进行讲解及实现。全书内容覆盖面广,由基础到进阶,各个技术点均提供详细的代码

    当当网购买地址:http://product.dangdang.com/23956649.html
    京东网购买地址:http://item.jd.com/11929148.html

    内容简介

    本书以 OpenCV 开源库为基础实现图像处理领域的很多通用算法,并结合当今图像处理领域前沿技术,
    对多个典型工程实例进行讲解及实现。全书内容覆盖面广,由基础到进阶,各个技术点均提供详细的代码
    实现,以帮助读者快速上手和深入学习。
    本书内容共三个部分,其中 1~2 章为基础篇, 3~6 章为进阶篇, 7~9 章为高级篇。第一部分基础篇
    主要介绍 OpenCV 开发基础的相关知识,让读者熟悉图像处理开发环境以及简单的图像处理操作;第二部
    分进阶篇主要介绍图像处理技术,包括灰度变换技术、平滑技术、边缘检测及形态学技术;第三部分高级
    篇主要介绍图像应用技术,包括图像分割技术、特征分析和复杂视频处理技术。进阶篇与高级篇的每章末
    节均提供了与本章内容相关的应用实例,意在让读者更好理解知识点,进而有效地进行图像处理开发。
    本书适合图像处理、计算机视觉及模式识别等相关领域学习和工作的人士阅读,也可作为其他相关领
    域研究工作者的参考资料。


    本书特色
    本书将理论与实际案例相结合,始终秉承“学以致用”的理念,提供多个颇
    具实用性和前沿性的实例,用详细的代码验证实现,通过大量的例子让读者边学
    边练,注重给予读者一定的启发和引导。本书的编写是站在一线开发人员的角度,
    用通俗易懂的语言详细解释了 OpenCV 的应用,更像一个 OpenCV 的工作人员在
    解说 OpenCV 的方方面面,严谨的逻辑结构和清晰的脉络为读者入门及深入了解
    和掌握 OpenCV 图像处理开发技术奠定了扎实的基础。

    本书目录

    Part I 基础篇 OpenCV开发基础
    第1章 初识OpenCV
    1.1 OpenCV初识
    1.1.1 OpenCV简介
    1.1.2 OpenCV组件及架构
    1.1.3 OpenCV资源
    1.2 VS2012安装OpenCV2.4.x
    1.3 VS2013安装OpenCV3.0
    1.4 Sublime下配置OpenCV
    1.5 小结
    第2章 图像及视频基本操作
    2.1 图像初级操作
    2.1.1 Mat类
    2.1.2 Mat基本操作
    2.1.3 Mat类型转换
    2.1.4 图像读取显示保存
    2.1.5 图像存储
    2.2 图像几何变换
    2.2.1 坐标映射
    2.2.2 平移
    2.2.3 缩放
    2.2.4 旋转
    2.2.5 仿射变换
    2.3 视频操作
    2.3.1 VideoCapture类
    2.3.2 视频写操作
    2.3.3 视频质量评价
    2.4 图像基础应用操作
    2.4.1 界面事件
    2.4.2 区域提取
    2.4.3 图像元素遍历――反色
    2.4.4 单窗口显示多幅图像
    2.4.5 图像颜色空间转换
    2.4.6 图像批量读取――规则
    2.4.7 图像批量读取――无规则
    2.5 小结
    Part II 进阶篇 图像处理技术
    第3章 进阶篇――图像灰度变换技术
    3.1 阈值化处理
    3.1.1 OTSU阈值化
    3.1.2 固定阈值化
    3.1.3 自适应阈值化
    3.1.4 双阈值化
    3.1.5 半阈值化
    3.2 直方图处理
    3.2.1 灰度直方图
    3.2.2 H-S直方图
    3.2.3 BGR直方图
    3.2.4 自定义直方图
    3.2.5 灰度直方图均衡
    3.2.6 彩色直方图均衡
    3.2.7 直方图变换――查找
    3.2.8 直方图变换――累计
    3.2.9 直方图匹配
    3.2.10 直方图对比
    3.2.11 直方图的反向投影
    3.3 距离变换
    3.3.1 距离
    3.3.2 邻接性
    3.3.3 区域
    3.3.4 距离变换――扫描
    3.3.5 距离变换――distanceTransform
    3.4 Gamma校正
    3.5 其他常见的灰度变换技术
    3.5.1 线性变换
    3.5.2 对数变换
    3.5.3 对比度拉伸
    3.5.4 灰度级分层
    3.5.5 灰度比特平面
    3.6 实例应用
    3.6.1 最大熵阈值分割
    3.6.2 投影峰谷查找
    3.7 小结
    第4章 进阶篇――图像平滑技术
    4.1 图像采样
    4.1.1 最近邻插值
    4.1.2 双线性插值
    4.1.3 插值操作性能对比
    4.1.4 图像金字塔
    4.2 傅里叶变换
    4.2.1 图像掩码操作
    4.2.2 离散傅里叶
    4.2.3 图像卷积
    4.3 图像噪声
    4.3.1 椒盐噪声
    4.3.2 高斯噪声
    4.4 空间平滑
    4.4.1 盒滤波
    4.4.2 均值滤波
    4.4.3 中值滤波
    4.4.4 高斯滤波
    4.4.5 双边滤波
    4.5 实例应用
    4.5.1 导向滤波
    4.5.2 图像污点修复
    4.5.3 旋转文本图像矫正
    4.6 小结
    第5章 进阶篇――边缘检测技术
    5.1 边缘检测基础
    5.1.1 边缘检测概念
    5.1.2 梯度算子
    5.1.3 一阶微分算子
    5.1.4 二阶微分算子
    5.1.5 图像差分运算
    5.1.6 非极大值抑制
    5.2 基本边缘检测算子――Sobel
    5.2.1 非极大值抑制Sobel检测
    5.2.2 图像直接卷积实现Sobel
    5.2.3 图像卷积下非极大值抑制Sobel
    5.2.4 Sobel库函数实现
    5.3 基本边缘检测算子――Laplace
    5.4 基本边缘检测算子――Roberts
    5.5 基本边缘检测算子――Prewitt
    5.6 改进边缘检测算子――Canny
    5.6.1 Canny算子
    5.6.2 Canny原理及实现
    5.6.3 Canny库函数实现
    5.7 改进边缘检测算子――Marr-Hildreth
    5.8 几何检测
    5.8.1 霍夫变换
    5.8.2 线检测技术
    5.8.3 LSD快速直线检测
    5.8.4 圆检测技术
    5.9 形状检测
    5.9.1 轮廓检测
    5.9.2 凸包检测
    5.9.3 轮廓边界框
    5.9.4 轮廓矩
    5.9.5 点多边形测试
    5.10 角点检测
    5.10.1 moravec角点
    5.10.2 harris角点
    5.10.3 Shi-Tomasi角点
    5.11 实例应用
    5.11.1 颜色圆检测
    5.11.2 车牌区域检测
    5.12 小结
    第6章 进阶篇――形态学技术
    6.1 腐蚀膨胀操作
    6.2 开闭运算操作
    6.3 形态学梯度
    6.4 形态学Top-Hat
    6.5 实例应用
    6.5.1 形态学滤波角点提取
    6.5.2 车牌目标提取
    6.6 小结
    Part III 高级篇 图像应用技术
    第7章 高级篇――图像分割技术
    7.1 分水岭分割
    7.1.1 分水岭的特征
    7.1.2 实现分水岭分割
    7.1.3 分水岭分割合并
    7.2 FloodFill分割
    7.3 均值漂移MeanShift
    7.4 图割Grabcut
    7.5 实例应用
    7.5.1 奇异区域检测
    7.5.2 肤色检测
    7.6 小结
    第8章 高级篇――特征分析
    8.1 尺度空间
    8.1.1 尺度与旋转不变性
    8.1.2 特征点尺度变换
    8.2 特征描述子
    8.2.1 SIFT特征
    8.2.2 SURF特征
    8.2.3 ORB特征
    8.3 方向梯度直方图HOG
    8.3.1 HOG原理
    8.3.2 HOG特征提取步骤
    8.3.3 HOGDescriptor特征描述类
    8.3.4 HOG特征描述实现
    8.4 局部二值模式LBP
    8.4.1 经典LBP
    8.4.2 圆形LBP
    8.5 Haar特征描述
    8.5.1 Haar原理
    8.5.2 Haar特征提取
    8.6 应用实例
    8.6.1 最近邻特征点目标提取
    8.6.2 最大极值稳定区域匹配MSER
    8.6.3 字符特征提取
    8.6.4 车牌字符SVM训练
    8.7 小结
    第9章 高级篇――复杂视频处理技术
    9.1 视频稳像技术
    9.2 图像拼接
    9.2.1 拼接原理及过程
    9.2.2 图像拼接实现
    9.3 高动态范围图像HDR
    9.3.1 HDR合成技术
    9.3.2 HDR合成原理
    9.3.3 OpenCV实现
    9.4 背景建模
    9.4.1 背景差分
    9.4.2 混合高斯背景建模
    9.4.3 混合高斯背景建模实现
    9.4.4 混合模型MOG2成员参数设定
    9.4.5 KNN模型背景建模实现
    9.4.6 GMG模型背景建模实现
    9.5 级联分类器――人脸检测
    9.5.1 级联分类器
    9.5.2 CascadeClassifier类
    9.6 应用实例
    9.6.1 运动目标提取
    9.6.2 TLD单目标跟踪
    9.6.3 人眼检测与跟踪
    9.7 小结
    附录A
    1――代码清单
    2――CMake编译OpenCV3.1源码
    3――OpenCV3.1 Extra扩展库
    参考文献

    展开全文
  • 适合初学者阅读的——OpenCV图像处理编程实例+源代码!
  • OpenCV图像处理编程实例》,电子工业出版社,2016.05
  • 资源名称:opencv图像处理编程实例资源截图: 资源太大,传百度网盘了,链接在附件中,有需要的同学自取。
  • OpenCV图像处理编程实例及源码,包含界面制作及代码编写,让你真正的学习opencv\
  • 网上很多此文档都是看起来很亮,字体颜色太浅,看起来不舒服 现在发的这个进行了字体加强,颜色的加深,
  • 运行环境: VS2015 + OpenCV3.1.0 需重新编译opencv_contrib,编译流程见本书附录2——CMake 编译 OpenCV3.1 源码; OpenCV图像处理编程实例-源码
1 2 3 4 5 ... 20
收藏数 13,974
精华内容 5,589
关键字:

opencv 图像处理编程