精华内容
下载资源
问答
  • java 判断两张图片相似度

    千次阅读 2017-05-27 14:23:49
    import java.awt.image.BufferedImage; import java.io.File;... * 比较两张图片相似度  * @author Guihua  *  */ public class BMPLoader {  // 改变成二进制码  public static Strin
    import java.awt.image.BufferedImage;
    import java.io.File;
    import javax.imageio.ImageIO;
    /**
     * 比较两张图片的相似度
     * @author Guihua
     *
     */
    public class BMPLoader {
      // 改变成二进制码
      public static String[][] getPX(String args) {
        int[] rgb = new int[3];
        File file = new File(args);
        BufferedImage bi = null;
        try {
          bi = ImageIO.read(file);
        } catch (Exception e) {
          e.printStackTrace();
        }
        int width = bi.getWidth();
        int height = bi.getHeight();
        int minx = bi.getMinX();
        int miny = bi.getMinY();
        String[][] list = new String[width][height];
        for (int i = minx; i < width; i++) {
          for (int j = miny; j < height; j++) {
            int pixel = bi.getRGB(i, j);
            rgb[0] = (pixel & 0xff0000) >> 16;
            rgb[1] = (pixel & 0xff00) >> 8;
            rgb[2] = (pixel & 0xff);
            list[i][j] = rgb[0] + "," + rgb[1] + "," + rgb[2];
          }
        }
        return list;
      }
      public static void compareImage(String imgPath1, String imgPath2){
        String[] images = {imgPath1, imgPath2};
        if (images.length == 0) {
          System.out.println("Usage >java BMPLoader ImageFile.bmp");
          System.exit(0);
        }
        // 分析图片相似度 begin
        String[][] list1 = getPX(images[0]);
        String[][] list2 = getPX(images[1]);
        int xiangsi = 0;
        int busi = 0;
        int i = 0, j = 0;
        for (String[] strings : list1) {
          if ((i + 1) == list1.length) {
            continue;
          }
          for (int m=0; m<strings.length; m++) {
            try {
              String[] value1 = list1[i][j].toString().split(",");
              String[] value2 = list2[i][j].toString().split(",");
              int k = 0;
              for (int n=0; n<value2.length; n++) {
                if (Math.abs(Integer.parseInt(value1[k]) - Integer.parseInt(value2[k])) < 5) {
                  xiangsi++;
                } else {
                  busi++;
                }
              }
            } catch (RuntimeException e) {
              continue;
            }
            j++;
          }
          i++;
        }
        list1 = getPX(images[1]);
        list2 = getPX(images[0]);
        i = 0;
        j = 0;
        for (String[] strings : list1) {
          if ((i + 1) == list1.length) {
            continue;
          }
          for (int m=0; m<strings.length; m++) {
            try {
              String[] value1 = list1[i][j].toString().split(",");
              String[] value2 = list2[i][j].toString().split(",");
              int k = 0;
              for (int n=0; n<value2.length; n++) {
                if (Math.abs(Integer.parseInt(value1[k]) - Integer.parseInt(value2[k])) < 5) {
                  xiangsi++;
                } else {
                  busi++;
                }
              }
            } catch (RuntimeException e) {
              continue;
            }
            j++;
          }
          i++;
        }
        String baifen = "";
        try {
          baifen = ((Double.parseDouble(xiangsi + "") / Double.parseDouble((busi + xiangsi) + "")) + "");
          baifen = baifen.substring(baifen.indexOf(".") + 1, baifen.indexOf(".") + 3);
        } catch (Exception e) {
          baifen = "0";
        }
        if (baifen.length() <= 0) {
          baifen = "0";
        }
        if(busi == 0){
          baifen="100";
        }
        System.out.println("相似像素数量:" + xiangsi + " 不相似像素数量:" + busi + " 相似率:" + Integer.parseInt(baifen) + "%");
      }
      public static void main(String[] args){
        BMPLoader.compareImage("E:\\12.bmp", "E:\\1.bmp");
      }
    }
    展开全文
  • 两张图片相似度比较

    千次阅读 2016-11-01 09:04:20
    package { import flash.display.BitmapData; import flash.geom.Matrix; public class HashClass { public function HashClass() { // constructor code ...public function compareBitmapData(bmpData1


    package  {
    	import flash.display.BitmapData;
    	import flash.geom.Matrix;

    	/**
    	 * Average-hash (aHash) based image similarity for BitmapData objects.
    	 * A 64-bit fingerprint is built from an 8x8 greyscale thumbnail and two
    	 * fingerprints are then compared bit by bit.
    	 */
    	public class HashClass {

    		public function HashClass() {
    			// constructor code
    		}

    		/**
    		 * Returns the fraction of matching fingerprint bits, in [0, 1];
    		 * 1 means the two images produced identical hashes.
    		 * (The original variable was misleadingly named "diffCount" even
    		 * though it counted MATCHING bits; renamed for clarity — the
    		 * returned value is unchanged.)
    		 */
    		public function compareBitmapData(bmpData1:BitmapData, bmpData2:BitmapData):Number {
    			var hash1:Array = process(bmpData1).split("");
    			var hash2:Array = process(bmpData2).split("");
    			var sameCount:int = 0;
    			for (var i:int = 0; i < 64; i++) {
    				if (hash1[i] == hash2[i]) {
    					sameCount++;
    				}
    			}
    			return sameCount / 64;
    		}

    		/** Builds the 64-character hash fingerprint for an image. */
    		public static function process(bmpData:BitmapData):String {
    			// Scale down to 8x8.
    			var resizedData:BitmapData = reduceSize(bmpData, 8, 8);
    			// Convert to greyscale.
    			var greyBmp:BitmapData = reduceColor(resizedData);
    			// Average grey level of the thumbnail.
    			var avgGrey:uint = calcAvgGrey(greyBmp);
    			// Compare each pixel's grey level with the average to form the hash.
    			var hashArr:Array = calcAvgHash(resizedData, avgGrey);
    			return hashArr.join("");
    		}

    		/** Scales the source bitmap to the requested size (default 8x8). */
    		public static function reduceSize(source:BitmapData, width:Number = 8, height:Number = 8):BitmapData {
    			var newData:BitmapData = new BitmapData(width, height);
    			var matrix:Matrix = new Matrix();
    			matrix.scale(newData.width / source.width, newData.height / source.height);
    			newData.draw(source, matrix);
    			return newData;
    		}

    		/** Returns a greyscale copy using the 30/59/11 luma weights. */
    		public static function reduceColor(source:BitmapData):BitmapData {
    			var result:BitmapData = new BitmapData(source.width, source.height);
    			for (var y:int = 0; y < source.height; y++) {
    				for (var x:int = 0; x < source.width; x++) {
    					// getPixel/setPixel take (x, y); the original passed the
    					// row index as x, which only worked for square bitmaps.
    					var color:uint = source.getPixel(x, y);
    					var red:uint = (color & 0xFF0000) >> 16;
    					var green:uint = (color & 0x00FF00) >> 8;
    					var blue:uint = color & 0x0000FF;
    					var bwColor:uint = (red * 30 + green * 59 + blue * 11) / 100;
    					// Put the grey value in all three channels.
    					bwColor = (bwColor << 16) + (bwColor << 8) + bwColor;
    					result.setPixel(x, y, bwColor);
    				}
    			}
    			return result;
    		}

    		/** Average grey level over all pixels (luma-weighted). */
    		public static function calcAvgGrey(bmpData:BitmapData):uint {
    			var vecGrey:Vector.<uint> = bmpData.getVector(bmpData.rect);
    			var total:uint = 0;
    			var length:uint = vecGrey.length;
    			for (var i:int = 0; i < length; i++) {
    				var color:uint = vecGrey[i];
    				var red:uint = (color & 0xFF0000) >> 16;
    				var green:uint = (color & 0x00FF00) >> 8;
    				var blue:uint = color & 0x0000FF;
    				total += (red * 30 + green * 59 + blue * 11) / 100;
    			}
    			return uint(total / vecGrey.length);
    		}

    		/**
    		 * Builds the hash: a pixel contributes 0 when its grey level is
    		 * clearly above the average and 1 otherwise. The slack of 2 makes
    		 * the comparison tolerant to small brightness differences.
    		 * (The original computed "grey - 2 > avg" on uints, which wraps
    		 * around for grey < 2; comparing against avg + 2 is equivalent for
    		 * all other values and avoids the underflow.)
    		 */
    		public static function calcAvgHash(bmpData:BitmapData, avgValue:uint):Array {
    			var vecGrey:Vector.<uint> = bmpData.getVector(bmpData.rect);
    			var length:uint = vecGrey.length;
    			var hashArr:Array = [];
    			for (var i:int = 0; i < length; i++) {
    				// ARGB 32-bit value; only the RGB part is used.
    				var color:uint = vecGrey[i];
    				var red:uint = (color & 0xFF0000) >> 16;
    				var green:uint = (color & 0x00FF00) >> 8;
    				var blue:uint = color & 0x0000FF;
    				var grey:uint = (red * 30 + green * 59 + blue * 11) / 100;
    				hashArr.push(grey > avgValue + 2 ? 0 : 1);
    			}
    			return hashArr;
    		}
    	}
    }
    

    展开全文
  • opencv 判断两张图片相似度

    千次阅读 2017-05-17 10:29:29
    Goal¶ Today it is common to have a digital video recording system at your disposal. Therefore, you will eventually come to the situation that you no longer process a batch of images, but video ...

    Goal

    Today it is common to have a digital video recording system at your disposal. Therefore, you will eventually come to the situation that you no longer process a batch of images, but video streams. These may be of two kinds: real-time image feed (in the case of a webcam) or prerecorded and hard disk drive stored files. Luckily, OpenCV treats these two in the same manner, with the same C++ class. So here’s what you’ll learn in this tutorial:

    • How to open and read video streams
    • Two ways for checking image similarity: PSNR and SSIM

    The source code

    As a test case where to show off these using OpenCV I’ve created a small program that reads in two video files and performs a similarity check between them. This is something you could use to check just how well a new video compressing algorithms works. Let there be a reference (original) video like this small Megamind clip and a compressed version of it. You may also find the source code and these video file in the samples/cpp/tutorial_code/HighGUI/video-input-psnr-ssim/ folder of the OpenCV source library.

      1
      2
      3
      4
      5
      6
      7
      8
      9
     10
     11
     12
     13
     14
     15
     16
     17
     18
     19
     20
     21
     22
     23
     24
     25
     26
     27
     28
     29
     30
     31
     32
     33
     34
     35
     36
     37
     38
     39
     40
     41
     42
     43
     44
     45
     46
     47
     48
     49
     50
     51
     52
     53
     54
     55
     56
     57
     58
     59
     60
     61
     62
     63
     64
     65
     66
     67
     68
     69
     70
     71
     72
     73
     74
     75
     76
     77
     78
     79
     80
     81
     82
     83
     84
     85
     86
     87
     88
     89
     90
     91
     92
     93
     94
     95
     96
     97
     98
     99
    100
    101
    102
    103
    104
    105
    106
    107
    108
    109
    110
    111
    112
    113
    114
    115
    116
    117
    118
    119
    120
    121
    122
    123
    124
    125
    126
    127
    128
    129
    130
    131
    132
    133
    134
    135
    136
    137
    138
    139
    140
    141
    142
    143
    144
    145
    146
    147
    148
    149
    150
    151
    152
    153
    154
    155
    156
    157
    158
    159
    160
    161
    162
    163
    164
    165
    166
    167
    168
    169
    170
    171
    172
    173
    174
    175
    176
    177
    178
    179
    180
    181
    182
    183
    184
    185
    186
    187
    188
    189
    190
    191
    #include <iostream> // for standard I/O
    #include <string>   // for strings
    #include <iomanip>  // for controlling float print precision
    #include <sstream>  // string to number conversion
    
    #include <opencv2/core/core.hpp>        // Basic OpenCV structures (cv::Mat, Scalar)
    #include <opencv2/imgproc/imgproc.hpp>  // Gaussian Blur
    #include <opencv2/highgui/highgui.hpp>  // OpenCV window I/O
    
    using namespace std;
    using namespace cv;
    
    double getPSNR ( const Mat& I1, const Mat& I2);
    Scalar getMSSIM( const Mat& I1, const Mat& I2);
    
    int main(int argc, char *argv[])
        help();
    
        if (argc != 5)
        {
            cout << "Not enough parameters" << endl;
            return -1;
        }
    
        stringstream conv;
    
        const string sourceReference = argv[1], sourceCompareWith = argv[2];
        int psnrTriggerValue, delay;
        conv << argv[3] << endl << argv[4];       // put in the strings
        conv >> psnrTriggerValue >> delay;        // take out the numbers
    
        char c;
        int frameNum = -1;          // Frame counter
    
        VideoCapture captRefrnc(sourceReference), captUndTst(sourceCompareWith);
    
        if (!captRefrnc.isOpened())
        {
            cout  << "Could not open reference " << sourceReference << endl;
            return -1;
        }
    
        if (!captUndTst.isOpened())
        {
            cout  << "Could not open case test " << sourceCompareWith << endl;
            return -1;
        }
    
        Size refS = Size((int) captRefrnc.get(CV_CAP_PROP_FRAME_WIDTH),
                         (int) captRefrnc.get(CV_CAP_PROP_FRAME_HEIGHT)),
             uTSi = Size((int) captUndTst.get(CV_CAP_PROP_FRAME_WIDTH),
                         (int) captUndTst.get(CV_CAP_PROP_FRAME_HEIGHT));
    
        if (refS != uTSi)
        {
            cout << "Inputs have different size!!! Closing." << endl;
            return -1;
        }
    
        const char* WIN_UT = "Under Test";
        const char* WIN_RF = "Reference";
    
        // Windows
        namedWindow(WIN_RF, CV_WINDOW_AUTOSIZE);
        namedWindow(WIN_UT, CV_WINDOW_AUTOSIZE);
        cvMoveWindow(WIN_RF, 400       , 0);         //750,  2 (bernat =0)
        cvMoveWindow(WIN_UT, refS.width, 0);         //1500, 2
    
        cout << "Reference frame resolution: Width=" << refS.width << "  Height=" << refS.height
             << " of nr#: " << captRefrnc.get(CV_CAP_PROP_FRAME_COUNT) << endl;
    
        cout << "PSNR trigger value " << setiosflags(ios::fixed) << setprecision(3)
             << psnrTriggerValue << endl;
    
        Mat frameReference, frameUnderTest;
        double psnrV;
        Scalar mssimV;
    
        for(;;) //Show the image captured in the window and repeat
        {
            captRefrnc >> frameReference;
            captUndTst >> frameUnderTest;
    
            if (frameReference.empty() || frameUnderTest.empty())
            {
                cout << " < < <  Game over!  > > > ";
                break;
            }
    
            ++frameNum;
            cout << "Frame: " << frameNum << "# ";
    
            / PSNR 
            psnrV = getPSNR(frameReference,frameUnderTest);
            cout << setiosflags(ios::fixed) << setprecision(3) << psnrV << "dB";
    
             MSSIM /
            if (psnrV < psnrTriggerValue && psnrV)
            {
                mssimV = getMSSIM(frameReference, frameUnderTest);
    
                cout << " MSSIM: "
                    << " R " << setiosflags(ios::fixed) << setprecision(2) << mssimV.val[2] * 100 << "%"
                    << " G " << setiosflags(ios::fixed) << setprecision(2) << mssimV.val[1] * 100 << "%"
                    << " B " << setiosflags(ios::fixed) << setprecision(2) << mssimV.val[0] * 100 << "%";
            }
    
            cout << endl;
    
            // Show Image /
            imshow(WIN_RF, frameReference);
            imshow(WIN_UT, frameUnderTest);
    
            c = (char)cvWaitKey(delay);
            if (c == 27) break;
        }
    
        return 0;
    }
    
    double getPSNR(const Mat& I1, const Mat& I2)
    {
        Mat s1;
        absdiff(I1, I2, s1);       // |I1 - I2|
        s1.convertTo(s1, CV_32F);  // cannot make a square on 8 bits
        s1 = s1.mul(s1);           // |I1 - I2|^2
    
        Scalar s = sum(s1);        // sum elements per channel
    
        double sse = s.val[0] + s.val[1] + s.val[2]; // sum channels
    
        if( sse <= 1e-10) // for small values return zero
            return 0;
        else
        {
            double mse  = sse / (double)(I1.channels() * I1.total());
            double psnr = 10.0 * log10((255 * 255) / mse);
            return psnr;
        }
    }
    
    // Structural similarity (SSIM) between two images, one value per channel,
    // each in [0, 1] with 1 meaning a perfect match (Wang et al., 2004).
    // NOTE: the scraped copy of this function was truncated — the final
    // "return mssim;" and closing brace are restored here (they match the
    // complete listing of the same function later in this document).
    Scalar getMSSIM( const Mat& i1, const Mat& i2)
    {
        const double C1 = 6.5025, C2 = 58.5225;
        /***************************** INITS **********************************/
        int d = CV_32F;

        Mat I1, I2;
        i1.convertTo(I1, d);            // cannot calculate on one byte large values
        i2.convertTo(I2, d);

        Mat I2_2   = I2.mul(I2);        // I2^2
        Mat I1_2   = I1.mul(I1);        // I1^2
        Mat I1_I2  = I1.mul(I2);        // I1 * I2

        /*************************** END INITS **********************************/

        Mat mu1, mu2;                   // PRELIMINARY COMPUTING
        GaussianBlur(I1, mu1, Size(11, 11), 1.5);
        GaussianBlur(I2, mu2, Size(11, 11), 1.5);

        Mat mu1_2   =   mu1.mul(mu1);
        Mat mu2_2   =   mu2.mul(mu2);
        Mat mu1_mu2 =   mu1.mul(mu2);

        Mat sigma1_2, sigma2_2, sigma12;

        GaussianBlur(I1_2, sigma1_2, Size(11, 11), 1.5);
        sigma1_2 -= mu1_2;

        GaussianBlur(I2_2, sigma2_2, Size(11, 11), 1.5);
        sigma2_2 -= mu2_2;

        GaussianBlur(I1_I2, sigma12, Size(11, 11), 1.5);
        sigma12 -= mu1_mu2;

        //////////////////////////////// FORMULA /////////////////////////////////
        Mat t1, t2, t3;

        t1 = 2 * mu1_mu2 + C1;
        t2 = 2 * sigma12 + C2;
        t3 = t1.mul(t2);                 // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))

        t1 = mu1_2 + mu2_2 + C1;
        t2 = sigma1_2 + sigma2_2 + C2;
        t1 = t1.mul(t2);                 // t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))

        Mat ssim_map;
        divide(t3, t1, ssim_map);        // ssim_map =  t3./t1;

        Scalar mssim = mean(ssim_map);   // mssim = average of ssim map
        return mssim;
    }
    

    How to read a video stream (online-camera or offline-file)?

    Essentially, all the functionality required for video manipulation is integrated in the VideoCapture C++ class. This in itself builds on the FFmpeg open source library. This is a basic dependency of OpenCV so you shouldn’t need to worry about this. A video is composed of a succession of images; we refer to these in the literature as frames. In case of a video file there is a frame rate specifying just how long the interval between two frames is. While for video cameras usually there is a limit of just how many frames they can digitize per second, this property is less important as at any time the camera sees the current snapshot of the world.

    The first task you need to do is to assign to a VideoCapture class its source. You can do this either via the constructor or its open function. If this argument is an integer then you will bind the class to a camera, a device. The number passed here is the ID of the device, assigned by the operating system. If you have a single camera attached to your system its ID will probably be zero and further ones increasing from there. If the parameter passed to these is a string it will refer to a video file, and the string points to the location and name of the file. For example, to the upper source code a valid command line is:

    video/Megamind.avi video/Megamind_bug.avi  35 10
    

    We do a similarity check. This requires a reference and a test case video file. The first two arguments refer to this. Here we use a relative address. This means that the application will look into its current working directory and open the video folder and try to find inside this the Megamind.avi and the Megamind_bug.avi.

    const string sourceReference = argv[1],sourceCompareWith = argv[2];
    
    VideoCapture captRefrnc(sourceReference);
    // or
    VideoCapture captUndTst;
    captUndTst.open(sourceCompareWith);
    

    To check if the binding of the class to a video source was successful or not use the isOpened function:

    if ( !captRefrnc.isOpened())
      {
      cout  << "Could not open reference " << sourceReference << endl;
      return -1;
      }
    

    Closing the video is automatic when the objects destructor is called. However, if you want to close it before this you need to call its release function. The frames of the video are just simple images. Therefore, we just need to extract them from the VideoCapture object and put them inside a Mat one. The video streams are sequential. You may get the frames one after another by the read or the overloaded >> operator:

    Mat frameReference, frameUnderTest;
    captRefrnc >> frameReference;
    captUndTst >> frameUnderTest;  // fixed: was "captUndTst.open(frameUnderTest)", which does not read a frame
    

    The upper read operations will leave empty the Mat objects if no frame could be acquired (either cause the video stream was closed or you got to the end of the video file). We can check this with a simple if:

    if( frameReference.empty()  || frameUnderTest.empty())
    {
     // exit the program
    }
    

    A read method is made of a frame grab and a decoding applied on that. You may call explicitly these two by using the grab and then the retrieve functions.

    Videos have many-many information attached to them besides the content of the frames. These are usually numbers, however in some case it may be short character sequences (4 bytes or less). Due to this to acquire these information there is a general function named get that returns double values containing these properties. Use bitwise operations to decode the characters from a double type and conversions where valid values are only integers. Its single argument is the ID of the queried property. For example, here we get the size of the frames in the reference and test case video file; plus the number of frames inside the reference.

    Size refS = Size((int) captRefrnc.get(CV_CAP_PROP_FRAME_WIDTH),
                     (int) captRefrnc.get(CV_CAP_PROP_FRAME_HEIGHT)),
    
    cout << "Reference frame resolution: Width=" << refS.width << "  Height=" << refS.height
         << " of nr#: " << captRefrnc.get(CV_CAP_PROP_FRAME_COUNT) << endl;
    

    When you are working with videos you may often want to control these values yourself. To do this there is a set function. Its first argument remains the name of the property you want to change and there is a second of double type containing the value to be set. It will return true if it succeeds and false otherwise. Good examples for this is seeking in a video file to a given time or frame:

    captRefrnc.set(CV_CAP_PROP_POS_MSEC, 1.2);  // go to the 1.2 second in the video
    captRefrnc.set(CV_CAP_PROP_POS_FRAMES, 10); // go to the 10th frame of the video
    // now a read operation would read the frame at the set position
    

    For properties you can read and change look into the documentation of the get and set functions.

    Image similarity - PSNR and SSIM

    We want to check just how imperceptible our video converting operation went; therefore we need a system to check frame by frame the similarity or differences. The most common algorithm used for this is the PSNR (aka Peak signal-to-noise ratio). The simplest definition of this starts out from the mean squared error. Let there be two images: I1 and I2; with a two dimensional size i and j, composed of c number of channels.

    MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2}

    Then the PSNR is expressed as:

    PSNR = 10 \cdot \log_{10} \left( \frac{MAX_I^2}{MSE} \right)

    Here the MAX_I^2 is the maximum valid value for a pixel. In case of the simple single byte image per pixel per channel this is 255. When two images are the same the MSE will give zero, resulting in an invalid divide by zero operation in the PSNR formula. In this case the PSNR is undefined and as we’ll need to handle this case separately. The transition to a logarithmic scale is made because the pixel values have a very wide dynamic range. All this translated to OpenCV and a C++ function looks like:

    // Peak signal-to-noise ratio (in dB) between two same-sized images.
    // Returns 0 when the summed squared error is negligible, because the
    // PSNR formula would otherwise divide by zero.
    double getPSNR(const Mat& I1, const Mat& I2)
    {
     Mat s1;
     absdiff(I1, I2, s1);       // |I1 - I2|
     s1.convertTo(s1, CV_32F);  // cannot make a square on 8 bits
     s1 = s1.mul(s1);           // |I1 - I2|^2
    
     Scalar s = sum(s1);         // sum elements per channel
    
     double sse = s.val[0] + s.val[1] + s.val[2]; // sum channels
    
     if( sse <= 1e-10) // for small values return zero
         return 0;
     else
     {
         double  mse =sse /(double)(I1.channels() * I1.total());
         double psnr = 10.0*log10((255*255)/mse);
         return psnr;
     }
    }
    

    Typically result values are anywhere between 30 and 50 for video compression, where higher is better. If the images significantly differ you’ll get much lower ones like 15 and so. This similarity check is easy and fast to calculate, however in practice it may turn out somewhat inconsistent with human eye perception. The structural similarity algorithm aims to correct this.

    Describing the methods goes well beyond the purpose of this tutorial. For that I invite you to read the article introducing it. Nevertheless, you can get a good image of it by looking at the OpenCV implementation below.

    See also

     

    SSIM is described more in-depth in the: “Z. Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, “Image quality assessment: From error visibility to structural similarity,” IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, Apr. 2004.” article.

    // Structural similarity (SSIM) between two images, one value per channel,
    // each in [0, 1] with 1 meaning a perfect match (Wang et al., 2004).
    Scalar getMSSIM( const Mat& i1, const Mat& i2)
    {
     const double C1 = 6.5025, C2 = 58.5225;
     /***************************** INITS **********************************/
     int d     = CV_32F;
    
     Mat I1, I2;
     i1.convertTo(I1, d);           // cannot calculate on one byte large values
     i2.convertTo(I2, d);
    
     Mat I2_2   = I2.mul(I2);        // I2^2
     Mat I1_2   = I1.mul(I1);        // I1^2
     Mat I1_I2  = I1.mul(I2);        // I1 * I2
    
     /***********************PRELIMINARY COMPUTING ******************************/
    
     Mat mu1, mu2;   // per-pixel means via 11x11 Gaussian window
     GaussianBlur(I1, mu1, Size(11, 11), 1.5);
     GaussianBlur(I2, mu2, Size(11, 11), 1.5);
    
     Mat mu1_2   =   mu1.mul(mu1);
     Mat mu2_2   =   mu2.mul(mu2);
     Mat mu1_mu2 =   mu1.mul(mu2);
    
     Mat sigma1_2, sigma2_2, sigma12;
    
     GaussianBlur(I1_2, sigma1_2, Size(11, 11), 1.5);
     sigma1_2 -= mu1_2;
    
     GaussianBlur(I2_2, sigma2_2, Size(11, 11), 1.5);
     sigma2_2 -= mu2_2;
    
     GaussianBlur(I1_I2, sigma12, Size(11, 11), 1.5);
     sigma12 -= mu1_mu2;
    
     //////////////////////////////// FORMULA /////////////////////////////////
     Mat t1, t2, t3;
    
     t1 = 2 * mu1_mu2 + C1;
     t2 = 2 * sigma12 + C2;
     t3 = t1.mul(t2);              // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
    
     t1 = mu1_2 + mu2_2 + C1;
     t2 = sigma1_2 + sigma2_2 + C2;
     t1 = t1.mul(t2);               // t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
    
     Mat ssim_map;
     divide(t3, t1, ssim_map);      // ssim_map =  t3./t1;
    
     Scalar mssim = mean( ssim_map ); // mssim = average of ssim map
     return mssim;
    }
    

    This will return a similarity index for each channel of the image. This value is between zero and one, where one corresponds to a perfect fit. Unfortunately, the many Gaussian blur operations are quite costly, so while the PSNR may work in a real-time-like environment (24 frames per second), the SSIM will take significantly more time to accomplish similar results.

    Therefore, the source code presented at the start of the tutorial will perform the PSNR measurement for each frame, and the SSIM only for the frames where the PSNR falls below an input value. For visualization purpose we show both images in an OpenCV window and print the PSNR and MSSIM values to the console. Expect to see something like:

    A sample output

    展开全文
  • 判断两张图的相似度 方法 直方图对比法 ORB算法 实验 1.直方图对比法 参考如何使用OpenCV3直方图方法进行人脸相似度对比 因为我的环境是VS2010+OpenCV2.4.8,所以在原版的基础上做了一点小修改。 #include &...

    判断两张图的相似度

    方法

    1. 直方图对比法
    2. ORB算法

    实验

    1.直方图对比法

    参考如何使用OpenCV3直方图方法进行人脸相似度对比
    因为我的环境是VS2010+OpenCV2.4.8,所以在原版的基础上做了一点小修改。

    #include <opencv2/opencv.hpp>
    #include "opencv2/core/core.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include <iostream>
    #include <string>
    #include <stdlib.h>
    using namespace std;
    using namespace cv;
    // Histogram-based image comparison.
    // Resizes `img` to `orgImg`'s size, converts both to HSV, builds a 2D
    // (hue, saturation) histogram for each, normalizes them, and compares the
    // histograms by correlation. Returns true when the correlation >= 0.85.
    // NOTE(review): converts `orgImg` to HSV IN PLACE (the Mat header passed
    // by value shares its pixel data with the caller) and opens three debug
    // imshow windows — confirm both are intended.
    bool compareFacesByHist(Mat img,Mat orgImg)
    {
    	Mat tmpImg;
    	// Scale the probe image to the reference size so the histograms are comparable.
    	resize(img, tmpImg, Size(orgImg.cols, orgImg.rows));
    	imshow("Img1", img);
    	imshow("tmpImg", tmpImg);
    	imshow("orgImg", orgImg);
    	// HSV color model (Hue, Saturation, Value).
    	cvtColor(tmpImg, tmpImg, COLOR_BGR2HSV);
    	cvtColor(orgImg, orgImg, COLOR_BGR2HSV);
    	// Histogram sizing: 256 bins per dimension of the 2D (H, S) histogram.
    	// NOTE(review): OpenCV hue only spans 0-180, so 256 hue bins leave some
    	// bins permanently empty; 180 would be the conventional choice — confirm.
    	int hBins = 256, sBins = 256;
    	int histSize[] = { hBins,sBins };
    	//H:0~180, S:0~255,V:0~255
    	// Hue value range.
    	float hRanges[] = { 0,180 };
    	// Saturation value range.
    	float sRanges[] = { 0,255 };
    	const float* ranges[] = { hRanges,sRanges };
    	int channels[] = { 0,1 };// 2D histogram over channels 0 (H) and 1 (S)
    	MatND hist1, hist2;
    	calcHist(&tmpImg, 1, channels, Mat(), hist1,2,histSize, ranges, true, false);
    	normalize(hist1, hist1, 0, 1, NORM_MINMAX, -1, Mat());
    	calcHist(&orgImg, 1, channels, Mat(), hist2, 2, histSize, ranges, true, false);
    	normalize(hist2, hist2, 0, 1, NORM_MINMAX, -1, Mat());
    	// Correlation comparison: 1.0 means identical (normalized) histograms.
    	double similarityValue = compareHist(hist1, hist2, CV_COMP_CORREL);
    	cout << "相似度:" << similarityValue << endl;
    	if (similarityValue >= 0.85)
    	{
    		return true;
    	}
    	return false;
    }
    
    int main()
    {
    	Mat orgImg = imread("p34.png");
    	Mat img = imread("p45.png");
    	compareFacesByHist(img, orgImg);
    	waitKey(0);
    	return 0;
    }
    

    主要实现思路:
    1)从本地读取两张图像
    2)将需要对比的图像进行HSV格式转换
    3)构建图像的直方图模型,并进行直方图归一化
    4)比较两张图片的直方图模型,计算图片的直方图相似度
    5)判断相似度值,如果大于0.85左右我们可以认为两张图片比较相似的

    2. ORB算法

    参考OpenCV图像相似度ORB算法(图像特征比对)

    #include "stdafx.h"
    #include <iostream>
    #include<opencv2/opencv.hpp>
    #include "opencv2/core/core.hpp"
    #include "opencv2/features2d/features2d.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/nonfree/nonfree.hpp"
    #include "opencv2/nonfree/features2d.hpp"
    #include<opencv2/legacy/legacy.hpp>
    
    using namespace std;
    using namespace cv;
    
    // ORB feature comparison between two images (legacy OpenCV 2.4 API).
    // Detects ORB keypoints/descriptors in both images, brute-force matches
    // them with a Hamming distance, keeps matches below 0.6 * max distance,
    // draws the surviving matches in a window, and prints the elapsed time.
    // Returns -1 if either image cannot be read, 0 otherwise.
    int getORB(char * imagePatha,char * imagePathb){
       double t;
       t=getTickCount();   // start timing (tick counts; converted to ms below)
       Mat img_1 = imread(imagePatha);
       Mat img_2 = imread(imagePathb);
       if (!img_1.data || !img_2.data)   
       {
          cout << "error reading images " << endl;      return -1;
       }
       ORB orb;
       vector<KeyPoint> keyPoints_1, keyPoints_2;
       Mat descriptors_1, descriptors_2;
       // Detect keypoints and compute descriptors in one call (2.4-era functor API).
       orb(img_1, Mat(), keyPoints_1, descriptors_1);
       orb(img_2, Mat(), keyPoints_2, descriptors_2);
       // Hamming distance is the appropriate metric for binary ORB descriptors.
       BruteForceMatcher<HammingLUT> matcher;
       vector<DMatch> matches;
       matcher.match(descriptors_1, descriptors_2, matches);
       double max_dist = 0; double min_dist = 100;
       // Find the best and worst match distances.
       // NOTE(review): this indexes matches[] by descriptors_1.rows — assumes
       // one match per query descriptor; verify matches.size() always agrees.
       for( int i = 0; i < descriptors_1.rows; i++ )   
       {
          double dist = matches[i].distance;
          if( dist < min_dist ) min_dist = dist;
          if( dist > max_dist ) max_dist = dist;
       }
       printf("-- Max dist : %f \n", max_dist );
       printf("-- Min dist : %f \n", min_dist );
       // Keep only matches whose distance is well below the worst one.
       std::vector< DMatch > good_matches;
       for( int i = 0; i < descriptors_1.rows; i++ )   
       {
         if( matches[i].distance < 0.6*max_dist )
    	 {
    		good_matches.push_back(matches[i]);
         }
       }
    	t=getTickCount()-t;
    	t=t*1000/getTickFrequency();   // elapsed time in milliseconds
    	Mat img_matches;
    	drawMatches(img_1, keyPoints_1, img_2, keyPoints_2,good_matches, img_matches,
    	Scalar::all(-1), Scalar::all(-1),vector<char>(),
    	DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    	imshow( "Match", img_matches);   
    	printf( "%f ms\n", t );
    	cvWaitKey(0);
    	return 0;
    }
    
    int main()
    {
    	// Propagate getORB's status code (0 on success, -1 when an image
    	// cannot be read) instead of silently discarding it, so failures
    	// are visible in the process exit code.
    	return getORB("r.png", "s.png");
    }
    

     

    展开全文
  • 比较两张图片相似度-java源码实现

    千次阅读 2019-03-18 16:37:43
    import java.awt.image.BufferedImage; import java.io.File;...比较两张图片相似度 @author Guihua */ public class BMPLoader { // 改变成二进制码 public static String[][] getPX(String ar...
  • 像素对比两张图片相似度是否一样

    万次阅读 2017-06-23 16:04:49
    返回值是 'image1' 和 'image2'对比后的相似度相似度越高,图片越接近,达到100.0说明图片完全相同。 ''' img1 = image1.resize(size).convert("RGB") sub_image1 = split_image(img1, part_size) img2 = ...
  • 判断文件是否完全相同可以使用内置的...也可以使用第三方拓展包,这个速度会快很多,是将图片先进行压缩,再进行比较,还可以得出图片相似度,很是强大。 https://packagist.org/packages/jenssegers/imagehash...
  • JAVA 比较两张图片相似度

    千次阅读 2018-04-28 14:17:17
    import java.awt.image.BufferedImage; import java.io.File;... /** * 比较两张图片相似度 * @author Guihua * */ public class BMPLoader { // 改变成二进制码 public static Str...
  • 2.矩阵相减,用来判断两个图是不是完全一样;hashlib.md5判断两个图是否完全一样哈希算法:1.感知哈希算法;2.均值哈希算法;3.差值哈希算法灰度直方图:1.单通道直方图;2. 三通道直方图二、暴力方式1. 按像素比较#...
    判断两张图的相似度 方法 直方图对比法 ORB算法 实验 1.直方图对比法 参考如何使用OpenCV3直方图方法进行人脸相似度对比 因为我的环境是VS2010+OpenCV2.4.8,所以在原版的基础上做了一点小修改。 #include <...
  • 截图,处理像素点,如果两个图片不同的像素点大于设置的阈值,则认为两张图片不相同,即:收到了微信消息 #图片相似度比较 from PIL import Image import time from PIL import ImageGrab import os root_dir = os....
  • 图片的存储、管理、查询、求解两张图片相似度(基于颜色特征矩阵)
  • JAVA 比较两张图片相似度的代码

    万次阅读 2017-02-10 16:54:31
    import java.awt.image.BufferedImage; import java.io.File;... * 比较两张图片相似度 * @author Guihua * */ public class BMPLoader { // 改变成二进制码 public static String[][] getPX
  • 主要介绍了Python比较图片相似度的方法,涉及Python操作pil模块实现图片比较的技巧,具有一定参考借鉴价值,需要的朋友可以参考下
  • # 自定义计算图片相似度函数 :param img1_path: 图片1路径 :param img2_path: 图片2路径 :return: 图片相似度 """ img1 = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE) img2 = cv2.imread(img2_pa
  • 【matlab】 matlab实现计算两张图片相似度
  • Python计算两张图片相似度

    千次阅读 2019-06-07 16:20:27
    目录一、场景需求解读二、Mean Squared ...  在现实场景中,我们经常会遇到一个问题,即如何评价不同图片的好坏,或者如何比较两张图片的相似性。它在学术研究领域中具有的广泛的研究前景,例如当你提出来一种新...
  • [img=https://forum.csdn.net/PointForum/ui/scripts/csdn/Plugin/003/onion/3.gif][/img]需求是这样的:原先保存有一张图片 现在我要用android手机摄像头拍差不多的图片判断相似度,可以用opencv4android实现...
  • 本文介绍了python 比较2张图片相似度的方法示例,分享给大家,具体如下:#!/usr/bin/python# -*- coding: UTF-8 -*-import cv2import numpy as np#均值哈希算法def aHash(img):#缩放为8*8 img=cv2.resize(img,(8,8...
  • Android简单实现比较两张涂鸦相似度

    千次阅读 2017-12-13 22:04:48
    由于本文的重点不在于涂鸦板,所以就很随便了,哈哈 ...将手绘图的像素点映射表 (int 二维数组,0是无,1是有画笔颜色),跟原图的画笔颜色像素点匹配,得出两张图片的差异,可以得到的数据有: 原图
  • Android 比较两张图片相似度

    千次阅读 2019-07-26 15:36:12
    https://liuguihua0823.iteye.com/blog/1178118 https://blog.csdn.net/cw2004100021124/article/details/9382519 https://blog.csdn.net/u010652002/article/details/72722198
  • android中比较两张图片相似度

    千次阅读 2015-03-08 11:00:23
    public static String similarity (Bitmap b,Bitmap viewBt) { //把图片转换为Bitmap Bitmap bm_one = b; Bitmap bm_two = viewBt; //保存图片所有像素个数的数组,图片宽×高
  • python 比较两张图片相似度

    千次阅读 2018-11-13 18:54:05
    以上代码实现的功能是先读取两张图片,然后比较两张图片的相似程度,当两张图片相似度为0 时,运行到imgocr行时 imgocr 为 nan 会报错 numpy\lib\function_base.py:3183: RuntimeWarning: invalid value ...
  • public static String similarity (Bitmap b,Bitmap viewBt) {//把图片转换为BitmapBitmap bm_one = b;Bitmap bm_two = viewBt;//保存图片所有像素个数的数组,图片宽×高int[] pixels_one = new int[bm_one....
  • 比较两张图片相似度-python

    万次阅读 2015-02-03 23:59:06
    如果两张图片的直方图很接近,就可以认为它们很相似。 详细的看这里: google识图 感觉用Wote写的ImgHash更加简洁: ;">import glob import os import sys from PIL import Image EXTS = ...
  • java--比较两张图片相似度

    千次阅读 2014-04-08 11:52:43
    import java.awt.image.BufferedImage; import java.io.File;... * 比较两张图片相似度 * @author Guihua * */ public class BMPLoader { // 改变成二进制码 public static String[][] getPX

空空如也

空空如也

1 2 3 4 5 ... 20
收藏数 5,674
精华内容 2,269
关键字:

判断两张图片的相似度