2016-04-23 22:38:08 u013218907 阅读数 576

有时候会遇到有两张几乎完全相同的图片,但其中一张会多一些东西,比如logo、花纹等。此程序可以找到这些不同,将图片不同的像素放到另一张图片上

例如:
这里写图片描述

程序界面如下图
这里写图片描述
点击文件1按钮加载第一个图片,文件2同理,点击【开始比较】按钮,会生成一张新的图片,即两个图片之间的差异(文件1多于文件2的部分)
主要问题在对于图片像素的处理(按下【开始比较】按钮的处理)
程序压缩包见如下链接
http://download.csdn.net/detail/u013218907/9500550

核心代码如下:

private void btnStartCompare_Click(object sender, EventArgs e)
        {
            // Load the two selected images, verify they are comparable, and save
            // the pixels where they differ (taken from image 1) as "diff.png".
            // `using` guarantees the GDI+ bitmap handles are released even on an
            // early return — the original version leaked all three bitmaps.
            using (Bitmap bitmap1 = (Bitmap)Bitmap.FromFile(this.tbxFilePath1.Text, false))
            using (Bitmap bitmap2 = (Bitmap)Bitmap.FromFile(this.tbxFilePath2.Text, false))
            {
                // Validate BEFORE allocating the diff bitmap; the original
                // allocated it first and wasted the work on invalid input.
                if (!checkBitmap(bitmap1, bitmap2))
                {
                    MessageBox.Show(this, "图片有问题", "警告",
                        MessageBoxButtons.OK, MessageBoxIcon.Warning);
                    return;
                }

                using (Bitmap diffBitmap = createBitmap(bitmap1.Width, bitmap1.Height))
                {
                    compareBitmap(bitmap1, bitmap2, diffBitmap);

                    try
                    {
                        diffBitmap.Save("diff.png");
                    }
                    catch (Exception)
                    {
                        MessageBox.Show(this, "存储图片时发生错误〒_〒", "出错了",
                            MessageBoxButtons.OK, MessageBoxIcon.Error);
                        return;
                    }
                }
            }
            MessageBox.Show(this, "成功完成图片处理~", "成功了!!!",
                    MessageBoxButtons.OK, MessageBoxIcon.Information);
        }

        private bool checkBitmap(Bitmap bitmap1, Bitmap bitmap2)
        {
            if (bitmap1 == null || bitmap2 == null) {
                return false;
            }

            if (bitmap1.Width != bitmap2.Width || bitmap1.Height != bitmap2.Height)
            {
                return false;
            }

            return true;
        }

        private void compareBitmap(Bitmap bitmap1, Bitmap bitmap2, Bitmap diffBitmap)
        {
            Color color1;
            Color color2;

            int wide = bitmap1.Width;
            int height = bitmap1.Height;
            for (int y = 0; y < height; y++)
            {
                for (int x = 0; x < wide; x++)
                {
                    //获取像素的RGB颜色值
                    color1 = bitmap1.GetPixel(x, y);
                    color2 = bitmap2.GetPixel(x, y);
                    if (color1 != color2)
                    {
                        diffBitmap.SetPixel(x, y, color1);
                    }
                }
            }
            return;
        }

private Bitmap createBitmap(int wight, int height)
        {
            // Creates the output bitmap that receives the diff pixels.
            // ("wight" is presumably a typo for "width" — kept so existing
            // callers using named arguments are unaffected.)
            Bitmap bitmap = new Bitmap(wight, height);
            // NOTE(review): a freshly constructed 32bpp ARGB Bitmap is already
            // fully transparent, so this call looks redundant — confirm before
            // removing it.
            bitmap.MakeTransparent(Color.Transparent);

            return bitmap;
        }

        public static Bitmap RGB2Gray(Bitmap srcBitmap)
        {
            Color srcColor;
            int wide = srcBitmap.Width;
            int height = srcBitmap.Height;
            for (int y = 0; y < height; y++)
                for (int x = 0; x < wide; x++)
                {
                    //获取像素的RGB颜色值
                    srcColor = srcBitmap.GetPixel(x, y);
                    byte temp = (byte)(srcColor.R * .299 + srcColor.G * .587 + srcColor.B * .114);
                    //设置像素的RGB颜色值
                    srcBitmap.SetPixel(x, y, Color.FromArgb(temp, temp, temp));
                }
            return srcBitmap;
        }

    }
2019-06-23 12:54:49 weixin_43431189 阅读数 603

Opencv查找两张图片不同的部分以及图片中特定的像素替换

Opencv识别两张图片的不同部分demo:

import cv2
import numpy as np
from matplotlib import pyplot as plt
import argparse
 
def matchAB(fileA, fileB):
    """Visualize where image A differs from image B.

    Slides a 100x100 window over A in steps of 10 px, template-matches the
    window in B, and records the absolute difference between the window and
    its best match; thresholded differences are outlined with rectangles on
    a copy of A, and A / B / the annotated copy are shown side by side.
    """
    # 读取图像数据
    imgA = cv2.imread(fileA)
    imgB = cv2.imread(fileB)

    # Convert to grayscale for matching.
    grayA = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY)
    grayB = cv2.cvtColor(imgB, cv2.COLOR_BGR2GRAY)

    # Dimensions of image A drive the scan.
    height, width = grayA.shape

    # Scan local windows of A and diff each against its best match in B.
    result_window = np.zeros((height, width), dtype=imgA.dtype)
    for start_y in range(0, height - 100, 10):
        for start_x in range(0, width - 100, 10):
            window = grayA[start_y:start_y + 100, start_x:start_x + 100]
            match = cv2.matchTemplate(grayB, window, cv2.TM_CCOEFF_NORMED)
            _, _, _, max_loc = cv2.minMaxLoc(match)
            matched_window = grayB[max_loc[1]:max_loc[1] + 100, max_loc[0]:max_loc[0] + 100]
            result = cv2.absdiff(window, matched_window)
            result_window[start_y:start_y + 100, start_x:start_x + 100] = result

    # Outline the differing regions with rectangles.
    _, result_window_bin = cv2.threshold(result_window, 30, 255, cv2.THRESH_BINARY)
    # Bug fix: cv2.findContours returns 3 values only in OpenCV 3.x (2 in
    # 2.x/4.x); taking the second-to-last element works on every version.
    contours = cv2.findContours(result_window_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    imgC = imgA.copy()
    for contour in contours:
        # Renamed from min/max to avoid shadowing the Python builtins.
        lo = np.nanmin(contour, 0)
        hi = np.nanmax(contour, 0)
        loc1 = (lo[0][0], lo[0][1])
        loc2 = (hi[0][0], hi[0][1])
        cv2.rectangle(imgC, loc1, loc2, 255, 2)

    plt.subplot(1, 3, 1), plt.imshow(cv2.cvtColor(imgA, cv2.COLOR_BGR2RGB)), plt.title('A'), plt.xticks([]), plt.yticks([])
    plt.subplot(1, 3, 2), plt.imshow(cv2.cvtColor(imgB, cv2.COLOR_BGR2RGB)), plt.title('B'), plt.xticks([]), plt.yticks([])
    plt.subplot(1, 3, 3), plt.imshow(cv2.cvtColor(imgC, cv2.COLOR_BGR2RGB)), plt.title('Answer'), plt.xticks([]), plt.yticks([])
    plt.show()
 
if __name__ == '__main__':
    # Parse the two image paths from the command line, then run the diff.
    parser = argparse.ArgumentParser()
    parser.add_argument('--source_image',
                        type=str,
                        default='img/image01-0.png',
                        help='source image')
    parser.add_argument('--target_image',
                        type=str,
                        default='img/image01-1.png',
                        help='target image')
    FLAGS, unparsed = parser.parse_known_args()

    matchAB(FLAGS.source_image, FLAGS.target_image)

Opencv图片重合匹配

import cv2
import numpy as np
from matplotlib import pyplot as plt
import argparse
 
def matchAB(fileA, fileB):
    """Visualize where image A differs from image B.

    Slides a 100x100 window over A in steps of 10 px, template-matches the
    window in B, and records the absolute difference between the window and
    its best match; thresholded differences are outlined with rectangles on
    a copy of A, and A / B / the annotated copy are shown side by side.
    """
    # 读取图像数据
    imgA = cv2.imread(fileA)
    imgB = cv2.imread(fileB)

    # Convert to grayscale for matching.
    grayA = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY)
    grayB = cv2.cvtColor(imgB, cv2.COLOR_BGR2GRAY)

    # Dimensions of image A drive the scan.
    height, width = grayA.shape

    # Scan local windows of A and diff each against its best match in B.
    result_window = np.zeros((height, width), dtype=imgA.dtype)
    for start_y in range(0, height - 100, 10):
        for start_x in range(0, width - 100, 10):
            window = grayA[start_y:start_y + 100, start_x:start_x + 100]
            match = cv2.matchTemplate(grayB, window, cv2.TM_CCOEFF_NORMED)
            _, _, _, max_loc = cv2.minMaxLoc(match)
            matched_window = grayB[max_loc[1]:max_loc[1] + 100, max_loc[0]:max_loc[0] + 100]
            result = cv2.absdiff(window, matched_window)
            result_window[start_y:start_y + 100, start_x:start_x + 100] = result

    # Outline the differing regions with rectangles.
    _, result_window_bin = cv2.threshold(result_window, 30, 255, cv2.THRESH_BINARY)
    # Bug fix: cv2.findContours returns 3 values only in OpenCV 3.x (2 in
    # 2.x/4.x); taking the second-to-last element works on every version.
    contours = cv2.findContours(result_window_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    imgC = imgA.copy()
    for contour in contours:
        # Renamed from min/max to avoid shadowing the Python builtins.
        lo = np.nanmin(contour, 0)
        hi = np.nanmax(contour, 0)
        loc1 = (lo[0][0], lo[0][1])
        loc2 = (hi[0][0], hi[0][1])
        cv2.rectangle(imgC, loc1, loc2, 255, 2)

    plt.subplot(1, 3, 1), plt.imshow(cv2.cvtColor(imgA, cv2.COLOR_BGR2RGB)), plt.title('A'), plt.xticks([]), plt.yticks([])
    plt.subplot(1, 3, 2), plt.imshow(cv2.cvtColor(imgB, cv2.COLOR_BGR2RGB)), plt.title('B'), plt.xticks([]), plt.yticks([])
    plt.subplot(1, 3, 3), plt.imshow(cv2.cvtColor(imgC, cv2.COLOR_BGR2RGB)), plt.title('Answer'), plt.xticks([]), plt.yticks([])
    plt.show()
 
if __name__ == '__main__':
    # Parse the two image paths from the command line, then run the diff.
    parser = argparse.ArgumentParser()
    parser.add_argument('--source_image',
                        type=str,
                        default='img/image01-0.png',
                        help='source image')
    parser.add_argument('--target_image',
                        type=str,
                        default='img/image01-1.png',
                        help='target image')
    FLAGS, unparsed = parser.parse_known_args()
    matchAB(FLAGS.source_image, FLAGS.target_image)

demo2效果图

2017-02-22 23:33:46 MysticalRzc 阅读数 2690

使用openCV这个工具进行图片处理还是很简单的,这里总结一下两张大小不同的图片的合成。

openCV合成两张类型、大小相同的图片是很简单的,只需使用addWeighted函数就行了。

函数的参数是addWeighted ( 第一张图片,第一张图片所占的比例,第二张图片,第二张图片所占的比例, 0., 输出图片) ;

	Mat imageROI;
	imageROI = girl(Rect(0, 0, leave.cols, leave.rows));
	addWeighted(imageROI,0.5,leave,0.5,0,imageROI);
	imshow("girl",girl);

个人理解,请多多指教。

首先要创建一个矩阵,这个矩阵要按照最小的那个图片来,将这个图片放置于大图片当中,有点像ps中的选择框,之后的图形合成会在这个框内进行。下面进行图形合成的时候,合成的两张图片是那张较小的图片和框选出来的那部分图片,合成之后改变的是大图,而不是带框的那个图片。

2019-04-21 20:56:22 francislucien2017 阅读数 467

概述:

  • 场景与任务:判断相邻的两张微信聊天截图是否为同一张(传输压缩、格式转换过程中存在一定像素失真和边缘抖动,不可以直接相减)
  • 要求:使用数字图像处理的方法(仅作为预处理去重,不用深度学习方法);一组(两张)图片判断的时间要求在20ms以内;
  • 思路:
    • 转换到HSV空间下,先将聊天窗口通过颜色阈值单独分离出来;
    • 轮廓查找判断两张图的闭包矩形框的数目是否相同,不同则为不同截图;
    • 否则再利用ORB提取特征点(keypoints)和描述子(descriptors),计算两张图对应特征点的斜率,若60%的线条均值在0附近(前后各去掉最大最小的20%斜率的曲线),则为同一张图,否则为不同张;
    • Python 测试成功后转写成 C++ 版本以适应速度需求;

Python 代码:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import os, glob, time


orb = cv2.ORB_create()


def read_img(name1, name2):
    """Load both screenshots, crop them to their shared region, and scale
    each one to a width of 800 px (height follows its own aspect ratio)."""
    img1 = cv2.imread(name1, 1)
    img2 = cv2.imread(name2)
    common_h = min(img1.shape[0], img2.shape[0])
    common_w = min(img1.shape[1], img2.shape[1])
    img1 = img1[:common_h, :common_w]
    img2 = img2[:common_h, :common_w]
    img1 = cv2.resize(img1, (800, 800 * img1.shape[0] // img1.shape[1]))
    img2 = cv2.resize(img2, (800, 800 * img2.shape[0] // img2.shape[1]))
    return img1, img2


def color_enhance(img):
    """Keep only the green and near-white regions of a BGR image.

    Thresholds in HSV space (green chat-bubble range and near-white
    background range) and returns the masked pixels combined.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Green hue band.
    mask_green = cv2.inRange(hsv,
                             lowerb=np.array([35, 43, 46]),
                             upperb=np.array([77, 255, 255]))
    # Low-saturation, high-value band (near white).
    mask_white = cv2.inRange(hsv,
                             lowerb=np.array([0, 0, 245]),
                             upperb=np.array([180, 30, 255]))

    white_part = cv2.bitwise_and(img, img, mask=mask_white)
    green_part = cv2.bitwise_and(img, img, mask=mask_green)
    return white_part + green_part


def count_box(img, show=False):
    """Count sufficiently large contour regions in `img` and draw their
    bounding boxes (mutates `img`; returns the count)."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 9, 2)
    # Bug fix: contours were extracted from the raw grayscale image while the
    # adaptive-threshold result went unused (except for display); search the
    # binarized image instead. [-2] keeps compatibility with both the
    # 3-value (OpenCV 3.x) and 2-value (2.x/4.x) findContours signatures.
    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    H, W = img.shape[:2]
    count = 0
    for contour in contours:
        # NOTE(review): `H > W * 1.1` compares the whole image's dimensions,
        # not the contour's, so it is loop-invariant — it probably meant the
        # bounding rect's `h > w * 1.1`. Kept as-is pending confirmation.
        if cv2.contourArea(contour) < H * W / 500 or H > W * 1.1:
            continue
        count += 1
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)

    if show:
        cv2.imshow('img', img)
        cv2.imshow('gray', gray)
        cv2.imshow('thresh', thresh)
        cv2.waitKey(0)
    return count


def orb_match(img1, img2):
    """Run the module-level ORB detector on both images and return the
    keypoints and descriptors of each."""
    results = [orb.detectAndCompute(image, None) for image in (img1, img2)]
    (kp1, des1), (kp2, des2) = results
    return kp1, des1, kp2, des2


def draw_keypoints(img, keypoints, color=(0, 255, 255)):
    """Mark each keypoint on `img` with a small circle (mutates and
    returns `img`)."""
    for keypoint in keypoints:
        px, py = keypoint.pt
        cv2.circle(img, (int(px), int(py)), 2, color)
    return img


def match_imgs(des1, des2):
    """Brute-force kNN match two ORB descriptor sets and keep the matches
    that pass Lowe's ratio test (ratio 0.8).

    Returns a list of single-element match lists (the format
    cv2.drawMatchesKnn expects).
    """
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    good = []
    for pair in matches:
        # Bug fix: knnMatch can return fewer than k neighbours (e.g. for very
        # small descriptor sets); unpacking such pairs as `m, n` raised
        # ValueError. Skip them instead.
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < 0.8 * n.distance:
            good.append([m])
    return good


def compute_slope(src, dst):
    """Slope of the line joining matched points when the two 800-px-wide
    images are drawn side by side: (y - y') / (x' - x + 800)."""
    rise = src[1] - dst[1]
    run = dst[0] - src[0] + 800
    return rise / run


def judge(img1, img2, show=False):
    """Return True when the two (equal-sized) screenshots appear identical.

    Pipeline: color-enhance both images, require the same number of detected
    boxes, then ORB-match the enhanced images and require the trimmed mean
    slope of match lines to be near zero (matched points should sit at the
    same height in both images when they are the same screenshot).
    """
    img3, img4 = color_enhance(img1), color_enhance(img2)
    n1 = count_box(img3)
    n2 = count_box(img4)
    if n1 != n2:
        print('n1, n2: ', n1, n2)
        return False
    kp1, des1, kp2, des2 = orb_match(img3, img4)
    good = match_imgs(des1, des2)
    # Bug fix: with no good matches the slope list ends up empty and the mean
    # computation below raised ZeroDivisionError; treat "nothing matched" as
    # different screenshots.
    if not good:
        return False
    src_pts = np.float32([kp1[m[0].queryIdx].pt for m in good]).reshape(-1, 2)
    dst_pts = np.float32([kp2[m[0].trainIdx].pt for m in good]).reshape(-1, 2)
    all_slopes = []
    for i in range(len(src_pts)):
        all_slopes.append(compute_slope(src_pts[i], dst_pts[i]))
    all_slopes.sort()
    # Trim the lowest and highest 20% of slopes as outliers; fall back to the
    # full list when it is too short to trim.
    len_s = len(all_slopes) // 5
    filtered_slopes = all_slopes[len_s:-len_s]
    slopes = filtered_slopes if filtered_slopes else all_slopes

    if show:
        slopes = pd.Series(slopes)
        # print(slopes.describe())
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.hist(slopes, bins=20, color='blue', alpha=0.8)
        plt.show()

        img5 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
        thresh_merge = np.hstack((img3, img4))
        cv2.imshow("thresh_merge", thresh_merge)

        visual_1 = draw_keypoints(img1, kp1, color=(255, 0, 255))
        visual_2 = draw_keypoints(img2, kp2, color=(255, 0, 255))
        hmerge = np.hstack((visual_1, visual_2))
        cv2.imshow("point", hmerge)
        cv2.imshow("ORB", img5)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    slopes_mean = sum(slopes) / len(slopes)
    print('abs slope mean: ', abs(slopes_mean))
    # Near-horizontal match lines => same screenshot.
    return abs(slopes_mean) < 0.01


if __name__ == '__main__':
    # Compare a fixed pair of screenshots with the visual debug output on.
    name1, name2 = './1.png', './2.png'
    img1, img2 = read_img(name1, name2)
    result = judge(img1, img2, show=True)
    if result:
        print('Same screenshots.')
    else:
        print('Different screenshots.')

 

C++ 代码(去掉了颜色增强部分):

JudgeDuplicates.h

#ifndef JUDGEDUPLICATES_H
#define JUDGEDUPLICATES_H

#include <cstdlib>
#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>


// Decides whether two chat screenshots are duplicates by matching ORB
// keypoints between them and checking that the lines joining matched
// points are (nearly) horizontal.
class JudgeDuplicates
{
    public:
        JudgeDuplicates();
        // Detects ORB keypoints/descriptors in both images and fills the
        // output vectors with the keypoints of each image and the matches
        // that pass the ratio test.
        void orb_match(cv::Mat, cv::Mat,
                       std::vector<cv::KeyPoint>&,
                       std::vector<cv::KeyPoint>&,
                       std::vector<cv::DMatch>&);
        // Slope of the line joining a point in image 1 to its match in
        // image 2 when the 800-px-wide images are drawn side by side.
        double compute_slope(cv::Point, cv::Point);
        // Loads the two image files and returns true when they appear to
        // be the same screenshot.
        bool judge(std::string, std::string);
        virtual ~JudgeDuplicates();

    protected:

    private:
};

#endif // JUDGEDUPLICATES_H

JudgeDuplicates.cpp

#include "JudgeDuplicates.h"

JudgeDuplicates::JudgeDuplicates()
{
    // Constructor: no state to initialize.
}

JudgeDuplicates::~JudgeDuplicates()
{
    // Destructor: nothing to release.
}


void JudgeDuplicates::orb_match(cv::Mat img1, cv::Mat img2,
                                std::vector<cv::KeyPoint>& kp1,
                                std::vector<cv::KeyPoint>& kp2,
                                std::vector<cv::DMatch>& goodmatches){
    // Detect ORB features in both images and keep the kNN matches that pass
    // Lowe's ratio test (ratio 0.8).
    // Renamed from "Hessian": ORB::create's first argument is nfeatures
    // (the maximum keypoint count), not a Hessian threshold.
    int nFeatures = 500;
    cv::Ptr<cv::ORB> detector = cv::ORB::create(nFeatures);
    cv::Mat des1, des2;
    detector->detectAndCompute(img1, cv::Mat(), kp1, des1);
    detector->detectAndCompute(img2, cv::Mat(), kp2, des2);

    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce");
    std::vector<std::vector<cv::DMatch> > matches_knn;
    matcher->knnMatch(des1, des2, matches_knn, 2);
    for (size_t i = 0; i < matches_knn.size(); ++i){
        // Bug fix: knnMatch may return fewer than 2 neighbours for a query;
        // indexing [1] unconditionally would read out of bounds.
        if (matches_knn[i].size() < 2){
            continue;
        }
        if (matches_knn[i][0].distance < 0.8 * matches_knn[i][1].distance){
            goodmatches.push_back(matches_knn[i][0]);
        }
    }
}


double JudgeDuplicates::compute_slope(cv::Point src, cv::Point dst){
    // Slope of the line from src (image 1) to dst (image 2) when the two
    // 800-px-wide images are laid side by side: (y - y') / (x' - x + 800).
    double rise = static_cast<double>(src.y - dst.y);
    double run = static_cast<double>(dst.x - src.x) + 800.0;
    return rise / run;
}


bool JudgeDuplicates::judge(std::string name1, std::string name2){
    // Loads both screenshots, extracts ORB matches, and reports "duplicate"
    // when the trimmed mean |slope| of the match lines is near zero.
    cv::Mat img1 = cv::imread(name1, 1);
    cv::Mat img2 = cv::imread(name2, 1);
    int h1 = img1.rows;
    int w1 = img1.cols;
    // NOTE(review): both images are resized using img1's aspect ratio, so
    // they end up the same size; the Python version used each image's own
    // ratio — confirm which behavior is intended.
    cv::resize(img1, img1, cv::Size(800, int(800 * h1 / w1)));
    cv::resize(img2, img2, cv::Size(800, int(800 * h1 / w1)));

    std::vector<cv::KeyPoint> kp1, kp2;
    std::vector<cv::DMatch> good_matches;
    orb_match(img1, img2, kp1, kp2, good_matches);
    std::cout << good_matches.size() << std::endl;

    // Collect the matched point pairs (query from img1, train from img2).
    std::vector<cv::Point> src_pts, dst_pts;
    for(size_t i = 0; i < good_matches.size(); ++i){
        int x1 = kp1[good_matches[i].queryIdx].pt.x;
        int y1 = kp1[good_matches[i].queryIdx].pt.y;
        int x2 = kp2[good_matches[i].trainIdx].pt.x;
        int y2 = kp2[good_matches[i].trainIdx].pt.y;
        cv::Point src_pt = cv::Point(x1, y1);
        cv::Point dst_pt = cv::Point(x2, y2);
        src_pts.push_back(src_pt);
        dst_pts.push_back(dst_pt);
    }
    // Compute one slope per matched pair, sort, and average the absolute
    // values of the middle 60% (the trim drops outlier matches).
    double slope, mean_slope = 0.0;
    std::vector<double> slopes;
    for(size_t i = 0; i < src_pts.size(); ++i){
        slope = compute_slope(src_pts[i], dst_pts[i]);
        slopes.push_back(slope);
    }
    sort(slopes.begin(), slopes.end());
    int line_cnt = 0;
    for(size_t i = 0; i < slopes.size(); ++i){
        if(i < slopes.size() * 0.2){
            continue;
        }
        if(i > slopes.size() * 0.8){
            break;
        }
        line_cnt += 1;
        // fabs: mean of |slope|, so positive/negative jitter cannot cancel.
        mean_slope += fabs(slopes[i]);
        std::cout << slopes[i] << std::endl;
    }

    if(line_cnt != 0){
        mean_slope /= line_cnt;
    }
    else{
        // No usable matches: force a huge value so the pair is judged different.
        mean_slope = 1000000;
    }
    std::cout << mean_slope << " line_cnt " << line_cnt << std::endl;

    // Near-horizontal match lines => same screenshot.
    if(mean_slope < 0.001)
        return true;
    else
        return false;
}

 

2017-03-05 13:15:14 qq_31718279 阅读数 554

点击这里可以查看我所有关于图形图像处理的文章

简单图片包括Bitmap和BitmapFactory两类。

Bitmap用于表示一张位图,BitmapFactory用于从文件、资源、字节数组等来源解码生成Bitmap对象。

如果想将Bitmap封装成一个BitmapDrawable对象,可以调用BitmapDrawable的构造方法。

Bitmap bitmap = BitmapFactory.decodeFile("draw1.jpg");
BitmapDrawable bd = new BitmapDrawable(bitmap);

如果需要获取BitmapDrawable包装的 Bitmap对象,可以调用 BitmapDrawable的getBitmap()方法。

Bitmap bitmap = bd.getBitmap()

Bitmap类常用函数

编号 方 法 描 述
1 createBitmap(Bitmap source,int x,int y,int width,int height) 从原位图source的指定坐标点(x,y)开始,截取宽为width,长为height的部分,创建一个新的Bitm对象
2 createBitmap(int width,int height,Bitmap.Config cpnfig) 创建一个宽度为width,长为height的新位图
3 getHeight() 获取位图的高度
4 getWidth() 获取位图的宽度
5 isRecycled() 返回该Bitmap对象是否已被回收
6 recycle() 强制一个Bitmap对象立刻回收自己

简单图片实例—图片放大器

说明:这个图片放大器要实现的功能就:是在一张图片上我们点击哪个区域,会对该处的细节进行放大处理。


activity_main.xml

<?xml version="1.0" encoding="utf-8"?>
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    android:orientation="vertical">
    <ImageView
        android:id="@+id/bitmap1"
        android:layout_width="match_parent"
        android:layout_height="240dp"
        android:scaleType="fitXY"/>
    <ImageView
        android:id="@+id/bitmap2"
        android:layout_width="100dp"
        android:layout_height="100dp"
        android:layout_gravity="center_horizontal"
        android:layout_marginTop="10dp"/>
</LinearLayout>

MainActivity.java

package com.file.file;

import android.app.Activity;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.drawable.BitmapDrawable;
import android.os.Bundle;
import android.view.MotionEvent;
import android.view.View;
import android.widget.ImageView;

public class MainActivity extends Activity {

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        final ImageView bitmap1 = (ImageView) findViewById(R.id.bitmap1);
        final ImageView bitmap2 = (ImageView) findViewById(R.id.bitmap2);
        // Load the picture to magnify.
        bitmap1.setImageBitmap(BitmapFactory.decodeResource(getResources(), R.drawable.cat));
        bitmap1.setOnTouchListener(new View.OnTouchListener() {
            @Override
            // Touch handler: shows a 100x100 crop of the source bitmap,
            // centered on the touch point, in the second ImageView.
            public boolean onTouch(View view, MotionEvent motionEvent) {
                BitmapDrawable bitmapDrawable = (BitmapDrawable) bitmap1.getDrawable();
                Bitmap bitmap = bitmapDrawable.getBitmap();
                // The ImageView usually scales the bitmap down; map the view
                // coordinates back to bitmap coordinates.
                float xScale = bitmap.getWidth() / (float) bitmap1.getWidth();
                float yScale = bitmap.getHeight() / (float) bitmap1.getHeight();
                int x = (int) (motionEvent.getX() * xScale);
                int y = (int) (motionEvent.getY() * yScale);

                // Clamp the crop rectangle so it always lies fully inside the
                // bitmap. Also cap the crop size for bitmaps smaller than
                // 100 px — the original clamping produced negative offsets in
                // that case and Bitmap.createBitmap would throw.
                int cropW = Math.min(100, bitmap.getWidth());
                int cropH = Math.min(100, bitmap.getHeight());
                int left = Math.max(0, Math.min(x - cropW / 2, bitmap.getWidth() - cropW));
                int top = Math.max(0, Math.min(y - cropH / 2, bitmap.getHeight() - cropH));

                bitmap2.setImageBitmap(Bitmap.createBitmap(bitmap, left, top, cropW, cropH));
                bitmap2.setVisibility(View.VISIBLE);

                // Return false so the event is not consumed and other
                // listeners (e.g. click) can still run.
                return false;
            }
        });
    }

}


实现效果截图
这里写图片描述

http://icelily.xyz/?p=119

博文 来自: qq_15009269
没有更多推荐了,返回首页