How to compute the error between two grayscale images


Answer A:

Yes, you can. Here is a MATLAB example; the image it reads is grayscale, and it runs without problems:
ref = imread('pout.tif');
H = fspecial('Gaussian',[11 11],1.5);
A = imfilter(ref,H,'replicate');

subplot(1,2,1); imshow(ref); title('Reference Image');
subplot(1,2,2); imshow(A); title('Blurred Image');

[ssimval, ssimmap] = ssim(A,ref);

fprintf('The ssim value is %0.4f.\n',ssimval);

figure, imshow(ssimmap,[]);
title(sprintf('ssim Index Map - Mean ssim Value is %0.4f',ssimval));

(This answer was accepted by the asker.)
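Besides SSIM, the mean squared error (MSE) and PSNR are common, simpler measures of the difference between two grayscale images. Below is a minimal sketch in Java with OpenCV (to match the Java question later on this page); the class and method names are my own, and both inputs are assumed to be 8-bit single-channel images of the same size.

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;

public class GrayscaleError {
    // Mean squared error between two same-sized 8-bit grayscale images.
    public static double mse(Mat a, Mat b) {
        Mat diff = new Mat();
        Core.absdiff(a, b, diff);            // per-pixel |a - b|
        diff.convertTo(diff, CvType.CV_64F); // widen before squaring to avoid 8-bit overflow
        Mat squared = diff.mul(diff);        // element-wise square
        return Core.sumElems(squared).val[0] / (double) a.total();
    }

    // Peak signal-to-noise ratio in dB, assuming a peak value of 255.
    // Returns infinity when the images are identical (MSE == 0).
    public static double psnr(Mat a, Mat b) {
        return 10.0 * Math.log10(255.0 * 255.0 / mse(a, b));
    }
}

Lower MSE (or higher PSNR) means the images are more alike; unlike SSIM, these measures treat every pixel independently and ignore structural similarity.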

DrawMatching between two images - image recognition

Posted: 2016-12-11 19:00:51

Question:

I am trying to display the matching keypoints between two images (one captured from my camera, the other loaded from a database).

Can anyone help me write the DrawMatches call in my code so that it draws the matching lines between the two images?

Here is my code:

public final class ImageDetectionFilter {

// Flag indicating whether the target image's corners were found.
private boolean flagDraw;

// The reference image (this detector's target).
private final Mat mReferenceImage;

// Features of the reference image.
private final MatOfKeyPoint mReferenceKeypoints = new MatOfKeyPoint();

// Descriptors of the reference image's features.
private final Mat mReferenceDescriptors = new Mat();

// The corner coordinates of the reference image, in pixels.
// CvType defines the color depth, number of channels, and
// channel layout in the image. Here, each point is represented
// by two 32-bit floats.
private final Mat mReferenceCorners = new Mat(4, 1, CvType.CV_32FC2);

// Features of the scene (the current frame).
private final MatOfKeyPoint mSceneKeypoints = new MatOfKeyPoint();
// Descriptors of the scene's features.
private final Mat mSceneDescriptors = new Mat();
// Tentative corner coordinates detected in the scene, in
// pixels.
private final Mat mCandidateSceneCorners = 
    new Mat(4, 1, CvType.CV_32FC2);
// Good corner coordinates detected in the scene, in pixels.
private final Mat mSceneCorners = new Mat(4, 1, CvType.CV_32FC2);
// The good detected corner coordinates, in pixels, as integers.
private final MatOfPoint mIntSceneCorners = new MatOfPoint();

// A grayscale version of the scene.
private final Mat mGraySrc = new Mat();
// Tentative matches of scene features and reference features.
private final MatOfDMatch mMatches = new MatOfDMatch();

// A feature detector, which finds features in images.
private final FeatureDetector mFeatureDetector = 
    FeatureDetector.create(FeatureDetector.ORB);
// A descriptor extractor, which creates descriptors of
// features.
private final DescriptorExtractor mDescriptorExtractor = 
    DescriptorExtractor.create(DescriptorExtractor.ORB);
// A descriptor matcher, which matches features based on their
// descriptors.
private final DescriptorMatcher mDescriptorMatcher = DescriptorMatcher
    .create(DescriptorMatcher.BRUTEFORCE_HAMMINGLUT);

// The color of the outline drawn around the detected image.
private final Scalar mLineColor = new Scalar(0, 255, 0);

public ImageDetectionFilter(final Context context,
    final int referenceImageResourceID) throws IOException {

// Load the reference image from the app's resources.
// It is loaded in BGR (blue, green, red) format.
mReferenceImage = Utils.loadResource(context, referenceImageResourceID,
        Imgcodecs.CV_LOAD_IMAGE_COLOR);

// Create grayscale and RGBA versions of the reference image.
final Mat referenceImageGray = new Mat();
Imgproc.cvtColor(mReferenceImage, referenceImageGray,
        Imgproc.COLOR_BGR2GRAY);

Imgproc.cvtColor(mReferenceImage, mReferenceImage,
        Imgproc.COLOR_BGR2RGBA);

// Store the reference image's corner coordinates, in pixels.
mReferenceCorners.put(0, 0, new double[] {0.0, 0.0});
mReferenceCorners.put(1, 0,
        new double[] {referenceImageGray.cols(), 0.0});
mReferenceCorners.put(2, 0,
        new double[] {referenceImageGray.cols(),
        referenceImageGray.rows()});
mReferenceCorners.put(3, 0,
        new double[] {0.0, referenceImageGray.rows()});

// Detect the reference features and compute their
// descriptors.
mFeatureDetector.detect(referenceImageGray, 
        mReferenceKeypoints);
mDescriptorExtractor.compute(referenceImageGray, 
        mReferenceKeypoints, mReferenceDescriptors);
}

public void apply(Mat src, Mat dst) {

// Convert the scene to grayscale.
Imgproc.cvtColor(src, mGraySrc, Imgproc.COLOR_RGBA2GRAY);

// Detect the same features, compute their descriptors,
// and match the scene descriptors to reference descriptors.
mFeatureDetector.detect(mGraySrc, mSceneKeypoints);
mDescriptorExtractor.compute(mGraySrc, mSceneKeypoints,
        mSceneDescriptors);
mDescriptorMatcher.match(mSceneDescriptors, 
        mReferenceDescriptors,mMatches);

findSceneCorners();

// If the corners have been found, draw an outline around the
// target image.
// Else, draw a thumbnail of the target image.
draw(src, dst);
}

private void findSceneCorners() {
flagDraw = false;

final List<DMatch> matchesList = mMatches.toList();

if (matchesList.size() < 4) {
    // There are too few matches to find the homography.
    return;
}

final List<KeyPoint> referenceKeypointsList = 
        mReferenceKeypoints.toList();
final List<KeyPoint> sceneKeypointsList = 
        mSceneKeypoints.toList();

// Calculate the max and min distances between keypoints.
double maxDist = 0.0;
double minDist = Double.MAX_VALUE;

for (final DMatch match : matchesList) {
    final double dist = match.distance;
    if (dist < minDist) {
        minDist = dist;
    }
    if (dist > maxDist) {
        maxDist = dist;
    }
}

// The thresholds for minDist are chosen subjectively
// based on testing. The unit is not related to pixel
// distances; it is related to the number of failed tests
// for similarity between the matched descriptors.
if (minDist > 50.0) {
    // The target is completely lost.
    // Discard any previously found corners.
    mSceneCorners.create(0, 0, mSceneCorners.type());
    return;
} else if (minDist > 25.0) {
    // The target is lost, but maybe it is still close.
    // Keep any previously found corners.
    return;
}

// Identify "good" keypoints based on the match distance.
final ArrayList<Point> goodReferencePointsList =
        new ArrayList<Point>();
final ArrayList<Point> goodScenePointsList =
        new ArrayList<Point>();
final double maxGoodMatchDist = 1.75 * minDist;
for (final DMatch match : matchesList) {
    if (match.distance < maxGoodMatchDist) {
        goodReferencePointsList.add(
                referenceKeypointsList.get(match.trainIdx).pt);
        goodScenePointsList.add(
                sceneKeypointsList.get(match.queryIdx).pt);
    }
}
if (goodReferencePointsList.size() < 4
        || goodScenePointsList.size() < 4) {
    // There are too few good points to find the homography.
    return;
}

// There are enough good points to find the homography.
// (Otherwise, the method would have already returned.)

// Convert the matched points to MatOfPoint2f format, as
// required by the Calib3d.findHomography function.
final MatOfPoint2f goodReferencePoints = new MatOfPoint2f();
goodReferencePoints.fromList(goodReferencePointsList);

final MatOfPoint2f goodScenePoints = new MatOfPoint2f();
goodScenePoints.fromList(goodScenePointsList);

// Find the homography.
final Mat homography = Calib3d.findHomography(
        goodReferencePoints,goodScenePoints);

// Use the homography to project the reference corner
// coordinates into scene coordinates.
Core.perspectiveTransform(mReferenceCorners,
        mCandidateSceneCorners,homography);

// Convert the scene corners to integer format, as required
// by the Imgproc.isContourConvex function.
mCandidateSceneCorners.convertTo(mIntSceneCorners, 
        CvType.CV_32S);

// Check whether the corners form a convex polygon. If not,
// (that is, if the corners form a concave polygon), the
// detection result is invalid because no real perspective can
// make the corners of a rectangular image look like a concave
// polygon!
if (Imgproc.isContourConvex(mIntSceneCorners)) {
    // The corners form a convex polygon, so record them as
    // valid scene corners.
    mCandidateSceneCorners.copyTo(mSceneCorners);
    flagDraw = true;
}
}

protected void draw(final Mat src, final Mat dst) {

if (dst != src) {
    src.copyTo(dst);
}

// Outline the found target in green.
Imgproc.line(dst, new Point(mSceneCorners.get(0, 0)), new Point(
        mSceneCorners.get(1, 0)), mLineColor, 4);
Imgproc.line(dst, new Point(mSceneCorners.get(1, 0)), new Point(
        mSceneCorners.get(2, 0)), mLineColor, 4);
Imgproc.line(dst, new Point(mSceneCorners.get(2, 0)), new Point(
        mSceneCorners.get(3, 0)), mLineColor, 4);
Imgproc.line(dst, new Point(mSceneCorners.get(3, 0)), new Point(
        mSceneCorners.get(0, 0)), mLineColor, 4);
}

public boolean getFlagDraw() {
    return flagDraw;
}
}

Comments:

It would be helpful if you could also share some sample images for matching. — ZdaR

@ZdaR, I get an error when executing the following statement. Could you take a look and let me know what is wrong with it? Mat outImg = new Mat(); Features2d.drawMatches(mReferenceImage, mReferenceKeypoints, mCandidateSceneCorners, mSceneKeypoints, mMatches, outImg);

Answer 1:

I am not well versed in Java and not sure whether this will help, but here is an example of how I achieved this in Python with OpenCV. Maybe it will be useful to you.

(The example is adapted from this website, which has further explanations that may be of interest.)

In this example, I find a rotated version of one cartoon animal within a set of six cartoon animals.

Basically, you want to call cv2.drawMatches() with the keypoints from the train and query images and mask out the bad matches. The relevant part of my code is at the very bottom.

Your example is not a minimal code example and I did not work through all of it, but you already seem to have your keypoints, so you should be ready to go?

import numpy as np
import cv2
from matplotlib import pyplot as plt

MIN_MATCH_COUNT = 4


img1 = cv2.imread('d:/one_animal_rotated.jpg',0)          # queryImage
img2 = cv2.imread('d:/many_animals.jpg',0) # trainImage

# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create(0,3,0)
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)

#find matches using FLANN
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)

#apply ratio test to find best matches (values from 0.7-1 made sense here)
good = []
for m,n in matches:
    if m.distance < 1*n.distance:
        good.append(m)

#find homography to transform the edges of the query image and draw them on the train image
#This is also used to mask all keypoints that aren't inside this box further below.
src_pts = np.float32([ kp1[m.queryIdx].pt for m in good]).reshape(-1,1,2)
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good]).reshape(-1,1,2)

M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
matchesMask = mask.ravel().tolist()

h,w = img1.shape
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)

#draw the good matched key points
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                   singlePointColor = None,
                   matchesMask = matchesMask, # draw only inliers
                   flags = 2)

img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
plt.figure()
plt.imshow(img3, 'gray'),plt.show()

Comments:

This is what I need to implement in my code. But I need it in Java, with one image from the database and the other captured by the camera.
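For reference, a minimal sketch of what that call could look like in Java, assuming the OpenCV 3.x bindings and the field names from the question's ImageDetectionFilter class. The error quoted in the question's comments most likely comes from passing mCandidateSceneCorners (a 4x1 matrix of corner coordinates) where drawMatches expects the second image.

// A sketch, not tested. The argument order is
// drawMatches(queryImage, queryKeypoints, trainImage, trainKeypoints,
// matches, outputImage). Since the matcher was called as
// match(mSceneDescriptors, mReferenceDescriptors, mMatches), the scene
// is the query and the reference is the train image.
Mat outImg = new Mat();
Features2d.drawMatches(
        mGraySrc,            // query image: the scene (camera frame)
        mSceneKeypoints,     // keypoints found in the scene
        mReferenceImage,     // train image: the reference from the database
        mReferenceKeypoints, // keypoints found in the reference
        mMatches,            // matches between the two descriptor sets
        outImg);             // output: both images side by side, with match lines

Note that drawMatches generally expects 1- or 3-channel 8-bit images, and the constructor converted mReferenceImage to RGBA, so converting it back first (e.g. with Imgproc.cvtColor and COLOR_RGBA2RGB) may be necessary.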
