Android OpenCV: getting a blank (black) image after warpPerspective?

Posted: 2015-05-30

Question:

I am using Android + OpenCV + JNI to find the largest contour in an image and then crop it with a perspective transform. My problem is that after applying the transform I cannot convert the resulting Mat to a Bitmap; it fails with this error:

OpenCV Error: Assertion failed (src.type() == CV_8UC1 || src.type() == CV_8UC3 || src.type() == CV_8UC4) in void Java_org_opencv_android_Utils_nMatToBitmap2(JNIEnv*, jclass, jlong, jobject, jboolean), file /home/reports/ci/slave_desktop/50-SDK/opencv/modules/java/generator/src/cpp/utils.cpp, line 98
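The assertion means Utils.matToBitmap only accepts 8-bit Mats with 1, 3 or 4 channels, so a CV_32FC1 Mat will always trip it. A quick way to confirm what type is actually being handed back — a minimal sketch, assuming the usual <android/log.h> macro and that quad is the Mat copied into the output — is to log it on the native side:

// Log the OpenCV type code before returning the Mat to Java.
// In OpenCV's encoding, CV_8UC4 is 24 and CV_32FC1 is 5.
__android_log_print(ANDROID_LOG_INFO, "FindSquares",
        "result type=%d channels=%d depth=%d",
        quad.type(), quad.channels(), quad.depth());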

Here is my JNI code:

JNIEXPORT jint JNICALL Java_org_opencv_samples_tutorial3_Sample3Native_FindSquares(
        JNIEnv* env, jobject, jlong addrRgba, jint draw, jlong addrDescriptor) {

Mat& image = *(Mat*) addrRgba;
Mat& pMatDesc = *(Mat*) addrDescriptor;
int thresh = 50, N = 4;
int found = 0;

Mat pyr, timg, gray0(image.size(), CV_8U), gray;

// down-scale and upscale the image to filter out the noise
pyrDown(image, pyr, Size(image.cols / 2, image.rows / 2));
pyrUp(pyr, timg, image.size());
vector < vector<Point> > contours;
// find squares in every color plane of the image
for (int c = 1; c < 3; c++) {
    int ch[] = { c, 0 };
    mixChannels(&timg, 1, &gray0, 1, ch, 1);
    // try several threshold levels
    for (int l = 0; l < N; l++) {
        // hack: use Canny instead of zero threshold level.
        // Canny helps to catch squares with gradient shading
        if (l == 0) {
            // apply Canny. Take the upper threshold from slider
            // and set the lower to 0 (which forces edges merging)
            Canny(gray0, gray, 0, thresh, 5);
            // dilate canny output to remove potential
            // holes between edge segments
            dilate(gray, gray, Mat(), Point(-1, -1));
        } else {
            // apply threshold if l != 0:
            //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
            gray = gray0 >= (l + 1) * 255 / N;
        }
        // find contours and store them all as a list
        findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
        vector<Point> approx;
        // test each contour
        for (size_t i = 0; i < contours.size(); i++) {

            //__android_log_print(ANDROID_LOG_INFO, "Test", "Error:", v);
            // approximate contour with accuracy proportional
            // to the contour perimeter
            approxPolyDP(Mat(contours[i]), approx,
                    arcLength(Mat(contours[i]), true) * 0.02, true);

            // square contours should have 4 vertices after approximation
            // relatively large area (to filter out noisy contours)
            // and be convex.
            // Note: absolute value of an area is used because
            // area may be positive or negative - in accordance with the
            // contour orientation
            if (approx.size() == 4 && fabs(contourArea(Mat(approx))) > 1000
                    && isContourConvex(Mat(approx))) {
                double maxCosine = 0;

                for (int j = 2; j < 5; j++) {
                    // find the maximum cosine of the angle between joint edges
                    double cosine = fabs(
                            angle(approx[j % 4], approx[j - 2],
                                    approx[j - 1]));
                    maxCosine = MAX(maxCosine, cosine);
                }

                // if cosines of all angles are small
                // (all angles are ~90 degree) then write quandrange
                // vertices to resultant sequence
                if (maxCosine < 0.3) {

                    circle(image, approx[0], 5, Scalar(255, 0, 0, 255), 3,
                            4, 0);
                    circle(image, approx[1], 5, Scalar(255, 0, 0, 255), 3,
                            4, 0);
                    circle(image, approx[2], 5, Scalar(255, 0, 0, 255), 3,
                            4, 0);
                    circle(image, approx[3], 5, Scalar(255, 0, 0, 255), 3,
                            4, 0);
                    //rectangle(image, approx[0], approx[2], Scalar(0,255,0,255), 5, 4, 0);

                    //Center of this rectangle
                    int x = (int) ((approx[0].x + approx[1].x + approx[2].x
                            + approx[3].x) / 4.0);
                    int y = (int) ((approx[0].y + approx[1].y + approx[2].y
                            + approx[3].y) / 4.0);

                    if ((int) draw) {
                        //outline
                        line(image, approx[0], approx[1],
                                Scalar(0, 255, 0, 255), 1, 4, 0);
                        line(image, approx[1], approx[2],
                                Scalar(0, 255, 0, 255), 1, 4, 0);
                        line(image, approx[2], approx[3],
                                Scalar(0, 255, 0, 255), 1, 4, 0);
                        line(image, approx[3], approx[0],
                                Scalar(0, 255, 0, 255), 1, 4, 0);
                        //center
                        //circle(image, Point(x,y), 1, Scalar(255,0,0,255));
                    }
                    vector<Point2f> src(4);
                    src[0] = approx[0];
                    src[1] = approx[1];
                    src[2] = approx[2];
                    src[3] = approx[3];
                    cv::Mat quad = cv::Mat::zeros(300, 220, CV_32FC1 );

                    // transformed quadrangle
                    vector<Point2f> quad_pts(4);


                      quad_pts[0] = Point(0, 0);
                      quad_pts[1] = Point(quad.cols, 0);
                      quad_pts[2] = Point(quad.cols, quad.rows);
                      quad_pts[3] = Point(0, quad.rows);

                    Mat transmtx = getPerspectiveTransform(src, quad_pts);
                    warpPerspective(src, quad, transmtx, quad.size());

                    quad.copyTo(pMatDesc);
                    found = 1;
                    jint result = (jint) found;
                    return result;
                }
            }
        }
    }
}

jint result = (jint) found;
return result;
}
In my Java code I call this function as:

found = FindSquares(mRgba.getNativeObjAddr(), mDraw, descriptor.getNativeObjAddr());

Finally, I try to convert the final Mat to a Bitmap:

Mat final_mat = new Mat(descriptor.height(), descriptor.width(), CvType.CV_8UC4);
descriptor.copyTo(final_mat); 
bitmap = Bitmap.createBitmap(final_mat.cols(), final_mat.rows(),
                Bitmap.Config.ARGB_8888);
Utils.matToBitmap(final_mat, bitmap);

The channel type of final_mat comes out as CV_32FC1. How can I convert it to CV_8UC4? Please help me find a solution.
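For reference, the conversion the assertion asks for would look like this on the native side — a minimal sketch, assuming quad is the CV_32FC1 result, its values already lie in the 0-255 range, and the variable names are illustrative:

Mat quad8u, quadRgba;
// CV_32FC1 -> CV_8UC1; pass a scale factor to convertTo if values are not 0-255
quad.convertTo(quad8u, CV_8U);
// CV_8UC1 -> CV_8UC4, the layout Utils.matToBitmap expects for ARGB_8888 bitmaps
cvtColor(quad8u, quadRgba, CV_GRAY2RGBA);
quadRgba.copyTo(pMatDesc);

As the answer below explains, though, the type is only half the problem: the warp above is fed the point vector src instead of the image, so the warped pixels come out black no matter how the type is converted.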

Edit: I have changed the final_mat image to CV_8UC3:

descriptor.copyTo(final_mat);
descriptor.convertTo(final_mat, CvType.CV_8UC1);
Imgproc.cvtColor(final_mat,final_mat,Imgproc.COLOR_GRAY2RGB);

But as a result I get a blank (black) image. Why?


Answer 1:

After a long search I found the solution. The problem was caused by applying the transformations to the actual image. You should run the processing steps (blur, Canny, etc.) on a copy of the actual Mat object, and use the actual Mat object for the perspective warp. Below I attach reference code for finding the biggest contour.
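In other words: run the blur/Canny/findContours pipeline on a clone, but pass the untouched original image — not the vector of corner points, and not a processed copy — as the first argument of warpPerspective. A minimal sketch of just that fix, assuming image is the original CV_8UC4 camera frame, src/quad_pts hold the sorted corners, and outWidth/outHeight stand for whatever output size you choose:

// Detection runs on a throwaway copy so the original pixels stay intact.
Mat detectionCopy = image.clone();
// ... blur, Canny, findContours, corner sorting on detectionCopy ...

// Warp the untouched original; the output inherits its CV_8UC4 type,
// so Utils.matToBitmap accepts it on the Java side without any conversion.
Mat transmtx = getPerspectiveTransform(src, quad_pts);
Mat cropped;
warpPerspective(image, cropped, transmtx, Size(outWidth, outHeight));
cropped.copyTo(pMatDesc);

The full reference code follows.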

jni_part.cpp:

extern "C" 
double angle(Point pt1, Point pt2, Point pt0);

JNIEXPORT jint Java_info_androidhive_androidcameraapi_CameraMainActivity_findSquare(
    JNIEnv*, jobject, jlong addrRgba, jlong addrDescriptor, jint width_,
    jint height_);

JNIEXPORT jint Java_info_androidhive_androidcameraapi_CameraMainActivity_findSquare(
        JNIEnv*, jobject, jlong addrRgba, jlong addrDescriptor, jint width_,
        jint height_) {

Mat& image = *(Mat*) addrRgba;
Mat& imageCropped = *(Mat*) addrDescriptor;
int screen_width = (int) width_;
int screen_height = (int) height_;

Mat newSrc = image.clone();
imageCropped = image.clone();
Mat testImage = image.clone();
// blur will enhance edge detection
Mat blurred(testImage);

medianBlur(testImage, blurred, 9);

Mat gray0(blurred.size(), CV_8U), gray;
vector<vector<Point> > contours;

// find squares in every color plane of the image
cv::vector<cv::vector<cv::Point> > squares;

for (int c = 0; c < 3; c++) {
    int ch[] = { c, 0 };
    mixChannels(&blurred, 1, &gray0, 1, ch, 1);

    // try several threshold levels
    const int threshold_level = 2;
    for (int l = 0; l < threshold_level; l++) {
        // Use Canny instead of zero threshold level!
        // Canny helps to catch squares with gradient shading
        if (l == 0) {
            Canny(gray0, gray, 10, 20, 3); //

            // Dilate helps to remove potential holes between edge segments
            dilate(gray, gray, Mat(), Point(-1, -1));
        } else {
            gray = gray0 >= (l + 1) * 255 / threshold_level;
        }

        // Find contours and store them in a list
        findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

        // Test contours
        vector<Point> approx;
        if (contours.size() > 0) {
            for (size_t i = 0; i < contours.size(); i++) {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(Mat(contours[i]), approx,
                        arcLength(Mat(contours[i]), true) * 0.02, true);

                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if (approx.size() == 4
                        && fabs(contourArea(Mat(approx))) > 1000
                        && isContourConvex(Mat(approx))) {
                    double maxCosine = 0;

                    for (int j = 2; j < 5; j++) {
                        double cosine = fabs(
                                angle(approx[j % 4], approx[j - 2],
                                        approx[j - 1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    if (maxCosine < 0.3) {
                        squares.push_back(approx);

                        /*circle(image, approx[0], 5, Scalar(255, 0, 0, 255), 3,
                         4, 0);
                         circle(image, approx[1], 5, Scalar(255, 0, 0, 255), 3,
                         4, 0);
                         circle(image, approx[2], 5, Scalar(255, 0, 0, 255), 3,
                         4, 0);
                         circle(image, approx[3], 5, Scalar(255, 0, 0, 255), 3,
                         4, 0);
                         if ((int) draw) 
                         line(image, approx[0], approx[1],
                         Scalar(0, 255, 0, 255), 2, 4, 0);
                         line(image, approx[1], approx[2],
                         Scalar(0, 255, 0, 255), 2, 4, 0);
                         line(image, approx[2], approx[3],
                         Scalar(0, 255, 0, 255), 2, 4, 0);
                         line(image, approx[3], approx[0],
                         Scalar(0, 255, 0, 255), 2, 4, 0);
                         */
                    }
                }
            }
        }
    }
}
if (squares.size() > 0) {
    int max_width = 0;
    int max_height = 0;
    int max_square_idx = 0;
    cv::vector<cv::Point> biggest_square;

    __android_log_print(ANDROID_LOG_INFO, "OpenGLTest", "squares %d",
            squares.size());
    for (size_t i = 0; i < squares.size(); i++) {
        // Wrap the candidate square in a bounding cv::Rect structure.
        cv::Rect rectangle = boundingRect(cv::Mat(squares[i]));
        // Store the index position of the biggest square found
        if ((rectangle.width >= max_width)
                && (rectangle.height >= max_height)) {
            max_width = rectangle.width;
            max_height = rectangle.height;
            max_square_idx = i;
        }
    }

    biggest_square = squares[max_square_idx];
    vector<Point> _adjustRect;
    _adjustRect = squares[max_square_idx];
    if (biggest_square.size() == 4) {
        vector<Point> sortedPoints;
        sortedPoints = squares[max_square_idx];


        Point ptBottomLeft1 = biggest_square[0];
        Point ptBottomRight1 = biggest_square[1];
        Point ptTopRight1 = biggest_square[2];
        Point ptTopLeft1 = biggest_square[3];

        // Sum x+y for each corner: the top-left corner has the smallest sum
        // and the bottom-right corner has the largest.
        int bl = ptBottomLeft1.x + ptBottomLeft1.y;
        int br = ptBottomRight1.x + ptBottomRight1.y;
        int tr = ptTopRight1.x + ptTopRight1.y;
        int tl = ptTopLeft1.x + ptTopLeft1.y;

        int value_array[] = { bl, br, tr, tl };
        int max = value_array[0];
        int min = value_array[0];

        for (int s = 0; s < 4; s++) {
            if (value_array[s] > max) {
                max = value_array[s];
            } else if (value_array[s] < min) {
                min = value_array[s];
            }
        }
        int minIndex = 0;
        int maxIndex = 0;

        int missingIndexOne = 0;
        int missingIndexTwo = 0;

        for (int i = 0; i < 4; i++) {

            if (value_array[i] == min) {
                sortedPoints[0] = biggest_square[i];
                minIndex = i;
                continue;
            }

            if (value_array[i] == max) {
                sortedPoints[2] = biggest_square[i];
                maxIndex = i;
                continue;
            }
            missingIndexOne = i;
        }

        for (int i = 0; i < 4; i++) {
            if (missingIndexOne != i && minIndex != i && maxIndex != i) {
                missingIndexTwo = i;
            }
        }

        if (biggest_square[missingIndexOne].x
                < biggest_square[missingIndexTwo].x) {
            // 2nd point found
            sortedPoints[3] = biggest_square[missingIndexOne];
            sortedPoints[1] = biggest_square[missingIndexTwo];
        } else {
            // 4th point found
            sortedPoints[1] = biggest_square[missingIndexOne];
            sortedPoints[3] = biggest_square[missingIndexTwo];
        }

        _adjustRect[0] = sortedPoints[0];
        _adjustRect[1] = sortedPoints[1];
        _adjustRect[2] = sortedPoints[2];
        _adjustRect[3] = sortedPoints[3];
    }

    Point ptTopLeft = _adjustRect[0];
    Point ptTopRight = _adjustRect[1];
    Point ptBottomRight = _adjustRect[2];
    Point ptBottomLeft = _adjustRect[3];

    float imageScale = fminf((float) screen_width / newSrc.cols,
            (float) screen_height / newSrc.rows);

    __android_log_print(ANDROID_LOG_INFO, "OpenGLTest", "imageScale %f",
            imageScale);
    __android_log_print(ANDROID_LOG_INFO, "OpenGLTest", "width_ %d",
            screen_width);

    // Side lengths of the quadrilateral (Euclidean distances between corners).
    float w1 = sqrt(
            pow(ptBottomRight.x / imageScale - ptBottomLeft.x / imageScale, 2)
                    + pow(ptBottomRight.y / imageScale
                            - ptBottomLeft.y / imageScale, 2));
    float w2 = sqrt(
            pow(ptTopRight.x / imageScale - ptTopLeft.x / imageScale, 2)
                    + pow(ptTopRight.y / imageScale
                            - ptTopLeft.y / imageScale, 2));

    float h1 = sqrt(
            pow(ptTopRight.x / imageScale - ptBottomRight.x / imageScale, 2)
                    + pow(ptTopRight.y / imageScale
                            - ptBottomRight.y / imageScale, 2));
    float h2 = sqrt(
            pow(ptTopLeft.x / imageScale - ptBottomLeft.x / imageScale, 2)
                    + pow(ptTopLeft.y / imageScale
                            - ptBottomLeft.y / imageScale, 2));

    float maxWidth = (w1 < w2) ? w1 : w2;
    float maxHeight = (h1 < h2) ? h1 : h2;

    Point2f src[4], quad[4];
    src[0].x = ptTopLeft.x;
    src[0].y = ptTopLeft.y;
    src[1].x = ptTopRight.x;
    src[1].y = ptTopRight.y;
    src[2].x = ptBottomRight.x;
    src[2].y = ptBottomRight.y;
    src[3].x = ptBottomLeft.x;
    src[3].y = ptBottomLeft.y;

    quad[0].x = 0;
    quad[0].y = 0;
    quad[1].x = maxWidth - 1;
    quad[1].y = 0;
    quad[2].x = maxWidth - 1;
    quad[2].y = maxHeight - 1;
    quad[3].x = 0;
    quad[3].y = maxHeight - 1;

    cv::Mat undistorted = cv::Mat(cvSize(maxWidth, maxHeight), CV_8UC1);
    cv::warpPerspective(newSrc, undistorted,
            cv::getPerspectiveTransform(src, quad),
            cvSize(maxWidth, maxHeight));

    imageCropped = undistorted.clone();
}

return 1;
}


double angle(Point pt1, Point pt2, Point pt0) {
// returns the cosine of the angle between vectors pt0->pt1 and pt0->pt2
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1 * dx2 + dy1 * dy2)
        / sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10);
}
}  // extern "C"

Happy coding!!

