How to store all the pixels within a RotatedRect to another matrix?


【Title】How to store all the pixels within a RotatedRect to another matrix? 【Posted】2014-10-09 13:35:15 【Question】:

I want to process a region of pixels defined by a RotatedRect in OpenCV. Although I know the rectangle's center, size and angle, I am not sure how to store all the x and y positions of this region into another matrix. I checked some other posts; some suggested rotating the image, but that crops part of it. Can you help me?

【Question discussion】:

Rotate the rotated rect back (so theta = 0) and store it as a normal rectangle. Or store it on a black canvas.

Do you only want the pixel locations inside the rotated rect? Draw a white, filled rotated rectangle on a black image (8UC1) and use it as a mask. Then you can loop over the whole mask and say if(mask.at<unsigned char>(y,x)) pixelList.push_back(cv::Point(x,y)); (a minimal sketch of this mask idea follows below).
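A minimal sketch of that mask approach, assuming the usual OpenCV headers are included; img is the source image, rr the RotatedRect, and mask, poly and pixelList are only illustrative names:

cv::Mat mask = cv::Mat::zeros(img.size(), CV_8UC1);
cv::Point2f corners[4];
rr.points(corners);
std::vector<cv::Point> poly;
for (int i = 0; i < 4; i++)
    poly.push_back(cv::Point(cvRound(corners[i].x), cvRound(corners[i].y)));
cv::fillConvexPoly(mask, poly, cv::Scalar(255));   // white filled rotated rectangle on black

std::vector<cv::Point> pixelList;                  // all (x, y) positions inside the RotatedRect
for (int y = 0; y < mask.rows; y++)
    for (int x = 0; x < mask.cols; x++)
        if (mask.at<unsigned char>(y, x))
            pixelList.push_back(cv::Point(x, y));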

【Answer 1】:

Try this (not sure I fully understood the question):

#include "opencv2/opencv.hpp"
#include <vector>
using namespace std;
using namespace cv;
//----------------------------------------------------------
// Bilinear sub-pixel sampling of an affine-mapped patch (adapted from OpenCV's getQuadrangleSubPix)
//----------------------------------------------------------
void getQuadrangleSubPix_8u32f_CnR( const uchar* src, size_t src_step, Size src_size,
                                   float* dst, size_t dst_step, Size win_size,
                                   const double *matrix, int cn )
{
    int x, y, k;
    double A11 = matrix[0], A12 = matrix[1], A13 = matrix[2];
    double A21 = matrix[3], A22 = matrix[4], A23 = matrix[5];

    src_step /= sizeof(src[0]);
    dst_step /= sizeof(dst[0]);

    for( y = 0; y < win_size.height; y++, dst += dst_step )
    {
        double xs = A12*y + A13;
        double ys = A22*y + A23;
        double xe = A11*(win_size.width-1) + A12*y + A13;
        double ye = A21*(win_size.width-1) + A22*y + A23;

        if( (unsigned)(cvFloor(xs)-1) < (unsigned)(src_size.width - 3) &&
            (unsigned)(cvFloor(ys)-1) < (unsigned)(src_size.height - 3) &&
            (unsigned)(cvFloor(xe)-1) < (unsigned)(src_size.width - 3) &&
            (unsigned)(cvFloor(ye)-1) < (unsigned)(src_size.height - 3))
        {
            for( x = 0; x < win_size.width; x++ )
            {
                int ixs = cvFloor( xs );
                int iys = cvFloor( ys );
                const uchar *ptr = src + src_step*iys;
                float a = (float)(xs - ixs), b = (float)(ys - iys), a1 = 1.f - a, b1 = 1.f - b;
                float w00 = a1*b1, w01 = a*b1, w10 = a1*b, w11 = a*b;
                xs += A11;
                ys += A21;

                if( cn == 1 )
                {
                    ptr += ixs;
                    dst[x] = ptr[0]*w00 + ptr[1]*w01 + ptr[src_step]*w10 + ptr[src_step+1]*w11;
                }
                else if( cn == 3 )
                {
                    ptr += ixs*3;
                    float t0 = ptr[0]*w00 + ptr[3]*w01 + ptr[src_step]*w10 + ptr[src_step+3]*w11;
                    float t1 = ptr[1]*w00 + ptr[4]*w01 + ptr[src_step+1]*w10 + ptr[src_step+4]*w11;
                    float t2 = ptr[2]*w00 + ptr[5]*w01 + ptr[src_step+2]*w10 + ptr[src_step+5]*w11;

                    dst[x*3] = t0;
                    dst[x*3+1] = t1;
                    dst[x*3+2] = t2;
                }
                else
                {
                    ptr += ixs*cn;
                    for( k = 0; k < cn; k++ )
                        dst[x*cn+k] = ptr[k]*w00 + ptr[k+cn]*w01 +
                        ptr[src_step+k]*w10 + ptr[src_step+k+cn]*w11;
                }
            }
        }
        else
        {
            for( x = 0; x < win_size.width; x++ )
            {
                int ixs = cvFloor( xs ), iys = cvFloor( ys );
                float a = (float)(xs - ixs), b = (float)(ys - iys), a1 = 1.f - a, b1 = 1.f - b;
                float w00 = a1*b1, w01 = a*b1, w10 = a1*b, w11 = a*b;
                const uchar *ptr0, *ptr1;
                xs += A11; ys += A21;

                if( (unsigned)iys < (unsigned)(src_size.height-1) )
                    ptr0 = src + src_step*iys, ptr1 = ptr0 + src_step;
                else
                    ptr0 = ptr1 = src + (iys < 0 ? 0 : src_size.height-1)*src_step;

                if( (unsigned)ixs < (unsigned)(src_size.width-1) )
                {
                    ptr0 += ixs*cn; ptr1 += ixs*cn;
                    for( k = 0; k < cn; k++ )
                        dst[x*cn + k] = ptr0[k]*w00 + ptr0[k+cn]*w01 + ptr1[k]*w10 + ptr1[k+cn]*w11;
                }
                else
                {
                    ixs = ixs < 0 ? 0 : src_size.width - 1;
                    ptr0 += ixs*cn; ptr1 += ixs*cn;
                    for( k = 0; k < cn; k++ )
                        dst[x*cn + k] = ptr0[k]*b1 + ptr1[k]*b;
                }
            }
        }
    }
}

//----------------------------------------------------------
// Extract a patch defined by a 2x3 affine matrix; fast path for CV_8U -> CV_32F, otherwise fall back to warpAffine
//----------------------------------------------------------
void myGetQuadrangleSubPix(const Mat& src, Mat& dst, Mat& m )
{
    CV_Assert( src.channels() == dst.channels() );

    cv::Size win_size = dst.size();
    double matrix[6];
    cv::Mat M(2, 3, CV_64F, matrix);
    m.convertTo(M, CV_64F);
    double dx = (win_size.width - 1)*0.5;
    double dy = (win_size.height - 1)*0.5;
    matrix[2] -= matrix[0]*dx + matrix[1]*dy;
    matrix[5] -= matrix[3]*dx + matrix[4]*dy;

    if( src.depth() == CV_8U && dst.depth() == CV_32F )
        getQuadrangleSubPix_8u32f_CnR( src.data, src.step, src.size(),
            (float*)dst.data, dst.step, dst.size(),
            matrix, src.channels());
    else
    {
        CV_Assert( src.depth() == dst.depth() );
        cv::warpAffine(src, dst, M, dst.size(),
            cv::INTER_LINEAR + cv::WARP_INVERSE_MAP,
            cv::BORDER_REPLICATE);
    }
}
//----------------------------------------------------------
// Build the rotation + translation matrix for the RotatedRect and extract its pixels into dst
//----------------------------------------------------------
void getRotRectImg(cv::RotatedRect rr, Mat &img, Mat& dst)
{
    Mat m(2,3,CV_64FC1);
    float ang = rr.angle*CV_PI/180.0;
    m.at<double>(0,0) = cos(ang);
    m.at<double>(1,0) = sin(ang);
    m.at<double>(0,1) = -sin(ang);
    m.at<double>(1,1) = cos(ang);
    m.at<double>(0,2) = rr.center.x;
    m.at<double>(1,2) = rr.center.y;
    myGetQuadrangleSubPix(img, dst, m);
}

//----------------------------------------------------------
// Demo: draw a RotatedRect on the image and copy its contents into a separate matrix
//----------------------------------------------------------
int main(int argc, char* argv[])
{
    Mat img = imread("D:\\ImagesForTest\\lena.jpg");
    img.convertTo(img, CV_32FC3, 1.0/255.0);

    cv::RotatedRect rr(cv::Point2f(200,200), Size(50,50), -30);

    // rotated rectangle
    Point2f rect_points[4];
    rr.points( rect_points );

    for( int j = 0; j < 4; j++ )
    {
        line( img, rect_points[j], rect_points[(j+1)%4], Scalar(0,1,0), 1, CV_AA );
    }

    imshow("colImg", img);
    Mat dst(rr.size, CV_32FC3);
    getRotRectImg(rr, img, dst);
    imshow("rotImg", dst);
    cv::waitKey(0);
    cv::destroyAllWindows();
    return 0;
}
Result:

【Discussion】:

This may be what the OP is looking for, but it should be noted that the pixels in the de-rotated rectangle are interpolated from the original RotatedRect and do not necessarily have the same values, or indeed the same number of pixels. I know it happens inside the call to warpAffine, but I think it is worth stating explicitly.

Thanks for sharing. Is it possible to store the original x and y coordinates using the code above?

Yes, that is possible: just fill two image channels with the x and y coordinates. You will get the patch with the (interpolated, as correctly noted in the first comment) coordinates. (A sketch of this coordinate-channel idea follows below.)
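A sketch of that coordinate-channel idea, reusing getRotRectImg from the answer above (coords and coordPatch are illustrative names): fill a two-channel float image with each pixel's own (x, y), warp it exactly like the picture, and every pixel of the resulting patch then carries its (interpolated) source coordinates.

cv::Mat coords(img.size(), CV_32FC2);
for (int y = 0; y < coords.rows; y++)
    for (int x = 0; x < coords.cols; x++)
        coords.at<cv::Vec2f>(y, x) = cv::Vec2f((float)x, (float)y);

cv::Mat coordPatch(rr.size, CV_32FC2);
getRotRectImg(rr, coords, coordPatch);    // same transform as for the image itself
// coordPatch.at<cv::Vec2f>(i, j) now holds the original (x, y) of patch pixel (i, j)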

【Answer 2】:

An implementation based on OpenCV's warpAffine.

Mat getAffineTransformForRotatedRect(RotatedRect rr)
{
    float angle = rr.angle * M_PI / 180.0;
    // angle += M_PI; // you may want to rotate it upside down
    float sinA = sin(angle), cosA = cos(angle);
    float data[6] = {
         cosA, sinA, rr.size.width/2.0f - cosA * rr.center.x - sinA * rr.center.y,
        -sinA, cosA, rr.size.height/2.0f - cosA * rr.center.y + sinA * rr.center.x };
    Mat rot_mat(2, 3, CV_32FC1, data);
    return rot_mat.clone();
}

Mat getRotatedRectImg(const cv::Mat &mat, RotatedRect rr)
{
    Mat M, result;
    M = getAffineTransformForRotatedRect(rr);

    warpAffine(mat, result, M, rr.size, INTER_CUBIC);

    return result;
}
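For completeness, one possible way to call these two helpers (the file name and rectangle values are only for illustration):

cv::Mat img = cv::imread("input.jpg");
cv::RotatedRect rr(cv::Point2f(200, 200), cv::Size2f(80, 40), -30);
cv::Mat patch = getRotatedRectImg(img, rr);   // axis-aligned copy of the rotated region
cv::imwrite("patch.png", patch);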
【Discussion】:
