iOS 使用 vImage - 加速将 QCAR YUV 转换为 RGB
Posted
技术标签:
【中文标题】iOS 使用 vImage - 加速将 QCAR YUV 转换为 RGB【英文标题】:iOS using vImage - Accelerate to convert QCAR YUV to RGB 【发布时间】:2015-04-07 11:09:09 【问题描述】:我正在尝试测试转换 Vuforia 生成的 YUV 图像并使用 ios Accelerate Framework 的 vImage 调用将它们转换为 UIImage
的性能。在代码的当前状态下,我只是想让它工作。现在转换会产生深色条纹图像。是否有任何关于 Vuforia 如何在其实现中布置 YUV 格式的已发布细节?我最初的假设是他们使用了 iOS 设备使用的双平面 420p 格式。相关测试代码如下。
UIImage *imageWithQCARCameraImage(const QCAR::Image *cameraImage)
UIImage *image = nil;
if (cameraImage)
QCAR::PIXEL_FORMAT pixelFormat = cameraImage->getFormat();
CGColorSpaceRef colorSpace = NULL;
switch (pixelFormat)
case QCAR::YUV:
case QCAR::RGB888:
colorSpace = CGColorSpaceCreateDeviceRGB();
break;
case QCAR::GRAYSCALE:
colorSpace = CGColorSpaceCreateDeviceGray();
break;
case QCAR::RGB565:
case QCAR::RGBA8888:
case QCAR::INDEXED:
std::cerr << "Image format conversion not implemented." << std::endl;
break;
case QCAR::UNKNOWN_FORMAT:
std::cerr << "Image format unknown." << std::endl;
break;
int bitsPerComponent = 8;
int width = cameraImage->getWidth();
int height = cameraImage->getHeight();
const void *baseAddress = cameraImage->getPixels();
size_t totalBytes = QCAR::getBufferSize(width, height, pixelFormat);
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault | kCGImageAlphaNone;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
CGImageRef imageRef = NULL;
if (pixelFormat == QCAR::YUV)
int bytesPerPixel = 4;
uint8_t *sourceDataAddress = (uint8_t *)baseAddress;
static vImage_Buffer srcYp =
.width = static_cast<vImagePixelCount>(width),
.height = static_cast<vImagePixelCount>(height),
.data = const_cast<void *>(baseAddress)
;
size_t lumaBytes = width * height;
size_t chromianceBytes = totalBytes - lumaBytes;
static vImage_Buffer srcCb =
.data = static_cast<void *>(sourceDataAddress + lumaBytes)
;
static vImage_Buffer srcCr =
.data = static_cast<void *>(sourceDataAddress + lumaBytes + (chromianceBytes / 2))
;
static vImage_Buffer dest =
.width = static_cast<vImagePixelCount>(width),
.height = static_cast<vImagePixelCount>(height),
.data = imageData
;
//uint8_t permuteMap[] = 1, 2, 3, 0 ;
vImage_YpCbCrPixelRange pixelRange = (vImage_YpCbCrPixelRange) 0, 128, 255, 255, 255, 1, 255, 0 ;
vImage_YpCbCrToARGB info;
vImage_Error error;
error = vImageConvert_YpCbCrToARGB_GenerateConversion(kvImage_YpCbCrToARGBMatrix_ITU_R_601_4,
&pixelRange,
&info,
kvImage420Yp8_Cb8_Cr8,
kvImageARGB8888,
kvImagePrintDiagnosticsToConsole);
error = vImageConvert_420Yp8_Cb8_Cr8ToARGB8888(&srcYp,
&srcCb,
&srcCr,
&dest,
&info,
NULL,
1,
kvImageNoFlags);
vImage_CGImageFormat format =
.bitsPerComponent = static_cast<uint32_t>(bitsPerComponent),
.bitsPerPixel = static_cast<uint32_t>(3 * bitsPerComponent),
.colorSpace = colorSpace,
.bitmapInfo = bitmapInfo,
.version = 0,
.decode = NULL,
.renderingIntent = renderingIntent
;
imageRef = vImageCreateCGImageFromBuffer(&dest,
&format,
NULL,
NULL,
kvImageNoFlags,
&error);
if (error)
std::cerr << "Err." << std::endl;
else
int bitsPerPixel = QCAR::getBitsPerPixel(pixelFormat);
int bytesPerRow = cameraImage->getStride();
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL,
baseAddress,
totalBytes,
NULL);
imageRef = CGImageCreate(width,
height,
bitsPerComponent,
bitsPerPixel,
bytesPerRow,
colorSpace,
bitmapInfo,
provider,
NULL,
false,
renderingIntent);
CGDataProviderRelease(provider);
if (imageRef != NULL)
image = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
if (colorSpace != NULL)
CGColorSpaceRelease(colorSpace);
return image;
【问题讨论】:
迄今为止,高通发布的任何细节都毫无用处。但在他们的论坛中,版主“尝试”回答问题。如果这是一个好问题,他们只有两种方式来回答,“参考文档”或实际答案。希望你能得到答案,听起来很有趣。 我也会在论坛上问,虽然他们的论坛不是很好。我对 YUV 的了解告诉我,他们的 API 没有提供足够的细节供您做任何事情,只能猜测,因为有无数种潜在格式。 非常非常真实!高通需要加快步伐。 每次都需要填写vImage_Buffer结构体的四个字段。 我是我自己的引文。燃烧的衬套将根据需要提供。 【参考方案1】:void *baseAddress = buffer;
size_t totalBytes = width * height * 3 / 2;
uint8_t *sourceDataAddress = (uint8_t *)baseAddress;
vImage_Buffer srcYp =
.width = static_cast<vImagePixelCount>(width),
.height = static_cast<vImagePixelCount>(height),
.rowBytes = static_cast<size_t>(width),
.data = const_cast<void *>(baseAddress),
;
size_t lumaBytes = width * height;
size_t chromianceBytes = totalBytes - lumaBytes;
vImage_Buffer srcCb =
.width = static_cast<vImagePixelCount>(width) / 2,
.height = static_cast<vImagePixelCount>(height) / 2,
.rowBytes = static_cast<size_t>(width) / 2,
.data = static_cast<void *>(sourceDataAddress + lumaBytes),
;
vImage_Buffer srcCr =
.width = static_cast<vImagePixelCount>(width) / 2,
.height = static_cast<vImagePixelCount>(height) / 2,
.rowBytes = static_cast<size_t>(width) / 2,
.data = static_cast<void *>(sourceDataAddress + lumaBytes + (chromianceBytes / 2)),
;
vImage_Buffer dest;
dest.data = NULL;
vImage_Error error = kvImageNoError;
error = vImageBuffer_Init(&dest, height, width, 32, kvImageNoFlags);
// vImage_YpCbCrPixelRange pixelRange = (vImage_YpCbCrPixelRange) 0, 128, 255, 255, 255, 1, 255, 0 ;
vImage_YpCbCrPixelRange pixelRange = 16, 128, 235, 240, 255, 0, 255, 0 ;
vImage_YpCbCrToARGB info;
error = kvImageNoError;
error = vImageConvert_YpCbCrToARGB_GenerateConversion(kvImage_YpCbCrToARGBMatrix_ITU_R_601_4,
&pixelRange,
&info,
kvImage420Yp8_Cb8_Cr8,
kvImageARGB8888,
kvImagePrintDiagnosticsToConsole);
error = kvImageNoError;
uint8_t permuteMap[4] = 3, 2, 1, 0; // BGRA - iOS only support BGRA
error = vImageConvert_420Yp8_Cb8_Cr8ToARGB8888(&srcYp,
&srcCb,
&srcCr,
&dest,
&info,
permuteMap, // for iOS must be no NULL, mac can be NULL iOS only support BGRA
255,
kvImageNoFlags);
【讨论】:
edit 您的答案并添加说明此代码如何/为何解决问题(代码上方,而不是 cmets 中的某处)。 本质上,操作的错误是没有将 srcCb 和 srcCr 中的宽度和高度除以 2。 “两个色度平面(蓝色和红色投影)在水平和垂直维度上都被二次采样了 2 倍”。见wiki.videolan.org/YUV以上是关于iOS 使用 vImage - 加速将 QCAR YUV 转换为 RGB的主要内容,如果未能解决你的问题,请参考以下文章
我无法让 vImage(加速框架)将 420Yp8_Cb8_Cr8(平面)转换为 ARGB8888
iOS - 将 UIImage 转换为 vImage 内存处理