OpenGL中怎么使用压缩纹理?
Posted
tags:
篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了OpenGL中怎么使用压缩纹理?相关的知识,希望对你有一定的参考价值。
我在OpenGL中使用压缩纹理,但是显示不出来。。。
不用压缩的话,显示正常。
现在我想把纹理的内部格式改成下面的,因为内存不够用了。。
谢谢。
GL_COMPRESSED_ALPHA,
GL_COMPRESSED_LUMINANCE,
GL_COMPRESSED_LUMINANCE_ALPHA,
GL_COMPRESSED_RGB,
GL_COMPRESSED_RGBA,
原来正常的加载纹理部分的代码:
const INT nFormats[] =
GL_ALPHA,
GL_LUMINANCE,
GL_LUMINANCE_ALPHA,
GL_RGB,
GL_RGBA,
;
const INT nTypes[] =
GL_UNSIGNED_BYTE,
GL_UNSIGNED_SHORT_5_6_5,
GL_UNSIGNED_SHORT_4_4_4_4,
GL_UNSIGNED_SHORT_5_5_5_1,
;
glGenTextures(1, &m_uTexID);
glBindTexture(GL_TEXTURE_2D, GetTexID());
glTexImage2D(
GL_TEXTURE_2D,
0,
nFormats[m_nFormat],
m_nWidth,
m_nHeight,
0,
nFormats[m_nFormat],
nTypes[m_nType],
pImage);
===========================
渲染时候的代码:
glEnable(GL_LINE_SMOOTH);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, nTexID);
glEnable(GL_BLEND);
glBlendFunc(m_nGLBlendSrc, m_nGLBlendDst);
ASSERT(fW > 0);
ASSERT(fH > 0);
const FLOAT fTexCoodX = fClipX/fTexW;
const FLOAT fTexCoodY = fClipY/fTexH;
const FLOAT fTexCoodW = fClipW/fTexW;
const FLOAT fTexCoodH = fClipH/fTexH;
const SScreenParam& rScreen = GetScreenParam();
const SWindowParam& rWindow = GetWindowParam();
const FLOAT fScreenW = (fW/rWindow.fW * rScreen.fW);
const FLOAT fScreenH = (fH/rWindow.fH * rScreen.fH);
FLOAT fVerOffsetX = rScreen.fL + fX/rWindow.fW * rScreen.fW;
FLOAT fVerOffsetY = rScreen.fT - fY/rWindow.fH * rScreen.fH;
GLfloat fVertex[] =
fVerOffsetX + fScreenW, fVerOffsetY - fScreenH, fZ,
fVerOffsetX, fVerOffsetY - fScreenH, fZ,
fVerOffsetX + fScreenW, fVerOffsetY, fZ,
fVerOffsetX, fVerOffsetY, fZ,
;
GLfloat fColor[] =
fR, fG, fB, fA,
fR, fG, fB, fA,
fR, fG, fB, fA,
fR, fG, fB, fA,
;
GLfloat fTexCoord[] =
fTexCoodX + fTexCoodW, fTexCoodY + fTexCoodH,
fTexCoodX, fTexCoodY + fTexCoodH,
fTexCoodX + fTexCoodW, fTexCoodY,
fTexCoodX, fTexCoodY,
;
CheckGrad(fX, fY, fW, fH, fClipW, fClipH, fColor);
SetTex(fTexCoord);
SetVert(fVertex, fRot);
glVertexPointer(3, GL_FLOAT, 0, fVertex);
glEnableClientState(GL_VERTEX_ARRAY);
glColorPointer(4, GL_FLOAT, 0, fColor);
glEnableClientState(GL_COLOR_ARRAY);
glTexCoordPointer(2, GL_FLOAT, 0, fTexCoord);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
dds格式分为2个部分:
1、dds头
2、dxt压缩数据
dds的头文件信息如下:
/*
* DDCOLORKEY
*/
typedef struct _DDCOLORKEY
uint32 dwColorSpaceLowvalue; // low boundary of color space that is to
// be treated as Color Key, inclusive
uint32 dwColorSpaceHighvalue; // high boundary of color space that is
// to be treated as Color Key, inclusive
DDCOLORKEY;
/*
* DDSCAPS2
*/
typedef struct _DDSCAPS2
uint32 dwCaps; // capabilities of surface wanted
uint32 dwCaps2;
uint32 dwCaps3;
union
uint32 dwCaps4;
uint32 dwVolumeDepth;
DUMMYUNIONNAMEN(1);
DDSCAPS2;
/*
* DDPIXELFORMAT
*/
typedef struct _DDPIXELFORMAT
uint32 dwSize; // size of structure
uint32 dwFlags; // pixel format flags
uint32 dwFourCC; // (FOURCC code)
union
uint32 dwRGBBitCount; // how many bits per pixel
uint32 dwYUVBitCount; // how many bits per pixel
uint32 dwZBufferBitDepth; // how many total bits/pixel in z buffer (including any stencil bits)
uint32 dwAlphaBitDepth; // how many bits for alpha channels
uint32 dwLuminanceBitCount; // how many bits per pixel
uint32 dwBumpBitCount; // how many bits per "buxel", total
uint32 dwPrivateFormatBitCount;// Bits per pixel of private driver formats. Only valid in texture
// format list and if DDPF_D3DFORMAT is set
DUMMYUNIONNAMEN(1);
union
uint32 dwRBitMask; // mask for red bit
uint32 dwYBitMask; // mask for Y bits
uint32 dwStencilBitDepth; // how many stencil bits (note: dwZBufferBitDepth-dwStencilBitDepth is total Z-only bits)
uint32 dwLuminanceBitMask; // mask for luminance bits
uint32 dwBumpDuBitMask; // mask for bump map U delta bits
uint32 dwOperations; // DDPF_D3DFORMAT Operations
DUMMYUNIONNAMEN(2);
union
uint32 dwGBitMask; // mask for green bits
uint32 dwUBitMask; // mask for U bits
uint32 dwZBitMask; // mask for Z bits
uint32 dwBumpDvBitMask; // mask for bump map V delta bits
struct
uint16 wFlipMSTypes; // Multisample methods supported via flip for this D3DFORMAT
uint16 wBltMSTypes; // Multisample methods supported via blt for this D3DFORMAT
MultiSampleCaps;
DUMMYUNIONNAMEN(3);
union
uint32 dwBBitMask; // mask for blue bits
uint32 dwVBitMask; // mask for V bits
uint32 dwStencilBitMask; // mask for stencil bits
uint32 dwBumpLuminanceBitMask; // mask for luminance in bump map
DUMMYUNIONNAMEN(4);
union
uint32 dwRGBAlphaBitMask; // mask for alpha channel
uint32 dwYUVAlphaBitMask; // mask for alpha channel
uint32 dwLuminanceAlphaBitMask;// mask for alpha channel
uint32 dwRGBZBitMask; // mask for Z channel
uint32 dwYUVZBitMask; // mask for Z channel
DUMMYUNIONNAMEN(5);
DDPIXELFORMAT;
/*
* DDSURFACEDESC2
*/
typedef struct _DDSURFACEDESC2
uint32 dwSize; // size of the DDSURFACEDESC structure
uint32 dwFlags; // determines what fields are valid
uint32 dwHeight; // height of surface to be created
uint32 dwWidth; // width of input surface
union
int32 lPitch; // distance to start of next line (return value only)
uint32 dwLinearSize; // Formless late-allocated optimized surface size
DUMMYUNIONNAMEN(1);
union
uint32 dwBackBufferCount; // number of back buffers requested
uint32 dwDepth; // the depth if this is a volume texture
DUMMYUNIONNAMEN(5);
union
uint32 dwMipMapCount; // number of mip-map levels requested
// dwZBufferBitDepth removed, use ddpfPixelFormat one instead
uint32 dwRefreshRate; // refresh rate (used when display mode is described)
uint32 dwSrcVBHandle; // The source used in VB::Optimize
DUMMYUNIONNAMEN(2);
uint32 dwAlphaBitDepth; // depth of alpha buffer requested
uint32 dwReserved; // reserved
void* lpSurface; // pointer to the associated surface memory
union
DDCOLORKEY ddckCKDestOverlay; // color key for destination overlay use
uint32 dwEmptyFaceColor; // Physical color for empty cubemap faces
DUMMYUNIONNAMEN(3);
DDCOLORKEY ddckCKDestBlt; // color key for destination blt use
DDCOLORKEY ddckCKSrcOverlay; // color key for source overlay use
DDCOLORKEY ddckCKSrcBlt; // color key for source blt use
union
DDPIXELFORMAT ddpfPixelFormat; // pixel format description of the surface
uint32 dwFVF; // vertex format description of vertex buffers
DUMMYUNIONNAMEN(4);
DDSCAPS2 ddsCaps; // direct draw surface capabilities
uint32 dwTextureStage; // stage in multitexture cascade
DDSURFACEDESC2;
struct DDS_IMAGE_DATA
int width;
int height;
int components;
unsigned int format;
int numMipMaps;
unsigned char *pixels;//这个是压缩的图像信息文件,glCompressedTexImage2DARB就是加载这个。
;
参考技术A opengl中有两种方法为顶点(vertex)指定纹理坐标:
1.由人工给每个顶点分配坐标。可以通过函数gltexcord*()来完成。
2.由opengl自动为每个顶点分配坐标。这个任务由函数gltexgen*()来完成。
一开始,我对gltexgen*()这个函数并不是十分的理解 参考技术B 我给你找找
OpenGL RGB DXT1压缩纹理mipmap上传
【中文标题】OpenGL RGB DXT1压缩纹理mipmap上传【英文标题】:OpenGL RGB DXT1 compressed texture mipmap upload 【发布时间】:2020-01-15 13:11:54 【问题描述】:我正在尝试使用 PBO 上传 GL_COMPRESSED_RGB_S3TC_DXT1_EXT 格式的 mipmapped 纹理级别。
该程序正在使用stb_resize.h
和stb_dxt.h
库调整和压缩图像。
显然图像压缩工作正常,但上传任何 mipmap 级别 会产生以下结果:
但是,如果我没有为它正确渲染的纹理定义任何 mipmap 级别。
此外,我没有收到来自 OpenGL 的 任何 错误,因为我启用了 GL_KHR_debug
。
这里是上传mipmap的opengl代码,中间是我自己的一些库代码:
// Load image:
auto img = loadImage2D("parrot.png");
// Resize the image to power-of-two
int max_mipmaps = 1;
unsigned int max_size = nextPow2(std::max(img.width, img.height));
for(int i = max_size; i > 4; i >>= 1)
max_mipmaps++;
auto rimg = img.scaleCubic(max_size,max_size);
// Allocate PBO to source compressed mipmap levels from
handle_t pbo;
size_t pbosize = rimg.compressed_size();
glCreateBuffers(1, &pbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
glBufferStorage(GL_PIXEL_UNPACK_BUFFER, pbosize, 0,
GL_DYNAMIC_STORAGE_BIT|GL_MAP_WRITE_BIT|GL_MAP_READ_BIT|GL_MAP_PERSISTENT_BIT);
// Map the buffer
void * pbodata = (char*)glMapBufferRange(
GL_PIXEL_UNPACK_BUFFER, 0, pbosize,
GL_MAP_WRITE_BIT|GL_MAP_READ_BIT
|GL_MAP_PERSISTENT_BIT
|GL_MAP_FLUSH_EXPLICIT_BIT);
// Compress the image and write the data directly into PBO.
// compress_to() fmt returns GL_COMPRESSED_RGB_S3TC_DXT1_EXT
option_t fmt;
rimg.compress_to(pbodata, fmt);
glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, pbosize);
// Allocate the texture
glCreateTextures(GL_TEXTURE_2D, 1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTextureStorage2D(texture, max_mipmaps, fmt, rimg.width, rimg.width);
// Upload base image level.
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glCompressedTextureSubImage2D(texture, 0, 0,0, rimg.width, rimg.height, fmt, rimg.compressed_size(), 0);
// Process and Upload mipmap levels.
unsigned int mipmapsize = max_size >> 1;
for(int i = 1; i < max_mipmaps && mipmapsize >= 4; ++i)
// Resize next mipmap level.
rimg = img.scaleLinear(mipmapsize,mipmapsize);
// Compress the image and write result to *pbodata
rimg.compress_to(pbodata, fmt);
glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, pbosize);
// Upload mipmap image level.
glCompressedTexSubImage2D(GL_TEXTURE_2D, i, 0,0, rimg.width, rimg.height, fmt, rimg.compressed_size(), 0);
mipmapsize >>= 1;
// Discard the PBO
glUnmapNamedBuffer(pbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER,0);
glDeleteBuffers(1, &pbo);
// Set texture params.
glTextureParameteri(texture, GL_TEXTURE_BASE_LEVEL, 0);
glTextureParameteri(texture, GL_TEXTURE_MAX_LEVEL, max_mipmaps - 1);
glTextureParameteri(texture, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTextureParameteri(texture, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
【问题讨论】:
【参考方案1】:执行glFlushMappedBufferRange
将不足以确保与 GPU 正确同步。这似乎是这里发生的事情:整个 mipmap 金字塔是在 CPU 上创建的,在GPU 处理第一个 glCompressedTexSubImage2D
调用之前。
作为对这个假设的快速测试,您可以在循环中添加glFinish
。要正确同步它,您可以使用GL Sync Objects,但最好完全避免同步:只需使用足够大的 PBO 来保存整个 mipmap 金字塔数据。
【讨论】:
我怀疑 CPU 在 glCompressedTexSubImage2D() 获取数据之前过早地覆盖了数据。谢谢。
PS:我可以简单地为每个纹理级别上传使用新的新鲜 PBO,然后在最后一次 gl*TexSubImage2D 调用后丢弃 PBO?
是的,您当然也可以在这里使用多个 PBO。性能方面最好的选择很可能是只使用一个具有不同范围的 PBO,并且只创建和映射一次。
OTOH,无论如何,CPU 端 DXT 压缩和图像缩放操作可能是这里的限制因素……以上是关于OpenGL中怎么使用压缩纹理?的主要内容,如果未能解决你的问题,请参考以下文章