Shader - Anti-fisheye by "pulling" pixels

Posted: 2019-02-10 17:22:14

Question: I want to fix the distortion in this picture:
Sorry about the quality, but it is the best example I could find.
I don't know whether this distortion can be fixed (I would like a straight door and a straight ceiling), but basically, instead of pushing pixels out of the image (red arrows), which adds a blur effect, I would like to do the opposite (green arrows): pull the pixels toward the center.
If you have any ideas, that would be great. Other solutions are welcome too!
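In shader terms, "pulling pixels toward the center" means each output pixel samples the source image slightly farther from the center than its own position. Below is a minimal GLSL sketch of that idea; u_texture, u_resolution and the strength k are illustrative assumptions, not the asker's code:

precision mediump float;

// Illustrative sketch only: every output pixel samples the source image a bit
// farther from the center than itself, which "pulls" the picture inward
// instead of pushing it outward.
uniform sampler2D u_texture;    // assumed source image
uniform vec2 u_resolution;      // assumed viewport size in pixels
const float k = 0.8;            // hypothetical correction strength (half-angle, radians)

void main()
{
    vec2 centered = gl_FragCoord.xy / u_resolution * 2.0 - 1.0;  // center at origin, range [-1, 1]
    float r = length(centered);
    float r_src = atan(r * tan(k)) / k;                          // r_src >= r for k in (0, pi/2)
    vec2 uv_src = centered * (r_src / max(r, 1e-6)) * 0.5 + 0.5; // back to [0, 1] texture coords
    gl_FragColor = texture2D(u_texture, uv_src);
}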
Answer 1:

To define the fisheye effect, you have to relate the angle (alpha) to the diagonal (d) of the viewport. The diagonal of the viewport is the diameter of a circle that encloses the entire viewport.
The relation between the radius (r) of this circumscribing circle and the angle (alpha) is:

r = tan(alpha / 2)
In the code below, eye_angle corresponds to alpha and half_dist corresponds to r:

float half_angle = eye_angle/2.0;
float half_dist = tan(half_angle);
Using the aspect ratio of the viewport (aspect) and the normalized device position of the fragment on the viewport (ndcPos), the position P can be computed. In normalized device space, x and y are in the range [-1, 1]:

vec2 vp_scale = vec2(aspect, 1.0);
vec2 P = ndcPos * vp_scale;
For each point (P) on the viewport, the distance from the center of the viewport relative to the circumscribing circle (rel_dist) has to be computed, as well as the position of the point (P) relative to the aspect ratio (rel_P):

float vp_dia = length(vp_scale);
float rel_dist = length(P) / vp_dia;
vec2 rel_P = normalize(P) / normalize(vp_scale);
The fisheye effect is caused by projecting a sphere onto a plane. To compute the distance of a projection to and from the plane, the relation between the arc length and the distance from the center of the plane has to be found:

If the radius of the circle is 1, the length of an arc equals the angle of the arc segment in radians. So the relation between the distance to a point P and the angle beta is:

|P| / r = tan(beta)
beta = atan(|P| / r)
With that, the projection from the sphere onto the plane is:

float beta = rel_dist * half_angle;
vec2 pos_prj = rel_P * tan(beta) / half_dist;
And the projection from the plane onto the sphere is:

float beta = atan(rel_dist * half_dist);
vec2 pos_prj = rel_P * beta / half_angle;
See the following WebGL example, which implements the algorithm in a fragment shader. The angle alpha is set via the uniform variable u_angle.
If u_angle > 0.0, the projection from the sphere to the plane is computed.
If u_angle < 0.0, the projection from the plane to the sphere is computed.
(function loadscene() {

var canvas, gl, vp_size, texture, prog, bufObj = {};

function initScene() {

    canvas = document.getElementById( "ogl-canvas");
    gl = canvas.getContext( "experimental-webgl" );
    if ( !gl )
        return;

    texture = new Texture( "https://raw.githubusercontent.com/Rabbid76/graphics-snippets/master/resource/texture/supermario.jpg" );
    texture.bound = false;

    progDraw = gl.createProgram();
    for (let i = 0; i < 2; ++i) {
        let source = document.getElementById(i==0 ? "draw-shader-vs" : "draw-shader-fs").text;
        let shaderObj = gl.createShader(i==0 ? gl.VERTEX_SHADER : gl.FRAGMENT_SHADER);
        gl.shaderSource(shaderObj, source);
        gl.compileShader(shaderObj);
        let status = gl.getShaderParameter(shaderObj, gl.COMPILE_STATUS);
        if (!status) alert(gl.getShaderInfoLog(shaderObj));
        gl.attachShader(progDraw, shaderObj);
        gl.linkProgram(progDraw);
    }
    status = gl.getProgramParameter(progDraw, gl.LINK_STATUS);
    if ( !status ) alert(gl.getProgramInfoLog(progDraw));
    progDraw.inPos = gl.getAttribLocation(progDraw, "inPos");
    progDraw.u_time = gl.getUniformLocation(progDraw, "u_time");
    progDraw.u_resolution = gl.getUniformLocation(progDraw, "u_resolution");
    progDraw.u_texture = gl.getUniformLocation(progDraw, "u_texture");
    progDraw.u_angle = gl.getUniformLocation(progDraw, "u_angle");
    gl.useProgram(progDraw);

    var pos = [ -1, -1, 1, -1, 1, 1, -1, 1 ];
    var inx = [ 0, 1, 2, 0, 2, 3 ];
    bufObj.pos = gl.createBuffer();
    gl.bindBuffer( gl.ARRAY_BUFFER, bufObj.pos );
    gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( pos ), gl.STATIC_DRAW );
    bufObj.inx = gl.createBuffer();
    bufObj.inx.len = inx.length;
    gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufObj.inx );
    gl.bufferData( gl.ELEMENT_ARRAY_BUFFER, new Uint16Array( inx ), gl.STATIC_DRAW );
    gl.enableVertexAttribArray( progDraw.inPos );
    gl.vertexAttribPointer( progDraw.inPos, 2, gl.FLOAT, false, 0, 0 );

    gl.enable( gl.DEPTH_TEST );
    gl.clearColor( 0.0, 0.0, 0.0, 1.0 );

    window.onresize = resize;
    resize();
    requestAnimationFrame(render);
}

function resize() {
    vp_size = [window.innerWidth, window.innerHeight];
    canvas.width = vp_size[0];
    canvas.height = vp_size[1];
}

function render(deltaMS) {

    scale = document.getElementById( "scale" ).value / 100 * 2.0 - 1.0;

    gl.viewport( 0, 0, canvas.width, canvas.height );
    gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
    texture.bound = texture.bound || texture.bind( 0 );
    gl.uniform1i(progDraw.u_texture, 0);
    gl.uniform1f(progDraw.u_time, deltaMS/2000.0);
    gl.uniform2f(progDraw.u_resolution, canvas.width, canvas.height);
    gl.uniform1f(progDraw.u_angle, scale * Math.PI * 0.9);
    gl.drawElements( gl.TRIANGLES, bufObj.inx.len, gl.UNSIGNED_SHORT, 0 );

    requestAnimationFrame(render);
}

class Texture {
    constructor( name, dflt ) {
        let texture = this;
        this.dflt = dflt || [128,128,128,255];
        let image = { "cx": this.dflt.w || 1, "cy": this.dflt.h || 1, "plane": this.dflt.p || this.dflt };
        this.size = [image.cx, image.cy];
        this.dummyObj = Texture.createTexture2D( image, true );
        this.image = new Image(64,64);
        this.image.setAttribute('crossorigin', 'anonymous');
        this.image.onload = function () {
            let cx = 1 << 31 - Math.clz32(texture.image.naturalWidth);
            if ( cx < texture.image.naturalWidth ) cx *= 2;
            let cy = 1 << 31 - Math.clz32(texture.image.naturalHeight);
            if ( cy < texture.image.naturalHeight ) cy *= 2;
            var canvas = document.createElement( 'canvas' );
            canvas.width = cx;
            canvas.height = cy;
            var context = canvas.getContext( '2d' );
            context.drawImage( texture.image, 0, 0, canvas.width, canvas.height );
            texture.textureObj = Texture.createTexture2D( canvas, true );
            texture.size = [cx, cy];
        };
        this.image.src = name;
    }
    static createTexture2D( image, flipY ) {
        let t = gl.createTexture();
        gl.activeTexture( gl.TEXTURE0 );
        gl.bindTexture( gl.TEXTURE_2D, t );
        gl.pixelStorei( gl.UNPACK_FLIP_Y_WEBGL, flipY != undefined && flipY == true );
        if ( image.cx && image.cy && image.plane )
            gl.texImage2D( gl.TEXTURE_2D, 0, gl.RGBA, image.cx, image.cy, 0, gl.RGBA, gl.UNSIGNED_BYTE, new Uint8Array(image.plane) );
        else
            gl.texImage2D( gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image );
        gl.texParameteri( gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR );
        gl.texParameteri( gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR );
        gl.texParameteri( gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.REPEAT );
        gl.texParameteri( gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.REPEAT );
        gl.bindTexture( gl.TEXTURE_2D, null );
        return t;
    }
    bind( texUnit = 0 ) {
        gl.activeTexture( gl.TEXTURE0 + texUnit );
        if ( this.textureObj ) {
            gl.bindTexture( gl.TEXTURE_2D, this.textureObj );
            return true;
        }
        gl.bindTexture( gl.TEXTURE_2D, this.dummyObj );
        return false;
    }
};

initScene();

})();
html,body { margin: 0; overflow: hidden; }
#gui { position : absolute; top : 0; left : 0; }
<script id="draw-shader-fs" type="x-shader/x-fragment">
precision mediump float;
uniform float u_time;
uniform vec2 u_resolution;
uniform float u_angle;
uniform sampler2D u_texture;
void main()
{
    vec2 uv = gl_FragCoord.xy / u_resolution;
    vec2 ndcPos = uv * 2.0 - 1.0;
    float aspect = u_resolution.x / u_resolution.y;

    float eye_angle = abs(u_angle);
    float half_angle = eye_angle/2.0;
    float half_dist = tan(half_angle);

    vec2 vp_scale = vec2(aspect, 1.0);
    vec2 P = ndcPos * vp_scale;

    float vp_dia = length(vp_scale);
    float rel_dist = length(P) / vp_dia;
    vec2 rel_P = normalize(P) / normalize(vp_scale);

    vec2 pos_prj = ndcPos;
    if (u_angle > 0.0)
    {
        float beta = rel_dist * half_angle;
        pos_prj = rel_P * tan(beta) / half_dist;
    }
    else if (u_angle < 0.0)
    {
        float beta = atan(rel_dist * half_dist);
        pos_prj = rel_P * beta / half_angle;
    }

    vec2 uv_prj = pos_prj * 0.5 + 0.5;

    vec2 rangeCheck = step(vec2(0.0), uv_prj) * step(uv_prj, vec2(1.0));
    if (rangeCheck.x * rangeCheck.y < 0.5)
        discard;

    vec4 texColor = texture2D(u_texture, uv_prj.st);
    gl_FragColor = vec4( texColor.rgb, 1.0 );
}
</script>
<script id="draw-shader-vs" type="x-shader/x-vertex">
precision mediump float;
attribute vec2 inPos;
void main()
{
    gl_Position = vec4( inPos.xy, 0.0, 1.0 );
}
</script>
<canvas id="ogl-canvas" style="border: none"></canvas>
<form id="gui" name="inputs">
<input type="range" id="scale" min="0" max="100" value="20"/>
</form>
Answer 2:

This kind of distortion is a characteristic of the camera. If you have access to the camera that was used to take the picture, you can calibrate it and extract the distortion it applies. Otherwise, you can still use some generic equations. Take a look at Paul Bourke's article, this Stack Overflow answer, or this Shader Toy example to see how it can be done.
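The answer's links are not reproduced here, so as a rough illustration of what such "generic equations" look like, below is a sketch of a simple polynomial (Brown-Conrady-style) radial remap in a fragment shader. The uniform names and the suggested coefficient values are assumptions; real values would come from camera calibration or trial and error:

precision mediump float;

// Sketch of a generic radial lens remap: scale each pixel's distance from the
// center by a polynomial in r^2. Negative coefficients pull content inward.
uniform sampler2D u_texture;   // assumed source image
uniform vec2  u_resolution;    // assumed viewport size in pixels
uniform float u_k1;            // assumed radial coefficient, e.g. -0.2
uniform float u_k2;            // assumed radial coefficient, e.g.  0.05

void main()
{
    vec2 centered = gl_FragCoord.xy / u_resolution * 2.0 - 1.0;
    float r2 = dot(centered, centered);
    vec2 src = centered * (1.0 + u_k1 * r2 + u_k2 * r2 * r2);  // radial remap
    vec2 uv  = src * 0.5 + 0.5;
    gl_FragColor = texture2D(u_texture, uv);
}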
Comments:

The distortion in this picture has nothing to do with a camera; it comes from the stereographic projection (it is a 360-degree picture). I am the creator of the Shadertoy link you provided, but it uses the red-arrow solution, not the green-arrow one.

I misunderstood your question, sorry! Obviously you already know about lens correction ^^