Implementing Post-Processing Effects with Render Features in Unity3D URP
Unity URP ships with a built-in post-processing component, Volume, which already provides quite a few post-processing effects.
Beyond that, a Render Feature can achieve similar results, and with more freedom.
To use one, add a Feature to the Renderer referenced by the Render Pipeline Asset. The example below shows how to add a Kawase blur post-processing effect through a custom Feature.
First, write a custom Feature script:
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Rendering;
using UnityEngine.Rendering.Universal;

public class KawaseBlur : ScriptableRendererFeature
{
    [System.Serializable]
    public class KawaseBlurSettings
    {
        public RenderPassEvent renderPassEvent = RenderPassEvent.AfterRenderingTransparents;
        public Material blurMaterial = null;

        [Range(2, 15)]
        public int blurPasses = 2;

        [Range(1, 4)]
        public int downsample = 1;

        public bool copyToFramebuffer = true;
        public string targetName = "_blurTexture";
    }

    public KawaseBlurSettings settings = new KawaseBlurSettings();

    class CustomRenderPass : ScriptableRenderPass
    {
        public Material blurMaterial;
        public int passes;
        public int downsample;
        public bool copyToFramebuffer;
        public string targetName;

        string profilerTag;

        int tmpId1;
        int tmpId2;
        RenderTargetIdentifier tmpRT1;
        RenderTargetIdentifier tmpRT2;
        RenderTargetIdentifier cameraColorTexture;

        public CustomRenderPass(string profilerTag)
        {
            this.profilerTag = profilerTag;
        }

        public override void Configure(CommandBuffer cmd, RenderTextureDescriptor cameraTextureDescriptor)
        {
            // Allocate two downsampled temporary render targets to ping-pong between.
            var width = cameraTextureDescriptor.width / downsample;
            var height = cameraTextureDescriptor.height / downsample;

            tmpId1 = Shader.PropertyToID("tmpBlurRT1");
            tmpId2 = Shader.PropertyToID("tmpBlurRT2");
            cmd.GetTemporaryRT(tmpId1, width, height, 0, FilterMode.Bilinear, RenderTextureFormat.ARGB32);
            cmd.GetTemporaryRT(tmpId2, width, height, 0, FilterMode.Bilinear, RenderTextureFormat.ARGB32);

            tmpRT1 = new RenderTargetIdentifier(tmpId1);
            tmpRT2 = new RenderTargetIdentifier(tmpId2);

            ConfigureTarget(tmpRT1);
            ConfigureTarget(tmpRT2);
        }

        public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
        {
            cameraColorTexture = renderingData.cameraData.renderer.cameraColorTarget;

            CommandBuffer cmd = CommandBufferPool.Get(profilerTag);

            // First pass: blur the camera color into the first temporary target.
            cmd.SetGlobalFloat("_offset", 1.5f);
            cmd.Blit(cameraColorTexture, tmpRT1, blurMaterial);

            // Intermediate passes: ping-pong between the two temporary targets,
            // increasing the sample offset each iteration.
            for (var i = 1; i < passes - 1; i++)
            {
                cmd.SetGlobalFloat("_offset", 0.5f + i);
                cmd.Blit(tmpRT1, tmpRT2, blurMaterial);

                // ping-pong
                (tmpRT1, tmpRT2) = (tmpRT2, tmpRT1);
            }

            // Final pass: either write back to the camera color target,
            // or publish the result as a global texture for other shaders to sample.
            cmd.SetGlobalFloat("_offset", 0.5f + passes - 1f);
            if (copyToFramebuffer)
            {
                cmd.Blit(tmpRT1, cameraColorTexture, blurMaterial);
            }
            else
            {
                cmd.Blit(tmpRT1, tmpRT2, blurMaterial);
                cmd.SetGlobalTexture(targetName, tmpRT2);
            }

            context.ExecuteCommandBuffer(cmd);
            cmd.Clear();
            CommandBufferPool.Release(cmd);
        }

        public override void FrameCleanup(CommandBuffer cmd)
        {
            // Release the temporary render targets allocated in Configure.
            cmd.ReleaseTemporaryRT(tmpId1);
            cmd.ReleaseTemporaryRT(tmpId2);
        }
    }

    CustomRenderPass scriptablePass;

    public override void Create()
    {
        scriptablePass = new CustomRenderPass("KawaseBlur");
        scriptablePass.blurMaterial = settings.blurMaterial;
        scriptablePass.passes = settings.blurPasses;
        scriptablePass.downsample = settings.downsample;
        scriptablePass.copyToFramebuffer = settings.copyToFramebuffer;
        scriptablePass.targetName = settings.targetName;
        scriptablePass.renderPassEvent = settings.renderPassEvent;
    }

    public override void AddRenderPasses(ScriptableRenderer renderer, ref RenderingData renderingData)
    {
        renderer.EnqueuePass(scriptablePass);
    }
}
Then add a blur shader:
Shader "Custom/RenderFeature/KawaseBlur"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        // _offset ("Offset", float) = 0.5
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 100

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
            float4 _MainTex_TexelSize;
            float4 _MainTex_ST;
            float _offset;

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                o.uv = TRANSFORM_TEX(v.uv, _MainTex);
                return o;
            }

            fixed4 frag (v2f input) : SV_Target
            {
                // Kawase blur kernel: average the centre texel with four
                // diagonal taps offset by _offset texels.
                float2 res = _MainTex_TexelSize.xy;
                float i = _offset;

                fixed4 col;
                col.rgb = tex2D( _MainTex, input.uv ).rgb;
                col.rgb += tex2D( _MainTex, input.uv + float2( i, i ) * res ).rgb;
                col.rgb += tex2D( _MainTex, input.uv + float2( i, -i ) * res ).rgb;
                col.rgb += tex2D( _MainTex, input.uv + float2( -i, i ) * res ).rgb;
                col.rgb += tex2D( _MainTex, input.uv + float2( -i, -i ) * res ).rgb;
                col.rgb /= 5.0f;
                col.a = 1.0f; // avoid returning an uninitialized alpha channel
                return col;
            }
            ENDCG
        }
    }
}
Next, create a new Forward Renderer asset.
Click Add Renderer Feature and add the custom blur Feature written above. Then create a new material that uses the shader above, assign it to the Feature's Blur Material slot, adjust parameters such as Blur Passes, and tick Copy To Framebuffer. With that, the custom Forward Renderer is ready.
Then add the custom Forward Renderer to the URP Render Pipeline Asset currently in use.
Note that if you click Set Default, cameras will pick up this renderer (and its Feature) automatically. Here we do not make it the default; we only add it to the renderer list. In the game, as long as this Render Pipeline Asset is active, each camera can choose its Renderer, so select the newly added custom Renderer.
At this point, the image the camera renders to the framebuffer will have the blur applied. Alternatively, the blurred result does not have to be written to the framebuffer; it can instead be stored in a temporary buffer for other shaders to use. To do that, untick Copy To Framebuffer in the Feature settings and enter the buffer name in Target Name. With the name _blurTexture used above, other shaders can then sample _blurTexture directly, which makes effects such as frosted glass possible. Of course, that usually requires a separate camera with a suitably adjusted view, and the render timing (Render Pass Event) may need to be changed. The advantage of a Render Feature is that you can control when it runs, apply it to only certain cameras, and even choose which objects it renders (see URP's built-in Render Objects feature), so it offers much more freedom.
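As a rough illustration of that last point, here is a minimal sketch of a shader that samples the global _blurTexture by screen position to fake a frosted-glass panel. Only the _blurTexture name comes from the Feature above; the shader name and everything else are made up for this example, and it assumes the Feature has run with Copy To Framebuffer unticked and Target Name set to _blurTexture:
Shader "Custom/RenderFeature/FrostedGlassSample"
{
    SubShader
    {
        Tags { "RenderType"="Transparent" "Queue"="Transparent" }

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            // Global texture published by the KawaseBlur feature via cmd.SetGlobalTexture.
            sampler2D _blurTexture;

            struct appdata
            {
                float4 vertex : POSITION;
            };

            struct v2f
            {
                float4 vertex : SV_POSITION;
                float4 screenPos : TEXCOORD0;
            };

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                // Screen-space position of this fragment, used to look up the blurred buffer.
                o.screenPos = ComputeScreenPos(o.vertex);
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                // Perspective divide to get 0..1 screen UVs, then sample the blurred image.
                float2 uv = i.screenPos.xy / i.screenPos.w;
                return tex2D(_blurTexture, uv);
            }
            ENDCG
        }
    }
}
A quad using a material with this sketch will show the blurred image behind it; in practice you would blend it with the object's own color or a tint, and make sure the pass that produces _blurTexture runs before this object is drawn (adjust the Render Pass Event accordingly).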