using UnityEngine;

/// <summary>
/// Camera-based motion blur post effect. Each frame it hands the shader the
/// previous frame's view-projection matrix and the inverse of the current one;
/// the shader reconstructs world position from the depth texture, reprojects it
/// into the previous frame, and blurs along the resulting screen-space velocity.
/// Only motion caused by the camera itself is captured (see the shader notes).
/// </summary>
public class depthmohu : MonoBehaviour
{
    // Material created from the matching "Unlit/depthmohu" shader.
    public Material mat;

    // Blur (ghosting) strength forwarded to the shader's _BlurSize.
    [Range(0.0f, 1.0f)]
    public float blurSize = 0.5f;

    // View-projection matrix of the previous frame.
    private Matrix4x4 previousViewProjectionMatrix;

    // Camera whose view and projection matrices are sampled every frame.
    private Camera c;

    void Start()
    {
        c = this.GetComponent<Camera>();
        // The shader samples _CameraDepthTexture, so depth rendering must be enabled.
        c.depthTextureMode = DepthTextureMode.Depth;

        // Seed the "previous" matrix with the current one so the very first
        // rendered frame does not smear against an uninitialized zero matrix.
        previousViewProjectionMatrix = c.projectionMatrix * c.worldToCameraMatrix;
    }

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (mat != null)
        {
            // Blur amount — effectively the strength of the ghosting trail.
            mat.SetFloat("_BlurSize", blurSize);
            // Previous frame's view-projection matrix, used for reprojection.
            mat.SetMatrix("_PreviousViewProjectionMatrix", previousViewProjectionMatrix);

            // Current frame's view-projection matrix; its inverse lets the
            // shader reconstruct world position from depth.
            Matrix4x4 currentViewProjectionMatrix = c.projectionMatrix * c.worldToCameraMatrix;
            Matrix4x4 currentViewProjectionInverseMatrix = currentViewProjectionMatrix.inverse;
            mat.SetMatrix("_CurrentViewProjectionInverseMatrix", currentViewProjectionInverseMatrix);

            // Remember this frame's matrix for next frame's reprojection.
            previousViewProjectionMatrix = currentViewProjectionMatrix;

            Graphics.Blit(source, destination, mat);
        }
        else
        {
            Graphics.Blit(source, destination);
        }
    }
}
// Camera motion blur: reconstructs world position from the depth buffer,
// reprojects it with the previous frame's view-projection matrix, and blurs
// along the resulting per-pixel screen-space velocity.
Shader "Unlit/depthmohu" {
    Properties {
        _MainTex ("Texture", 2D) = "white" {}
        _BlurSize("BlurSize",Float) = 0.5
    }
    SubShader {
        Tags { "RenderType" = "Opaque" }
        LOD 100

        CGINCLUDE
        #include "UnityCG.cginc"

        sampler2D _MainTex;
        half4 _MainTex_TexelSize;
        // Depth texture written by the camera (DepthTextureMode.Depth).
        sampler2D _CameraDepthTexture;
        // Inverse of this frame's view-projection matrix (set from C#).
        float4x4 _CurrentViewProjectionInverseMatrix;
        // Previous frame's view-projection matrix (set from C#).
        float4x4 _PreviousViewProjectionMatrix;
        // Blur strength; scales the velocity used as the sample offset.
        half _BlurSize;

        struct v2f {
            float2 uv : TEXCOORD0;
            float2 uv_depth:TEXCOORD1;
            float4 vertex : SV_POSITION;
        };

        v2f vert(appdata_img v) {
            v2f o;
            o.vertex = UnityObjectToClipPos(v.vertex);
            o.uv = v.texcoord;
            o.uv_depth = v.texcoord;
            // Platform difference: on UV-starts-at-top platforms the depth UV
            // must be flipped when the source render target is vertically
            // inverted (negative texel-size y, e.g. with anti-aliasing on).
            #if UNITY_UV_STARTS_AT_TOP
            if (_MainTex_TexelSize.y < 0)
                o.uv_depth.y = 1 - o.uv_depth.y;
            #endif
            return o;
        }

        fixed4 frag(v2f i):SV_Target {
            // Sample this pixel's depth from the camera depth texture.
            float d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture,i.uv_depth);
            // Current NDC position, with uv and depth remapped to [-1, 1].
            float4 H = float4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, d * 2 - 1, 1);
            // Reconstruct world position via the inverse view-projection
            // matrix; the perspective divide by w is required afterwards.
            float4 D = mul(_CurrentViewProjectionInverseMatrix, H);
            float4 worldPos = D / D.w;
            float4 currentPos = H;
            // Reproject the world position into the previous frame's NDC.
            float4 previousPos = mul(_PreviousViewProjectionMatrix, worldPos);
            previousPos /= previousPos.w;
            // Screen-space velocity from the difference of the two NDC
            // positions (halved to map the [-1,1] range back to uv scale).
            float2 velocity = (currentPos.xy - previousPos.xy) / 2.0f;

            float2 uv = i.uv;
            float4 c = tex2D(_MainTex, uv);
            // Accumulate 2 extra taps along the velocity direction.
            for (int it = 1; it < 3; it++) {
                uv += velocity * _BlurSize;  // velocity scales the sample offset
                float4 currentColor = tex2D(_MainTex,uv);
                c += currentColor;
            }
            // Average the 3 taps (center + 2 offsets).
            c /= 3;
            return fixed4(c.rgb, 1);
        }
        ENDCG

        Pass {
            // Standard full-screen post-effect state: always pass depth test,
            // no culling, no depth writes.
            ZTest Always Cull Off ZWrite Off
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            ENDCG
        }
    }
    Fallback Off
}
因为这是根据相机的视角矩阵和投影矩阵来反推世界坐标的，所以这种方法只适用于由这两个矩阵变化所引起的运动模糊——也就是说，它适合场景静止、只有相机在运动时的运动模糊；场景中物体自身的运动不会被捕捉到。