using UnityEngine;

// Height-based fog post effect: builds the four frustum-corner rays on the CPU
// and passes them to the shader so world positions can be rebuilt from depth.
public class BackWu : MonoBehaviour
{
    public Material mat;                 // material using the height-fog shader
    private Camera cam;

    [Range(0.0f, 3.0f)]
    public float fogDensity = 1.0f;      // fog strength
    public Color fogColor = Color.white; // fog colour
    public float fogStart = 0.0f;        // world height where fog starts
    public float fogEnd = 2.0f;          // world height where fog ends

    void Start()
    {
        cam = GetComponent<Camera>();
        // Ask Unity to render a depth texture for this camera.
        cam.depthTextureMode |= DepthTextureMode.Depth;
    }

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (mat == null)
        {
            Graphics.Blit(source, destination);
            return;
        }

        Transform t = cam.transform;
        float near = cam.nearClipPlane;

        // Half height of the near plane, from the vertical field of view.
        float halfHeight = near * Mathf.Tan(cam.fieldOfView * 0.5f * Mathf.Deg2Rad);
        Vector3 toRight = t.right * halfHeight * cam.aspect; // near-plane centre -> right edge
        Vector3 toTop = t.up * halfHeight;                   // near-plane centre -> top edge
        Vector3 forwardNear = t.forward * near;

        // Scale that converts a linear eye depth into a length along a corner ray
        // (all four corners share the same scale by symmetry).
        float scale = (forwardNear - toRight + toTop).magnitude / near;

        // Normalised rays from the camera through each near-plane corner, pre-scaled.
        Vector3 topLeft     = (forwardNear - toRight + toTop).normalized * scale;
        Vector3 topRight    = (forwardNear + toRight + toTop).normalized * scale;
        Vector3 bottomRight = (forwardNear + toRight - toTop).normalized * scale;
        Vector3 bottomLeft  = (forwardNear - toRight - toTop).normalized * scale;

        // Pack the four rays into a matrix; row index matches the shader's corner index.
        Matrix4x4 frustumCorners = Matrix4x4.identity;
        frustumCorners.SetRow(0, bottomLeft);
        frustumCorners.SetRow(1, bottomRight);
        frustumCorners.SetRow(2, topRight);
        frustumCorners.SetRow(3, topLeft);

        mat.SetMatrix("_FrustumCornersRay", frustumCorners);
        mat.SetFloat("_FogDensity", fogDensity);
        mat.SetColor("_FogColor", fogColor);
        mat.SetFloat("_FogStart", fogStart);
        mat.SetFloat("_FogEnd", fogEnd);
        Graphics.Blit(source, destination, mat);
    }
}
// Height-based fog: rebuilds each pixel's world position from linear depth and
// a per-corner frustum ray (set by the BackWu C# script), then applies a
// linear fog factor based on world height.
Shader "Unlit/BackMohu"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        _FogDensity("Fog Density",Float) = 1.0
        _FogColor("Fog Color",Color) = (1,1,1,1)
        _FogStart("Fog Start",Float) = 0.0
        _FogEnd("Fog End",Float) = 1.0
    }
    SubShader
    {
        Tags { "RenderType" = "Opaque" }
        LOD 100

        CGINCLUDE
        float4x4 _FrustumCornersRay; // rows 0-3: BL, BR, TR, TL corner rays (from C#)
        sampler2D _MainTex;
        half4 _MainTex_TexelSize;
        sampler2D _CameraDepthTexture;
        half _FogDensity;
        fixed4 _FogColor;
        float _FogStart;
        float _FogEnd;

        #include "UnityCG.cginc"

        struct v2f
        {
            float4 pos:SV_POSITION;
            half2 uv:TEXCOORD0;
            half2 uv_depth:TEXCOORD1;        // depth-texture uv (may need flipping on D3D)
            float4 interpolateRay:TEXCOORD2; // corner ray, interpolated per fragment
        };

        v2f vert(appdata_img v)
        {
            v2f o;
            o.pos = UnityObjectToClipPos(v.vertex);
            o.uv = v.texcoord;
            o.uv_depth = v.texcoord;
            #if UNITY_UV_STARTS_AT_TOP
            // Flip only the y coordinate when the render texture is inverted.
            // BUGFIX: the original wrote `o.uv_depth = 1 - o.uv_depth.y;`, which
            // assigned the scalar to BOTH components and destroyed the x uv.
            if (_MainTex_TexelSize.y < 0)
                o.uv_depth.y = 1 - o.uv_depth.y;
            #endif

            // Pick the corner ray by uv quadrant:
            // 0 = bottom-left, 1 = bottom-right, 2 = top-right, 3 = top-left.
            int index = 0;
            if (v.texcoord.x < 0.5 && v.texcoord.y < 0.5) { index = 0; }
            else if (v.texcoord.x > 0.5 && v.texcoord.y < 0.5) { index = 1; }
            else if (v.texcoord.x > 0.5 && v.texcoord.y > 0.5) { index = 2; }
            else { index = 3; }
            #if UNITY_UV_STARTS_AT_TOP
            // Mirror the corner index too when the image is flipped.
            if (_MainTex_TexelSize.y < 0)
                index = 3 - index;
            #endif
            // The chosen ray is linearly interpolated across the quad to the fragments.
            o.interpolateRay = _FrustumCornersRay[index];
            return o;
        }

        fixed4 frag(v2f i):SV_Target
        {
            // Rebuild the world position from linear eye depth and the interpolated ray.
            float linearDepth = LinearEyeDepth(SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture,i.uv_depth));
            float3 worldPos = _WorldSpaceCameraPos + linearDepth * i.interpolateRay.xyz;

            // Height-based linear fog: full fog at _FogStart, none at _FogEnd.
            float fogDensity = (_FogEnd - worldPos.y) / (_FogEnd - _FogStart);
            fogDensity = saturate(fogDensity * _FogDensity);

            fixed4 finalColor = tex2D(_MainTex, i.uv);
            finalColor.rgb = lerp(finalColor.rgb, _FogColor.rgb, fogDensity);
            return finalColor;
        }
        ENDCG

        Pass
        {
            ZTest Always Cull Off ZWrite Off
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            ENDCG
        }
    }
    Fallback off
}
using UnityEngine;

// Camera motion blur: the shader rebuilds each pixel's world position from
// depth, reprojects it with last frame's view-projection matrix and blurs
// along the resulting screen-space velocity. Only correct for a moving camera
// in a static scene.
public class depthmohu : MonoBehaviour
{
    public Material mat;

    [Range(0.0f, 1.0f)]
    public float blurSize = 0.5f; // strength of the ghosting/blur

    private Matrix4x4 previousViewProjectionMatrix; // last frame's view-projection matrix
    private Camera c; // camera whose view and projection matrices are read each frame

    void Start()
    {
        c = GetComponent<Camera>();
        c.depthTextureMode = DepthTextureMode.Depth;
        // BUGFIX: seed the "previous" matrix so the very first frame does not
        // reproject through a default (all-zero) matrix, which would make the
        // shader divide by w == 0 and produce a one-frame artifact.
        previousViewProjectionMatrix = c.projectionMatrix * c.worldToCameraMatrix;
    }

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (mat != null)
        {
            mat.SetFloat("_BlurSize", blurSize); // ghosting amount used by the shader
            // Last frame's view-projection matrix, for reprojection.
            mat.SetMatrix("_PreviousViewProjectionMatrix", previousViewProjectionMatrix);
            // Current view-projection matrix and its inverse (world-pos reconstruction).
            Matrix4x4 currentViewProjectionMatrix = c.projectionMatrix * c.worldToCameraMatrix;
            Matrix4x4 currentViewProjectionInverseMatrix = currentViewProjectionMatrix.inverse;
            mat.SetMatrix("_CurrentViewProjectionInverseMatrix", currentViewProjectionInverseMatrix);
            // Remember this frame's matrix for the next frame.
            previousViewProjectionMatrix = currentViewProjectionMatrix;
            Graphics.Blit(source, destination, mat);
        }
        else
        {
            Graphics.Blit(source, destination);
        }
    }
}
// Camera motion blur: rebuilds each pixel's world position from the depth
// texture, reprojects it with last frame's view-projection matrix and samples
// along the resulting screen-space velocity.
Shader "Unlit/depthmohu"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        _BlurSize("BlurSize",Float) = 0.5
    }
    SubShader
    {
        Tags { "RenderType" = "Opaque" }
        LOD 100

        CGINCLUDE
        #include "UnityCG.cginc"
        sampler2D _MainTex;
        half4 _MainTex_TexelSize;
        sampler2D _CameraDepthTexture;
        float4x4 _CurrentViewProjectionInverseMatrix; // inverse VP of the current frame (set from C#)
        float4x4 _PreviousViewProjectionMatrix;       // VP of the previous frame (set from C#)
        half _BlurSize;

        struct v2f
        {
            float2 uv : TEXCOORD0;
            float2 uv_depth:TEXCOORD1; // separate uv for the depth texture (may need flipping)
            float4 vertex : SV_POSITION;
        };

        v2f vert(appdata_img v)
        {
            v2f o;
            o.vertex = UnityObjectToClipPos(v.vertex);
            o.uv = v.texcoord;
            o.uv_depth = v.texcoord;
            // Platform difference: flip the depth uv when rendering into an
            // inverted render texture (e.g. with anti-aliasing on D3D platforms).
            #if UNITY_UV_STARTS_AT_TOP
            if (_MainTex_TexelSize.y < 0)
                o.uv_depth.y = 1 - o.uv_depth.y;
            #endif
            return o;
        }

        fixed4 frag(v2f i):SV_Target
        {
            // Sample the depth texture.
            float d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture,i.uv_depth);
            // Current NDC position, remapped into [-1, 1].
            float4 H = float4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, d * 2 - 1, 1);
            // World position via the inverse view-projection matrix (with w-divide).
            float4 D = mul(_CurrentViewProjectionInverseMatrix, H);
            float4 worldPos = D / D.w;

            float4 currentPos = H;
            // Reproject with last frame's view-projection matrix to get the
            // previous NDC position of the same world point.
            float4 previousPos = mul(_PreviousViewProjectionMatrix, worldPos);
            previousPos /= previousPos.w;

            // Screen-space velocity from the difference of the two NDC positions.
            float2 velocity = (currentPos.xy - previousPos.xy) / 2.0f;

            float2 uv = i.uv;
            float4 c = tex2D(_MainTex, uv);
            // Two extra samples stepped along the velocity; _BlurSize scales the step
            // and therefore the blur amount.
            for (int it = 1; it < 3; it++)
            {
                uv += velocity * _BlurSize;
                float4 currentColor = tex2D(_MainTex,uv);
                c += currentColor;
            }
            c /= 3; // average of the three samples
            return fixed4(c.rgb, 1);
        }
        ENDCG

        Pass
        {
            ZTest Always Cull Off ZWrite Off
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            ENDCG
        }
    }
    Fallback Off
}
因为这是根据相机的视角矩阵和投影矩阵来反推世界坐标的,所以这种方法只适用于由这两个矩阵变化引起的运动模糊,也就是说只适合场景静止、仅相机运动时的运动模糊。
using UnityEngine;

// Visualises the camera's combined depth+normals texture via a post-effect material.
public class ShowCameraDepthTexture : MonoBehaviour
{
    public Material mat;

    private void Awake()
    {
        // DepthNormals mode makes _CameraDepthNormalsTexture available to shaders.
        GetComponent<Camera>().depthTextureMode = DepthTextureMode.DepthNormals;
    }

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        // Apply the visualisation material when assigned, otherwise pass through.
        if (mat == null)
        {
            Graphics.Blit(source, destination);
        }
        else
        {
            Graphics.Blit(source, destination, mat);
        }
    }
}
// Visualises the view-space normals stored in _CameraDepthNormalsTexture.
Shader "Unlit/DepthTexShow"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 100

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
            float4 _MainTex_ST;
            sampler2D _CameraDepthNormalsTexture; // available when depthTextureMode = DepthNormals

            v2f vert (appdata_img v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                o.uv = v.texcoord;
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                float d;
                float3 normal;
                float4 enc = tex2D(_CameraDepthNormalsTexture,i.uv);
                // Decode the packed sample into depth and a view-space normal.
                DecodeDepthNormal(enc,d,normal);
                // Remap the normal from [-1,1] to [0,1] for display.
                return fixed4(normal * 0.5 + 0.5, 1);
                //return fixed4(d,d,d, 1);
            }
            ENDCG
        }
    }
}
通过DecodeDepthNormal去解析取样的结果,可以获得out的深度值和法线方向,enc中xy存储的是视角空间下的法线信息,zw存储的是深度信息。
// Quoted from Unity's UnityCG.cginc for reference: unpacks one sample of
// _CameraDepthNormalsTexture — depth is encoded in enc.zw, the view-space
// normal in enc.xy.
inline void DecodeDepthNormal( float4 enc, out float depth, out float3 normal )
{
    depth = DecodeFloatRG (enc.zw);
    normal = DecodeViewNormalStereo (enc);
}
using UnityEngine;

// Visualises the camera's depth texture via a post-effect material.
public class ShowCameraDepthTexture : MonoBehaviour
{
    public Material mat;

    private void Awake()
    {
        // Depth mode makes _CameraDepthTexture available to shaders.
        GetComponent<Camera>().depthTextureMode = DepthTextureMode.Depth;
    }

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        // Apply the visualisation material when assigned, otherwise pass through.
        if (mat == null)
        {
            Graphics.Blit(source, destination);
        }
        else
        {
            Graphics.Blit(source, destination, mat);
        }
    }
}
// Visualises the camera depth texture as a greyscale image.
Shader "Unlit/DepthTexShow"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 100

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
            float4 _MainTex_ST;
            sampler2D _CameraDepthTexture; // available when depthTextureMode = Depth

            v2f vert (appdata_img v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                o.uv = v.texcoord;
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                // SAMPLE_DEPTH_TEXTURE samples the depth texture portably.
                float d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture,i.uv);
                // Convert the non-linear (post-projection) depth to a linear 0-1 value.
                d = Linear01Depth(d);
                return fixed4(d,d,d,1); // greyscale visualisation
            }
            ENDCG
        }
    }
}
调节相机Camera的Clipping Planes的Far属性到刚好能看见最远物体的位置,因为如果远剪裁面设置得过远,深度值就会被压缩到很小的范围内,深度图看起来就几乎全黑了。
using UnityEngine;

// Motion blur by accumulation: blends each new frame into a persistent render
// texture so previous frames leave a fading trail.
public class movemohu : MonoBehaviour
{
    public Material mat;

    [Range(0.0f, 0.9f)]
    public float blurAmount = 0.5f; // how strongly old frames persist in the blend

    private RenderTexture accumulationTexture; // accumulated result of previous frames

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (mat == null)
        {
            Graphics.Blit(source, destination);
            return;
        }

        // (Re)create the accumulation buffer when it is missing or the screen
        // size changed (e.g. the scene was restarted or the window resized).
        if (accumulationTexture == null
            || accumulationTexture.width != source.width
            || accumulationTexture.height != source.height)
        {
            // BUGFIX: destroy the stale texture before replacing it, otherwise
            // every resize leaks a RenderTexture (it is HideAndDontSave, so
            // nothing else will ever clean it up).
            DestroyImmediate(accumulationTexture);
            accumulationTexture = new RenderTexture(source.width, source.height, 0);
            accumulationTexture.hideFlags = HideFlags.HideAndDontSave;
            Graphics.Blit(source, accumulationTexture);
        }

        // We deliberately blend into the un-cleared texture; mark the restore
        // as expected so Unity does not warn about it.
        accumulationTexture.MarkRestoreExpected();

        mat.SetFloat("_BlurAmount", 1.0f - blurAmount);
        Graphics.Blit(source, accumulationTexture, mat); // blend new frame into the trail
        Graphics.Blit(accumulationTexture, destination); // show the accumulated result
    }

    private void OnDisable()
    {
        // Release the accumulation buffer; it is recreated on the next enable.
        DestroyImmediate(accumulationTexture);
    }
}
// Accumulation motion blur: pass 0 blends the new frame's RGB over the
// accumulated image using _BlurAmount as the blend alpha; pass 1 writes the
// new frame's own alpha channel through unchanged.
Shader "Unlit/movemohu"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        _BlurAmount("Blur Amount",Float) = 1.0
    }
    SubShader
    {
        Tags { "RenderType" = "Opaque" }
        LOD 100

        CGINCLUDE
        #include "UnityCG.cginc"
        sampler2D _MainTex;
        fixed _BlurAmount; // blend weight of the new frame (set from C#)

        struct v2f
        {
            float2 uv : TEXCOORD0;
            float4 vertex : SV_POSITION;
        };

        v2f vert(appdata_img v)
        {
            v2f o;
            o.vertex = UnityObjectToClipPos(v.vertex);
            o.uv = v.texcoord;
            return o;
        }

        // RGB pass: emit the frame with _BlurAmount as alpha so blending mixes
        // it with the accumulation buffer.
        fixed4 fragRGB(v2f i):SV_Target
        {
            return fixed4(tex2D(_MainTex,i.uv).rgb,_BlurAmount);
        }

        // Alpha pass: emit the source sample so its alpha is preserved.
        half4 fragA(v2f i) : SV_Target
        {
            return tex2D(_MainTex,i.uv);
        }
        ENDCG

        ZTest Always Cull Off ZWrite Off

        Pass
        {
            Blend SrcAlpha OneMinusSrcAlpha
            ColorMask RGB // do not write alpha, but use the custom alpha for blending
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment fragRGB
            ENDCG
        }
        Pass
        {
            Blend One Zero
            ColorMask A // restore the original alpha channel
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment fragA
            ENDCG
        }
    }
}
Bloom特效可以将画面中较亮的区域扩散到周围的区域中,造成一种朦胧的效果。
using UnityEngine;

// Bloom post effect: extract pixels above a luminance threshold, gaussian-blur
// them, then add the blurred brightness back on top of the source image.
public class bloom : MonoBehaviour
{
    public Material mat;

    [Range(0, 4)]
    public int iterations = 3;        // number of gaussian blur rounds

    [Range(0.2f, 3.0f)]
    public float blurSpread = 0.6f;   // sample spread added per round

    [Range(1, 8)]
    public int downSample = 2;        // resolution divisor for the blur buffers

    [Range(0.0f, 4.0f)]
    // Luminance threshold; normally it does not exceed 1, but HDR gives higher precision.
    public float luminanceThreshold = 0.6f;

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (mat == null)
        {
            Graphics.Blit(source, destination);
            return;
        }

        mat.SetFloat("_LuminanceThreshold", luminanceThreshold);

        int w = source.width / downSample;
        int h = source.height / downSample;

        // Pass 0: extract the bright areas into a downsampled buffer.
        RenderTexture current = RenderTexture.GetTemporary(w, h, 0);
        current.filterMode = FilterMode.Bilinear;
        Graphics.Blit(source, current, mat, 0);

        // Passes 1 and 2: vertical then horizontal gaussian blur, repeated to
        // spread the bright areas outwards.
        for (int i = 0; i < iterations; i++)
        {
            mat.SetFloat("_BlurSize", 1.0f + i * blurSpread);

            RenderTexture next = RenderTexture.GetTemporary(w, h, 0);
            Graphics.Blit(current, next, mat, 1);
            RenderTexture.ReleaseTemporary(current);
            current = next;

            next = RenderTexture.GetTemporary(w, h, 0);
            Graphics.Blit(current, next, mat, 2);
            RenderTexture.ReleaseTemporary(current);
            current = next;
        }

        // Pass 3: composite the blurred brightness over the original image.
        mat.SetTexture("_Bloom", current);
        Graphics.Blit(source, destination, mat, 3);
        RenderTexture.ReleaseTemporary(current);
    }
}
// Bloom shader: pass 0 extracts pixels above the luminance threshold, passes
// 1-2 reuse the gaussian blur passes from Unlit/gsMohu via UsePass, and pass 3
// adds the blurred bright areas back onto the source image.
Shader "Unlit/bloom"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        _Bloom("Bloom",2D) = "black"{}
        _LuminanceThreshold("Luminance Threshold",Float) = 0.5
        _BlurSize("Blur Size",Float) = 1.0
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 100

        CGINCLUDE
        #include "UnityCG.cginc"

        struct v2f
        {
            float2 uv : TEXCOORD0;
            float4 vertex : SV_POSITION;
        };

        sampler2D _MainTex;
        float4 _MainTex_TexelSize;
        sampler2D _Bloom;          // blurred bright-area texture (set from C#)
        float _LuminanceThreshold;
        float _BlurSize;

        v2f vertExtractBright(appdata_img v)
        {
            v2f o;
            o.vertex = UnityObjectToClipPos(v.vertex);
            o.uv = v.texcoord;
            return o;
        }

        // Brightness extraction: only pixels whose luminance exceeds the
        // threshold keep a (scaled) value; everything else goes to black.
        fixed4 fragExtractBright(v2f i) : SV_Target
        {
            fixed4 c = tex2D(_MainTex,i.uv);
            fixed val = clamp(Luminance(c) - _LuminanceThreshold, 0.0, 1.0);
            return c * val;
        }

        struct v2fBloom
        {
            float4 pos : SV_POSITION;
            half4 uv :TEXCOORD0; // xy: source uv, zw: bloom-texture uv
        };

        v2fBloom vertBloom(appdata_img v)
        {
            v2fBloom o;
            o.pos = UnityObjectToClipPos(v.vertex);
            o.uv.xy = v.texcoord;
            o.uv.zw = v.texcoord;
            // Platform difference: flip the bloom uv when the render texture is inverted.
            #if UNITY_UV_STARTS_AT_TOP
            if (_MainTex_TexelSize.y < 0.0)
                o.uv.w = 1.0 - o.uv.w;
            #endif
            return o;
        }

        // Composite: add the blurred bright areas on top of the source image.
        fixed4 fragBloom(v2fBloom i) :SV_Target
        {
            return tex2D(_MainTex,i.uv.xy) + tex2D(_Bloom,i.uv.zw);
        }
        ENDCG

        ZTest Always Cull Off ZWrite Off

        // Pass 0: bright-area extraction.
        Pass
        {
            CGPROGRAM
            #pragma vertex vertExtractBright
            #pragma fragment fragExtractBright
            ENDCG
        }
        // Passes 1-2: gaussian blur, reused by the NAMEs declared in Unlit/gsMohu
        // (the names must match those declarations byte-for-byte).
        UsePass "Unlit/gsMohu/GAUSSING_BLUR_VERTICAL"
        UsePass "Unlit/gsMohu/GAUSSING_BLUR_GORIZONTAL"
        // Pass 3: add the blurred brightness back onto the source.
        Pass
        {
            CGPROGRAM
            #pragma vertex vertBloom
            #pragma fragment fragBloom
            ENDCG
        }
    }
    Fallback Off
}
UsePass使用的是 “高斯模糊”中使用NAME命名好的Pass,达到重用的目的
OpenGL和DirectX使用的是不同的屏幕空间坐标,在水平方向是相同的,但是在竖直方向是反过来的。在渲染到屏幕上或者渲染到RenderTexture上时,Unity都会为我们做相应的翻转处理,当在DirectX平台上渲染到纹理时,Unity会翻转屏幕图像纹理。但是如果渲染到RenderTexture并开启了抗锯齿时,Unity是不会帮我们做处理的,我们需要自行将y翻转过来。
//自行反转操作 #if UNITY_UV_STARTS_AT_TOP if (_MainTex_TexelSize.y < 0.0) o.uv.w = 1.0 - o.uv.w; #endif
using UnityEngine;

// Gaussian blur driver: ping-pongs a downsampled image through the shader's
// vertical and horizontal blur passes a configurable number of times.
public class Mohua : MonoBehaviour
{
    public Material mat;

    [Range(0, 4)]
    public int iterations = 3;       // more rounds => stronger blur

    [Range(0.2f, 3.0f)]
    public float blurSpread = 0.6f;  // sample spread added per round

    [Range(1, 8)]
    public int downSample = 2;       // resolution divisor for the blur buffers

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (mat == null)
        {
            Graphics.Blit(source, destination);
            return;
        }

        int rtW = source.width / downSample;
        int rtH = source.height / downSample;

        RenderTexture current = RenderTexture.GetTemporary(rtW, rtH, 0);
        current.filterMode = FilterMode.Bilinear;
        Graphics.Blit(source, current);

        for (int i = 0; i < iterations; i++)
        {
            mat.SetFloat("_BlurSize", 1.0f + i * blurSpread);

            // Pass 0: vertical blur.
            RenderTexture next = RenderTexture.GetTemporary(rtW, rtH, 0);
            Graphics.Blit(current, next, mat, 0);
            RenderTexture.ReleaseTemporary(current);
            current = next;

            // Pass 1: horizontal blur.
            next = RenderTexture.GetTemporary(rtW, rtH, 0);
            Graphics.Blit(current, next, mat, 1);
            RenderTexture.ReleaseTemporary(current);
            current = next;
        }

        // Present the blurred result and release the last buffer.
        Graphics.Blit(current, destination);
        RenderTexture.ReleaseTemporary(current);
    }
}
// Separable gaussian blur: one named pass blurs vertically, the other
// horizontally, each with a 5-tap kernel computed in the vertex shader.
Shader "Unlit/gsMohu"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        _BlurSize ("Blur Size",Float) = 1.0
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 100

        CGINCLUDE // shared by both passes
        #include "UnityCG.cginc"

        struct v2f
        {
            float2 uv[5] : TEXCOORD0; // centre sample + 4 neighbours
            float4 vertex : SV_POSITION;
        };

        sampler2D _MainTex;
        half4 _MainTex_TexelSize;
        float _BlurSize;

        // Five vertical sample points; a larger _BlurSize spreads the samples
        // further apart and therefore blurs more.
        v2f vertBlurVertical (appdata_img v)
        {
            v2f o;
            o.vertex = UnityObjectToClipPos(v.vertex);
            half2 uv = v.texcoord;
            o.uv[0] = uv;
            o.uv[1] = uv + float2(0.0, _MainTex_TexelSize.y * 1.0) * _BlurSize;
            o.uv[2] = uv - float2(0.0, _MainTex_TexelSize.y * 1.0) * _BlurSize;
            o.uv[3] = uv + float2(0.0, _MainTex_TexelSize.y * 2.0) * _BlurSize;
            o.uv[4] = uv - float2(0.0, _MainTex_TexelSize.y * 2.0) * _BlurSize;
            return o;
        }

        // Five horizontal sample points.
        // BUGFIX: the offsets must be applied to the x component — the original
        // wrote float2(0.0, _MainTex_TexelSize.x * ...), putting the x texel
        // size into y, so the "horizontal" pass actually blurred vertically.
        v2f vertBlurHorizontal(appdata_img v)
        {
            v2f o;
            o.vertex = UnityObjectToClipPos(v.vertex);
            half2 uv = v.texcoord;
            o.uv[0] = uv;
            o.uv[1] = uv + float2(_MainTex_TexelSize.x * 1.0, 0.0) * _BlurSize;
            o.uv[2] = uv - float2(_MainTex_TexelSize.x * 1.0, 0.0) * _BlurSize;
            o.uv[3] = uv + float2(_MainTex_TexelSize.x * 2.0, 0.0) * _BlurSize;
            o.uv[4] = uv - float2(_MainTex_TexelSize.x * 2.0, 0.0) * _BlurSize;
            return o;
        }

        // 1D gaussian kernel with standard deviation 1; the weights sum to 1
        // (0.4026 + 2*0.2442 + 2*0.0545).
        fixed4 frag(v2f i) : SV_Target
        {
            float weight[3] = {0.4026,0.2442,0.0545};
            fixed3 sum = tex2D(_MainTex, i.uv[0]).rgb * weight[0];
            for (int it = 1; it < 3; it++)
            {
                sum += tex2D(_MainTex, i.uv[it * 2 - 1]).rgb * weight[it];
                sum += tex2D(_MainTex, i.uv[it * 2]).rgb * weight[it];
            }
            return fixed4(sum, 1.0);
        }
        ENDCG

        ZTest Always Cull Off ZWrite Off

        Pass
        {
            // Named so other shaders (e.g. Unlit/bloom) can reuse it via UsePass.
            NAME "GAUSSING_BLUR_VERTICAL"
            CGPROGRAM
            #pragma vertex vertBlurVertical
            #pragma fragment frag
            ENDCG
        }
        Pass
        {
            NAME "GAUSSING_BLUR_GORIZONTAL"
            CGPROGRAM
            #pragma vertex vertBlurHorizontal
            #pragma fragment frag
            ENDCG
        }
    }
    FallBack "Diffuse"
}
高斯方程
单通道 单独处理单个方向 传offset参数
// URP single-direction separable blur: the caller sets `offsets` to a
// one-texel step along either x or y, so a single pass blurs one direction
// with a symmetric 7-tap kernel (weights 0.40/0.15/0.15/0.10/0.10/0.05/0.05,
// summing to 1). Run twice (x then y) for a full blur.
Shader "PostEffect/SeparableGlassBlur"
{
    Properties
    {
        _MainTex ("Base (RGB)", 2D) = "" {}
    }

    HLSLINCLUDE
    #include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"

    struct appdata_img
    {
        float4 vertex:POSITION;
        float2 texcoord:TEXCOORD0;
    };

    struct v2f
    {
        float4 pos : POSITION;
        float2 uv : TEXCOORD0;
        float4 uv01 : TEXCOORD1; // +-1 step sample pair
        float4 uv23 : TEXCOORD2; // +-2 step sample pair
        float4 uv45 : TEXCOORD3; // +-3 step sample pair
    };

    float4 offsets; // blur step in uv space, set by the driver script per pass
    sampler2D _MainTex;

    v2f vert (appdata_img v)
    {
        v2f o;
        o.pos = vertexInput.positionCS;
        o.uv.xy = v.texcoord.xy;
        // Precompute the symmetric sample positions in the vertex shader.
        o.uv01 = v.texcoord.xyxy + offsets.xyxy * float4(1,1, -1,-1);
        o.uv23 = v.texcoord.xyxy + offsets.xyxy * float4(1,1, -1,-1) * 2.0;
        o.uv45 = v.texcoord.xyxy + offsets.xyxy * float4(1,1, -1,-1) * 3.0;
        return o;
    }

    half4 frag (v2f i) : COLOR
    {
        // Weighted sum of the 7 taps.
        half4 color = float4 (0,0,0,0);
        color += 0.40 * tex2D (_MainTex, i.uv);
        color += 0.15 * tex2D (_MainTex, i.uv01.xy);
        color += 0.15 * tex2D (_MainTex, i.uv01.zw);
        color += 0.10 * tex2D (_MainTex, i.uv23.xy);
        color += 0.10 * tex2D (_MainTex, i.uv23.zw);
        color += 0.05 * tex2D (_MainTex, i.uv45.xy);
        color += 0.05 * tex2D (_MainTex, i.uv45.zw);
        return color;
    }
    ENDHLSL

    Subshader
    {
        Pass
        {
            ZTest Always Cull Off ZWrite Off
            HLSLPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            ENDHLSL
        }
    }
}
// Simple 3x3 box blur: each pixel becomes the unweighted average of itself
// and its 8 neighbours.
Shader "Unlit/mohu"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 100

        Pass
        {
            ZTest Always Cull Off ZWrite Off
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            struct v2f
            {
                float2 uv[9] : TEXCOORD0; // 3x3 neighbourhood sample positions
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
            float4 _MainTex_TexelSize; // size of one texel, e.g. 1/512 for a 512x512 texture

            v2f vert (appdata_img v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                half2 uv = v.texcoord;
                // Compute the 3x3 neighbourhood uvs here; they interpolate to the
                // fragments linearly, which is exact for linear expressions.
                o.uv[0] = uv + _MainTex_TexelSize.xy * half2(-1, -1);
                o.uv[1] = uv + _MainTex_TexelSize.xy * half2(0, -1);
                o.uv[2] = uv + _MainTex_TexelSize.xy * half2(1, -1);
                o.uv[3] = uv + _MainTex_TexelSize.xy * half2(-1, 0);
                o.uv[4] = uv + _MainTex_TexelSize.xy * half2(0, 0);
                o.uv[5] = uv + _MainTex_TexelSize.xy * half2(1, 0);
                o.uv[6] = uv + _MainTex_TexelSize.xy * half2(-1, 1);
                o.uv[7] = uv + _MainTex_TexelSize.xy * half2(0, 1);
                o.uv[8] = uv + _MainTex_TexelSize.xy * half2(1, 1);
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                // Box kernel: every tap weighted 1/9.
                const half Gx[9] = {
                    0.11111, 0.11111, 0.11111,
                    0.11111, 0.11111, 0.11111,
                    0.11111, 0.11111, 0.11111
                };
                fixed4 texColor;
                fixed4 col = 0;
                for (int it = 0; it < 9; it++)
                {
                    texColor = tex2D(_MainTex, i.uv[it]);
                    col += texColor * Gx[it];
                }
                return col;
            }
            ENDCG
        }
    }
    Fallback Off
}
卷积操作就是使用一个卷积核对一张图像中的每个像素进行一系列操作。卷积核通常是一个四方形网格结构(2*2,3*3),每个方格都有一个权重值。当对图像的某个像素进行卷积时,我们会把卷积核的中心放置在该像素上,翻转核之后再依次计算核中每个元素和其覆盖的图像像素值的乘积并求和,得到的结果就是该位置的新像素值。(可以用来实现图像模糊,边缘检测等)