Here, setting Canvas (1) to Screen Space - Overlay does render the specified UI sharply while the rest of the UI stays blurred. However, this approach is quite poor: if another UI panel is opened later, or the blur effect is turned off, those top-most UI elements have to be moved back into the original Canvas, which means maintaining two sets of Canvases.
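A minimal sketch of the two-Canvas bookkeeping this implies (all names here, such as SharpUIManager, overlayCanvas and SetOnTop, are hypothetical and not from the project):

```cs
using System.Collections.Generic;
using UnityEngine;

// Hypothetical helper that moves "must stay sharp" UI into an Overlay canvas while
// the blur is on, and restores it afterwards -- exactly the bookkeeping described above.
public class SharpUIManager : MonoBehaviour
{
    public Canvas overlayCanvas; // Screen Space - Overlay canvas that stays sharp

    // Remember where each element came from so it can be restored later
    private readonly Dictionary<RectTransform, Transform> m_OriginalParents =
        new Dictionary<RectTransform, Transform>();

    public void SetOnTop(RectTransform ui, bool blurOn)
    {
        if (blurOn)
        {
            m_OriginalParents[ui] = ui.parent;
            ui.SetParent(overlayCanvas.transform, false); // escapes the blur
        }
        else if (m_OriginalParents.TryGetValue(ui, out var originalParent))
        {
            ui.SetParent(originalParent, false);          // back into the blurred canvas
            m_OriginalParents.Remove(ui);
        }
    }
}
```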
```cs
using System;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Rendering;
using UnityEngine.Rendering.Universal;
public class Blur : ScriptableRendererFeature
{
[Serializable] // Serialized: when a Blur entry is created under the Forward Renderer asset, the data shown in the Inspector (m_Data) is this object
public class BlurData
{
// Blur material
public Material material;
public RenderPassEvent renderPassEvent = RenderPassEvent.AfterRenderingPostProcessing;
// Number of blur iterations; the higher it is, the blurrier the image
[Range(0, 4)]
public int iterations = 3;
// Blur sample spacing; larger values give a stronger blur
[Range(0.2f, 3.0f)]
public float blurSpread = 0.6f;
// Downsample factor (2 means half resolution); larger values blur more and perform better, but gradually pixelate the image!
[Range(1, 8)]
public int downSample = 2;
}
public class BlurRenderPass : ScriptableRenderPass
{
private BlurData m_Data; // Serialized settings of the Blur (ScriptableRendererFeature) entry under the Forward Renderer asset
private RenderTargetIdentifier m_Source; // Camera color target (the screen image)
private RenderTargetHandle m_Buffer0; // Temporary buffer 0
private RenderTargetHandle m_Buffer1; // Temporary buffer 1
private int m_BlurSize = Shader.PropertyToID("_BlurSize");
public BlurRenderPass(BlurData data)
{
this.m_Data = data;
}
public void Setup(RenderTargetIdentifier cameraColorTarget)
{
m_Buffer0.Init("_Buffer0");
m_Buffer1.Init("_Buffer1");
this.m_Source = cameraColorTarget; // Color target of the camera that uses the Forward Renderer this pass belongs to
}
public override void Configure(CommandBuffer cmd, RenderTextureDescriptor cameraTextureDescriptor)
{
var width = cameraTextureDescriptor.width / m_Data.downSample;
var height = cameraTextureDescriptor.height / m_Data.downSample;
cmd.GetTemporaryRT(m_Buffer0.id, width, height, 0, FilterMode.Bilinear, RenderTextureFormat.ARGB32);
cmd.GetTemporaryRT(m_Buffer1.id, width, height, 0, FilterMode.Bilinear, RenderTextureFormat.ARGB32);
}
public override void FrameCleanup(CommandBuffer cmd)
{
cmd.ReleaseTemporaryRT(m_Buffer0.id);
cmd.ReleaseTemporaryRT(m_Buffer1.id);
}
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
var mat = m_Data.material;
if (mat == null) return;
// renderingData.cameraData.camera gives the current camera, so you can decide which cameras actually run this effect (every camera whose renderer has this Blur feature goes through the pass)
CommandBuffer cmd = CommandBufferPool.Get("MyBlurCmd"); // Get a command buffer from the pool, named "MyBlurCmd" so it shows up in the Frame Debugger
cmd.Blit(m_Source, m_Buffer0.Identifier()); // Copy the camera image into buffer 0
// Run 'iterations' rounds; each round performs two passes, vertical and horizontal (the order does not matter)
for (int i = 0; i < m_Data.iterations; i++)
{
mat.SetFloat(m_BlurSize, 1.0f + i * m_Data.blurSpread); // Set the sample spacing, increasing it each iteration
cmd.Blit(m_Buffer0.Identifier(), m_Buffer1.Identifier(), mat, 0);
var tmp = m_Buffer0;
m_Buffer0 = m_Buffer1; // Note: after the swap, the latest result always ends up in m_Buffer0
m_Buffer1 = tmp;
}
cmd.Blit(m_Buffer0.Identifier(), m_Source); // Blit the final result back to the camera target
// Execute the command buffer
context.ExecuteCommandBuffer(cmd);
// Clear the command buffer and return it to the pool
cmd.Clear();
CommandBufferPool.Release(cmd);
}
}
public BlurData data = new BlurData();
private BlurRenderPass m_Pass;
public override void AddRenderPasses(ScriptableRenderer renderer, ref RenderingData renderingData)
{
var src = renderer.cameraColorTarget;
m_Pass.Setup(src); // Pass in the camera's color target
renderer.EnqueuePass(m_Pass); // Enqueue the render pass
}
public override void Create()
{
m_Pass = new BlurRenderPass(data); // Create the pass
m_Pass.renderPassEvent = data.renderPassEvent; // Schedule the pass after a given event (this is the problem: unlike a GrabPass, it cannot run while a specific UI element is being drawn; it has to wait until all UI has finished rendering..
// ..so a sharp top-most UI over blurred underlying UI cannot be achieved this way.
// The next step is to figure out how to implement that..
}
}
```
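As the comment in Execute hints, every camera whose renderer carries this feature runs the pass. A minimal sketch of filtering by camera at the top of Execute (the "UICamera" name is made up for illustration, not from the project):

```cs
// Sketch only: early-out in BlurRenderPass.Execute for cameras that should not be blurred.
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
    var cam = renderingData.cameraData.camera;
    if (cam.cameraType != CameraType.Game || cam.name != "UICamera")
        return; // skip Scene view, previews and any camera we do not want to blur

    // ... the blit/blur code shown above stays unchanged ...
}
```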
Shader code implementing the blur
```shaderlab
// Note: the commented-out CGINCLUDE / ENDCG / UnityCG.cginc lines and other CG-marked code are from the legacy built-in (CG) pipeline
// The new code appears with HLSLxxx keywords (the goal is to demonstrate how to port CG code to URP)
Shader "MilkShader/Twently/G_GaussianBlur"
{
Properties
{
_MainTex("Texture", 2D) = "white" {}
// Sample spacing factor
_BlurSize("Blur Size", Float) = 1.0
}
SubShader
{
Tags { "RenderType" = "Opaque" }
LOD 100
// CGINCLUDE ... ENDCG (here HLSLINCLUDE ... ENDHLSL) is an organizational block: everything placed inside it can be used directly by any Pass, as if every Pass contained it
//CGINCLUDE //CG
HLSLINCLUDE
//#include "UnityCG.cginc" //CG
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
sampler2D _MainTex;
half4 _MainTex_TexelSize;
float _BlurSize;
// URP has no built-in appdata_img struct, so we define our own
struct a2v
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
};
struct v2f
{
float4 pos : SV_POSITION;
half2 uv[5] : TEXCOORD0;
};
// We only need the same inputs as the built-in appdata_img struct (vertex position and texture coordinates)
v2f vertBlurVertical(a2v v) {
v2f o;
//o.pos = UnityObjectToClipPos(v.vertex); //CG
VertexPositionInputs vertexInputs = GetVertexPositionInputs(v.vertex.xyz);
o.pos = vertexInputs.positionCS;
half2 uv = v.texcoord;
// UVs of the 5 vertically adjacent samples
o.uv[0] = uv;
o.uv[1] = uv + float2(0.0, _MainTex_TexelSize.y * 1.0) * _BlurSize;
o.uv[2] = uv - float2(0.0, _MainTex_TexelSize.y * 1.0) * _BlurSize;
o.uv[3] = uv + float2(0.0, _MainTex_TexelSize.y * 2.0) * _BlurSize;
o.uv[4] = uv - float2(0.0, _MainTex_TexelSize.y * 2.0) * _BlurSize;
return o;
}
v2f vertBlurHorizontal(a2v v) {
v2f o;
//o.pos = UnityObjectToClipPos(v.vertex); //CG
VertexPositionInputs vertexInputs = GetVertexPositionInputs(v.vertex.xyz);
o.pos = vertexInputs.positionCS;
half2 uv = v.texcoord;
// UVs of the 5 horizontally adjacent samples
o.uv[0] = uv;
o.uv[1] = uv + float2(_MainTex_TexelSize.x * 1.0, 0.0) * _BlurSize;
o.uv[2] = uv - float2(_MainTex_TexelSize.x * 1.0, 0.0) * _BlurSize;
o.uv[3] = uv + float2(_MainTex_TexelSize.x * 2.0, 0.0) * _BlurSize;
o.uv[4] = uv - float2(_MainTex_TexelSize.x * 2.0, 0.0) * _BlurSize;
return o;
}
// Both the vertical and the horizontal pass use this fragment shader; the processing is identical
half4 fragBlur(v2f i) : SV_Target{
float weight[3] = {0.4026, 0.2442, 0.0545};
// Sample the RGB, multiply by the corresponding weight and accumulate into sum
half3 sum = tex2D(_MainTex, i.uv[0]).rgb * weight[0];
for (int it = 1; it < 3; it++) {
sum += tex2D(_MainTex, i.uv[it * 2 - 1]).rgb * weight[it];
sum += tex2D(_MainTex, i.uv[it * 2]).rgb * weight[it];
}
// And with that the blur is done....
return half4(sum, 1.0);
}
//ENDCG //CG
ENDHLSL
// Everything above is the INCLUDE block, i.e. shared by all the passes below
// Standard state settings for full-screen post-processing
ZTest Always Cull Off ZWrite Off
// First Pass: vertical blur
Pass
{
NAME "GAUSSIAN_BLUR_VERTICAL"
//CGPROGRAM //CG
HLSLPROGRAM
// Vertex shader for the vertical blur
#pragma vertex vertBlurVertical
// Fragment shader
#pragma fragment fragBlur
//ENDCG //CG
ENDHLSL
}
// Second Pass: horizontal blur
Pass
{
NAME "GAUSSIAN_BLUR_HORIZONTAL"
//CGPROGRAM
HLSLPROGRAM
// Vertex shader for the horizontal blur
#pragma vertex vertBlurHorizontal
// Fragment shader
#pragma fragment fragBlur
//ENDCG
ENDHLSL
}
} // End of SubShader
Fallback Off
}
```
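About the weights used in fragBlur: {0.4026, 0.2442, 0.0545} are the center, ±1 and ±2 taps of a normalized 5-tap 1D Gaussian kernel with sigma = 1 (0.4026 + 2 × 0.2442 + 2 × 0.0545 ≈ 1, so brightness is preserved). A small sketch of how such weights can be computed (helper name is hypothetical):

```cs
using System;

// Computes a normalized 1D Gaussian kernel. With taps = 5 and sigma = 1 this gives
// roughly { 0.0545, 0.2442, 0.4026, 0.2442, 0.0545 }, the weights used by fragBlur.
public static class GaussianWeights
{
    public static float[] Compute(int taps = 5, float sigma = 1.0f)
    {
        var weights = new float[taps];
        int half = taps / 2;
        float sum = 0f;
        for (int i = 0; i < taps; i++)
        {
            int x = i - half;
            weights[i] = (float)Math.Exp(-(x * x) / (2f * sigma * sigma));
            sum += weights[i];
        }
        // Normalize so the weights sum to 1 and the image keeps its brightness
        for (int i = 0; i < taps; i++) weights[i] /= sum;
        return weights;
    }
}
```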
One Canvas and two cameras
To sum up the above: the root issue is that the Render Pass Event can only be set to points such as After Rendering Transparents, so the full-screen blur runs only after the transparent objects (including the UI) have finished rendering, and the goal of a sharp top-most UI over a blurred underlying UI cannot be met. If we could control the timing so the post-processing runs before the top-most UI is drawn, and then render that UI after the blur has finished, it would work. However...
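One direction this points to, left here only as a speculative sketch until the TODO below is worked out: in URP, Overlay cameras in a Base camera's camera stack are drawn after the Base camera's post-processing, so a second camera that renders only the top-most UI would not be blurred. The "TopUI" layer name and the camera references below are assumptions, not part of the original project:

```cs
using UnityEngine;
using UnityEngine.Rendering.Universal;

// Speculative sketch of the "one Canvas, two cameras" setup.
public class TopUICameraStack : MonoBehaviour
{
    public Camera baseCamera;  // uses the renderer that carries the Blur feature
    public Camera topUICamera; // renders only the UI that must stay sharp

    void Start()
    {
        // Overlay cameras in a URP camera stack draw after the Base camera's
        // post-processing, so whatever they render is not affected by the blur.
        topUICamera.GetUniversalAdditionalCameraData().renderType = CameraRenderType.Overlay;
        topUICamera.cullingMask = LayerMask.GetMask("TopUI");   // assumed layer for the sharp UI
        baseCamera.cullingMask &= ~LayerMask.GetMask("TopUI");  // the blurred camera skips that layer

        baseCamera.GetUniversalAdditionalCameraData().cameraStack.Add(topUICamera);
    }
}
```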
TODO!!!