After some thought, I decided to keep a digital log as a memo. Later on I'll gradually port over my old notes on the Dragon Book as well, which will double as a review.
Preparation
Set the camera's Clear Flags to Don't Clear and its Culling Mask to Nothing, and switch the project's Color Space to Linear.
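The same camera settings can also be applied from a small script attached to the camera; a minimal sketch (the component name here is made up, and note that Linear color space is a project-wide setting under Project Settings > Player, not a camera property):

using UnityEngine;

[RequireComponent(typeof(Camera))]
public class DeferredCameraSetup : MonoBehaviour
{
    void Awake()
    {
        var cam = GetComponent<Camera>();
        cam.clearFlags = CameraClearFlags.Nothing; // "Don't Clear" in the Inspector
        cam.cullingMask = 0;                       // "Nothing", so the built-in pipeline draws nothing itself
    }
}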
Deferred rendering
The theory behind deferred rendering has already been explained very clearly by plenty of people, so I won't go into detail here; the G-buffer layout follows Unreal's.
Gbuffer pass
The first step of deferred rendering is the Gbuffer pass.
First, define the render textures that make up the G-buffer:
// Render targets and shader property IDs used by the G-buffer pass.
RenderTexture rt;                 // extra full-screen render target (not part of the G-buffer itself)
RenderTexture[] GBufferTexture;   // the four G-buffer color targets
RenderTexture depthTexture;       // shared depth buffer
RenderBuffer[] GBuffers;          // color buffers bound together as MRT
int[] gbufferIDs;                 // property IDs for exposing the RTs to shaders

void Start()
{
    rt = new RenderTexture(Screen.width, Screen.height, 0, RenderTextureFormat.ARGB32, RenderTextureReadWrite.Linear);

    GBufferTexture = new RenderTexture[]
    {
        // RT0: diffuse color + AO
        new RenderTexture(Screen.width, Screen.height, 0, RenderTextureFormat.ARGB32, RenderTextureReadWrite.Linear),
        // RT1: motion vector + roughness
        new RenderTexture(Screen.width, Screen.height, 0, RenderTextureFormat.ARGB32, RenderTextureReadWrite.Linear),
        // RT2: world normal + metallic (10 bits per color channel)
        new RenderTexture(Screen.width, Screen.height, 0, RenderTextureFormat.ARGB2101010, RenderTextureReadWrite.Linear),
        // RT3: emissive
        new RenderTexture(Screen.width, Screen.height, 0, RenderTextureFormat.ARGB2101010, RenderTextureReadWrite.Linear)
    };

    depthTexture = new RenderTexture(Screen.width, Screen.height, 24, RenderTextureFormat.Depth, RenderTextureReadWrite.Linear);

    // Collect the color buffers so they can be bound as MRT targets.
    GBuffers = new RenderBuffer[GBufferTexture.Length];
    for (int i = 0; i < GBuffers.Length; i++)
    {
        GBuffers[i] = GBufferTexture[i].colorBuffer;
    }

    gbufferIDs = new int[]
    {
        Shader.PropertyToID("outGBuffer0"),
        Shader.PropertyToID("outGBuffer1"),
        Shader.PropertyToID("outGBuffer2"),
        Shader.PropertyToID("outEmission")
    };
}
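The post doesn't show how these fields are consumed afterwards; presumably the four color buffers are bound together as MRT targets and, once filled, the textures are exposed for the lighting pass. A rough sketch of what that could look like, where the use of OnPostRender and SetGlobalTexture is my assumption rather than the project's actual driving code:

void OnPostRender()
{
    // Bind all four G-buffer color targets plus the shared depth buffer (MRT).
    Graphics.SetRenderTarget(GBuffers, depthTexture.depthBuffer);
    GL.Clear(true, true, Color.clear);

    // ... draw the scene here with the Gbuffer pass shader ...

    // Expose the filled G-buffer textures to later passes under the IDs above.
    for (int i = 0; i < GBufferTexture.Length; i++)
    {
        Shader.SetGlobalTexture(gbufferIDs[i], GBufferTexture[i]);
    }
}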
In the shader we simply write the current frame's screen-space information (diffuse color, metallic, normals and so on) out to the multiple render textures.
#include "UnityCG.cginc" // TRANSFORM_TEX, UnityObjectToClipPos, UnpackNormal, ...

struct VertexInput
{
    float4 vertex  : POSITION;
    float2 uv0     : TEXCOORD0;
    float2 uv1     : TEXCOORD1;
    float2 uv2     : TEXCOORD2;
    float2 uv3     : TEXCOORD3;
    float4 tangent : TANGENT;
    half3  normal  : NORMAL;
};

struct VertexOutputDeferred
{
    float4 pos   : SV_POSITION;
    float4 uv0   : TEXCOORD0;   // xy: _MainTex uv, zw: _BumpMap uv
    float4 uv1   : TEXCOORD1;   // xy: _AORoughnessMetallic uv, zw: _EmissionMap uv
    float4 TtoW0 : TEXCOORD2;   // tangent-to-world matrix rows; w holds the world position
    float4 TtoW1 : TEXCOORD3;
    float4 TtoW2 : TEXCOORD4;
};

sampler2D _MainTex;
float4 _MainTex_ST;
sampler2D _BumpMap;
float4 _BumpMap_ST;
sampler2D _AORoughnessMetallic;
float4 _AORoughnessMetallic_ST;
sampler2D _EmissionMap;
float4 _EmissionMap_ST;

VertexOutputDeferred vertDeferred(VertexInput v)
{
    VertexOutputDeferred o;
    o.uv0.xy = TRANSFORM_TEX(v.uv0, _MainTex);
    o.uv0.zw = TRANSFORM_TEX(v.uv1, _BumpMap);
    o.uv1.xy = TRANSFORM_TEX(v.uv2, _AORoughnessMetallic);
    o.uv1.zw = TRANSFORM_TEX(v.uv3, _EmissionMap);
    o.pos = UnityObjectToClipPos(v.vertex);

    float3 worldPos      = mul(unity_ObjectToWorld, v.vertex).xyz;
    float3 worldNormal   = UnityObjectToWorldNormal(v.normal);
    float3 worldTangent  = UnityObjectToWorldDir(v.tangent.xyz);
    float3 worldBinormal = cross(worldNormal, worldTangent) * v.tangent.w;

    // Pack the tangent-to-world basis by rows, with the world position in the w components.
    o.TtoW0 = float4(worldTangent.x, worldBinormal.x, worldNormal.x, worldPos.x);
    o.TtoW1 = float4(worldTangent.y, worldBinormal.y, worldNormal.y, worldPos.y);
    o.TtoW2 = float4(worldTangent.z, worldBinormal.z, worldNormal.z, worldPos.z);
    return o;
}

void fragDeferred(
    VertexOutputDeferred i,
    out half4 outGBuffer0 : SV_Target0,
    out half4 outGBuffer1 : SV_Target1,
    out half4 outGBuffer2 : SV_Target2,
    out half4 outEmission : SV_Target3
)
{
    // Tangent-space normal -> world space.
    float3 bump = UnpackNormal(tex2D(_BumpMap, i.uv0.zw));
    bump.z = sqrt(1.0 - saturate(dot(bump.xy, bump.xy)));
    bump = normalize(float3(dot(i.TtoW0.xyz, bump), dot(i.TtoW1.xyz, bump), dot(i.TtoW2.xyz, bump)));

    half3 diffuse   = tex2D(_MainTex, i.uv0.xy).xyz;
    half  ao        = tex2D(_AORoughnessMetallic, i.uv1.xy).x;
    half  roughness = tex2D(_AORoughnessMetallic, i.uv1.xy).y;
    half  metallic  = tex2D(_AORoughnessMetallic, i.uv1.xy).z;
    half3 emissive  = tex2D(_EmissionMap, i.uv1.zw).xyz;

    outGBuffer0 = half4(diffuse, ao);                  // RT0: diffuse + AO
    outGBuffer1 = half4(1.0, 1.0, 1.0, roughness);     // RT1: RG reserved for motion vectors, A: roughness
    outGBuffer2 = half4(bump * 0.5f + 0.5f, metallic); // RT2: world normal packed to [0,1] + metallic
    outEmission = half4(emissive, 1.0f);               // RT3: emissive
}
To make later changes easier, I also store the world position in the vertex shader output.
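Concretely it rides along in the w components of the three tangent-to-world rows, so a fragment shader can recover it like this (not yet used in the code above):

// World position was packed into the .w of the TtoW rows in the vertex shader.
float3 worldPos = float3(i.TtoW0.w, i.TtoW1.w, i.TtoW2.w);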
The material properties are:
a diffuse map, a normal map, an AO/roughness/metallic (packed) map, and an emission map.
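For reference, the matching Properties block of the shader would presumably look something like this; the texture names follow the sampler declarations above, while the display names are my guesses:

Properties
{
    _MainTex ("Diffuse", 2D) = "white" {}
    _BumpMap ("Normal Map", 2D) = "bump" {}
    _AORoughnessMetallic ("AO / Roughness / Metallic", 2D) = "white" {}
    _EmissionMap ("Emission", 2D) = "black" {}
}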
The G-buffer layout is:
RT0: RGB (diffuse color), A (AO)
RT1: RG (motion vector), A (roughness)
RT2: RGB (world normal), A (metallic)
RT3: RGB (emissive)
AO, motion vectors, and emissive will be implemented later.
Lighting calculation
Once the current frame's information has been written into the RTs, the lighting pass simply samples them. On the advice of an expert classmate, I didn't use Unity's PBR and instead copied Unreal's approach.
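That sampling/decoding step might look roughly like the sketch below; the global texture names are an assumption and just need to match whatever the G-buffer render textures are bound as:

sampler2D outGBuffer0;   // RGB: diffuse, A: AO
sampler2D outGBuffer1;   // RG: motion vector, A: roughness
sampler2D outGBuffer2;   // RGB: packed world normal, A: metallic

void DecodeGBuffer(float2 uv,
                   out half3 diffuse, out half ao,
                   out half roughness, out half metallic,
                   out float3 worldNormal)
{
    half4 g0 = tex2D(outGBuffer0, uv);
    half4 g1 = tex2D(outGBuffer1, uv);
    half4 g2 = tex2D(outGBuffer2, uv);

    diffuse     = g0.rgb;
    ao          = g0.a;
    roughness   = g1.a;
    metallic    = g2.a;
    // The normal was stored as n * 0.5 + 0.5, so map it back to [-1, 1].
    worldNormal = normalize(g2.rgb * 2.0 - 1.0);
}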
For the theory behind PBR and explanations of it, see 毛神's blog.
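For context, "Unreal's approach" concretely means UE4's default lit specular BRDF: a GGX normal distribution, an approximate Smith joint visibility term, and a Schlick Fresnel term. A minimal sketch of those three functions, paraphrased from UE4's shader source rather than taken from this project:

#define PI 3.14159265

// GGX / Trowbridge-Reitz normal distribution; a2 is roughness^2 squared.
float D_GGX(float a2, float NoH)
{
    float d = (NoH * a2 - NoH) * NoH + 1.0;   // equals NoH^2 * (a2 - 1) + 1
    return a2 / (PI * d * d);
}

// Approximate Smith joint visibility term; a is roughness^2.
// Already includes the 1 / (4 * NoL * NoV) denominator of the microfacet BRDF.
float Vis_SmithJointApprox(float a, float NoV, float NoL)
{
    float Vis_V = NoL * (NoV * (1.0 - a) + a);
    float Vis_L = NoV * (NoL * (1.0 - a) + a);
    return 0.5 / max(Vis_V + Vis_L, 1e-5);
}

// Schlick Fresnel; the saturate(50 * F0.g) factor fades specular out when F0 is near zero.
float3 F_Schlick(float3 F0, float VoH)
{
    float Fc = pow(1.0 - VoH, 5.0);
    return saturate(50.0 * F0.g) * Fc + (1.0 - Fc) * F0;
}

// The specular lobe is then D * Vis * F.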
Basic concepts
Flux: the total amount of light (photons) received by a surface per unit time. Note that flux itself is not brightness; brightness corresponds to the density of flux, that is, the flux per unit area.
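Written as a formula (my notation, not the post's): if \Phi is the flux arriving at a surface, the density described above is E = \mathrm{d}\Phi / \mathrm{d}A, which radiometry calls irradiance.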
Solid angle: the patch that a solid angle covers on the sphere is exactly the differential element in a spherical integral. When computing lighting, the point where the light ray lands