URPLearn

9.8k words

简介

简单记录下,URP 中 Lit 如何实现光照计算

后处理

Bloom

抗锯齿

雾效

LUT

Color Lookup组件
Image text
Image text
ColorLookup.cs

1
2
3
4
5
6
7
8
9
10
11
12
13
14

// Select the color-grading mode keyword on the post-processing material.
if (hdr)
{
// HDR grading: tonemapping is folded into the grading LUT itself.
material.EnableKeyword(ShaderKeywordStrings.HDRGrading);
}
else
{
// LDR grading: enable a standalone tonemapper keyword instead.
switch (m_Tonemapping.mode.value)
{
case TonemappingMode.Neutral: material.EnableKeyword(ShaderKeywordStrings.TonemapNeutral); break;
case TonemappingMode.ACES: material.EnableKeyword(ShaderKeywordStrings.TonemapACES); break;
default: break; // None
}
}

ApplyColorGrading

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16

// HDR grading path: apply the internal grading LUT (built in LogC space),
// then optionally blend in a user-supplied LUT in sRGB space.
#if _HDR_GRADING
{
float3 inputLutSpace = saturate(LinearToLogC(input)); // LUT space is in LogC
input = ApplyLut2D(TEXTURE2D_ARGS(lutTex, lutSampler), inputLutSpace, lutParams);

UNITY_BRANCH
if (userLutContrib > 0.0)
{
input = saturate(input);
input.rgb = GetLinearToSRGB(input.rgb); // In LDR do the lookup in sRGB for the user LUT
half3 outLut = ApplyLut2D(TEXTURE2D_ARGS(userLutTex, userLutSampler), input, userLutParams);
input = lerp(input, outLut, userLutContrib); // blend by the user LUT's contribution weight
input.rgb = GetSRGBToLinear(input.rgb); // back to linear after the user-LUT blend
}
}

贴花


屏幕空间贴花
UnityURPUnlitScreenSpaceDecalShader
LBW-WallMerge

原理

通过摄像机到模型顶点发射射线,射线的长度再乘一个屏幕深度图的系数,使得射线的终点最后贴着场景模型表面分布。最后将射线结果作为新的坐标,以新坐标作为decalSpace取xz轴作为uv方向,采样贴图

1
2
3
4
5
6
7
8
9
10

// Reconstruct the scene position along the camera->vertex ray so the decal
// projects onto whatever geometry the depth buffer actually contains.
// if perspective camera, LinearEyeDepth will handle everything for user
// remember we can't use LinearEyeDepth for orthographic camera!
float sceneDepthVS = LinearEyeDepth(sceneRawDepth,_ZBufferParams);

// scene depth in any space = rayStartPos + rayDir * rayLength
// here all data in ObjectSpace(OS) or DecalSpace
// be careful, viewRayOS is not a unit vector, so don't normalize it, it is a direction vector which view space z's length is 1
decalSpaceScenePos = i.cameraPosOSAndFogFactor.xyz + i.viewRayOS.xyz * sceneDepthVS;

BakedGI

1
// Sample baked GI: lightmap when LIGHTMAP_ON is defined, otherwise spherical harmonics.
inputData.bakedGI = SAMPLE_GI(input.lightmapUV, input.vertexSH, inputData.normalWS);

SAMPLE_GI 作为宏定义,将根据是否存在光照贴图,决定是采样光照贴图、还是球谐(SH)环境光照

1
2
3
4
5
6
7
8
// SAMPLE_GI resolves to a lightmap sample when the object is lightmapped,
// otherwise to a per-pixel spherical-harmonics (ambient probe) evaluation.
// NOTE: the replacement text must sit on the same logical line as the
// #define (or be continued with a trailing backslash); split across plain
// lines, SAMPLE_GI would expand to nothing and the call below it would be
// left behind as a stray statement.
#if defined(LIGHTMAP_ON)
#define SAMPLE_GI(lmName, shName, normalWSName) SampleLightmap(lmName, normalWSName)
#else
#define SAMPLE_GI(lmName, shName, normalWSName) SampleSHPixel(shName, normalWSName)
#endif

SampleLightmap
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
// Sample baked lightmap. Non-Direction and Directional if available.
// Realtime GI is not supported.
half3 SampleLightmap(float2 lightmapUV, half3 normalWS)
{
// Full-HDR lightmaps store raw HDR values; otherwise the texture is
// encoded (RGBM/dLDR) and must be decoded with the instructions below.
#ifdef UNITY_LIGHTMAP_FULL_HDR
bool encodedLightmap = false;
#else
bool encodedLightmap = true;
#endif

half4 decodeInstructions = half4(LIGHTMAP_HDR_MULTIPLIER, LIGHTMAP_HDR_EXPONENT, 0.0h, 0.0h);

// The shader library sample lightmap functions transform the lightmap uv coords to apply bias and scale.
// However, universal pipeline already transformed those coords in vertex. We pass half4(1, 1, 0, 0) and
// the compiler will optimize the transform away.
half4 transformCoords = half4(1, 1, 0, 0);

#if defined(LIGHTMAP_ON) && defined(DIRLIGHTMAP_COMBINED)
// Directional lightmap: also samples the dominant-direction texture and
// modulates the result by the surface normal.
return SampleDirectionalLightmap(TEXTURE2D_LIGHTMAP_ARGS(LIGHTMAP_NAME, LIGHTMAP_SAMPLER_NAME),
TEXTURE2D_LIGHTMAP_ARGS(LIGHTMAP_INDIRECTION_NAME, LIGHTMAP_SAMPLER_NAME),
LIGHTMAP_SAMPLE_EXTRA_ARGS, transformCoords, normalWS, encodedLightmap, decodeInstructions);
#elif defined(LIGHTMAP_ON)
// Non-directional lightmap: a single color sample.
return SampleSingleLightmap(TEXTURE2D_LIGHTMAP_ARGS(LIGHTMAP_NAME, LIGHTMAP_SAMPLER_NAME), LIGHTMAP_SAMPLE_EXTRA_ARGS, transformCoords, encodedLightmap, decodeInstructions);
#else
// No lightmap bound: contribute nothing.
return half3(0.0, 0.0, 0.0);
#endif
}
SampleSHPixel
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19

// SH Pixel Evaluation. Depending on target SH sampling might be done
// mixed or fully in pixel. See SampleSHVertex
half3 SampleSHPixel(half3 L2Term, half3 normalWS)
{
#if defined(EVALUATE_SH_VERTEX)
// SH fully evaluated per-vertex; just forward the interpolated result.
return L2Term;
#elif defined(EVALUATE_SH_MIXED)
// Mixed mode: the L2 band arrives interpolated from the vertex stage;
// add the per-pixel L0+L1 bands for a better response to normal maps.
half3 L0L1Term = SHEvalLinearL0L1(normalWS, unity_SHAr, unity_SHAg, unity_SHAb);
half3 res = L2Term + L0L1Term;
#ifdef UNITY_COLORSPACE_GAMMA
res = LinearToSRGB(res);
#endif
return max(half3(0, 0, 0), res);
#endif

// Default: Evaluate SH fully per-pixel
return SampleSH(normalWS);
}

光照贴图烘焙

Bakery

一个用于烘焙光照贴图的插件,能够更快地烘焙出效果好的光照贴图(感觉差不多)

效果图

烘焙后的全局光照结果(没有计算漫反射颜色)
Image text
Image text
Image text

全局光照

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
// Accumulate the indirect (baked + environment) lighting for this surface.
half3 color = GlobalIllumination(
brdfData, brdfDataClearCoat, surfaceData.clearCoatMask,
inputData.bakedGI, surfaceData.occlusion,
inputData.normalWS, inputData.viewDirectionWS);

// Indirect lighting: diffuse from baked GI (lightmap or SH) and specular
// from the reflection environment, combined through the environment BRDF,
// with an optional clear-coat layer blended on top.
half3 GlobalIllumination(BRDFData brdfData, BRDFData brdfDataClearCoat, float clearCoatMask,
half3 bakedGI, half occlusion,
half3 normalWS, half3 viewDirectionWS)
{
half3 reflectVector = reflect(-viewDirectionWS, normalWS);
half NoV = saturate(dot(normalWS, viewDirectionWS));
half fresnelTerm = Pow4(1.0 - NoV); // Pow4 instead of the usual Pow5 as an optimization

half3 indirectDiffuse = bakedGI * occlusion;
// Indirect specular: sampled from reflection probes / the sky environment.
half3 indirectSpecular = GlossyEnvironmentReflection(reflectVector, brdfData.perceptualRoughness, occlusion);

half3 color = EnvironmentBRDF(brdfData, indirectDiffuse, indirectSpecular, fresnelTerm);

#if defined(_CLEARCOAT) || defined(_CLEARCOATMAP)
// Second environment sample with the coat's own roughness.
half3 coatIndirectSpecular = GlossyEnvironmentReflection(reflectVector, brdfDataClearCoat.perceptualRoughness, occlusion);
// TODO: "grazing term" causes problems on full roughness
half3 coatColor = EnvironmentBRDFClearCoat(brdfDataClearCoat, clearCoatMask, coatIndirectSpecular, fresnelTerm);

// Blend with base layer using khronos glTF recommended way using NoV
// Smooth surface & "ambiguous" lighting
// NOTE: fresnelTerm (above) is pow4 instead of pow5, but should be ok as blend weight.
half coatFresnel = kDielectricSpec.x + kDielectricSpec.a * fresnelTerm;
return color * (1.0 - coatFresnel * clearCoatMask) + coatColor;
#else
return color;
#endif
}

Image text

GI中的反射结果

采样天空盒和反射探针结果
Image text
Image text
开启反射探针
Image text
关闭反射探针

主光源结果

1
2
3
4
// Main directional light contribution, added on top of the GI result.
color += LightingPhysicallyBased(brdfData, brdfDataClearCoat,
mainLight,
inputData.normalWS, inputData.viewDirectionWS,
surfaceData.clearCoatMask, specularHighlightsOff);

点光源结果

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15

// Per-pixel additional lights (point/spot), only compiled when the
// _ADDITIONAL_LIGHTS keyword is enabled.
#ifdef _ADDITIONAL_LIGHTS
uint pixelLightCount = GetAdditionalLightsCount();
for (uint lightIndex = 0u; lightIndex < pixelLightCount; ++lightIndex)
{
// Fetch this light's color/direction/attenuation (and shadow) for the pixel.
Light light = GetAdditionalLight(lightIndex, inputData.positionWS, shadowMask);
#if defined(_SCREEN_SPACE_OCCLUSION)
// SSAO also darkens direct lighting via the direct-AO factor.
light.color *= aoFactor.directAmbientOcclusion;
#endif
// Same PBR direct-lighting evaluation as the main light.
color += LightingPhysicallyBased(brdfData, brdfDataClearCoat,
light,
inputData.normalWS, inputData.viewDirectionWS,
surfaceData.clearCoatMask, specularHighlightsOff);
}
#endif

Image text

LightingPhysicallyBased(BRDF)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
// Direct PBR lighting for a single light: diffuse plus (optionally) a
// specular lobe via DirectBRDFSpecular, scaled by the light radiance.
half3 LightingPhysicallyBased(BRDFData brdfData, BRDFData brdfDataClearCoat,
half3 lightColor, half3 lightDirectionWS, half lightAttenuation,
half3 normalWS, half3 viewDirectionWS,
half clearCoatMask, bool specularHighlightsOff)
{
half NdotL = saturate(dot(normalWS, lightDirectionWS));
// Radiance = light color * (distance/shadow attenuation * N.L).
half3 radiance = lightColor * (lightAttenuation * NdotL);

half3 brdf = brdfData.diffuse;
#ifndef _SPECULARHIGHLIGHTS_OFF
[branch] if (!specularHighlightsOff)
{
brdf += brdfData.specular * DirectBRDFSpecular(brdfData, normalWS, lightDirectionWS, viewDirectionWS);

#if defined(_CLEARCOAT) || defined(_CLEARCOATMAP)
// Clear coat evaluates the specular a second time and has some common terms with the base specular.
// We rely on the compiler to merge these and compute them only once.
half brdfCoat = kDielectricSpec.r * DirectBRDFSpecular(brdfDataClearCoat, normalWS, lightDirectionWS, viewDirectionWS);

// Mix clear coat and base layer using khronos glTF recommended formula
// https://github.com/KhronosGroup/glTF/blob/master/extensions/2.0/Khronos/KHR_materials_clearcoat/README.md
// Use NoV for direct too instead of LoH as an optimization (NoV is light invariant).
half NoV = saturate(dot(normalWS, viewDirectionWS));
// Use slightly simpler fresnelTerm (Pow4 vs Pow5) as a small optimization.
// It is matching fresnel used in the GI/Env, so should produce a consistent clear coat blend (env vs. direct)
half coatFresnel = kDielectricSpec.x + kDielectricSpec.a * Pow4(1.0 - NoV);

brdf = brdf * (1.0 - clearCoatMask * coatFresnel) + brdfCoat * clearCoatMask;
#endif // _CLEARCOAT
}
#endif // _SPECULARHIGHLIGHTS_OFF

return brdf * radiance;
}

应用雾效

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
// Blend the fog into the final color using the per-vertex fog factor.
color.rgb = MixFog(color.rgb, inputData.fogCoord);

// Lerps the fragment toward fogColor based on the fog factor. Compiles to a
// pass-through when no fog keyword (FOG_LINEAR/FOG_EXP/FOG_EXP2) is active.
half3 MixFogColor(real3 fragColor, real3 fogColor, real fogFactor)
{
#if defined(FOG_LINEAR) || defined(FOG_EXP) || defined(FOG_EXP2)
real fogIntensity = ComputeFogIntensity(fogFactor);
fragColor = lerp(fogColor, fragColor, fogIntensity);
#endif
return fragColor;
}

// Convenience wrapper that fogs toward the global unity_FogColor.
half3 MixFog(real3 fragColor, real fogFactor)
{
return MixFogColor(fragColor, unity_FogColor.rgb, fogFactor);
}
高度雾

Github

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
// Height-fog parameters (set from C#).
float3 _FogColor;            // base fog color

float _FogGlobalDensity;     // overall fog density multiplier
float _FogFallOff;           // how quickly density decays with height
float _FogHeight;            // fog reference height offset (relative to camera)
float _FogStartDis;          // distance from camera at which fog starts
float _FogInscatteringExp;   // sharpness of the sun in-scattering highlight
float _FogGradientDis;       // distance over which fog ramps to full strength

// Exponential height fog with sun in-scattering toward the main light.
// Returns the fogged color for a surface at posWorld.
half3 ExponentialHeightFog(half3 col, half3 posWorld)
{
half heightFallOff = _FogFallOff * 0.01;
half falloff = heightFallOff * (posWorld.y - _WorldSpaceCameraPos.y - _FogHeight);
half fogDensity = _FogGlobalDensity * exp2(-falloff);
// Analytic integral of the exponential density along the ray: (1 - 2^-x)/x.
// Guard the x -> 0 singularity with its limit ln(2); the unguarded form
// produces a 0/0 NaN whenever the surface height exactly matches the
// camera/fog reference height.
half fogFactor = abs(falloff) > 0.001 ? (1 - exp2(-falloff)) / falloff : half(0.6931472);
half3 viewDir = _WorldSpaceCameraPos - posWorld;
half rayLength = length(viewDir);
// Remap distance so fog only appears beyond _FogStartDis and reaches
// full strength over _FogGradientDis.
half distanceFactor = max((rayLength - _FogStartDis) / _FogGradientDis, 0);
half fog = fogFactor * fogDensity * distanceFactor;
// In-scattering: brighten the fog when looking toward the main light.
half inscatterFactor = pow(saturate(dot(-normalize(viewDir), WorldSpaceLightDir(half4(posWorld, 1)))), _FogInscatteringExp);
inscatterFactor *= 1 - saturate(exp2(falloff));
inscatterFactor *= distanceFactor;
half3 finalFogColor = lerp(_FogColor, _LightColor0, saturate(inscatterFactor));
return lerp(col, finalFogColor, saturate(fog));
}

实时GI

Precomputed Radiance Transfer Global Illumination

阴影

级联阴影

大佬讲解原理(知乎)

一级阴影

Image text
Image text

二级阴影

Image text
Image text

四级阴影

Image text
Image text

不同等级之间会有一条很明显的分界线,可以使用 Dither Blend 去做过渡
BXRP Shadow

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
// Cascade selection: find the first cascade culling sphere containing the
// world-space position, fading shadow strength near cascade/distance edges.
int cascadeIndex;
half cascadeBlend = half(1.0);
int cascadeCount = _CascadeCount;
int i;
for(i = 0; i < cascadeCount; ++i)
{
float4 sphere = _CascadeCullingSpheres[i];
float3 dir = pos_world - sphere.xyz;
float dstSqr = dot(dir, dir);
// NOTE(review): squared distance is compared against sphere.w — this
// assumes w already stores the squared radius; confirm in the C# setup.
if(dstSqr < sphere.w)
{
half fade = FadeShadowsStrength(dstSqr, _CascadeDatas[i].x, _ShadowsDistanceFade.z);
if(i == (cascadeCount - 1))
{
// Last cascade: fade the overall shadow strength toward max distance.
shadowDistanceStrength *= fade;
}
else
{
// Inner cascades: keep the fade for blending/dithering into the next one.
cascadeBlend = fade;
}
break;
}
}
// Outside every cascade sphere: no shadowing at all.
if(i == cascadeCount) shadowDistanceStrength = half(0.0);
#if defined(_CASCADE_BLEND_DITHER)
// Dithered transition: near a cascade seam, stochastically jump to the next
// cascade to hide the hard boundary between levels.
half dither = InterleavedGradientNoise(pos_clip.xy, half(0.0));
if (cascadeBlend < dither)
{
i += 1;
}
#endif
cascadeIndex = i;

// Project into the chosen cascade's shadow map with a per-cascade
// normal-direction bias, then filter (PCF) the shadow map.
int shadowIndex = shadowData.y + cascadeIndex;
float3 normalBias = normal_world * _CascadeDatas[cascadeIndex].y * shadowData.z;
float4 shadowCoord = mul(_DirectionalShadowMatrixs[shadowIndex], float4(pos_world + normalBias , 1.0));
half shadow = FilterDirectionalShadow(shadowCoord.xyz);

阴影映射纹理
Image text

接受阴影的计算
Image text

屏幕空间阴影投影技术
Image text
Image text