// Singularity/Library/PackageCache/com.unity.render-pipelines.universal/Runtime/UniversalRenderer.cs
// NOTE(review): the lines below were scrape artifacts from the code-hosting page, preserved here as a comment
// so the file compiles: 1257 lines, 70 KiB, C#, "Raw Permalink Normal View History", 2024-05-06 14:45:45 -04:00
using System.Collections.Generic;
using UnityEngine.Rendering.Universal.Internal;
namespace UnityEngine.Rendering.Universal
{
/// <summary>
/// Rendering modes for Universal renderer.
/// </summary>
public enum RenderingMode
{
    /// <summary>Render all objects and lighting in one pass, with a hard limit on the number of lights that can be applied on an object.</summary>
    Forward,
    /// <summary>Render all objects first in a g-buffer pass, then apply all lighting in a separate pass using deferred shading.</summary>
    Deferred
}
/// <summary>
/// When the Universal Renderer should use Depth Priming in Forward mode.
/// Depth priming renders a depth prepass and reuses it as the opaque pass's depth buffer,
/// allowing the GPU to reject occluded fragments early.
/// </summary>
public enum DepthPrimingMode
{
/// <summary>Depth Priming will never be used.</summary>
Disabled,
/// <summary>Depth Priming will only be used if there is a depth prepass needed by any of the render passes.</summary>
Auto,
/// <summary>A depth prepass will be explicitly requested so Depth Priming can be used.</summary>
Forced,
}
/// <summary>
/// Default renderer for Universal RP.
/// This renderer is supported on all Universal RP supported platforms.
/// It uses a classic forward rendering strategy with per-object light culling.
/// </summary>
public sealed class UniversalRenderer : ScriptableRenderer
{
#if UNITY_SWITCH || UNITY_ANDROID
// 24-bit depth/stencil on Switch and Android; 32-bit everywhere else.
internal const int k_DepthStencilBufferBits = 24;
#else
internal const int k_DepthStencilBufferBits = 32;
#endif
// Shader tag list used when the depth-normals prepass must draw only "DepthNormalsOnly" geometry
// (forward-only objects in the deferred path).
static readonly List<ShaderTagId> k_DepthNormalsOnly = new List<ShaderTagId> { new ShaderTagId("DepthNormalsOnly") };
// Cached profiling samplers for this renderer's internal operations.
private static class Profiling
{
    private const string k_Name = nameof(UniversalRenderer);
    // Sampler label: "UniversalRenderer.CreateCameraRenderTarget".
    public static readonly ProfilingSampler createCameraRenderTarget =
        new ProfilingSampler(k_Name + "." + nameof(CreateCameraRenderTarget));
}
/// <inheritdoc/>
public override int SupportedCameraStackingTypes()
{
    // Forward supports Overlay cameras stacked on a Base camera; Deferred only supports a single Base camera.
    if (m_RenderingMode == RenderingMode.Forward)
        return (1 << (int)CameraRenderType.Base) | (1 << (int)CameraRenderType.Overlay);
    if (m_RenderingMode == RenderingMode.Deferred)
        return 1 << (int)CameraRenderType.Base;
    return 0;
}
// Rendering mode setup from UI.
internal RenderingMode renderingMode => m_RenderingMode;
// Actual rendering mode, which may be different (ex: wireframe rendering, hardware not capable of deferred rendering).
// Falls back to Forward whenever deferred shading cannot run this frame.
internal RenderingMode actualRenderingMode => (GL.wireframe || (DebugHandler != null && DebugHandler.IsActiveModeUnsupportedForDeferred) || m_DeferredLights == null || !m_DeferredLights.IsRuntimeSupportedThisFrame() || m_DeferredLights.IsOverlay)
? RenderingMode.Forward
: this.renderingMode;
// True when the deferred path stores accurate (octahedron-encoded) gbuffer normals; false when no deferred lights exist.
internal bool accurateGbufferNormals => m_DeferredLights != null ? m_DeferredLights.AccurateGbufferNormals : false;
#if ADAPTIVE_PERFORMANCE_2_1_0_OR_NEWER
// True when the transparent pass should run; Adaptive Performance may skip transparents to save GPU time.
internal bool needTransparencyPass { get { return !UniversalRenderPipeline.asset.useAdaptivePerformance || !AdaptivePerformance.AdaptivePerformanceRenderSettings.SkipTransparentObjects; } }
#endif
/// <summary>Property to control the depth priming behavior of the forward rendering path.</summary>
public DepthPrimingMode depthPrimingMode { get { return m_DepthPrimingMode; } set { m_DepthPrimingMode = value; } }
// --- Built-in render passes: created once in the constructor, enqueued per frame in Setup(). ---
DepthOnlyPass m_DepthPrepass;
DepthNormalOnlyPass m_DepthNormalPrepass;
CopyDepthPass m_PrimedDepthCopyPass;
MotionVectorRenderPass m_MotionVectorPass;
MainLightShadowCasterPass m_MainLightShadowCasterPass;
AdditionalLightsShadowCasterPass m_AdditionalLightsShadowCasterPass;
GBufferPass m_GBufferPass;
CopyDepthPass m_GBufferCopyDepthPass;
TileDepthRangePass m_TileDepthRangePass;
TileDepthRangePass m_TileDepthRangeExtraPass; // TODO use subpass API to hide this pass
DeferredPass m_DeferredPass;
DrawObjectsPass m_RenderOpaqueForwardOnlyPass;
DrawObjectsPass m_RenderOpaqueForwardPass;
DrawSkyboxPass m_DrawSkyboxPass;
CopyDepthPass m_CopyDepthPass;
CopyColorPass m_CopyColorPass;
TransparentSettingsPass m_TransparentSettingsPass;
DrawObjectsPass m_RenderTransparentForwardPass;
InvokeOnRenderObjectCallbackPass m_OnRenderObjectCallbackPass;
FinalBlitPass m_FinalBlitPass;
CapturePass m_CapturePass;
#if ENABLE_VR && ENABLE_XR_MODULE
XROcclusionMeshPass m_XROcclusionMeshPass;
CopyDepthPass m_XRCopyDepthPass;
#endif
#if UNITY_EDITOR
CopyDepthPass m_FinalDepthCopyPass;
#endif
// --- Render target handles owned by this renderer. ---
internal RenderTargetBufferSystem m_ColorBufferSystem;
RenderTargetHandle m_ActiveCameraColorAttachment;
RenderTargetHandle m_ColorFrontBuffer;
RenderTargetHandle m_ActiveCameraDepthAttachment;
RenderTargetHandle m_CameraDepthAttachment;
RenderTargetHandle m_DepthTexture;
RenderTargetHandle m_NormalsTexture;
RenderTargetHandle m_OpaqueColor;
// For tiled-deferred shading.
RenderTargetHandle m_DepthInfoTexture;
RenderTargetHandle m_TileDepthInfoTexture;
// --- Lighting and configuration state (populated from UniversalRendererData in the constructor). ---
ForwardLights m_ForwardLights;
DeferredLights m_DeferredLights;
RenderingMode m_RenderingMode;
DepthPrimingMode m_DepthPrimingMode;
CopyDepthMode m_CopyDepthMode;
bool m_DepthPrimingRecommended;
StencilState m_DefaultStencilState;
LightCookieManager m_LightCookieManager;
IntermediateTextureMode m_IntermediateTextureMode;
// Materials used in URP Scriptable Render Passes; created in the constructor, destroyed in Dispose().
Material m_BlitMaterial = null;
Material m_CopyDepthMaterial = null;
Material m_SamplingMaterial = null;
Material m_TileDepthInfoMaterial = null;
Material m_TileDeferredMaterial = null;
Material m_StencilDeferredMaterial = null;
Material m_CameraMotionVecMaterial = null;
Material m_ObjectMotionVecMaterial = null;
// --- Post-processing passes, owned and disposed by m_PostProcessPasses. ---
PostProcessPasses m_PostProcessPasses;
internal ColorGradingLutPass colorGradingLutPass { get => m_PostProcessPasses.colorGradingLutPass; }
internal PostProcessPass postProcessPass { get => m_PostProcessPasses.postProcessPass; }
internal PostProcessPass finalPostProcessPass { get => m_PostProcessPasses.finalPostProcessPass; }
internal RenderTargetHandle colorGradingLut { get => m_PostProcessPasses.colorGradingLut; }
internal DeferredLights deferredLights { get => m_DeferredLights; }
#if ENABLE_VR && ENABLE_VR_MODULE
#if PLATFORM_WINRT || PLATFORM_ANDROID
// XRTODO: Remove this platform specific code(runs on Quest and HL).
// Reused list to avoid allocating on every IsRunningXRMobile() query.
static List<XR.XRDisplaySubsystem> displaySubsystemList = new List<XR.XRDisplaySubsystem>();
// Returns true when running on an XR mobile/standalone device (WSA players such as HoloLens, or Android
// such as Quest) that has an active XR display subsystem.
internal static bool IsRunningXRMobile()
{
    var platform = Application.platform;
    bool isCandidatePlatform = platform == RuntimePlatform.WSAPlayerX86
        || platform == RuntimePlatform.WSAPlayerARM
        || platform == RuntimePlatform.WSAPlayerX64
        || platform == RuntimePlatform.Android;
    if (!isCandidatePlatform)
        return false;

    // A non-null XR display subsystem instance means XR is active on this device.
    SubsystemManager.GetInstances(displaySubsystemList);
    return displaySubsystemList.Count > 0 && displaySubsystemList[0] != null;
}
#endif
#endif
/// <summary>
/// Creates the Universal Renderer: loads shader resources into materials, configures stencil and
/// light-cookie state from the renderer asset, and constructs every built-in render pass in event order.
/// </summary>
/// <param name="data">Renderer asset holding shader resources, layer masks and feature settings.</param>
public UniversalRenderer(UniversalRendererData data) : base(data)
{
#if ENABLE_VR && ENABLE_XR_MODULE
UniversalRenderPipeline.m_XRSystem.InitializeXRSystemData(data.xrSystemData);
#endif
// TODO: should merge shaders with HDRP into core, XR dependency for now.
// TODO: replace/merge URP blit into core blitter.
Blitter.Initialize(data.shaders.coreBlitPS, data.shaders.coreBlitColorAndDepthPS);
// Materials owned by this renderer; released in Dispose().
m_BlitMaterial = CoreUtils.CreateEngineMaterial(data.shaders.blitPS);
m_CopyDepthMaterial = CoreUtils.CreateEngineMaterial(data.shaders.copyDepthPS);
m_SamplingMaterial = CoreUtils.CreateEngineMaterial(data.shaders.samplingPS);
//m_TileDepthInfoMaterial = CoreUtils.CreateEngineMaterial(data.shaders.tileDepthInfoPS);
//m_TileDeferredMaterial = CoreUtils.CreateEngineMaterial(data.shaders.tileDeferredPS);
m_StencilDeferredMaterial = CoreUtils.CreateEngineMaterial(data.shaders.stencilDeferredPS);
m_CameraMotionVecMaterial = CoreUtils.CreateEngineMaterial(data.shaders.cameraMotionVector);
m_ObjectMotionVecMaterial = CoreUtils.CreateEngineMaterial(data.shaders.objectMotionVector);
// Default stencil state is driven by the renderer asset's override settings.
StencilStateData stencilData = data.defaultStencilState;
m_DefaultStencilState = StencilState.defaultValue;
m_DefaultStencilState.enabled = stencilData.overrideStencilState;
m_DefaultStencilState.SetCompareFunction(stencilData.stencilCompareFunction);
m_DefaultStencilState.SetPassOperation(stencilData.passOperation);
m_DefaultStencilState.SetFailOperation(stencilData.failOperation);
m_DefaultStencilState.SetZFailOperation(stencilData.zFailOperation);
m_IntermediateTextureMode = data.intermediateTextureMode;
{
// Light cookie atlas settings come from the pipeline asset when one is assigned.
var settings = LightCookieManager.Settings.GetDefault();
var asset = UniversalRenderPipeline.asset;
if (asset)
{
settings.atlas.format = asset.additionalLightsCookieFormat;
settings.atlas.resolution = asset.additionalLightsCookieResolution;
}
m_LightCookieManager = new LightCookieManager(ref settings);
}
this.stripShadowsOffVariants = true;
this.stripAdditionalLightOffVariants = true;
#if ENABLE_VR && ENABLE_VR_MODULE
#if PLATFORM_WINRT || PLATFORM_ANDROID
// AdditionalLightOff variant is available on HL&Quest platform due to performance consideration.
this.stripAdditionalLightOffVariants = !IsRunningXRMobile();
#endif
#endif
ForwardLights.InitParams forwardInitParams;
forwardInitParams.lightCookieManager = m_LightCookieManager;
forwardInitParams.clusteredRendering = data.clusteredRendering;
forwardInitParams.tileSize = (int)data.tileSize;
m_ForwardLights = new ForwardLights(forwardInitParams);
//m_DeferredLights.LightCulling = data.lightCulling;
this.m_RenderingMode = data.renderingMode;
this.m_DepthPrimingMode = data.depthPrimingMode;
this.m_CopyDepthMode = data.copyDepthMode;
// Native render pass is unavailable on OpenGL ES 2.
useRenderPassEnabled = data.useNativeRenderPass && SystemInfo.graphicsDeviceType != GraphicsDeviceType.OpenGLES2;
#if UNITY_ANDROID || UNITY_IOS || UNITY_TVOS
// Depth priming is not recommended on these mobile platforms.
this.m_DepthPrimingRecommended = false;
#else
this.m_DepthPrimingRecommended = true;
#endif
// Note: Since all custom render passes inject first and we have stable sort,
// we inject the builtin passes in the before events.
m_MainLightShadowCasterPass = new MainLightShadowCasterPass(RenderPassEvent.BeforeRenderingShadows);
m_AdditionalLightsShadowCasterPass = new AdditionalLightsShadowCasterPass(RenderPassEvent.BeforeRenderingShadows);
#if ENABLE_VR && ENABLE_XR_MODULE
m_XROcclusionMeshPass = new XROcclusionMeshPass(RenderPassEvent.BeforeRenderingOpaques);
// Schedule XR copydepth right after m_FinalBlitPass(AfterRendering + 1)
m_XRCopyDepthPass = new CopyDepthPass(RenderPassEvent.AfterRendering + 2, m_CopyDepthMaterial);
#endif
m_DepthPrepass = new DepthOnlyPass(RenderPassEvent.BeforeRenderingPrePasses, RenderQueueRange.opaque, data.opaqueLayerMask);
m_DepthNormalPrepass = new DepthNormalOnlyPass(RenderPassEvent.BeforeRenderingPrePasses, RenderQueueRange.opaque, data.opaqueLayerMask);
m_MotionVectorPass = new MotionVectorRenderPass(m_CameraMotionVecMaterial, m_ObjectMotionVecMaterial);
if (this.renderingMode == RenderingMode.Forward)
{
// Copies the primed depth buffer into the depth texture right after the prepass.
m_PrimedDepthCopyPass = new CopyDepthPass(RenderPassEvent.AfterRenderingPrePasses, m_CopyDepthMaterial);
}
if (this.renderingMode == RenderingMode.Deferred)
{
var deferredInitParams = new DeferredLights.InitParams();
deferredInitParams.tileDepthInfoMaterial = m_TileDepthInfoMaterial;
deferredInitParams.tileDeferredMaterial = m_TileDeferredMaterial;
deferredInitParams.stencilDeferredMaterial = m_StencilDeferredMaterial;
deferredInitParams.lightCookieManager = m_LightCookieManager;
m_DeferredLights = new DeferredLights(deferredInitParams, useRenderPassEnabled);
m_DeferredLights.AccurateGbufferNormals = data.accurateGbufferNormals;
//m_DeferredLights.TiledDeferredShading = data.tiledDeferredShading;
m_DeferredLights.TiledDeferredShading = false;
m_GBufferPass = new GBufferPass(RenderPassEvent.BeforeRenderingGbuffer, RenderQueueRange.opaque, data.opaqueLayerMask, m_DefaultStencilState, stencilData.stencilReference, m_DeferredLights);
// Forward-only pass only runs if deferred renderer is enabled.
// It allows specific materials to be rendered in a forward-like pass.
// We render both gbuffer pass and forward-only pass before the deferred lighting pass so we can minimize copies of depth buffer and
// benefits from some depth rejection.
// - If a material can be rendered either forward or deferred, then it should declare a UniversalForward and a UniversalGBuffer pass.
// - If a material cannot be lit in deferred (unlit, bakedLit, special material such as hair, skin shader), then it should declare UniversalForwardOnly pass
// - Legacy materials have an unnamed pass, which is implicitly renamed as SRPDefaultUnlit. In that case, they are considered forward-only too.
// To declare a material with unnamed pass and UniversalForward/UniversalForwardOnly pass is an ERROR, as the material will be rendered twice.
StencilState forwardOnlyStencilState = DeferredLights.OverwriteStencil(m_DefaultStencilState, (int)StencilUsage.MaterialMask);
ShaderTagId[] forwardOnlyShaderTagIds = new ShaderTagId[]
{
new ShaderTagId("UniversalForwardOnly"),
new ShaderTagId("SRPDefaultUnlit"), // Legacy shaders (do not have a gbuffer pass) are considered forward-only for backward compatibility
new ShaderTagId("LightweightForward") // Legacy shaders (do not have a gbuffer pass) are considered forward-only for backward compatibility
};
int forwardOnlyStencilRef = stencilData.stencilReference | (int)StencilUsage.MaterialUnlit;
m_GBufferCopyDepthPass = new CopyDepthPass(RenderPassEvent.BeforeRenderingGbuffer + 1, m_CopyDepthMaterial);
m_TileDepthRangePass = new TileDepthRangePass(RenderPassEvent.BeforeRenderingGbuffer + 2, m_DeferredLights, 0);
m_TileDepthRangeExtraPass = new TileDepthRangePass(RenderPassEvent.BeforeRenderingGbuffer + 3, m_DeferredLights, 1);
m_DeferredPass = new DeferredPass(RenderPassEvent.BeforeRenderingDeferredLights, m_DeferredLights);
m_RenderOpaqueForwardOnlyPass = new DrawObjectsPass("Render Opaques Forward Only", forwardOnlyShaderTagIds, true, RenderPassEvent.BeforeRenderingOpaques, RenderQueueRange.opaque, data.opaqueLayerMask, forwardOnlyStencilState, forwardOnlyStencilRef);
}
// Always create this pass even in deferred because we use it for wireframe rendering in the Editor or offscreen depth texture rendering.
m_RenderOpaqueForwardPass = new DrawObjectsPass(URPProfileId.DrawOpaqueObjects, true, RenderPassEvent.BeforeRenderingOpaques, RenderQueueRange.opaque, data.opaqueLayerMask, m_DefaultStencilState, stencilData.stencilReference);
m_CopyDepthPass = new CopyDepthPass(RenderPassEvent.AfterRenderingSkybox, m_CopyDepthMaterial);
m_DrawSkyboxPass = new DrawSkyboxPass(RenderPassEvent.BeforeRenderingSkybox);
m_CopyColorPass = new CopyColorPass(RenderPassEvent.AfterRenderingSkybox, m_SamplingMaterial, m_BlitMaterial);
#if ADAPTIVE_PERFORMANCE_2_1_0_OR_NEWER
if (needTransparencyPass)
#endif
{
m_TransparentSettingsPass = new TransparentSettingsPass(RenderPassEvent.BeforeRenderingTransparents, data.shadowTransparentReceive);
m_RenderTransparentForwardPass = new DrawObjectsPass(URPProfileId.DrawTransparentObjects, false, RenderPassEvent.BeforeRenderingTransparents, RenderQueueRange.transparent, data.transparentLayerMask, m_DefaultStencilState, stencilData.stencilReference);
}
m_OnRenderObjectCallbackPass = new InvokeOnRenderObjectCallbackPass(RenderPassEvent.BeforeRenderingPostProcessing);
m_PostProcessPasses = new PostProcessPasses(data.postProcessData, m_BlitMaterial);
m_CapturePass = new CapturePass(RenderPassEvent.AfterRendering);
m_FinalBlitPass = new FinalBlitPass(RenderPassEvent.AfterRendering + 1, m_BlitMaterial);
#if UNITY_EDITOR
m_FinalDepthCopyPass = new CopyDepthPass(RenderPassEvent.AfterRendering + 9, m_CopyDepthMaterial);
#endif
// RenderTexture format depends on camera and pipeline (HDR, non HDR, etc)
// Samples (MSAA) depend on camera and pipeline
m_ColorBufferSystem = new RenderTargetBufferSystem("_CameraColorAttachment");
// Register the shader property names for the render target handles this renderer allocates.
m_CameraDepthAttachment.Init("_CameraDepthAttachment");
m_DepthTexture.Init("_CameraDepthTexture");
m_NormalsTexture.Init("_CameraNormalsTexture");
m_OpaqueColor.Init("_CameraOpaqueTexture");
m_DepthInfoTexture.Init("_DepthInfoTexture");
m_TileDepthInfoTexture.Init("_TileDepthInfoTexture");
supportedRenderingFeatures = new RenderingFeatures();
if (this.renderingMode == RenderingMode.Deferred)
{
// Deferred rendering does not support MSAA.
this.supportedRenderingFeatures.msaa = false;
// Avoid legacy platforms: use vulkan instead.
unsupportedGraphicsDeviceTypes = new GraphicsDeviceType[]
{
GraphicsDeviceType.OpenGLCore,
GraphicsDeviceType.OpenGLES2,
GraphicsDeviceType.OpenGLES3
};
}
LensFlareCommonSRP.mergeNeeded = 0;
LensFlareCommonSRP.maxLensFlareWithOcclusionTemporalSample = 1;
LensFlareCommonSRP.Initialize();
}
/// <inheritdoc />
protected override void Dispose(bool disposing)
{
    m_ForwardLights.Cleanup();
    m_PostProcessPasses.Dispose();

    // Release every material this renderer created in its constructor, in creation order.
    Material[] ownedMaterials =
    {
        m_BlitMaterial,
        m_CopyDepthMaterial,
        m_SamplingMaterial,
        m_TileDepthInfoMaterial,
        m_TileDeferredMaterial,
        m_StencilDeferredMaterial,
        m_CameraMotionVecMaterial,
        m_ObjectMotionVecMaterial,
    };
    foreach (var material in ownedMaterials)
        CoreUtils.Destroy(material);

    Blitter.Cleanup();
    LensFlareCommonSRP.Dispose();
}
// Routes the requested fullscreen debug texture (depth or a shadow map) to the debug handler,
// scaled by the requested height percentage and anchored to the top-right corner of the screen.
private void SetupFinalPassDebug(ref CameraData cameraData)
{
    if (DebugHandler == null || !DebugHandler.IsActiveForCamera(ref cameraData))
        return;

    if (!DebugHandler.TryGetFullscreenDebugMode(out DebugFullScreenMode fullScreenDebugMode, out int textureHeightPercent))
    {
        DebugHandler.ResetDebugRenderTarget();
        return;
    }

    Camera camera = cameraData.camera;
    float screenWidth = camera.pixelWidth;
    float screenHeight = camera.pixelHeight;
    // Overlay height is a percentage of the screen; width preserves the screen aspect ratio.
    float height = Mathf.Clamp01(textureHeightPercent / 100f) * screenHeight;
    float width = height * (screenWidth / screenHeight);
    float normalizedSizeX = width / screenWidth;
    float normalizedSizeY = height / screenHeight;
    // Anchor to the top-right corner in normalized [0,1] screen coordinates.
    Rect normalizedRect = new Rect(1 - normalizedSizeX, 1 - normalizedSizeY, normalizedSizeX, normalizedSizeY);

    switch (fullScreenDebugMode)
    {
        case DebugFullScreenMode.Depth:
            DebugHandler.SetDebugRenderTarget(m_DepthTexture.Identifier(), normalizedRect, true);
            break;
        case DebugFullScreenMode.AdditionalLightsShadowMap:
            DebugHandler.SetDebugRenderTarget(m_AdditionalLightsShadowCasterPass.m_AdditionalLightsShadowmapTexture, normalizedRect, false);
            break;
        case DebugFullScreenMode.MainLightShadowMap:
            DebugHandler.SetDebugRenderTarget(m_MainLightShadowCasterPass.m_MainLightShadowmapTexture, normalizedRect, false);
            break;
        default:
            break;
    }
}
// Decides whether depth priming can be used for this camera this frame.
bool IsDepthPrimingEnabled(ref CameraData cameraData)
{
    // Depth priming requires an extra depth copy; disable it on platforms not supporting it (like GLES when MSAA is on).
    if (!CanCopyDepth(ref cameraData))
        return false;

    // Priming must be forced, or recommended for this platform while in Auto mode.
    bool requested = m_DepthPrimingMode == DepthPrimingMode.Forced
        || (m_DepthPrimingRecommended && m_DepthPrimingMode == DepthPrimingMode.Auto);
    if (!requested || m_RenderingMode != RenderingMode.Forward)
        return false;

    // Only the first camera to write depth in the stack may prime it.
    if (cameraData.renderType != CameraRenderType.Base && !cameraData.clearDepth)
        return false;

    // Depth priming while baking Reflection Probes causes artifacts (UUM-12397).
    return cameraData.cameraType != CameraType.Reflection;
}
/// <inheritdoc />
public override void Setup(ScriptableRenderContext context, ref RenderingData renderingData)
{
m_ForwardLights.ProcessLights(ref renderingData);
ref CameraData cameraData = ref renderingData.cameraData;
Camera camera = cameraData.camera;
RenderTextureDescriptor cameraTargetDescriptor = cameraData.cameraTargetDescriptor;
DebugHandler?.Setup(context, ref cameraData);
if (cameraData.cameraType != CameraType.Game)
useRenderPassEnabled = false;
// Special path for depth only offscreen cameras. Only write opaques + transparents.
bool isOffscreenDepthTexture = cameraData.targetTexture != null && cameraData.targetTexture.format == RenderTextureFormat.Depth;
if (isOffscreenDepthTexture)
{
ConfigureCameraTarget(BuiltinRenderTextureType.CameraTarget, BuiltinRenderTextureType.CameraTarget);
AddRenderPasses(ref renderingData);
EnqueuePass(m_RenderOpaqueForwardPass);
// TODO: Do we need to inject transparents and skybox when rendering depth only camera? They don't write to depth.
EnqueuePass(m_DrawSkyboxPass);
#if ADAPTIVE_PERFORMANCE_2_1_0_OR_NEWER
if (!needTransparencyPass)
return;
#endif
EnqueuePass(m_RenderTransparentForwardPass);
return;
}
if (m_DeferredLights != null)
{
m_DeferredLights.ResolveMixedLightingMode(ref renderingData);
m_DeferredLights.IsOverlay = cameraData.renderType == CameraRenderType.Overlay;
}
// Assign the camera color target early in case it is needed during AddRenderPasses.
bool isPreviewCamera = cameraData.isPreviewCamera;
var createColorTexture = (rendererFeatures.Count != 0 && m_IntermediateTextureMode == IntermediateTextureMode.Always) && !isPreviewCamera;
if (createColorTexture)
{
m_ActiveCameraColorAttachment = m_ColorBufferSystem.GetBackBuffer();
var activeColorRenderTargetId = m_ActiveCameraColorAttachment.Identifier();
#if ENABLE_VR && ENABLE_XR_MODULE
if (cameraData.xr.enabled) activeColorRenderTargetId = new RenderTargetIdentifier(activeColorRenderTargetId, 0, CubemapFace.Unknown, -1);
#endif
ConfigureCameraColorTarget(activeColorRenderTargetId);
}
// Add render passes and gather the input requirements
isCameraColorTargetValid = true;
AddRenderPasses(ref renderingData);
isCameraColorTargetValid = false;
RenderPassInputSummary renderPassInputs = GetRenderPassInputs(ref renderingData);
// Should apply post-processing after rendering this camera?
bool applyPostProcessing = cameraData.postProcessEnabled && m_PostProcessPasses.isCreated;
// There's at least a camera in the camera stack that applies post-processing
bool anyPostProcessing = renderingData.postProcessingEnabled && m_PostProcessPasses.isCreated;
// If Camera's PostProcessing is enabled and if there any enabled PostProcessing requires depth texture as shader read resource (Motion Blur/DoF)
bool cameraHasPostProcessingWithDepth = applyPostProcessing && cameraData.postProcessingRequiresDepthTexture;
// TODO: We could cache and generate the LUT before rendering the stack
bool generateColorGradingLUT = cameraData.postProcessEnabled && m_PostProcessPasses.isCreated;
bool isSceneViewOrPreviewCamera = cameraData.isSceneViewCamera || cameraData.cameraType == CameraType.Preview;
useDepthPriming = IsDepthPrimingEnabled(ref cameraData);
// This indicates whether the renderer will output a depth texture.
bool requiresDepthTexture = cameraData.requiresDepthTexture || renderPassInputs.requiresDepthTexture || useDepthPriming;
#if UNITY_EDITOR
bool isGizmosEnabled = UnityEditor.Handles.ShouldRenderGizmos();
#else
bool isGizmosEnabled = false;
#endif
bool mainLightShadows = m_MainLightShadowCasterPass.Setup(ref renderingData);
bool additionalLightShadows = m_AdditionalLightsShadowCasterPass.Setup(ref renderingData);
bool transparentsNeedSettingsPass = m_TransparentSettingsPass.Setup(ref renderingData);
bool forcePrepass = (m_CopyDepthMode == CopyDepthMode.ForcePrepass);
// Depth prepass is generated in the following cases:
// - If game or offscreen camera requires it we check if we can copy the depth from the rendering opaques pass and use that instead.
// - Scene or preview cameras always require a depth texture. We do a depth pre-pass to simplify it and it shouldn't matter much for editor.
// - Render passes require it
bool requiresDepthPrepass = (requiresDepthTexture || cameraHasPostProcessingWithDepth) && (!CanCopyDepth(ref renderingData.cameraData) || forcePrepass);
requiresDepthPrepass |= isSceneViewOrPreviewCamera;
requiresDepthPrepass |= isGizmosEnabled;
requiresDepthPrepass |= isPreviewCamera;
requiresDepthPrepass |= renderPassInputs.requiresDepthPrepass;
requiresDepthPrepass |= renderPassInputs.requiresNormalsTexture;
// Current aim of depth prepass is to generate a copy of depth buffer, it is NOT to prime depth buffer and reduce overdraw on non-mobile platforms.
// When deferred renderer is enabled, depth buffer is already accessible so depth prepass is not needed.
// The only exception is for generating depth-normal textures: SSAO pass needs it and it must run before forward-only geometry.
// DepthNormal prepass will render:
// - forward-only geometry when deferred renderer is enabled
// - all geometry when forward renderer is enabled
if (requiresDepthPrepass && this.actualRenderingMode == RenderingMode.Deferred && !renderPassInputs.requiresNormalsTexture)
requiresDepthPrepass = false;
requiresDepthPrepass |= useDepthPriming;
// If possible try to merge the opaque and skybox passes instead of splitting them when "Depth Texture" is required.
// The copying of depth should normally happen after rendering opaques.
// But if we only require it for post processing or the scene camera then we do it after rendering transparent objects
// Aim to have the most optimized render pass event for Depth Copy (The aim is to minimize the number of render passes)
if (requiresDepthTexture)
{
RenderPassEvent copyDepthPassEvent = RenderPassEvent.AfterRenderingOpaques;
// RenderPassInputs's requiresDepthTexture is configured through ScriptableRenderPass's ConfigureInput function
if (renderPassInputs.requiresDepthTexture)
{
// Do depth copy before the render pass that requires depth texture as shader read resource
copyDepthPassEvent = (RenderPassEvent)Mathf.Min((int)RenderPassEvent.AfterRenderingTransparents, ((int)renderPassInputs.requiresDepthTextureEarliestEvent) - 1);
}
m_CopyDepthPass.renderPassEvent = copyDepthPassEvent;
}
else if (cameraHasPostProcessingWithDepth || isSceneViewOrPreviewCamera || isGizmosEnabled)
{
// If only post process requires depth texture, we can re-use depth buffer from main geometry pass instead of enqueuing a depth copy pass, but no proper API to do that for now, so resort to depth copy pass for now
m_CopyDepthPass.renderPassEvent = RenderPassEvent.AfterRenderingTransparents;
}
createColorTexture |= RequiresIntermediateColorTexture(ref cameraData);
createColorTexture |= renderPassInputs.requiresColorTexture;
createColorTexture |= renderPassInputs.requiresColorTextureCreated;
createColorTexture &= !isPreviewCamera;
// If camera requires depth and there's no depth pre-pass we create a depth texture that can be read later by effect requiring it.
// When deferred renderer is enabled, we must always create a depth texture and CANNOT use BuiltinRenderTextureType.CameraTarget. This is to get
// around a bug where during gbuffer pass (MRT pass), the camera depth attachment is correctly bound, but during
// deferred pass ("camera color" + "camera depth"), the implicit depth surface of "camera color" is used instead of "camera depth",
// because BuiltinRenderTextureType.CameraTarget for depth means there is no explicit depth attachment...
bool createDepthTexture = (requiresDepthTexture || cameraHasPostProcessingWithDepth) && !requiresDepthPrepass;
createDepthTexture |= !cameraData.resolveFinalTarget;
// Deferred renderer always need to access depth buffer.
createDepthTexture |= (this.actualRenderingMode == RenderingMode.Deferred && !useRenderPassEnabled);
// Some render cases (e.g. Material previews) have shown we need to create a depth texture when we're forcing a prepass.
createDepthTexture |= useDepthPriming;
#if ENABLE_VR && ENABLE_XR_MODULE
if (cameraData.xr.enabled)
{
// URP can't handle msaa/size mismatch between depth RT and color RT(for now we create intermediate textures to ensure they match)
createDepthTexture |= createColorTexture;
createColorTexture = createDepthTexture;
}
#endif
#if UNITY_ANDROID || UNITY_WEBGL
if (SystemInfo.graphicsDeviceType != GraphicsDeviceType.Vulkan)
{
// GLES can not use render texture's depth buffer with the color buffer of the backbuffer
// in such case we create a color texture for it too.
createColorTexture |= createDepthTexture;
}
#endif
// Temporarily disable depth priming on certain platforms such as Vulkan because we lack proper depth resolve support.
useDepthPriming &= SystemInfo.graphicsDeviceType != GraphicsDeviceType.Vulkan || cameraTargetDescriptor.msaaSamples == 1;
if (useRenderPassEnabled || useDepthPriming)
{
createDepthTexture |= createColorTexture;
createColorTexture = createDepthTexture;
}
var colorDescriptor = cameraTargetDescriptor;
colorDescriptor.useMipMap = false;
colorDescriptor.autoGenerateMips = false;
colorDescriptor.depthBufferBits = (int)DepthBits.None;
m_ColorBufferSystem.SetCameraSettings(colorDescriptor, FilterMode.Bilinear);
// Configure all settings require to start a new camera stack (base camera only)
if (cameraData.renderType == CameraRenderType.Base)
{
RenderTargetHandle cameraTargetHandle = RenderTargetHandle.GetCameraTarget(cameraData.xr);
bool sceneViewFilterEnabled = camera.sceneViewFilterMode == Camera.SceneViewFilterMode.ShowFiltered;
//Scene filtering redraws the objects on top of the resulting frame. It has to draw directly to the sceneview buffer.
m_ActiveCameraColorAttachment = (createColorTexture && !sceneViewFilterEnabled) ? m_ColorBufferSystem.GetBackBuffer() : cameraTargetHandle;
m_ActiveCameraDepthAttachment = (createDepthTexture && !sceneViewFilterEnabled) ? m_CameraDepthAttachment : cameraTargetHandle;
bool intermediateRenderTexture = createColorTexture || createDepthTexture;
// Doesn't create texture for Overlay cameras as they are already overlaying on top of created textures.
if (intermediateRenderTexture)
CreateCameraRenderTarget(context, ref cameraTargetDescriptor, useDepthPriming);
}
else
{
m_ActiveCameraColorAttachment = m_ColorBufferSystem.GetBackBuffer();
m_ActiveCameraDepthAttachment = m_CameraDepthAttachment;
}
cameraData.renderer.useDepthPriming = useDepthPriming;
bool requiresDepthCopyPass = !requiresDepthPrepass
&& (requiresDepthTexture || cameraHasPostProcessingWithDepth)
&& createDepthTexture;
bool copyColorPass = renderingData.cameraData.requiresOpaqueTexture || renderPassInputs.requiresColorTexture;
if ((DebugHandler != null) && DebugHandler.IsActiveForCamera(ref cameraData))
{
DebugHandler.TryGetFullscreenDebugMode(out var fullScreenMode);
if (fullScreenMode == DebugFullScreenMode.Depth)
{
requiresDepthPrepass = true;
}
if (!DebugHandler.IsLightingActive)
{
mainLightShadows = false;
additionalLightShadows = false;
if (!isSceneViewOrPreviewCamera)
{
requiresDepthPrepass = false;
generateColorGradingLUT = false;
copyColorPass = false;
requiresDepthCopyPass = false;
}
}
if (useRenderPassEnabled)
useRenderPassEnabled = DebugHandler.IsRenderPassSupported;
}
// Assign camera targets (color and depth)
{
var activeColorRenderTargetId = m_ActiveCameraColorAttachment.Identifier();
var activeDepthRenderTargetId = m_ActiveCameraDepthAttachment.Identifier();
#if ENABLE_VR && ENABLE_XR_MODULE
if (cameraData.xr.enabled)
{
activeColorRenderTargetId = new RenderTargetIdentifier(activeColorRenderTargetId, 0, CubemapFace.Unknown, -1);
activeDepthRenderTargetId = new RenderTargetIdentifier(activeDepthRenderTargetId, 0, CubemapFace.Unknown, -1);
}
#endif
ConfigureCameraTarget(activeColorRenderTargetId, activeDepthRenderTargetId);
}
bool hasPassesAfterPostProcessing = activeRenderPassQueue.Find(x => x.renderPassEvent == RenderPassEvent.AfterRenderingPostProcessing) != null;
if (mainLightShadows)
EnqueuePass(m_MainLightShadowCasterPass);
if (additionalLightShadows)
EnqueuePass(m_AdditionalLightsShadowCasterPass);
if (requiresDepthPrepass)
{
if (renderPassInputs.requiresNormalsTexture)
{
if (this.actualRenderingMode == RenderingMode.Deferred)
{
// In deferred mode, depth-normal prepass does really primes the depth and normal buffers, instead of creating a copy.
// It is necessary because we need to render depth&normal for forward-only geometry and it is the only way
// to get them before the SSAO pass.
int gbufferNormalIndex = m_DeferredLights.GBufferNormalSmoothnessIndex;
m_DepthNormalPrepass.Setup(cameraTargetDescriptor, m_ActiveCameraDepthAttachment, m_DeferredLights.GbufferAttachments[gbufferNormalIndex]);
// Change the normal format to the one used by the gbuffer.
RenderTextureDescriptor normalDescriptor = m_DepthNormalPrepass.normalDescriptor;
normalDescriptor.graphicsFormat = m_DeferredLights.GetGBufferFormat(gbufferNormalIndex);
m_DepthNormalPrepass.normalDescriptor = normalDescriptor;
// Depth is allocated by this renderer.
m_DepthNormalPrepass.allocateDepth = false;
// Only render forward-only geometry, as standard geometry will be rendered as normal into the gbuffer.
if (RenderPassEvent.AfterRenderingGbuffer <= renderPassInputs.requiresDepthNormalAtEvent &&
renderPassInputs.requiresDepthNormalAtEvent <= RenderPassEvent.BeforeRenderingOpaques)
m_DepthNormalPrepass.shaderTagIds = k_DepthNormalsOnly;
}
else
{
m_DepthNormalPrepass.Setup(cameraTargetDescriptor, m_DepthTexture, m_NormalsTexture);
}
EnqueuePass(m_DepthNormalPrepass);
}
else
{
// Deferred renderer does not require a depth-prepass to generate samplable depth texture.
if (this.actualRenderingMode != RenderingMode.Deferred)
{
m_DepthPrepass.Setup(cameraTargetDescriptor, m_DepthTexture);
EnqueuePass(m_DepthPrepass);
}
}
}
// Depth priming requires a manual resolve of MSAA depth right after the depth prepass. If autoresolve is supported but MSAA is 1x then a copy is still required.
if (useDepthPriming && (SystemInfo.graphicsDeviceType != GraphicsDeviceType.Vulkan || cameraTargetDescriptor.msaaSamples == 1))
{
m_PrimedDepthCopyPass.Setup(m_ActiveCameraDepthAttachment, m_DepthTexture);
m_PrimedDepthCopyPass.AllocateRT = false;
EnqueuePass(m_PrimedDepthCopyPass);
}
if (generateColorGradingLUT)
{
colorGradingLutPass.Setup(colorGradingLut);
EnqueuePass(colorGradingLutPass);
}
#if ENABLE_VR && ENABLE_XR_MODULE
if (cameraData.xr.hasValidOcclusionMesh)
EnqueuePass(m_XROcclusionMeshPass);
#endif
bool lastCameraInTheStack = cameraData.resolveFinalTarget;
if (this.actualRenderingMode == RenderingMode.Deferred)
{
if (m_DeferredLights.UseRenderPass && (RenderPassEvent.AfterRenderingGbuffer == renderPassInputs.requiresDepthNormalAtEvent || !useRenderPassEnabled))
m_DeferredLights.DisableFramebufferFetchInput();
EnqueueDeferred(ref renderingData, requiresDepthPrepass, renderPassInputs.requiresNormalsTexture, mainLightShadows, additionalLightShadows);
}
else
{
// Optimized store actions are very important on tile based GPUs and have a great impact on performance.
// if MSAA is enabled and any of the following passes need a copy of the color or depth target, make sure the MSAA'd surface is stored
// if following passes won't use it then just resolve (the Resolve action will still store the resolved surface, but discard the MSAA'd surface, which is very expensive to store).
RenderBufferStoreAction opaquePassColorStoreAction = RenderBufferStoreAction.Store;
if (cameraTargetDescriptor.msaaSamples > 1)
opaquePassColorStoreAction = copyColorPass ? RenderBufferStoreAction.StoreAndResolve : RenderBufferStoreAction.Store;
// make sure we store the depth only if following passes need it.
RenderBufferStoreAction opaquePassDepthStoreAction = (copyColorPass || requiresDepthCopyPass || !lastCameraInTheStack) ? RenderBufferStoreAction.Store : RenderBufferStoreAction.DontCare;
#if ENABLE_VR && ENABLE_XR_MODULE
if (cameraData.xr.enabled && cameraData.xr.copyDepth)
{
opaquePassDepthStoreAction = RenderBufferStoreAction.Store;
}
#endif
m_RenderOpaqueForwardPass.ConfigureColorStoreAction(opaquePassColorStoreAction);
m_RenderOpaqueForwardPass.ConfigureDepthStoreAction(opaquePassDepthStoreAction);
EnqueuePass(m_RenderOpaqueForwardPass);
}
if (camera.clearFlags == CameraClearFlags.Skybox && cameraData.renderType != CameraRenderType.Overlay)
{
if (RenderSettings.skybox != null || (camera.TryGetComponent(out Skybox cameraSkybox) && cameraSkybox.material != null))
EnqueuePass(m_DrawSkyboxPass);
}
// If a depth texture was created we necessarily need to copy it, otherwise we could have render it to a renderbuffer.
if (requiresDepthCopyPass)
{
m_CopyDepthPass.Setup(m_ActiveCameraDepthAttachment, m_DepthTexture);
if (this.actualRenderingMode == RenderingMode.Deferred && !useRenderPassEnabled)
m_CopyDepthPass.AllocateRT = false; // m_DepthTexture is already allocated by m_GBufferCopyDepthPass but it's not called when using RenderPass API.
EnqueuePass(m_CopyDepthPass);
}
// Set the depth texture to the far Z if we do not have a depth prepass or copy depth
if (!requiresDepthPrepass && !requiresDepthCopyPass)
{
Shader.SetGlobalTexture(m_DepthTexture.id, SystemInfo.usesReversedZBuffer ? Texture2D.blackTexture : Texture2D.whiteTexture);
}
if (copyColorPass)
{
// TODO: Downsampling method should be store in the renderer instead of in the asset.
// We need to migrate this data to renderer. For now, we query the method in the active asset.
Downsampling downsamplingMethod = UniversalRenderPipeline.asset.opaqueDownsampling;
m_CopyColorPass.Setup(m_ActiveCameraColorAttachment.Identifier(), m_OpaqueColor, downsamplingMethod);
EnqueuePass(m_CopyColorPass);
}
if (renderPassInputs.requiresMotionVectors && !cameraData.xr.enabled)
{
SupportedRenderingFeatures.active.motionVectors = true; // hack for enabling UI
var data = MotionVectorRendering.instance.GetMotionDataForCamera(camera, cameraData);
m_MotionVectorPass.Setup(data);
EnqueuePass(m_MotionVectorPass);
}
#if ADAPTIVE_PERFORMANCE_2_1_0_OR_NEWER
if (needTransparencyPass)
#endif
{
if (transparentsNeedSettingsPass)
{
EnqueuePass(m_TransparentSettingsPass);
}
// if this is not lastCameraInTheStack we still need to Store, since the MSAA buffer might be needed by the Overlay cameras
RenderBufferStoreAction transparentPassColorStoreAction = cameraTargetDescriptor.msaaSamples > 1 && lastCameraInTheStack ? RenderBufferStoreAction.Resolve : RenderBufferStoreAction.Store;
RenderBufferStoreAction transparentPassDepthStoreAction = lastCameraInTheStack ? RenderBufferStoreAction.DontCare : RenderBufferStoreAction.Store;
// If CopyDepthPass pass event is scheduled on or after AfterRenderingTransparent, we will need to store the depth buffer or resolve (store for now until latest trunk has depth resolve support) it for MSAA case
if (requiresDepthCopyPass && m_CopyDepthPass.renderPassEvent >= RenderPassEvent.AfterRenderingTransparents)
transparentPassDepthStoreAction = RenderBufferStoreAction.Store;
m_RenderTransparentForwardPass.ConfigureColorStoreAction(transparentPassColorStoreAction);
m_RenderTransparentForwardPass.ConfigureDepthStoreAction(transparentPassDepthStoreAction);
EnqueuePass(m_RenderTransparentForwardPass);
}
EnqueuePass(m_OnRenderObjectCallbackPass);
bool hasCaptureActions = renderingData.cameraData.captureActions != null && lastCameraInTheStack;
// When FXAA or scaling is active, we must perform an additional pass at the end of the frame for the following reasons:
// 1. FXAA expects to be the last shader running on the image before it's presented to the screen. Since users are allowed
// to add additional render passes after post processing occurs, we can't run FXAA until all of those passes complete as well.
// The FinalPost pass is guaranteed to execute after user authored passes so FXAA is always run inside of it.
// 2. UberPost can only handle upscaling with linear filtering. All other filtering methods require the FinalPost pass.
bool applyFinalPostProcessing = anyPostProcessing && lastCameraInTheStack &&
((renderingData.cameraData.antialiasing == AntialiasingMode.FastApproximateAntialiasing) ||
((renderingData.cameraData.imageScalingMode == ImageScalingMode.Upscaling) && (renderingData.cameraData.upscalingFilter != ImageUpscalingFilter.Linear)));
// When post-processing is enabled we can use the stack to resolve rendering to camera target (screen or RT).
// However when there are render passes executing after post we avoid resolving to screen so rendering continues (before sRGBConversion etc)
bool resolvePostProcessingToCameraTarget = !hasCaptureActions && !hasPassesAfterPostProcessing && !applyFinalPostProcessing;
if (lastCameraInTheStack)
{
SetupFinalPassDebug(ref cameraData);
// Post-processing will resolve to final target. No need for final blit pass.
if (applyPostProcessing)
{
// if resolving to screen we need to be able to perform sRGBConversion in post-processing if necessary
bool doSRGBConversion = resolvePostProcessingToCameraTarget;
postProcessPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment, resolvePostProcessingToCameraTarget, m_ActiveCameraDepthAttachment, colorGradingLut, applyFinalPostProcessing, doSRGBConversion);
EnqueuePass(postProcessPass);
}
var sourceForFinalPass = m_ActiveCameraColorAttachment;
// Do FXAA or any other final post-processing effect that might need to run after AA.
if (applyFinalPostProcessing)
{
finalPostProcessPass.SetupFinalPass(sourceForFinalPass, true);
EnqueuePass(finalPostProcessPass);
}
if (renderingData.cameraData.captureActions != null)
{
m_CapturePass.Setup(sourceForFinalPass);
EnqueuePass(m_CapturePass);
}
// if post-processing then we already resolved to camera target while doing post.
// Also only do final blit if camera is not rendering to RT.
bool cameraTargetResolved =
// final PP always blit to camera target
applyFinalPostProcessing ||
// no final PP but we have PP stack. In that case it blit unless there are render pass after PP
(applyPostProcessing && !hasPassesAfterPostProcessing && !hasCaptureActions) ||
// offscreen camera rendering to a texture, we don't need a blit pass to resolve to screen
m_ActiveCameraColorAttachment == RenderTargetHandle.GetCameraTarget(cameraData.xr);
// We need final blit to resolve to screen
if (!cameraTargetResolved)
{
m_FinalBlitPass.Setup(cameraTargetDescriptor, sourceForFinalPass);
EnqueuePass(m_FinalBlitPass);
}
#if ENABLE_VR && ENABLE_XR_MODULE
if (cameraData.xr.enabled)
{
bool depthTargetResolved =
// active depth is depth target, we don't need a blit pass to resolve
m_ActiveCameraDepthAttachment == RenderTargetHandle.GetCameraTarget(cameraData.xr);
if (!depthTargetResolved && cameraData.xr.copyDepth)
{
m_XRCopyDepthPass.Setup(m_ActiveCameraDepthAttachment, RenderTargetHandle.GetCameraTarget(cameraData.xr));
EnqueuePass(m_XRCopyDepthPass);
}
}
#endif
}
// stay in RT so we resume rendering on stack after post-processing
else if (applyPostProcessing)
{
postProcessPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment, false, m_ActiveCameraDepthAttachment, colorGradingLut, false, false);
EnqueuePass(postProcessPass);
}
#if UNITY_EDITOR
if (isSceneViewOrPreviewCamera || (isGizmosEnabled && lastCameraInTheStack))
{
// Scene view camera should always resolve target (not stacked)
m_FinalDepthCopyPass.Setup(m_DepthTexture, RenderTargetHandle.CameraTarget);
m_FinalDepthCopyPass.MssaSamples = 0;
EnqueuePass(m_FinalDepthCopyPass);
}
#endif
}
/// <inheritdoc />
public override void SetupLights(ScriptableRenderContext context, ref RenderingData renderingData)
{
    m_ForwardLights.Setup(context, ref renderingData);

    // Deferred shading additionally performs per-tile light culling on the CPU.
    if (actualRenderingMode == RenderingMode.Deferred)
    {
        m_DeferredLights.SetupLights(context, ref renderingData);
    }
}
/// <inheritdoc />
public override void SetupCullingParameters(ref ScriptableCullingParameters cullingParameters,
    ref CameraData cameraData)
{
    // TODO: PerObjectCulling also affect reflection probes. Enabling it for now.
    // if (asset.additionalLightsRenderingMode == LightRenderingMode.Disabled ||
    //     asset.maxAdditionalLightsCount == 0)
    // {
    //     cullingParameters.cullingOptions |= CullingOptions.DisablePerObjectCulling;
    // }

    // Shadow casters are culled away entirely when no shadow casting mode is enabled,
    // or when the shadow distance has been turned down to zero.
    var pipelineAsset = UniversalRenderPipeline.asset;
    bool anyShadowsEnabled = pipelineAsset.supportsMainLightShadows || pipelineAsset.supportsAdditionalLightShadows;
    bool shadowDistanceIsZero = Mathf.Approximately(cameraData.maxShadowDistance, 0.0f);
    if (!anyShadowsEnabled || shadowDistanceIsZero)
    {
        cullingParameters.cullingOptions &= ~CullingOptions.ShadowCasters;
    }

    if (this.actualRenderingMode == RenderingMode.Deferred)
    {
        // Deferred shading has no per-object light limit, so let culling return as many lights as it can.
        cullingParameters.maximumVisibleLights = 0xFFFF;
    }
    else
    {
        // Cap visible lights to the pipeline's additional-light budget, plus one slot for the main light.
        //
        // Note: However ScriptableRenderContext.Cull() does not differentiate between light types.
        // If there is no active main light in the scene, ScriptableRenderContext.Cull() might return ( cullingParameters.maximumVisibleLights ) visible additional lights.
        // i.e ScriptableRenderContext.Cull() might return ( UniversalRenderPipeline.maxVisibleAdditionalLights + 1 ) visible additional lights !
        cullingParameters.maximumVisibleLights = UniversalRenderPipeline.maxVisibleAdditionalLights + 1;
    }

    cullingParameters.shadowDistance = cameraData.maxShadowDistance;
    cullingParameters.conservativeEnclosingSphere = pipelineAsset.conservativeEnclosingSphere;
    cullingParameters.numIterationsEnclosingSphere = pipelineAsset.numIterationsEnclosingSphere;
}
/// <inheritdoc />
public override void FinishRendering(CommandBuffer cmd)
{
    // Return the pooled color buffers to the color buffer system.
    m_ColorBufferSystem.Clear(cmd);

    // Reset the color attachment handle back to the camera target.
    // No explicit RT release here: the color targets are owned by m_ColorBufferSystem (cleared above).
    if (m_ActiveCameraColorAttachment != RenderTargetHandle.CameraTarget)
    {
        m_ActiveCameraColorAttachment = RenderTargetHandle.CameraTarget;
    }

    // The depth attachment is a temporary RT allocated by this renderer, so it is released explicitly.
    if (m_ActiveCameraDepthAttachment != RenderTargetHandle.CameraTarget)
    {
        cmd.ReleaseTemporaryRT(m_ActiveCameraDepthAttachment.id);
        m_ActiveCameraDepthAttachment = RenderTargetHandle.CameraTarget;
    }
}
// Enqueues the gbuffer, tile-depth, deferred lighting and forward-only passes that make up the deferred path.
void EnqueueDeferred(ref RenderingData renderingData, bool hasDepthPrepass, bool hasNormalPrepass, bool applyMainShadow, bool applyAdditionalShadow)
{
    m_DeferredLights.Setup(
        ref renderingData,
        applyAdditionalShadow ? m_AdditionalLightsShadowCasterPass : null,
        hasDepthPrepass,
        hasNormalPrepass,
        m_DepthTexture,
        m_DepthInfoTexture,
        m_TileDepthInfoTexture,
        m_ActiveCameraDepthAttachment,
        m_ActiveCameraColorAttachment
    );

    bool useNativeRenderPass = useRenderPassEnabled && m_DeferredLights.UseRenderPass;

    // Need to call Configure for both of these passes to setup input attachments as first frame otherwise will raise errors
    if (useNativeRenderPass)
    {
        m_GBufferPass.Configure(null, renderingData.cameraData.cameraTargetDescriptor);
        m_DeferredPass.Configure(null, renderingData.cameraData.cameraTargetDescriptor);
    }

    EnqueuePass(m_GBufferPass);

    // Must copy depth for deferred shading: TODO wait for API fix to bind depth texture as read-only resource.
    if (!useNativeRenderPass)
    {
        m_GBufferCopyDepthPass.Setup(m_CameraDepthAttachment, m_DepthTexture);
        EnqueuePass(m_GBufferCopyDepthPass);
    }

    // Note: DeferredRender.Setup is called by UniversalRenderPipeline.RenderSingleCamera (overrides ScriptableRenderer.Setup).
    // At this point we do not yet know whether m_DeferredLights.m_Tilers[x].m_Tiles actually contain indices of lights
    // intersecting tiles (if not, several of the following passes could be skipped): that is computed later in
    // DeferredRender.SetupLights, called by UniversalRenderPipeline.RenderSingleCamera (via ScriptableRenderer.Execute).
    // However, HasTileLights uses m_HasTileVisLights, which CheckHasTileLights derives from all visibleLights - the lights
    // that passed camera culling and are therefore in front of the camera - so in that case the tiles can be assumed non-empty.
    // They could only be empty for an algorithm accessing scene depth information on the CPU side, which will probably never happen.
    if (m_DeferredLights.HasTileLights())
    {
        // Compute for each tile a 32bits bitmask in which a raised bit means "this 1/32th depth slice contains geometry that could intersect with lights".
        // Per-tile bitmasks are obtained by merging together the per-pixel bitmasks computed for each individual pixel of the tile.
        EnqueuePass(m_TileDepthRangePass);

        // On some platforms, splitting the bitmask computation into two passes:
        // 1/ compute bitmasks for individual or small blocks of pixels
        // 2/ merge those individual bitmasks into per-tile bitmasks
        // provides better performance than doing it in the single pass above.
        if (m_DeferredLights.HasTileDepthRangeExtraPass())
            EnqueuePass(m_TileDepthRangeExtraPass);
    }

    EnqueuePass(m_DeferredPass);

    EnqueuePass(m_RenderOpaqueForwardOnlyPass);
}
// Aggregated view of the inputs required by the passes currently in the render pass queue.
// Filled by GetRenderPassInputs and consumed when deciding which auxiliary passes to schedule.
private struct RenderPassInputSummary
{
    // True when any pass declares ScriptableRenderPassInput.Depth.
    internal bool requiresDepthTexture;
    // True when depth must be produced by a prepass (normals requested, or depth needed before main rendering).
    internal bool requiresDepthPrepass;
    // True when any pass declares ScriptableRenderPassInput.Normal.
    internal bool requiresNormalsTexture;
    // True when any pass declares ScriptableRenderPassInput.Color.
    internal bool requiresColorTexture;
    // True when the color texture must actually be created (set for DBufferRenderPass, which does not handle y-flip correctly).
    internal bool requiresColorTextureCreated;
    // True when any pass declares ScriptableRenderPassInput.Motion.
    internal bool requiresMotionVectors;
    // Earliest render pass event at which depth and/or normals are required.
    internal RenderPassEvent requiresDepthNormalAtEvent;
    // Earliest render pass event at which the depth texture is required.
    internal RenderPassEvent requiresDepthTextureEarliestEvent;
}
// Scans the active render pass queue and summarizes which resources (depth, normals, color,
// motion vectors) the enqueued passes require, and the earliest events they are required at.
private RenderPassInputSummary GetRenderPassInputs(ref RenderingData renderingData)
{
    // In deferred mode "main rendering" starts at the gbuffer pass; in forward mode at the opaque pass.
    RenderPassEvent beforeMainRenderingEvent = m_RenderingMode == RenderingMode.Deferred ? RenderPassEvent.BeforeRenderingGbuffer : RenderPassEvent.BeforeRenderingOpaques;

    RenderPassInputSummary inputSummary = new RenderPassInputSummary();
    inputSummary.requiresDepthNormalAtEvent = RenderPassEvent.BeforeRenderingOpaques;
    inputSummary.requiresDepthTextureEarliestEvent = RenderPassEvent.BeforeRenderingPostProcessing;
    for (int i = 0; i < activeRenderPassQueue.Count; ++i)
    {
        ScriptableRenderPass pass = activeRenderPassQueue[i];
        bool needsDepth = (pass.input & ScriptableRenderPassInput.Depth) != ScriptableRenderPassInput.None;
        bool needsNormals = (pass.input & ScriptableRenderPassInput.Normal) != ScriptableRenderPassInput.None;
        bool needsColor = (pass.input & ScriptableRenderPassInput.Color) != ScriptableRenderPassInput.None;
        bool needsMotion = (pass.input & ScriptableRenderPassInput.Motion) != ScriptableRenderPassInput.None;
        bool eventBeforeMainRendering = pass.renderPassEvent <= beforeMainRenderingEvent;

        // TODO: Need a better way to handle this, probably worth to recheck after render graph
        // DBuffer requires color texture created as it does not handle y flip correctly
        if (pass is DBufferRenderPass)
        {
            inputSummary.requiresColorTextureCreated = true;
        }

        inputSummary.requiresDepthTexture |= needsDepth;
        // Normals always require a prepass; depth requires one only when consumed before main rendering.
        // (Parentheses make the intended precedence explicit: the original relied on && binding tighter than ||.)
        inputSummary.requiresDepthPrepass |= needsNormals || (needsDepth && eventBeforeMainRendering);
        inputSummary.requiresNormalsTexture |= needsNormals;
        inputSummary.requiresColorTexture |= needsColor;
        inputSummary.requiresMotionVectors |= needsMotion;
        // Track the earliest event requesting depth (and depth/normals) across all passes.
        if (needsDepth)
            inputSummary.requiresDepthTextureEarliestEvent = (RenderPassEvent)Mathf.Min((int)pass.renderPassEvent, (int)inputSummary.requiresDepthTextureEarliestEvent);
        if (needsNormals || needsDepth)
            inputSummary.requiresDepthNormalAtEvent = (RenderPassEvent)Mathf.Min((int)pass.renderPassEvent, (int)inputSummary.requiresDepthNormalAtEvent);
    }
    return inputSummary;
}
// Returns true when running on an OpenGL ES (2 or 3) graphics device.
bool IsGLESDevice()
{
    var deviceType = SystemInfo.graphicsDeviceType;
    return deviceType == GraphicsDeviceType.OpenGLES2
        || deviceType == GraphicsDeviceType.OpenGLES3;
}
// Allocates/configures the intermediate camera color and depth render targets and binds them as camera targets.
// NOTE(review): the 'primedDepth' parameter is not read anywhere in this body - confirm whether depth-priming
// setup was meant to hook in here, or whether the parameter is vestigial.
void CreateCameraRenderTarget(ScriptableRenderContext context, ref RenderTextureDescriptor descriptor, bool primedDepth)
{
    CommandBuffer cmd = CommandBufferPool.Get();
    using (new ProfilingScope(null, Profiling.createCameraRenderTarget))
    {
        // An intermediate color target is in use (handle differs from the backbuffer).
        if (m_ActiveCameraColorAttachment != RenderTargetHandle.CameraTarget)
        {
            // When no separate depth attachment exists, the color target carries its own depth buffer.
            bool useDepthRenderBuffer = m_ActiveCameraDepthAttachment == RenderTargetHandle.CameraTarget;
            var colorDescriptor = descriptor;
            colorDescriptor.useMipMap = false;
            colorDescriptor.autoGenerateMips = false;
            colorDescriptor.depthBufferBits = (useDepthRenderBuffer) ? k_DepthStencilBufferBits : 0;
            m_ColorBufferSystem.SetCameraSettings(cmd, colorDescriptor, FilterMode.Bilinear);
            if (useDepthRenderBuffer)
                ConfigureCameraTarget(m_ColorBufferSystem.GetBackBuffer(cmd).id, m_ColorBufferSystem.GetBufferA().id);
            else
                ConfigureCameraColorTarget(m_ColorBufferSystem.GetBackBuffer(cmd).id);
            m_ActiveCameraColorAttachment = m_ColorBufferSystem.GetBackBuffer(cmd);
            cmd.SetGlobalTexture("_CameraColorTexture", m_ActiveCameraColorAttachment.id);
            //Set _AfterPostProcessTexture, users might still rely on this although it is now always the cameratarget due to swapbuffer
            cmd.SetGlobalTexture("_AfterPostProcessTexture", m_ActiveCameraColorAttachment.id);
        }
        // A separate depth target is in use: allocate it as a temporary RT.
        if (m_ActiveCameraDepthAttachment != RenderTargetHandle.CameraTarget)
        {
            var depthDescriptor = descriptor;
            depthDescriptor.useMipMap = false;
            depthDescriptor.autoGenerateMips = false;
            // Bind as a multisampled texture only when MSAA is active and the platform supports it.
            depthDescriptor.bindMS = depthDescriptor.msaaSamples > 1 && (SystemInfo.supportsMultisampledTextures != 0);

            // binding MS surfaces is not supported by the GLES backend, and it won't be fixed after investigating
            // the high performance impact of potential fixes, which would make it more expensive than depth prepass (fogbugz 1339401 for more info)
            if (IsGLESDevice())
                depthDescriptor.bindMS = false;
            depthDescriptor.colorFormat = RenderTextureFormat.Depth;
            depthDescriptor.depthBufferBits = k_DepthStencilBufferBits;
            cmd.GetTemporaryRT(m_ActiveCameraDepthAttachment.id, depthDescriptor, FilterMode.Point);
        }
    }
    context.ExecuteCommandBuffer(cmd);
    CommandBufferPool.Release(cmd);
}
// Returns true when MSAA must be resolved by an explicit pass rather than implicitly by the platform.
bool PlatformRequiresExplicitMsaaResolve()
{
#if UNITY_EDITOR
    // In the editor play-mode we use a Game View Render Texture, with
    // samples count forced to 1, so an explicit MSAA resolve is always needed.
    return true;
#else
    // On Metal/iOS the MSAA resolve is done implicitly as part of the renderpass, so no extra intermediate pass is needed.
    // Note: On Vulkan Standalone, despite SystemInfo.supportsMultisampleAutoResolve being true, the backbuffer has only
    // 1 sample, so the explicit resolve is still required on non-mobile platforms with supportsMultisampleAutoResolve.
    bool implicitMobileAutoResolve = SystemInfo.supportsMultisampleAutoResolve && Application.isMobilePlatform;
    bool isMetalDevice = SystemInfo.graphicsDeviceType == GraphicsDeviceType.Metal;
    return !implicitMobileAutoResolve && !isMetalDevice;
#endif
}
/// <summary>
/// Checks if the pipeline needs to create an intermediate render texture.
/// </summary>
/// <param name="cameraData">CameraData contains all relevant render target information for the camera.</param>
/// <seealso cref="CameraData"/>
/// <returns>Return true if pipeline needs to render to a intermediate render texture.</returns>
bool RequiresIntermediateColorTexture(ref CameraData cameraData)
{
    // When rendering a camera stack we always create an intermediate render texture to composite camera results.
    // We create it upon rendering the Base camera.
    if (cameraData.renderType == CameraRenderType.Base && !cameraData.resolveFinalTarget)
        return true;

    // Always force rendering into intermediate color texture if deferred rendering mode is selected.
    // Reason: without intermediate color texture, the target camera texture is y-flipped.
    // However, the target camera texture is bound during gbuffer pass and deferred pass.
    // Gbuffer pass will not be y-flipped because it is MRT (see ScriptableRenderContext implementation),
    // while deferred pass will be y-flipped, which breaks rendering.
    // This incurs an extra blit at the end of rendering.
    if (this.actualRenderingMode == RenderingMode.Deferred)
        return true;

    bool sceneView = cameraData.isSceneViewCamera;
    var targetDescriptor = cameraData.cameraTargetDescriptor;
    bool usesScaledRendering = cameraData.imageScalingMode != ImageScalingMode.None;
    bool backbufferDimensionCompatible = targetDescriptor.dimension == TextureDimension.Tex2D;
    bool needsExplicitMsaaResolve = targetDescriptor.msaaSamples > 1 && PlatformRequiresExplicitMsaaResolve();
    bool rendersOffscreen = cameraData.targetTexture != null && !sceneView;
    bool capturing = cameraData.captureActions != null;
#if ENABLE_VR && ENABLE_XR_MODULE
    if (cameraData.xr.enabled)
    {
        // XR overrides: render scale is handled by the XR system, and the backbuffer may be a texture array.
        usesScaledRendering = false;
        backbufferDimensionCompatible = cameraData.xr.renderTargetDesc.dimension == targetDescriptor.dimension;
    }
#endif
    bool offscreenCameraNeedsBlit = cameraData.postProcessEnabled || cameraData.requiresOpaqueTexture || needsExplicitMsaaResolve || !cameraData.isDefaultViewport;
    // An offscreen camera already renders to a texture, so only the blit-forcing conditions matter.
    if (rendersOffscreen)
        return offscreenCameraNeedsBlit;

    return offscreenCameraNeedsBlit || sceneView || usesScaledRendering || cameraData.isHdrEnabled ||
        !backbufferDimensionCompatible || capturing || cameraData.requireSrgbConversion;
}
// Returns true when the camera depth buffer can be copied into a texture with this camera setup and platform.
bool CanCopyDepth(ref CameraData cameraData)
{
    bool msaaActive = cameraData.cameraTargetDescriptor.msaaSamples > 1;
    bool canResolveMsaaDepth = msaaActive && SystemInfo.supportsMultisampledTextures != 0;

    // copying MSAA depth on GLES3 is giving invalid results. Needs investigation (Fogbugz issue 1339401)
    if (canResolveMsaaDepth && IsGLESDevice())
        return false;

    // Without MSAA, depth can be copied either into a depth render target or via CopyTexture.
    bool hasCopyTextureSupport = SystemInfo.copyTextureSupport != CopyTextureSupport.None;
    bool hasDepthTargetSupport = RenderingUtils.SupportsRenderTextureFormat(RenderTextureFormat.Depth);
    bool canCopySinglesampledDepth = !msaaActive && (hasDepthTargetSupport || hasCopyTextureSupport);

    return canCopySinglesampledDepth || canResolveMsaaDepth;
}
// Swaps the front/back color buffers, rebinds the camera targets and refreshes the global color textures.
internal override void SwapColorBuffer(CommandBuffer cmd)
{
    m_ColorBufferSystem.Swap();

    // When no dedicated depth attachment exists, the depth is attached to the color buffer (buffer A),
    // so both the color and depth targets must be re-bound together after the swap.
    if (m_ActiveCameraDepthAttachment == RenderTargetHandle.CameraTarget)
    {
        ConfigureCameraTarget(m_ColorBufferSystem.GetBackBuffer(cmd).id, m_ColorBufferSystem.GetBufferA().id);
    }
    else
    {
        ConfigureCameraColorTarget(m_ColorBufferSystem.GetBackBuffer(cmd).id);
    }

    m_ActiveCameraColorAttachment = m_ColorBufferSystem.GetBackBuffer();
    cmd.SetGlobalTexture("_CameraColorTexture", m_ActiveCameraColorAttachment.id);
    //Set _AfterPostProcessTexture, users might still rely on this although it is now always the cameratarget due to swapbuffer
    cmd.SetGlobalTexture("_AfterPostProcessTexture", m_ActiveCameraColorAttachment.id);
}
// Returns the identifier of the color buffer system's current front buffer.
internal override RenderTargetIdentifier GetCameraColorFrontBuffer(CommandBuffer cmd)
{
    var frontBuffer = m_ColorBufferSystem.GetFrontBuffer(cmd);
    return frontBuffer.id;
}
// Toggles MSAA on the swap-chain color buffers managed by the color buffer system.
internal override void EnableSwapBufferMSAA(bool enable) => m_ColorBufferSystem.EnableMSAA(enable);
}
}