
Unity - Built-in Depth Buffer Value Calculation (note: not the depth values in _CameraDepthTexture / _CameraDepthNormalsTexture)

Jave.Lin · Published 2021-12-23 12:11:38

Contents
  • Purpose
  • Built-in depth write value
  • Summary

Purpose

A memo for later reference.

Built-in depth write value

In some earlier experiments I noticed that the SV_Depth value and the values encoded in _CameraDepthTexture / _CameraDepthNormalsTexture, even after decoding, still did not match up in their depth relationship.

So I began to suspect that the value in the depth buffer is simply not computed that way at all.
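For context, the decode path I was comparing against looks roughly like this (a minimal sketch of my own using the standard UnityCG.cginc helpers; the screen-space uv is assumed to be supplied by the surrounding shader):

#include "UnityCG.cginc"

sampler2D _CameraDepthTexture;
sampler2D _CameraDepthNormalsTexture;

// Sample and decode both camera depth textures at a screen-space uv.
float2 sample_decoded_depths(float2 uv)
{
    // _CameraDepthTexture stores raw (nonlinear) hardware depth;
    // Linear01Depth remaps it to linear 0..1 (0 = camera, 1 = far plane)
    float raw      = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, uv);
    float linear01 = Linear01Depth(raw);

    // _CameraDepthNormalsTexture packs a view-space normal plus a linear 0..1 depth
    float  dn_depth;
    float3 dn_normal;
    DecodeDepthNormal(tex2D(_CameraDepthNormalsTexture, uv), dn_depth, dn_normal);

    return float2(linear01, dn_depth); // both are linear 0..1 depths
}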

Later I came across this Stack Overflow question: How do you write z-depth in a shader?

It turns out the value in the depth buffer is computed like this:


struct fragmentOutput {
	float4 color  : SV_Target;
	float  zvalue : SV_Depth; // written directly into the depth buffer
};

fragmentOutput frag(inputType ...)
{
	fragmentOutput o = ....;
	...
	// convert the position to clip space, then perspective-divide to get NDC depth
	float4 clip_pos = mul(UNITY_MATRIX_VP, float4(ray_pos, 1.0));
	o.zvalue = clip_pos.z / clip_pos.w;
	...
	return o;
}

The float zvalue : SV_Depth above is the depth write value we care about: the buffer stores this post-perspective-divide NDC z, not the linear or encoded depths you read back from the camera depth textures.

The answerer's link also led me to the compute_depth implementation:

float compute_depth(float4 clippos)
{
#if defined(SHADER_TARGET_GLSL) || defined(SHADER_API_GLES) || defined(SHADER_API_GLES3)
    // OpenGL-style clip space: NDC z is in [-1, 1], so remap it to the [0, 1] buffer range
    return ((clippos.z / clippos.w) + 1.0) * 0.5;
#else
    // D3D-style clip space: NDC z is already in [0, 1]
    return clippos.z / clippos.w;
#endif
}
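As a quick usage sketch (my own minimal example, not code from the repo; the v2f layout carrying a world-space position is an assumption), you transform the position by UNITY_MATRIX_VP and write the result of compute_depth to SV_Depth:

struct v2f
{
    float4 pos       : SV_POSITION;
    float3 world_pos : TEXCOORD0; // interpolated world-space position (assumed)
};

struct frag_out
{
    fixed4 color : SV_Target;
    float  depth : SV_Depth;
};

frag_out frag(v2f i)
{
    frag_out o;
    o.color = fixed4(1, 0, 0, 1); // placeholder color
    // in the raymarching case, world_pos would be the ray hit point instead
    o.depth = compute_depth(mul(UNITY_MATRIX_VP, float4(i.world_pos, 1.0)));
    return o;
}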

I then took a look at the GitHub repo behind it: it mixes raymarched rendering with mesh (geometry) rendering in Unity, which naturally requires writing depth, and reading it back, for depth testing.

Below is where compute_depth is applied; the key part is the depth written into the G-buffer: o.depth = compute_depth(mul(UNITY_MATRIX_VP, float4(ray_pos, 1.0)));

struct gbuffer_out
{
    half4 diffuse           : SV_Target0; // RT0: diffuse color (rgb), occlusion (a)
    half4 spec_smoothness   : SV_Target1; // RT1: spec color (rgb), smoothness (a)
    half4 normal            : SV_Target2; // RT2: normal (rgb), --unused, very low precision-- (a) 
    half4 emission          : SV_Target3; // RT3: emission (rgb), --unused-- (a)
    float depth             : SV_Depth;
};


gbuffer_out frag_gbuffer(vs_out v)
{
#if UNITY_UV_STARTS_AT_TOP
    v.spos.y *= -1.0;
#endif
    float time = _Time.y;
    float2 pos = v.spos.xy;
    pos.x *= _ScreenParams.x / _ScreenParams.y;

    float num_steps = 1.0;
    float last_distance = 0.0;
    float total_distance = _ProjectionParams.y;
    float3 ray_pos;
    float3 normal;
    if(g_enable_adaptive) {
        float3 cam_pos      = get_camera_position();
        float3 cam_forward  = get_camera_forward();
        float3 cam_up       = get_camera_up();
        float3 cam_right    = get_camera_right();
        float  cam_focal_len= get_camera_focal_length();
        float3 ray_dir = normalize(cam_right*pos.x + cam_up*pos.y + cam_forward*cam_focal_len);

        // adaptive mode: fetch the pre-computed ray distance (spos is in [-1,1], remapped to [0,1] uv)
        total_distance = tex2D(g_depth, v.spos.xy*0.5+0.5).x;
        ray_pos = cam_pos + ray_dir * total_distance;
        normal = guess_normal(ray_pos);
        //normal = float3(0.0, 0.0, 1.0);
    }
    else {
        raymarching(pos, MAX_MARCH_SINGLE_GBUFFER_PASS, total_distance, num_steps, last_distance, ray_pos);
        normal = guess_normal(ray_pos);
    }

    float glow = 0.0;
    if(g_enable_glowline) {
        glow += max((modc(length(ray_pos)-time*1.5, 10.0)-9.0)*2.5, 0.0);
        float2 p = pattern(ray_pos.xz*0.5);
        // if(p.x ... (the quoted snippet is cut off here in the original post)
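To tie this back to my original confusion, a way to sanity-check the relationship (my own sketch, not from the post; it assumes _CameraDepthTexture has been populated for the camera) is to visualize the raw, un-linearized depth sample: it should match the value previously written to SV_Depth at that pixel, since the texture mirrors the depth buffer.

#include "UnityCG.cginc"

sampler2D _CameraDepthTexture;

// Image-effect pass: output the raw depth-buffer value, which should match
// what compute_depth(...) wrote via SV_Depth at the same pixel.
fixed4 frag_check(v2f_img i) : SV_Target
{
    float raw = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv);
    return fixed4(raw, raw, raw, 1);
}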