gfx-rs / naga

Universal shader translation in Rust
https://github.com/gfx-rs/wgpu/tree/trunk/naga
Other
1.54k stars 195 forks source link

No Such Function `vec5` When Targeting GLSL ES 3.1 #1153

Closed zicklag closed 3 years ago

zicklag commented 3 years ago

I'm going to look into this, but in case anybody knows what is wrong right off the bat: I'm getting an error translating Bevy's new PBR shader from WGSL to GLSL. Somehow the GLSL output ends up with a call to a non-existent vec5 function.

PS: This shader is way too big, and I should have produced a smaller sample shader that exhibits the issue. I just haven't figured out how yet, so feel free to wait for me to do that if you want. :grin:

WGSL:

```glsl // TODO: try merging this block with the binding? [[block]] struct View { view_proj: mat4x4; world_position: vec3; }; [[group(0), binding(0)]] var view: View; [[block]] struct Mesh { transform: mat4x4; }; [[group(1), binding(0)]] var mesh: Mesh; struct Vertex { [[location(0)]] position: vec3; [[location(1)]] normal: vec3; [[location(2)]] uv: vec2; }; struct VertexOutput { [[builtin(position)]] clip_position: vec4; [[location(0)]] world_position: vec4; [[location(1)]] world_normal: vec3; [[location(2)]] uv: vec2; }; [[stage(vertex)]] fn vertex(vertex: Vertex) -> VertexOutput { let world_position = mesh.transform * vec4(vertex.position, 1.0); var out: VertexOutput; out.uv = vertex.uv; out.world_position = world_position; out.clip_position = view.view_proj * world_position; // FIXME: The inverse transpose of the model matrix should be used to correctly handle scaling // of normals out.world_normal = mat3x3(mesh.transform.x.xyz, mesh.transform.y.xyz, mesh.transform.z.xyz) * vertex.normal; return out; } // From the Filament design doc // https://google.github.io/filament/Filament.html#table_symbols // Symbol Definition // v View unit vector // l Incident light unit vector // n Surface normal unit vector // h Half unit vector between l and v // f BRDF // f_d Diffuse component of a BRDF // f_r Specular component of a BRDF // α Roughness, remapped from using input perceptualRoughness // σ Diffuse reflectance // Ω Spherical domain // f0 Reflectance at normal incidence // f90 Reflectance at grazing angle // χ+(a) Heaviside function (1 if a>0 and 0 otherwise) // nior Index of refraction (IOR) of an interface // ⟨n⋅l⟩ Dot product clamped to [0..1] // ⟨a⟩ Saturated value (clamped to [0..1]) // The Bidirectional Reflectance Distribution Function (BRDF) describes the surface response of a standard material // and consists of two components, the diffuse component (f_d) and the specular component (f_r): // f(v,l) = f_d(v,l) + f_r(v,l) // // The form of the microfacet model 
is the same for diffuse and specular // f_r(v,l) = f_d(v,l) = 1 / { |n⋅v||n⋅l| } ∫_Ω D(m,α) G(v,l,m) f_m(v,l,m) (v⋅m) (l⋅m) dm // // In which: // D, also called the Normal Distribution Function (NDF) models the distribution of the microfacets // G models the visibility (or occlusion or shadow-masking) of the microfacets // f_m is the microfacet BRDF and differs between specular and diffuse components // // The above integration needs to be approximated. [[block]] struct StandardMaterial { base_color: vec4; emissive: vec4; perceptual_roughness: f32; metallic: f32; reflectance: f32; // 'flags' is a bit field indicating various option. uint is 32 bits so we have up to 32 options. flags: u32; }; struct PointLight { color: vec4; // projection: mat4x4; position: vec3; inverse_square_range: f32; radius: f32; near: f32; far: f32; shadow_depth_bias: f32; shadow_normal_bias: f32; }; struct DirectionalLight { view_projection: mat4x4; color: vec4; direction_to_light: vec3; shadow_depth_bias: f32; shadow_normal_bias: f32; }; [[block]] struct Lights { // NOTE: this array size must be kept in sync with the constants defined bevy_pbr2/src/render/light.rs // TODO: this can be removed if we move to storage buffers for light arrays point_lights: array; directional_lights: array; ambient_color: vec4; n_point_lights: u32; n_directional_lights: u32; }; let FLAGS_BASE_COLOR_TEXTURE_BIT: u32 = 1u; let FLAGS_EMISSIVE_TEXTURE_BIT: u32 = 2u; let FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT: u32 = 4u; let FLAGS_OCCLUSION_TEXTURE_BIT: u32 = 8u; let FLAGS_DOUBLE_SIDED_BIT: u32 = 16u; let FLAGS_UNLIT_BIT: u32 = 32u; [[group(0), binding(1)]] var lights: Lights; [[group(0), binding(2)]] var point_shadow_textures: texture_depth_cube_array; [[group(0), binding(3)]] var point_shadow_textures_sampler: sampler_comparison; [[group(0), binding(4)]] var directional_shadow_textures: texture_depth_2d_array; [[group(0), binding(5)]] var directional_shadow_textures_sampler: sampler_comparison; [[group(2), 
binding(0)]] var material: StandardMaterial; [[group(2), binding(1)]] var base_color_texture: texture_2d; [[group(2), binding(2)]] var base_color_sampler: sampler; [[group(2), binding(3)]] var emissive_texture: texture_2d; [[group(2), binding(4)]] var emissive_sampler: sampler; [[group(2), binding(5)]] var metallic_roughness_texture: texture_2d; [[group(2), binding(6)]] var metallic_roughness_sampler: sampler; [[group(2), binding(7)]] var occlusion_texture: texture_2d; [[group(2), binding(8)]] var occlusion_sampler: sampler; let PI: f32 = 3.141592653589793; fn saturate(value: f32) -> f32 { return clamp(value, 0.0, 1.0); } // distanceAttenuation is simply the square falloff of light intensity // combined with a smooth attenuation at the edge of the light radius // // light radius is a non-physical construct for efficiency purposes, // because otherwise every light affects every fragment in the scene fn getDistanceAttenuation(distanceSquare: f32, inverseRangeSquared: f32) -> f32 { let factor = distanceSquare * inverseRangeSquared; let smoothFactor = saturate(1.0 - factor * factor); let attenuation = smoothFactor * smoothFactor; return attenuation * 1.0 / max(distanceSquare, 0.0001); } // Normal distribution function (specular D) // Based on https://google.github.io/filament/Filament.html#citation-walter07 // D_GGX(h,α) = α^2 / { π ((n⋅h)^2 (α2−1) + 1)^2 } // Simple implementation, has precision problems when using fp16 instead of fp32 // see https://google.github.io/filament/Filament.html#listing_speculardfp16 fn D_GGX(roughness: f32, NoH: f32, h: vec3) -> f32 { let oneMinusNoHSquared = 1.0 - NoH * NoH; let a = NoH * roughness; let k = roughness / (oneMinusNoHSquared + a * a); let d = k * k * (1.0 / PI); return d; } // Visibility function (Specular G) // V(v,l,a) = G(v,l,α) / { 4 (n⋅v) (n⋅l) } // such that f_r becomes // f_r(v,l) = D(h,α) V(v,l,α) F(v,h,f0) // where // V(v,l,α) = 0.5 / { n⋅l sqrt((n⋅v)^2 (1−α2) + α2) + n⋅v sqrt((n⋅l)^2 (1−α2) + α2) } // Note the two 
sqrt's, that may be slow on mobile, see https://google.github.io/filament/Filament.html#listing_approximatedspecularv fn V_SmithGGXCorrelated(roughness: f32, NoV: f32, NoL: f32) -> f32 { let a2 = roughness * roughness; let lambdaV = NoL * sqrt((NoV - a2 * NoV) * NoV + a2); let lambdaL = NoV * sqrt((NoL - a2 * NoL) * NoL + a2); let v = 0.5 / (lambdaV + lambdaL); return v; } // Fresnel function // see https://google.github.io/filament/Filament.html#citation-schlick94 // F_Schlick(v,h,f_0,f_90) = f_0 + (f_90 − f_0) (1 − v⋅h)^5 fn F_Schlick_vec(f0: vec3, f90: f32, VoH: f32) -> vec3 { // not using mix to keep the vec3 and float versions identical return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0); } fn F_Schlick(f0: f32, f90: f32, VoH: f32) -> f32 { // not using mix to keep the vec3 and float versions identical return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0); } fn fresnel(f0: vec3, LoH: f32) -> vec3 { // f_90 suitable for ambient occlusion // see https://google.github.io/filament/Filament.html#lighting/occlusion let f90 = saturate(dot(f0, vec3(50.0 * 0.33))); return F_Schlick_vec(f0, f90, LoH); } // Specular BRDF // https://google.github.io/filament/Filament.html#materialsystem/specularbrdf // Cook-Torrance approximation of the microfacet model integration using Fresnel law F to model f_m // f_r(v,l) = { D(h,α) G(v,l,α) F(v,h,f0) } / { 4 (n⋅v) (n⋅l) } fn specular(f0: vec3, roughness: f32, h: vec3, NoV: f32, NoL: f32, NoH: f32, LoH: f32, specularIntensity: f32) -> vec3 { let D = D_GGX(roughness, NoH, h); let V = V_SmithGGXCorrelated(roughness, NoV, NoL); let F = fresnel(f0, LoH); return (specularIntensity * D * V) * F; } // Diffuse BRDF // https://google.github.io/filament/Filament.html#materialsystem/diffusebrdf // fd(v,l) = σ/π * 1 / { |n⋅v||n⋅l| } ∫Ω D(m,α) G(v,l,m) (v⋅m) (l⋅m) dm // // simplest approximation // float Fd_Lambert() { // return 1.0 / PI; // } // // vec3 Fd = diffuseColor * Fd_Lambert(); // // Disney approximation // See 
https://google.github.io/filament/Filament.html#citation-burley12 // minimal quality difference fn Fd_Burley(roughness: f32, NoV: f32, NoL: f32, LoH: f32) -> f32 { let f90 = 0.5 + 2.0 * roughness * LoH * LoH; let lightScatter = F_Schlick(1.0, f90, NoL); let viewScatter = F_Schlick(1.0, f90, NoV); return lightScatter * viewScatter * (1.0 / PI); } // From https://www.unrealengine.com/en-US/blog/physically-based-shading-on-mobile fn EnvBRDFApprox(f0: vec3, perceptual_roughness: f32, NoV: f32) -> vec3 { let c0 = vec4(-1.0, -0.0275, -0.572, 0.022); let c1 = vec4(1.0, 0.0425, 1.04, -0.04); let r = perceptual_roughness * c0 + c1; let a004 = min(r.x * r.x, exp2(-9.28 * NoV)) * r.x + r.y; let AB = vec2(-1.04, 1.04) * a004 + r.zw; return f0 * AB.x + AB.y; } fn perceptualRoughnessToRoughness(perceptualRoughness: f32) -> f32 { // clamp perceptual roughness to prevent precision problems // According to Filament design 0.089 is recommended for mobile // Filament uses 0.045 for non-mobile let clampedPerceptualRoughness = clamp(perceptualRoughness, 0.089, 1.0); return clampedPerceptualRoughness * clampedPerceptualRoughness; } // from https://64.github.io/tonemapping/ // reinhard on RGB oversaturates colors fn reinhard(color: vec3) -> vec3 { return color / (1.0 + color); } fn reinhard_extended(color: vec3, max_white: f32) -> vec3 { let numerator = color * (1.0f + (color / vec3(max_white * max_white))); return numerator / (1.0 + color); } // luminance coefficients from Rec. 709. 
// https://en.wikipedia.org/wiki/Rec._709 fn luminance(v: vec3) -> f32 { return dot(v, vec3(0.2126, 0.7152, 0.0722)); } fn change_luminance(c_in: vec3, l_out: f32) -> vec3 { let l_in = luminance(c_in); return c_in * (l_out / l_in); } fn reinhard_luminance(color: vec3) -> vec3 { let l_old = luminance(color); let l_new = l_old / (1.0f + l_old); return change_luminance(color, l_new); } fn reinhard_extended_luminance(color: vec3, max_white_l: f32) -> vec3 { let l_old = luminance(color); let numerator = l_old * (1.0f + (l_old / (max_white_l * max_white_l))); let l_new = numerator / (1.0f + l_old); return change_luminance(color, l_new); } fn point_light( world_position: vec3, light: PointLight, roughness: f32, NdotV: f32, N: vec3, V: vec3, R: vec3, F0: vec3, diffuseColor: vec3 ) -> vec3 { let light_to_frag = light.position.xyz - world_position.xyz; let distance_square = dot(light_to_frag, light_to_frag); let rangeAttenuation = getDistanceAttenuation(distance_square, light.inverse_square_range); // Specular. // Representative Point Area Lights. // see http://blog.selfshadow.com/publications/s2013-shading-course/karis/s2013_pbs_epic_notes_v2.pdf p14-16 let a = roughness; let centerToRay = dot(light_to_frag, R) * R - light_to_frag; let closestPoint = light_to_frag + centerToRay * saturate(light.radius * inverseSqrt(dot(centerToRay, centerToRay))); let LspecLengthInverse = inverseSqrt(dot(closestPoint, closestPoint)); let normalizationFactor = a / saturate(a + (light.radius * 0.5 * LspecLengthInverse)); let specularIntensity = normalizationFactor * normalizationFactor; var L: vec3 = closestPoint * LspecLengthInverse; // normalize() equivalent? var H: vec3 = normalize(L + V); var NoL: f32 = saturate(dot(N, L)); var NoH: f32 = saturate(dot(N, H)); var LoH: f32 = saturate(dot(L, H)); let specular_light = specular(F0, roughness, H, NdotV, NoL, NoH, LoH, specularIntensity); // Diffuse. // Comes after specular since its NoL is used in the lighting equation. 
L = normalize(light_to_frag); H = normalize(L + V); NoL = saturate(dot(N, L)); NoH = saturate(dot(N, H)); LoH = saturate(dot(L, H)); let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH); // Lout = f(v,l) Φ / { 4 π d^2 }⟨n⋅l⟩ // where // f(v,l) = (f_d(v,l) + f_r(v,l)) * light_color // Φ is light intensity // our rangeAttentuation = 1 / d^2 multiplied with an attenuation factor for smoothing at the edge of the non-physical maximum light radius // It's not 100% clear where the 1/4π goes in the derivation, but we follow the filament shader and leave it out // See https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminanceEquation // TODO compensate for energy loss https://google.github.io/filament/Filament.html#materialsystem/improvingthebrdfs/energylossinspecularreflectance // light.color.rgb is premultiplied with light.intensity on the CPU return ((diffuse + specular_light) * light.color.rgb) * (rangeAttenuation * NoL); } fn directional_light(light: DirectionalLight, roughness: f32, NdotV: f32, normal: vec3, view: vec3, R: vec3, F0: vec3, diffuseColor: vec3) -> vec3 { let incident_light = light.direction_to_light.xyz; let half_vector = normalize(incident_light + view); let NoL = saturate(dot(normal, incident_light)); let NoH = saturate(dot(normal, half_vector)); let LoH = saturate(dot(incident_light, half_vector)); let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH); let specularIntensity = 1.0; let specular_light = specular(F0, roughness, half_vector, NdotV, NoL, NoH, LoH, specularIntensity); return (specular_light + diffuse) * light.color.rgb * NoL; } fn fetch_point_shadow(light_id: i32, frag_position: vec4, surface_normal: vec3) -> f32 { let light = lights.point_lights[light_id]; // because the shadow maps align with the axes and the frustum planes are at 45 degrees // we can get the worldspace depth by taking the largest absolute axis let surface_to_light = light.position.xyz - frag_position.xyz; let 
surface_to_light_abs = abs(surface_to_light); let distance_to_light = max(surface_to_light_abs.x, max(surface_to_light_abs.y, surface_to_light_abs.z)); // The normal bias here is already scaled by the texel size at 1 world unit from the light. // The texel size increases proportionally with distance from the light so multiplying by // distance to light scales the normal bias to the texel size at the fragment distance. let normal_offset = light.shadow_normal_bias * distance_to_light * surface_normal.xyz; let depth_offset = light.shadow_depth_bias * normalize(surface_to_light.xyz); let offset_position = frag_position.xyz + normal_offset + depth_offset; // similar largest-absolute-axis trick as above, but now with the offset fragment position let frag_ls = light.position.xyz - offset_position.xyz; let abs_position_ls = abs(frag_ls); let major_axis_magnitude = max(abs_position_ls.x, max(abs_position_ls.y, abs_position_ls.z)); // do a full projection // vec4 clip = light.projection * vec4(0.0, 0.0, -major_axis_magnitude, 1.0); // float depth = (clip.z / clip.w); // alternatively do only the necessary multiplications using near/far let proj_r = light.far / (light.near - light.far); let z = -major_axis_magnitude * proj_r + light.near * proj_r; let w = major_axis_magnitude; let depth = z / w; // do the lookup, using HW PCF and comparison // NOTE: Due to the non-uniform control flow above, we must use the Level variant of // textureSampleCompare to avoid undefined behaviour due to some of the fragments in // a quad (2x2 fragments) being processed not being sampled, and this messing with // mip-mapping functionality. The shadow maps have no mipmaps so Level just samples // from LOD 0. 
return textureSampleCompareLevel(point_shadow_textures, point_shadow_textures_sampler, frag_ls, i32(light_id), depth); } fn fetch_directional_shadow(light_id: i32, frag_position: vec4, surface_normal: vec3) -> f32 { let light = lights.directional_lights[light_id]; // The normal bias is scaled to the texel size. let normal_offset = light.shadow_normal_bias * surface_normal.xyz; let depth_offset = light.shadow_depth_bias * light.direction_to_light.xyz; let offset_position = vec4(frag_position.xyz + normal_offset + depth_offset, frag_position.w); let offset_position_clip = light.view_projection * offset_position; if (offset_position_clip.w <= 0.0) { return 1.0; } let offset_position_ndc = offset_position_clip.xyz / offset_position_clip.w; // No shadow outside the orthographic projection volume if (any(offset_position_ndc.xy < vec2(-1.0)) || offset_position_ndc.z < 0.0 || any(offset_position_ndc > vec3(1.0))) { return 1.0; } // compute texture coordinates for shadow lookup, compensating for the Y-flip difference // between the NDC and texture coordinates let flip_correction = vec2(0.5, -0.5); let light_local = offset_position_ndc.xy * flip_correction + vec2(0.5, 0.5); let depth = offset_position_ndc.z; // do the lookup, using HW PCF and comparison // NOTE: Due to non-uniform control flow above, we must use the level variant of the texture // sampler to avoid use of implicit derivatives causing possible undefined behavior. 
return textureSampleCompareLevel(directional_shadow_textures, directional_shadow_textures_sampler, light_local, i32(light_id), depth); } struct FragmentInput { [[builtin(front_facing)]] is_front: bool; [[location(0)]] world_position: vec4; [[location(1)]] world_normal: vec3; [[location(2)]] uv: vec2; }; [[stage(fragment)]] fn fragment(in: FragmentInput) -> [[location(0)]] vec4 { var output_color: vec4 = material.base_color; if ((material.flags & FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u) { output_color = output_color * textureSample(base_color_texture, base_color_sampler, in.uv); } // // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit if ((material.flags & FLAGS_UNLIT_BIT) == 0u) { // TODO use .a for exposure compensation in HDR var emissive: vec4 = material.emissive; if ((material.flags & FLAGS_EMISSIVE_TEXTURE_BIT) != 0u) { emissive = vec4(emissive.rgb * textureSample(emissive_texture, emissive_sampler, in.uv).rgb, 1.0); } // calculate non-linear roughness from linear perceptualRoughness var metallic: f32 = material.metallic; var perceptual_roughness: f32 = material.perceptual_roughness; if ((material.flags & FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT) != 0u) { let metallic_roughness = textureSample(metallic_roughness_texture, metallic_roughness_sampler, in.uv); // Sampling from GLTF standard channels for now metallic = metallic * metallic_roughness.b; perceptual_roughness = perceptual_roughness * metallic_roughness.g; } let roughness = perceptualRoughnessToRoughness(perceptual_roughness); var occlusion: f32 = 1.0; if ((material.flags & FLAGS_OCCLUSION_TEXTURE_BIT) != 0u) { occlusion = textureSample(occlusion_texture, occlusion_sampler, in.uv).r; } var N: vec3 = normalize(in.world_normal); // FIXME: Normal maps need an additional vertex attribute and vertex stage output/fragment stage input // Just use a separate shader for lit with normal maps? 
// # ifdef STANDARDMATERIAL_NORMAL_MAP // vec3 T = normalize(v_WorldTangent.xyz); // vec3 B = cross(N, T) * v_WorldTangent.w; // # endif if ((material.flags & FLAGS_DOUBLE_SIDED_BIT) != 0u) { if (!in.is_front) { N = -N; } // # ifdef STANDARDMATERIAL_NORMAL_MAP // T = gl_FrontFacing ? T : -T; // B = gl_FrontFacing ? B : -B; // # endif } // # ifdef STANDARDMATERIAL_NORMAL_MAP // mat3 TBN = mat3(T, B, N); // N = TBN * normalize(texture(sampler2D(normal_map, normal_map_sampler), v_Uv).rgb * 2.0 - 1.0); // # endif var V: vec3; if (view.view_proj.w.w != 1.0) { // If the projection is not orthographic // Only valid for a perpective projection V = normalize(view.world_position.xyz - in.world_position.xyz); } else { // Ortho view vec V = normalize(vec3(-view.view_proj.x.z, -view.view_proj.y.z, -view.view_proj.z.z)); } // Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886" let NdotV = max(dot(N, V), 0.0001); // Remapping [0,1] reflectance to F0 // See https://google.github.io/filament/Filament.html#materialsystem/parameterization/remapping let reflectance = material.reflectance; let F0 = 0.16 * reflectance * reflectance * (1.0 - metallic) + output_color.rgb * metallic; // Diffuse strength inversely related to metallicity let diffuse_color = output_color.rgb * (1.0 - metallic); let R = reflect(-V, N); // accumulate color var light_accum: vec3 = vec3(0.0); let n_point_lights = i32(lights.n_point_lights); let n_directional_lights = i32(lights.n_directional_lights); for (var i: i32 = 0; i < n_point_lights; i = i + 1) { let light = lights.point_lights[i]; let shadow = fetch_point_shadow(i, in.world_position, in.world_normal); let light_contrib = point_light(in.world_position.xyz, light, roughness, NdotV, N, V, R, F0, diffuse_color); light_accum = light_accum + light_contrib * shadow; } for (var i: i32 = 0; i < n_directional_lights; i = i + 1) { let light = lights.directional_lights[i]; let shadow = fetch_directional_shadow(i, in.world_position, 
in.world_normal); let light_contrib = directional_light(light, roughness, NdotV, N, V, R, F0, diffuse_color); light_accum = light_accum + light_contrib * shadow; } let diffuse_ambient = EnvBRDFApprox(diffuse_color, 1.0, NdotV); let specular_ambient = EnvBRDFApprox(F0, perceptual_roughness, NdotV); output_color = vec4( light_accum + (diffuse_ambient + specular_ambient) * lights.ambient_color.rgb * occlusion + emissive.rgb * output_color.a, output_color.a); // tone_mapping output_color = vec4(reinhard_luminance(output_color.rgb), output_color.a); // Gamma correction. // Not needed with sRGB buffer // output_color.rgb = pow(output_color.rgb, vec3(1.0 / 2.2)); } return output_color; } ```

GLSL:

```glsl #version 310 es #extension GL_EXT_texture_cube_map_array : require precision highp float; precision highp int; struct Vertex { vec3 position; vec3 normal; vec2 uv; }; struct VertexOutput { vec4 clip_position; vec4 world_position; vec3 world_normal; vec2 uv; }; struct PointLight { vec4 color; vec3 position; float inverse_square_range; float radius; float near; float far; float shadow_depth_bias; float shadow_normal_bias; }; struct DirectionalLight { mat4x4 view_projection; vec4 color; vec3 direction_to_light; float shadow_depth_bias; float shadow_normal_bias; }; struct FragmentInput { bool is_front; vec4 world_position; vec3 world_normal; vec2 uv; }; uniform View_block_0Fs { mat4x4 view_proj; vec3 world_position; } _group_0_binding_0; uniform Lights_block_1Fs { PointLight point_lights[10]; DirectionalLight directional_lights[1]; vec4 ambient_color; uint n_point_lights; uint n_directional_lights; } _group_0_binding_1; uniform highp samplerCubeArrayShadow _group_0_binding_2; uniform highp sampler2DArrayShadow _group_0_binding_4; uniform StandardMaterial_block_2Fs { vec4 base_color; vec4 emissive; float perceptual_roughness; float metallic; float reflectance; uint flags; } _group_2_binding_0; uniform highp sampler2D _group_2_binding_1; uniform highp sampler2D _group_2_binding_3; uniform highp sampler2D _group_2_binding_5; uniform highp sampler2D _group_2_binding_7; layout(location = 0) smooth in vec4 _vs2fs_location0; layout(location = 1) smooth in vec3 _vs2fs_location1; layout(location = 2) smooth in vec2 _vs2fs_location2; layout(location = 0) out vec4 _fs2p_location0; float saturate(float value) { return clamp(value, 0.0, 1.0); } float getDistanceAttenuation(float distanceSquare, float inverseRangeSquared) { float factor = (distanceSquare * inverseRangeSquared); float _expr29 = saturate((1.0 - (factor * factor))); float attenuation = (_expr29 * _expr29); return ((attenuation * 1.0) / max(distanceSquare, 0.0001)); } float D_GGX(float roughness, float NoH, vec3 
h) { float oneMinusNoHSquared = (1.0 - (NoH * NoH)); float a = (NoH * roughness); float k = (roughness / (oneMinusNoHSquared + (a * a))); float d = ((k * k) * (1.0 / 3.141592653589793)); return d; } float V_SmithGGXCorrelated(float roughness1, float NoV, float NoL) { float a2_ = (roughness1 * roughness1); float lambdaV = (NoL * sqrt((((NoV - (a2_ * NoV)) * NoV) + a2_))); float lambdaL = (NoV * sqrt((((NoL - (a2_ * NoL)) * NoL) + a2_))); float v1 = (0.5 / (lambdaV + lambdaL)); return v1; } vec3 F_Schlick_vec(vec3 f0_, float f90_, float VoH) { return (f0_ + ((vec3(f90_) - f0_) * pow((1.0 - VoH), 5.0))); } float F_Schlick(float f0_1, float f90_1, float VoH1) { return (f0_1 + ((f90_1 - f0_1) * pow((1.0 - VoH1), 5.0))); } vec3 fresnel(vec3 f0_2, float LoH) { float _expr30 = saturate(dot(f0_2, vec3((50.0 * 0.33)))); vec3 _expr31 = F_Schlick_vec(f0_2, _expr30, LoH); return _expr31; } vec3 specular(vec3 f0_3, float roughness2, vec3 h1, float NoV1, float NoL1, float NoH1, float LoH1, float specularIntensity) { float _expr31 = D_GGX(roughness2, NoH1, h1); float _expr32 = V_SmithGGXCorrelated(roughness2, NoV1, NoL1); vec3 _expr33 = fresnel(f0_3, LoH1); return (((specularIntensity * _expr31) * _expr32) * _expr33); } float Fd_Burley(float roughness3, float NoV2, float NoL2, float LoH2) { float f90_2 = (0.5 + (((2.0 * roughness3) * LoH2) * LoH2)); float _expr34 = F_Schlick(1.0, f90_2, NoL2); float _expr36 = F_Schlick(1.0, f90_2, NoV2); return ((_expr34 * _expr36) * (1.0 / 3.141592653589793)); } vec3 EnvBRDFApprox(vec3 f0_4, float perceptual_roughness1, float NoV3) { vec4 c0_ = vec4(-1.0, -0.0275, -0.572, 0.022); vec4 c1_ = vec4(1.0, 0.0425, 1.04, -0.04); vec4 r = ((perceptual_roughness1 * c0_) + c1_); float a004_ = ((min((r.x * r.x), exp2((-9.28 * NoV3))) * r.x) + r.y); vec2 AB = ((vec2(-1.04, 1.04) * a004_) + r.zw); return ((f0_4 * AB.x) + vec3(AB.y)); } float perceptualRoughnessToRoughness(float perceptualRoughness) { float clampedPerceptualRoughness = 
clamp(perceptualRoughness, 0.089, 1.0); return (clampedPerceptualRoughness * clampedPerceptualRoughness); } vec3 reinhard(vec3 color) { return (color / (vec3(1.0) + color)); } vec3 reinhard_extended(vec3 color1, float max_white) { vec3 numerator = (color1 * (vec3(1.0) + (color1 / vec3((max_white * max_white))))); return (numerator / (vec3(1.0) + color1)); } float luminance(vec3 v) { return dot(v, vec3(0.2126, 0.7152, 0.0722)); } vec3 change_luminance(vec3 c_in, float l_out) { float _expr25 = luminance(c_in); return (c_in * (l_out / _expr25)); } vec3 reinhard_luminance(vec3 color2) { float _expr24 = luminance(color2); float l_new = (_expr24 / (1.0 + _expr24)); vec3 _expr28 = change_luminance(color2, l_new); return _expr28; } vec3 reinhard_extended_luminance(vec3 color3, float max_white_l) { float _expr25 = luminance(color3); float numerator = (_expr25 * (1.0 + (_expr25 / (max_white_l * max_white_l)))); float l_new = (numerator / (1.0 + _expr25)); vec3 _expr34 = change_luminance(color3, l_new); return _expr34; } vec3 point_light(vec3 world_position, PointLight light, float roughness4, float NdotV, vec3 N1, vec3 V1, vec3 R, vec3 F0_, vec3 diffuseColor) { vec3 L = vec3(0.0, 0.0, 0.0); vec3 H = vec3(0.0, 0.0, 0.0); float NoL3 = 0.0; float NoH2 = 0.0; float LoH3 = 0.0; vec3 light_to_frag = (light.position.xyz - world_position.xyz); float distance_square = dot(light_to_frag, light_to_frag); float _expr38 = getDistanceAttenuation(distance_square, light.inverse_square_range); vec3 centerToRay = ((dot(light_to_frag, R) * R) - light_to_frag); float _expr46 = saturate((light.radius * inversesqrt(dot(centerToRay, centerToRay)))); vec3 closestPoint = (light_to_frag + (centerToRay * _expr46)); float LspecLengthInverse = inversesqrt(dot(closestPoint, closestPoint)); float _expr56 = saturate((roughness4 + ((light.radius * 0.5) * LspecLengthInverse))); float normalizationFactor = (roughness4 / _expr56); float specularIntensity1 = (normalizationFactor * normalizationFactor); L = 
(closestPoint * LspecLengthInverse); vec3 _expr61 = L; H = normalize((_expr61 + V1)); vec3 _expr65 = L; float _expr67 = saturate(dot(N1, _expr65)); NoL3 = _expr67; vec3 _expr69 = H; float _expr71 = saturate(dot(N1, _expr69)); NoH2 = _expr71; vec3 _expr73 = L; vec3 _expr74 = H; float _expr76 = saturate(dot(_expr73, _expr74)); LoH3 = _expr76; vec3 _expr78 = H; float _expr79 = NoL3; float _expr80 = NoH2; float _expr81 = LoH3; vec3 _expr82 = specular(F0_, roughness4, _expr78, NdotV, _expr79, _expr80, _expr81, specularIntensity1); L = normalize(light_to_frag); vec3 _expr84 = L; H = normalize((_expr84 + V1)); vec3 _expr87 = L; float _expr89 = saturate(dot(N1, _expr87)); NoL3 = _expr89; vec3 _expr90 = H; float _expr92 = saturate(dot(N1, _expr90)); NoH2 = _expr92; vec3 _expr93 = L; vec3 _expr94 = H; float _expr96 = saturate(dot(_expr93, _expr94)); LoH3 = _expr96; float _expr97 = NoL3; float _expr98 = LoH3; float _expr99 = Fd_Burley(roughness4, NdotV, _expr97, _expr98); vec3 diffuse = (diffuseColor * _expr99); float _expr105 = NoL3; return (((diffuse + _expr82) * light.color.xyz) * (_expr38 * _expr105)); } vec3 directional_light(DirectionalLight light1, float roughness5, float NdotV1, vec3 normal, vec3 view, vec3 R1, vec3 F0_1, vec3 diffuseColor1) { vec3 incident_light = light1.direction_to_light.xyz; vec3 half_vector = normalize((incident_light + view)); float _expr36 = saturate(dot(normal, incident_light)); float _expr38 = saturate(dot(normal, half_vector)); float _expr40 = saturate(dot(incident_light, half_vector)); float _expr41 = Fd_Burley(roughness5, NdotV1, _expr36, _expr40); vec3 diffuse = (diffuseColor1 * _expr41); vec3 _expr44 = specular(F0_1, roughness5, half_vector, NdotV1, _expr36, _expr38, _expr40, 1.0); return (((_expr44 + diffuse) * light1.color.xyz) * _expr36); } float fetch_point_shadow(int light_id, vec4 frag_position, vec3 surface_normal) { PointLight light2 = _group_0_binding_1.point_lights[light_id]; vec3 surface_to_light = (light2.position.xyz - 
frag_position.xyz); vec3 surface_to_light_abs = abs(surface_to_light); float distance_to_light = max(surface_to_light_abs.x, max(surface_to_light_abs.y, surface_to_light_abs.z)); vec3 normal_offset = ((light2.shadow_normal_bias * distance_to_light) * surface_normal.xyz); vec3 depth_offset = (light2.shadow_depth_bias * normalize(surface_to_light.xyz)); vec3 offset_position = ((frag_position.xyz + normal_offset) + depth_offset); vec3 frag_ls = (light2.position.xyz - offset_position.xyz); vec3 abs_position_ls = abs(frag_ls); float w = max(abs_position_ls.x, max(abs_position_ls.y, abs_position_ls.z)); float proj_r = (light2.far / (light2.near - light2.far)); float z = (((- w) * proj_r) + (light2.near * proj_r)); float depth = (z / w); float _expr72 = textureGrad(_group_0_binding_2, vec5(frag_ls, int(light_id), depth), vec2(0,0), vec2(0,0)); return _expr72; } float fetch_directional_shadow(int light_id1, vec4 frag_position1, vec3 surface_normal1) { DirectionalLight light3 = _group_0_binding_1.directional_lights[light_id1]; vec3 normal_offset = (light3.shadow_normal_bias * surface_normal1.xyz); vec3 depth_offset = (light3.shadow_depth_bias * light3.direction_to_light.xyz); vec4 offset_position = vec4(((frag_position1.xyz + normal_offset) + depth_offset), frag_position1.w); vec4 offset_position_clip = (light3.view_projection * offset_position); if ((offset_position_clip.w <= 0.0)) { return 1.0; } vec3 offset_position_ndc = (offset_position_clip.xyz / vec3(offset_position_clip.w)); if (((any(lessThan(offset_position_ndc.xy,vec2(-1.0))) || (offset_position_ndc.z < 0.0)) || any(greaterThan(offset_position_ndc,vec3(1.0))))) { return 1.0; } vec2 flip_correction = vec2(0.5, -0.5); vec2 light_local = ((offset_position_ndc.xy * flip_correction) + vec2(0.5, 0.5)); float depth = offset_position_ndc.z; float _expr77 = textureGrad(_group_0_binding_4, vec4(light_local, int(light_id1), depth), vec2(0,0), vec2(0,0)); return _expr77; } void main() { FragmentInput in1 = 
FragmentInput(gl_FrontFacing, _vs2fs_location0, _vs2fs_location1, _vs2fs_location2); vec4 output_color = vec4(0.0, 0.0, 0.0, 0.0); vec4 emissive = vec4(0.0, 0.0, 0.0, 0.0); float metallic = 0.0; float perceptual_roughness = 0.0; float occlusion = 0.0; vec3 N = vec3(0.0, 0.0, 0.0); vec3 V = vec3(0.0, 0.0, 0.0); vec3 light_accum = vec3(0.0, 0.0, 0.0); int i = 0; int i1 = 0; vec4 _expr25 = _group_2_binding_0.base_color; output_color = _expr25; uint _expr28 = _group_2_binding_0.flags; if (((_expr28 & 1u) != 0u)) { vec4 _expr32 = output_color; vec4 _expr34 = texture(_group_2_binding_1, vec2(in1.uv)); output_color = (_expr32 * _expr34); } uint _expr37 = _group_2_binding_0.flags; if (((_expr37 & 32u) == 0u)) { vec4 _expr42 = _group_2_binding_0.emissive; emissive = _expr42; uint _expr45 = _group_2_binding_0.flags; if (((_expr45 & 2u) != 0u)) { vec4 _expr49 = emissive; vec4 _expr52 = texture(_group_2_binding_3, vec2(in1.uv)); emissive = vec4((_expr49.xyz * _expr52.xyz), 1.0); } float _expr58 = _group_2_binding_0.metallic; metallic = _expr58; float _expr61 = _group_2_binding_0.perceptual_roughness; perceptual_roughness = _expr61; uint _expr64 = _group_2_binding_0.flags; if (((_expr64 & 4u) != 0u)) { vec4 metallic_roughness = texture(_group_2_binding_5, vec2(in1.uv)); float _expr70 = metallic; metallic = (_expr70 * metallic_roughness.z); float _expr73 = perceptual_roughness; perceptual_roughness = (_expr73 * metallic_roughness.y); } float _expr76 = perceptual_roughness; float _expr77 = perceptualRoughnessToRoughness(_expr76); occlusion = 1.0; uint _expr81 = _group_2_binding_0.flags; if (((_expr81 & 8u) != 0u)) { vec4 _expr86 = texture(_group_2_binding_7, vec2(in1.uv)); occlusion = _expr86.x; } N = normalize(in1.world_normal); uint _expr92 = _group_2_binding_0.flags; if (((_expr92 & 16u) != 0u)) { if ((! 
in1.is_front)) { vec3 _expr98 = N; N = (- _expr98); } } vec4 _expr103 = _group_0_binding_0.view_proj[3]; if ((_expr103.w != 1.0)) { vec3 _expr108 = _group_0_binding_0.world_position; V = normalize((_expr108.xyz - in1.world_position.xyz)); } else { vec4 _expr116 = _group_0_binding_0.view_proj[0]; vec4 _expr121 = _group_0_binding_0.view_proj[1]; vec4 _expr126 = _group_0_binding_0.view_proj[2]; V = normalize(vec3((- _expr116.z), (- _expr121.z), (- _expr126.z))); } vec3 _expr131 = N; vec3 _expr132 = V; float NdotV2 = max(dot(_expr131, _expr132), 0.0001); float reflectance = _group_2_binding_0.reflectance; float _expr142 = metallic; vec4 _expr145 = output_color; float _expr147 = metallic; vec3 F0_2 = (vec3((((0.16 * reflectance) * reflectance) * (1.0 - _expr142))) + (_expr145.xyz * _expr147)); vec4 _expr151 = output_color; float _expr154 = metallic; vec3 diffuse_color = (_expr151.xyz * (1.0 - _expr154)); vec3 _expr157 = V; vec3 _expr159 = N; vec3 R2 = reflect((- _expr157), _expr159); light_accum = vec3(0.0); uint _expr165 = _group_0_binding_1.n_point_lights; int n_point_lights = int(_expr165); uint _expr168 = _group_0_binding_1.n_directional_lights; int n_directional_lights = int(_expr168); i = 0; bool loop_init = true; while(true) { if (!loop_init) { int _expr174 = i; i = (_expr174 + 1); } loop_init = false; int _expr172 = i; if ((_expr172 < n_point_lights)) { } else { break; } int _expr178 = i; PointLight light4 = _group_0_binding_1.point_lights[_expr178]; int _expr181 = i; float _expr184 = fetch_point_shadow(_expr181, in1.world_position, in1.world_normal); vec3 _expr187 = N; vec3 _expr188 = V; vec3 _expr189 = point_light(in1.world_position.xyz, light4, _expr77, NdotV2, _expr187, _expr188, R2, F0_2, diffuse_color); vec3 _expr190 = light_accum; light_accum = (_expr190 + (_expr189 * _expr184)); } i1 = 0; bool loop_init1 = true; while(true) { if (!loop_init1) { int _expr197 = i1; i1 = (_expr197 + 1); } loop_init1 = false; int _expr195 = i1; if ((_expr195 < 
n_directional_lights)) { } else { break; } int _expr201 = i1; DirectionalLight light5 = _group_0_binding_1.directional_lights[_expr201]; int _expr204 = i1; float _expr207 = fetch_directional_shadow(_expr204, in1.world_position, in1.world_normal); vec3 _expr208 = N; vec3 _expr209 = V; vec3 _expr210 = directional_light(light5, _expr77, NdotV2, _expr208, _expr209, R2, F0_2, diffuse_color); vec3 _expr211 = light_accum; light_accum = (_expr211 + (_expr210 * _expr207)); } vec3 _expr215 = EnvBRDFApprox(diffuse_color, 1.0, NdotV2); float _expr216 = perceptual_roughness; vec3 _expr217 = EnvBRDFApprox(F0_2, _expr216, NdotV2); vec3 _expr218 = light_accum; vec4 _expr221 = _group_0_binding_1.ambient_color; float _expr224 = occlusion; vec4 _expr227 = emissive; vec4 _expr229 = output_color; vec4 _expr233 = output_color; output_color = vec4(((_expr218 + (((_expr215 + _expr217) * _expr221.xyz) * _expr224)) + (_expr227.xyz * _expr229.w)), _expr233.w); vec4 _expr236 = output_color; vec3 _expr238 = reinhard_luminance(_expr236.xyz); vec4 _expr239 = output_color; output_color = vec4(_expr238, _expr239.w); } vec4 _expr242 = output_color; _fs2p_location0 = _expr242; return; } ```
zicklag commented 3 years ago

There, found a minimal example:

WGSL:

[[group(0), binding(4)]]
var point_shadow_textures: texture_depth_cube_array;
[[group(0), binding(5)]]
var point_shadow_textures_sampler: sampler_comparison;

[[stage(fragment)]]
fn fragment() -> [[location(0)]] vec4<f32> {
    let frag_ls = vec4<f32>(1., 1., 2., 1.).xyz;
    let a = textureSampleCompareLevel(point_shadow_textures, point_shadow_textures_sampler, frag_ls, i32(1), 1.);

    return vec4<f32>(a, 1., 1., 1.);
}

GLSL:

#version 310 es
#extension GL_EXT_texture_cube_map_array : require

precision highp float;
precision highp int;

uniform highp samplerCubeArrayShadow _group_0_binding_4;

layout(location = 0) out vec4 _fs2p_location0;

void main() {
    vec3 frag_ls = vec4(1.0, 1.0, 2.0, 1.0).xyz;
    float a = textureGrad(_group_0_binding_4, vec5(frag_ls, int(1), 1.0), vec2(0,0), vec2(0,0));
    _fs2p_location0 = vec4(a, 1.0, 1.0, 1.0);
    return;
}
zicklag commented 3 years ago

Uh oh, looking at the GLES 3.1 reference card it doesn't look like there's a textureGrad function that works on cube array shadows. I have no idea what that means or how to work around it. :man_shrugging:

Edit: It looks like textureGrad on CubeArrayShadows does work with the extension: EXT_texture_cube_map_array.