Naxela opened this issue 2 years ago
// uniform variables
uniform sampler2D tex;           // source texture
uniform sampler2D depthTex;      // depth texture
uniform mat4 viewMatrix;         // view matrix
uniform mat4 projectionMatrix;   // projection matrix
uniform vec2 bufferSize;         // size of the screen-space buffer
uniform float maxRayLength;      // maximum length of the reflection ray
uniform int numSamples;          // number of samples to take
uniform float roughness;         // roughness of the surface
uniform float metalness;         // metalness of the surface

// varyings
varying vec2 vTexCoord;          // texcoord of the fragment

// reconstruct the view-space position of a fragment from its texcoord and depth
vec3 getViewPosition(vec2 texCoord, float depth) {
    vec2 screenPos = texCoord * bufferSize;
    // remap to clip space ([-1, 1] on all axes, assuming a standard [0, 1] depth buffer)
    vec4 clipPos = vec4((screenPos - 0.5 * bufferSize) / (0.5 * bufferSize), depth * 2.0 - 1.0, 1.0);
    // unprojecting from clip space back to view space requires the inverse projection matrix
    vec4 viewPos = inverse(projectionMatrix) * clipPos;
    return viewPos.xyz / viewPos.w;
}

// main function
void main() {
    // sample the source texture
    vec4 color = texture(tex, vTexCoord);

    // sample the depth texture
    float depth = texture(depthTex, vTexCoord).r;

    // calculate the view position
    vec3 viewPos = getViewPosition(vTexCoord, depth);

    // calculate the reflection direction
    vec3 viewNormal = normalize(cross(dFdx(viewPos), dFdy(viewPos)));
    vec3 viewReflect = reflect(viewPos, viewNormal);

    // generate a set of samples
    vec3 reflectColor = vec3(0.0);
    for (int i = 0; i < numSamples; i++) {
        // generate a pseudo-random sample position (neighbouring depth values serve as the random source)
        float rnd = float(i + 1) / float(numSamples);
        vec2 rndCoord = vTexCoord + rnd * roughness * texelFetch(depthTex, ivec2(vTexCoord * bufferSize) + ivec2(i), 0).rg;

        // calculate the reflection direction for the sample
        float rndDepth = texelFetch(depthTex, ivec2(rndCoord * bufferSize), 0).r;
        vec3 rndViewPos = getViewPosition(rndCoord, rndDepth);
        vec3 rndViewNormal = normalize(cross(dFdx(rndViewPos), dFdy(rndViewPos)));
        vec3 rndViewReflect = reflect(rndViewPos, rndViewNormal);

        // trace the reflection ray
        vec3 reflectDir = normalize(rndViewReflect - viewPos);
        float rayLength = maxRayLength;
        for (float t = 0.01; t < rayLength; t += 0.01) {
            vec3 rayPos = viewPos + t * reflectDir;

            // project the ray position to screen space and sample the depth texture to see if we hit an object
            vec2 rayUV = rayPos.xy / rayPos.z * 0.5 + 0.5;
            float depth = texelFetch(depthTex, ivec2(rayUV * bufferSize), 0).r;
            if (depth < rayPos.z) {
                // we hit an object, so calculate the surface properties
                vec3 surfaceNormal = normalize(cross(dFdx(rayPos), dFdy(rayPos)));
                vec3 surfaceReflect = reflect(-reflectDir, surfaceNormal);
                float surfaceMetalness = 0.5 * dot(surfaceNormal, reflectDir) + 0.5;
                surfaceMetalness = smoothstep(metalness - 0.1, metalness + 0.1, surfaceMetalness);

                // add the reflection contribution to the color
                reflectColor += surfaceMetalness * texture(tex, rayUV).rgb;
                break;
            }
        }
    }
    reflectColor /= float(numSamples);

    // output the final color
    gl_FragColor = vec4(color.rgb + reflectColor, 1.0);
}
This implementation assumes that the following input is interpolated from the vertex shader:

- vTexCoord: the texture coordinate of the fragment

And that the following uniforms are provided:

- tex: a texture containing the source image
- depthTex: a texture containing the depth values of the scene
- viewMatrix: the view matrix of the camera
- projectionMatrix: the projection matrix of the camera
- bufferSize: the size of the screen-space buffer (a vec2)
- maxRayLength: the maximum length of the reflection ray (a positive float)
- numSamples: the number of samples to take (a positive integer)
- roughness: the roughness of the surface (a value between 0.0 and 1.0)
- metalness: the metalness of the surface (a value between 0.0 and 1.0)

This implementation uses a stochastic approach: multiple samples are taken around the pixel and averaged to reduce noise. It also uses the depth texture to determine whether the reflection ray hits an object in the scene; if it does, it calculates the surface properties (normal and metalness) and adds the reflection contribution to the final color.
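For the stochastic part, the depth texture's extra channels are a weak source of randomness. A common alternative is a small screen-space hash whose spread is scaled by roughness. The sketch below is only an illustration of that idea, not part of the shader above: the hash21 and jitterOffset helpers and their constants are hypothetical, and it assumes the same roughness and bufferSize uniforms.

// hypothetical helper: cheap per-pixel pseudo-random value in [0, 1)
float hash21(vec2 p) {
    // common fract/sin hash; adequate for dithering, not for anything rigorous
    return fract(sin(dot(p, vec2(12.9898, 78.233))) * 43758.5453);
}

// hypothetical helper: screen-space offset for sample i, spread by surface roughness
vec2 jitterOffset(vec2 texCoord, int i) {
    vec2 pixel = texCoord * bufferSize + float(i) * vec2(17.0, 59.0); // decorrelate samples
    float angle  = 6.2831853 * hash21(pixel);                          // random direction
    float radius = roughness * hash21(pixel.yx);                       // random distance, scaled by roughness
    return radius * vec2(cos(angle), sin(angle)) / bufferSize;         // back to texcoord units
}

// usage inside the sample loop, replacing the texelFetch-based offset:
// vec2 rndCoord = vTexCoord + jitterOffset(vTexCoord, i);

Scaling the jitter radius by roughness keeps mirror-like surfaces sharp while letting rough surfaces produce the blurred, glossy reflections the averaging step is meant to resolve.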
A working implementation of Stochastic Screen-Space Reflections will release the $100 bounty on Bountysource.
Armory3D already has a working and fast SSR solution that performs well on a wide range of devices. This implementation doesn't necessarily have to be as performant; the focus here is on high-quality visuals instead.
Suggested implementation (Optional):
Implementation material (Inspiration):
Bountysource Link: https://app.bountysource.com/issues/111898137-100-bounty-stochastic-screen-space-reflections