"use strict";
/**
* Copyright (c) 2019-2024 mol* contributors, licensed under MIT, See LICENSE file for more info.
*
* @author Alexander Rose <alexander.rose@weirdbyte.de>
* @author Áron Samuel Kovács <aron.kovacs@mail.muni.cz>
* @author Ludovic Autin <ludovic.autin@gmail.com>
* @author Gianluca Tomasello <giagitom@gmail.com>
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.ssao_frag = void 0;
exports.ssao_frag = `
precision highp float;
precision highp int;
precision highp sampler2D;
uniform sampler2D tDepth;
uniform sampler2D tDepthHalf;
uniform sampler2D tDepthQuarter;
uniform sampler2D tDepthTransparent;
uniform sampler2D tDepthHalfTransparent;
uniform sampler2D tDepthQuarterTransparent;
uniform int uTransparencyFlag;
uniform vec2 uTexSize;
uniform vec4 uBounds;
uniform vec3 uSamples[dNSamples];
uniform mat4 uProjection;
uniform mat4 uInvProjection;
uniform float uLevelRadius[dLevels];
uniform float uLevelBias[dLevels];
uniform float uNearThreshold;
uniform float uFarThreshold;
uniform float uRadius;
uniform float uBias;
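// quintic smoothstep (Perlin's smootherstep), used below to attenuate occlusion with sample distance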
float smootherstep(float edge0, float edge1, float x) {
x = clamp((x - edge0) / (edge1 - edge0), 0.0, 1.0);
return x * x * x * (x * (x * 6.0 - 15.0) + 10.0);
}
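// cheap sin/fract hash producing a pseudo-random value in [0, 1)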
float noise(const in vec2 coords) {
float a = 12.9898;
float b = 78.233;
float c = 43758.5453;
float dt = dot(coords, vec2(a,b));
float sn = mod(dt, PI);
return abs(fract(sin(sn) * c)); // is abs necessary?
}
vec2 getNoiseVec2(const in vec2 coords) {
return vec2(noise(coords), noise(coords + vec2(PI, 2.71828)));
}
bool isBackground(const in float depth) {
return depth > 0.999; // handle precision issues with packed depth
}
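// depth of the shaded layer: transparent-layer depth when transparentFlag == 1, opaque depth otherwise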
float getDepth(const in vec2 coords, const in int transparentFlag) {
vec2 c = vec2(clamp(coords.x, uBounds.x, uBounds.z), clamp(coords.y, uBounds.y, uBounds.w));
if (transparentFlag == 1){
return unpackRGBAToDepthWithAlpha(texture2D(tDepthTransparent, c)).x;
} else {
// depthTextureSupport: read the native depth texture, otherwise unpack RGBA-encoded depth
#ifdef depthTextureSupport
return texture2D(tDepth, c).r;
#else
return unpackRGBAToDepth(texture2D(tDepth, c));
#endif
}
}
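// vec2(depth, alpha) from the full-resolution transparent depth texture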
vec2 getDepthTransparentWithAlpha(const in vec2 coords){
vec2 c = vec2(clamp(coords.x, uBounds.x, uBounds.z), clamp(coords.y, uBounds.y, uBounds.w));
return unpackRGBAToDepthWithAlpha(texture2D(tDepthTransparent, c));
}
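// opaque depth with a distance-based resolution fallback: samples far from the shaded pixel read the half- or quarter-resolution depth copies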
float getMappedDepth(const in vec2 coords, const in vec2 selfCoords) {
vec2 c = vec2(clamp(coords.x, uBounds.x, uBounds.z), clamp(coords.y, uBounds.y, uBounds.w));
float d = distance(coords, selfCoords);
// depthTextureSupport: read the native depth textures, otherwise unpack RGBA-encoded depth
#ifdef depthTextureSupport
if (d > dQuarterThreshold) {
return texture2D(tDepthQuarter, c).r;
} else if (d > dHalfThreshold) {
return texture2D(tDepthHalf, c).r;
} else {
return texture2D(tDepth, c).r;
}
#else
if (d > dQuarterThreshold) {
return unpackRGBAToDepth(texture2D(tDepthQuarter, c));
} else if (d > dHalfThreshold) {
return unpackRGBAToDepth(texture2D(tDepthHalf, c));
} else {
return unpackRGBAToDepth(texture2D(tDepth, c));
}
#endif
}
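// same distance-based fallback for the transparent depth, returning vec2(depth, alpha)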
vec2 getMappedDepthTransparentWithAlpha(const in vec2 coords, const in vec2 selfCoords) {
vec2 c = vec2(clamp(coords.x, uBounds.x, uBounds.z), clamp(coords.y, uBounds.y, uBounds.w));
float d = distance(coords, selfCoords);
if (d > dQuarterThreshold) {
return unpackRGBAToDepthWithAlpha(texture2D(tDepthQuarterTransparent, c));
} else if (d > dHalfThreshold) {
return unpackRGBAToDepthWithAlpha(texture2D(tDepthHalfTransparent, c));
} else {
return unpackRGBAToDepthWithAlpha(texture2D(tDepthTransparent, c));
}
}
// adapted from https://gist.github.com/bgolus/a07ed65602c009d5e2f753826e8078a0
vec3 viewNormalAtPixelPositionAccurate(const in vec2 vpos, const in int transparentFlag) {
// current pixel's depth
float c = getDepth(vpos, transparentFlag);
// get current pixel's view space position
vec3 viewSpacePos_c = screenSpaceToViewSpace(vec3(vpos, c), uInvProjection);
// get view space position at 1 pixel offsets in each major direction
vec3 viewSpacePos_l = screenSpaceToViewSpace(vec3(vpos + vec2(-1.0, 0.0) / uTexSize, getDepth(vpos + vec2(-1.0, 0.0) / uTexSize, transparentFlag)), uInvProjection);
vec3 viewSpacePos_r = screenSpaceToViewSpace(vec3(vpos + vec2( 1.0, 0.0) / uTexSize, getDepth(vpos + vec2( 1.0, 0.0) / uTexSize, transparentFlag)), uInvProjection);
vec3 viewSpacePos_d = screenSpaceToViewSpace(vec3(vpos + vec2( 0.0,-1.0) / uTexSize, getDepth(vpos + vec2( 0.0,-1.0) / uTexSize, transparentFlag)), uInvProjection);
vec3 viewSpacePos_u = screenSpaceToViewSpace(vec3(vpos + vec2( 0.0, 1.0) / uTexSize, getDepth(vpos + vec2( 0.0, 1.0) / uTexSize, transparentFlag)), uInvProjection);
// get the difference between the current and each offset position
vec3 l = viewSpacePos_c - viewSpacePos_l;
vec3 r = viewSpacePos_r - viewSpacePos_c;
vec3 d = viewSpacePos_c - viewSpacePos_d;
vec3 u = viewSpacePos_u - viewSpacePos_c;
// get depth values at 1 & 2 pixels offsets from current along the horizontal axis
vec4 H = vec4(
getDepth(vpos + vec2(-1.0, 0.0) / uTexSize, transparentFlag),
getDepth(vpos + vec2( 1.0, 0.0) / uTexSize, transparentFlag),
getDepth(vpos + vec2(-2.0, 0.0) / uTexSize, transparentFlag),
getDepth(vpos + vec2( 2.0, 0.0) / uTexSize, transparentFlag)
);
// get depth values at 1 & 2 pixels offsets from current along the vertical axis
vec4 V = vec4(
getDepth(vpos + vec2(0.0,-1.0) / uTexSize, transparentFlag),
getDepth(vpos + vec2(0.0, 1.0) / uTexSize, transparentFlag),
getDepth(vpos + vec2(0.0,-2.0) / uTexSize, transparentFlag),
getDepth(vpos + vec2(0.0, 2.0) / uTexSize, transparentFlag)
);
// current pixel's depth difference from slope of offset depth samples
// differs from original article because we're using non-linear depth values
// see article's comments
vec2 he = abs((2.0 * H.xy - H.zw) - c);
vec2 ve = abs((2.0 * V.xy - V.zw) - c);
// pick horizontal and vertical diff with the smallest depth difference from slopes
vec3 hDeriv = he.x < he.y ? l : r;
vec3 vDeriv = ve.x < ve.y ? d : u;
// get view space normal from the cross product of the best derivatives
vec3 viewNormal = normalize(cross(hDeriv, vDeriv));
return viewNormal;
}
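// view-space size of one pixel at the given depth, used to decide which AO levels are active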
float getPixelSize(const in vec2 coords, const in float depth) {
vec3 viewPos0 = screenSpaceToViewSpace(vec3(coords, depth), uInvProjection);
vec3 viewPos1 = screenSpaceToViewSpace(vec3(coords + vec2(1.0, 0.0) / uTexSize, depth), uInvProjection);
return distance(viewPos0, viewPos1);
}
// StarCraft II Ambient Occlusion by [Filion and McNaughton 2008]
void main(void) {
vec2 invTexSize = 1.0 / uTexSize;
vec2 selfCoords = gl_FragCoord.xy * invTexSize;
float selfDepth = getDepth(selfCoords, uTransparencyFlag);
vec2 selfPackedDepth = packUnitIntervalToRG(selfDepth);
if (isBackground(selfDepth)) {
gl_FragColor = vec4(packUnitIntervalToRG(1.0), selfPackedDepth);
return;
}
vec3 selfViewNormal = viewNormalAtPixelPositionAccurate(selfCoords, uTransparencyFlag);
vec3 selfViewPos = screenSpaceToViewSpace(vec3(selfCoords, selfDepth), uInvProjection);
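// build a randomly rotated tangent frame (TBN) to orient the hemisphere sample kernel around the surface normal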
vec3 randomVec = normalize(vec3(getNoiseVec2(selfCoords) * 2.0 - 1.0, 0.0));
vec3 tangent = normalize(randomVec - selfViewNormal * dot(randomVec, selfViewNormal));
vec3 bitangent = cross(selfViewNormal, tangent);
mat3 TBN = mat3(tangent, bitangent, selfViewNormal);
float occlusion = 0.0;
float pixelSize = getPixelSize(selfCoords, selfDepth);
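// multi-scale AO: evaluate the sample kernel at each level radius active for this pixel footprint and keep the strongest level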
for(int l = 0; l < dLevels; l++) {
// TODO: smooth transition
if (pixelSize * uNearThreshold > uLevelRadius[l]) continue;
if (pixelSize * uFarThreshold < uLevelRadius[l]) continue;
float levelOcclusion = 0.0;
for(int i = 0; i < dNSamples; i++) {
// get sample position:
vec3 sampleViewPos = TBN * uSamples[i];
sampleViewPos = selfViewPos + sampleViewPos * uLevelRadius[l];
// project sample position:
vec4 offset = vec4(sampleViewPos, 1.0);
offset = uProjection * offset;
offset.xyz = (offset.xyz / offset.w) * 0.5 + 0.5;
// get sample depth:
float sampleOcc = 0.0;
if (uTransparencyFlag == 1) {
float sampleDepth = getMappedDepth(offset.xy, selfCoords);
float sampleViewZ = screenSpaceToViewSpace(vec3(offset.xy, sampleDepth), uInvProjection).z;
sampleOcc = step(sampleViewPos.z + 0.025, sampleViewZ) * smootherstep(0.0, 1.0, uLevelRadius[l] / abs(selfViewPos.z - sampleViewZ)) * uLevelBias[l];
}
vec2 sampleDepthWithAlpha = getMappedDepthTransparentWithAlpha(offset.xy, selfCoords);
if (!isBackground(sampleDepthWithAlpha.x)) {
float sampleViewZ = screenSpaceToViewSpace(vec3(offset.xy, sampleDepthWithAlpha.x), uInvProjection).z;
sampleOcc = max(sampleOcc, step(sampleViewPos.z + 0.025, sampleViewZ) * smootherstep(0.0, 1.0, uLevelRadius[l] / abs(selfViewPos.z - sampleViewZ)) * uLevelBias[l] * sampleDepthWithAlpha.y);
}
levelOcclusion += sampleOcc;
}
occlusion = max(occlusion, levelOcclusion);
}
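// single-radius AO pass (uRadius); its samples add to the same occlusion sum as the multi-scale term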
for(int i = 0; i < dNSamples; i++) {
vec3 sampleViewPos = TBN * uSamples[i];
sampleViewPos = selfViewPos + sampleViewPos * uRadius;
vec4 offset = vec4(sampleViewPos, 1.0);
offset = uProjection * offset;
offset.xyz = (offset.xyz / offset.w) * 0.5 + 0.5;
float sampleOcc = 0.0;
if (uTransparencyFlag == 1) {
// NOTE: using getMappedDepth here causes issues on some mobile devices
float sampleDepth = getDepth(offset.xy, 0);
float sampleViewZ = screenSpaceToViewSpace(vec3(offset.xy, sampleDepth), uInvProjection).z;
sampleOcc = step(sampleViewPos.z + 0.025, sampleViewZ) * smootherstep(0.0, 1.0, uRadius / abs(selfViewPos.z - sampleViewZ));
}
vec2 sampleDepthWithAlpha = getDepthTransparentWithAlpha(offset.xy);
if (!isBackground(sampleDepthWithAlpha.x)) {
float sampleViewZ = screenSpaceToViewSpace(vec3(offset.xy, sampleDepthWithAlpha.x), uInvProjection).z;
sampleOcc = max(sampleOcc, step(sampleViewPos.z + 0.025, sampleViewZ) * smootherstep(0.0, 1.0, uRadius / abs(selfViewPos.z - sampleViewZ)) * sampleDepthWithAlpha.y);
}
occlusion += sampleOcc;
}
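// map the accumulated occlusion to an ambient factor, clamp away from zero and pack it together with this pixel's depth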
occlusion = 1.0 - (uBias * occlusion / float(dNSamples));
vec2 packedOcclusion = packUnitIntervalToRG(clamp(occlusion, 0.01, 1.0));
gl_FragColor = vec4(packedOcclusion, selfPackedDepth);
}
`;
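// Usage sketch (an assumption for illustration, not mol*'s actual shader assembly): the exported
// string is a fragment-shader template. The macros dNSamples, dLevels, dHalfThreshold,
// dQuarterThreshold and PI, plus helpers such as screenSpaceToViewSpace, unpackRGBAToDepth,
// unpackRGBAToDepthWithAlpha and packUnitIntervalToRG, are not defined above and must be
// prepended before compilation. Given a WebGL context `gl`, hypothetical define values and a
// placeholder `helperChunks` string of the required helper functions, it could look like:
//
//   const defines = [
//       '#define dNSamples 32',
//       '#define dLevels 3',
//       '#define dHalfThreshold 0.05',
//       '#define dQuarterThreshold 0.1',
//       '#define PI 3.14159265'
//   ].join('\n');
//   const fragSrc = defines + '\n' + helperChunks + exports.ssao_frag;
//   const shader = gl.createShader(gl.FRAGMENT_SHADER);
//   gl.shaderSource(shader, fragSrc);
//   gl.compileShader(shader);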