@woosh/meep-engine

Pure JavaScript game engine. Fully featured and production ready.

import { Matrix4, Vector2 } from 'three';

/**
 * Scalable Ambient Occlusion (SAO) shader.
 * Estimates a screen-space occlusion term from a depth texture (and an
 * optional view-space normal texture) and writes it to a single float
 * output, `out_occlusion`.
 */
const SAOShader = {

	defines: {
		'NUM_SAMPLES': 7,
		'NUM_RINGS': 4,
		'NORMAL_TEXTURE': 0,
		'DIFFUSE_TEXTURE': 0,
		'DEPTH_PACKING': 1,
		'PERSPECTIVE_CAMERA': 1
	},

	uniforms: {

		'tDepth': { value: null },
		'tNormal': { value: null },
		'size': { value: new Vector2( 512, 512 ) },

		'cameraNear': { value: 1 },
		'cameraFar': { value: 100 },
		'cameraProjectionMatrix': { value: new Matrix4() },
		'cameraInverseProjectionMatrix': { value: new Matrix4() },

		'intensity': { value: 0.1 },
		'bias': { value: 0.5 },

		'minResolution': { value: 0.0 },
		'kernelRadius': { value: 100.0 },
		'randomSeed': { value: 0.0 }

	},

	vertexShader: /* glsl */`

		varying vec2 vUv;

		void main() {

			vUv = uv;
			gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );

		}`,

	fragmentShader: /* glsl */`

		#include <common>

		varying vec2 vUv;

		uniform sampler2D tDepth;

		#if NORMAL_TEXTURE == 1
		uniform sampler2D tNormal;
		#endif

		uniform float cameraNear;
		uniform float cameraFar;
		uniform mat4 cameraProjectionMatrix;
		uniform mat4 cameraInverseProjectionMatrix;

		uniform float intensity;
		uniform float bias;
		uniform float kernelRadius;
		uniform float minResolution;
		uniform vec2 size;
		uniform float randomSeed;

		// RGBA depth
		#include <packing>

		out float out_occlusion;

		float getDepth( const in vec2 screenPosition ) {

			#if DEPTH_PACKING == 1
			return unpackRGBAToDepth( texture2D( tDepth, screenPosition ) );
			#else
			return texture2D( tDepth, screenPosition ).x;
			#endif

		}

		float getViewZ( const in float depth ) {

			#if PERSPECTIVE_CAMERA == 1
			return perspectiveDepthToViewZ( depth, cameraNear, cameraFar );
			#else
			return orthographicDepthToViewZ( depth, cameraNear, cameraFar );
			#endif

		}

		vec3 getViewPosition( const in vec2 screenPosition, const in float depth, const in float viewZ ) {

			float clipW = cameraProjectionMatrix[2][3] * viewZ + cameraProjectionMatrix[3][3];
			vec4 clipPosition = vec4( ( vec3( screenPosition, depth ) - 0.5 ) * 2.0, 1.0 );
			clipPosition *= clipW; // unprojection.

			return ( cameraInverseProjectionMatrix * clipPosition ).xyz;

		}

		vec3 getViewNormal( const in vec3 viewPosition, const in vec2 screenPosition ) {

			#if NORMAL_TEXTURE == 1
			return unpackRGBToNormal( texture2D( tNormal, screenPosition ).xyz );
			#else
			return normalize( cross( dFdx( viewPosition ), dFdy( viewPosition ) ) );
			#endif

		}

		// Non-sin based hash function, fast and has good randomness
		float hash12( vec2 p ) {

			vec3 p3 = fract( vec3( p.xyx ) * .1031 );
			p3 += dot( p3, p3.yzx + 33.33 );
			return fract( ( p3.x + p3.y ) * p3.z );

		}

		float minResolutionMultipliedByCameraFar;

		float getOcclusion( const in vec3 centerViewPosition, const in vec3 centerViewNormal, const in vec3 sampleViewPosition ) {

			vec3 viewDelta = sampleViewPosition - centerViewPosition;
			float viewDistance = length( viewDelta );

			float vn = dot( centerViewNormal, viewDelta );

			float a2 = ( vn - minResolutionMultipliedByCameraFar ) / viewDistance - bias;
			float a1 = ( 1.0 + pow2( viewDistance ) );

			return max( 0.0, a2 ) / a1;

		}

		// moving costly divides into consts
		const float ANGLE_STEP = PI2 * float( NUM_RINGS ) / float( NUM_SAMPLES );
		const float INV_NUM_SAMPLES = 1.0 / float( NUM_SAMPLES );

		float getAmbientOcclusion( const in vec3 centerViewPosition ) {

			// precompute some variables required in getOcclusion.
			minResolutionMultipliedByCameraFar = minResolution * cameraFar;
			vec3 centerViewNormal = getViewNormal( centerViewPosition, vUv );

			// jsfiddle that shows sample pattern: https://jsfiddle.net/a16ff1p7/
			float angle = hash12( vUv + randomSeed ) * PI2;
			vec2 radius = vec2( kernelRadius * INV_NUM_SAMPLES ) / size;

			float occlusionSum = 0.0;
			float weightSum = 0.0;

			for ( int i = 0; i < NUM_SAMPLES; i ++ ) {

				vec2 sampleUv = vUv + vec2( cos( angle ), sin( angle ) ) * radius * float( i + 1 );

				angle += ANGLE_STEP;

				float sampleDepth = getDepth( sampleUv );

				if ( sampleDepth >= ( 1.0 - EPSILON ) ) {

					// skip, too far
					continue;

				}

				float sampleViewZ = getViewZ( sampleDepth );
				vec3 sampleViewPosition = getViewPosition( sampleUv, sampleDepth, sampleViewZ );
				occlusionSum += getOcclusion( centerViewPosition, centerViewNormal, sampleViewPosition );
				weightSum += 1.0;

			}

			if ( weightSum == 0.0 ) discard;

			return occlusionSum * ( intensity / weightSum );

		}

		void main() {

			float centerDepth = getDepth( vUv );

			if ( centerDepth >= ( 1.0 - EPSILON ) ) {

				discard;

			}

			float centerViewZ = getViewZ( centerDepth );
			vec3 viewPosition = getViewPosition( vUv, centerDepth, centerViewZ );

			float ambientOcclusion = getAmbientOcclusion( viewPosition );

			float invOcclusion = 1.0 - ambientOcclusion;

			out_occlusion = invOcclusion;

		}`

};

export { SAOShader };
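
The definition above appears to be a variant of the three.js SAOShader example, adapted to write a single float occlusion value. Below is a minimal usage sketch (not from the package docs) showing one way to wire it into a three.js ShaderMaterial; the import path, the helper name createSAOMaterial, and the render-target wiring are assumptions, and because the fragment shader declares its own `out float out_occlusion`, the material is created with glslVersion set to GLSL3.

import * as THREE from 'three';
import { UniformsUtils } from 'three';
import { SAOShader } from '@woosh/meep-engine'; // assumed entry point; adjust to the actual module path

// Hypothetical helper: builds a full-screen SAO material for a given camera and resolution.
function createSAOMaterial( camera, width, height ) {

	const material = new THREE.ShaderMaterial( {
		defines: Object.assign( {}, SAOShader.defines ),
		uniforms: UniformsUtils.clone( SAOShader.uniforms ),
		vertexShader: SAOShader.vertexShader,
		fragmentShader: SAOShader.fragmentShader,
		glslVersion: THREE.GLSL3 // required so the custom `out float out_occlusion` output compiles
	} );

	// Feed the camera parameters the shader needs to reconstruct view-space positions from depth.
	material.uniforms.size.value.set( width, height );
	material.uniforms.cameraNear.value = camera.near;
	material.uniforms.cameraFar.value = camera.far;
	material.uniforms.cameraProjectionMatrix.value.copy( camera.projectionMatrix );
	material.uniforms.cameraInverseProjectionMatrix.value.copy( camera.projectionMatrixInverse );

	// Caller must still assign material.uniforms.tDepth.value to a depth texture rendered
	// from the same camera (RGBA-packed depth when DEPTH_PACKING is left at 1).
	return material;

}

With NORMAL_TEXTURE left at 0 the shader derives normals from screen-space depth derivatives, so only the depth texture is strictly required; setting NORMAL_TEXTURE to 1 and providing tNormal would use packed normals instead.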