UNPKG

@woosh/meep-engine

Version:

Pure JavaScript game engine. Fully featured and production ready.

3 lines • 19 kB
/**
 * Generated TypeScript declaration (common.d.ts) for a shader-chunk module.
 *
 * The default export is a single GLSL source string (an `#ifndef`-guarded
 * chunk, `LPV_SHADER_CHUNK_COMMON`) implementing light-probe-volume sampling:
 * tetrahedral-mesh walking (`lpv_walk_to_tetrahedron`), barycentric probe
 * interpolation, octahedral-mapped probe depth lookups, and spherical-harmonic
 * irradiance evaluation (`lpv_get_irradiance_at`).
 *
 * NOTE(review): the string below is runtime data consumed by the GPU compiler.
 * Do not reformat, re-wrap, or "fix" anything inside it in this generated
 * file — changes belong in the shader's source of truth, not the .d.ts.
 *
 * NOTE(review): observed inside the embedded GLSL (fix upstream, not here):
 *  - `wrapOctahedralTexelCoordinates` has an unreachable `return wrapped;`
 *    immediately after the first `return` of its if-branch — dead code,
 *    behaviorally harmless.
 *  - `quadBlendWieghts` misspells "Weights", but is spelled consistently at
 *    its call site in `lpv_probe_getDepthTriangular`, so it compiles.
 *  - In `lvp_mask_weights_by_visibility` the moment (depth) visibility test
 *    is commented out — only the soft backface term is active. Presumably
 *    intentional tuning; verify against the engine's LPV pipeline.
 *  - `lpv_sample_irradiance` computes visibility weights before checking
 *    `tet == INVALID_TET`; wasted texture fetches on the miss path, but the
 *    result is discarded, so output is unaffected.
 */
declare const _default: "\n#ifndef LPV_SHADER_CHUNK_COMMON\n#define LPV_SHADER_CHUNK_COMMON\n\n\nuniform usampler2D lpv_t_mesh_vertices;\nuniform usampler2D lpv_t_mesh_neighbours;\nuniform usampler3D lpv_t_mesh_lookup;\n\nuniform sampler2D lpv_t_probe_positions;\nuniform sampler2D lpv_t_probe_data;\n\nuniform sampler2D lpv_t_probe_depth;\nuniform uint lpv_u_probe_depth_resolution;\n\nuniform uint lpv_u_mesh_tet_count;\nuniform vec3 lpv_v3_bounds_min;\nuniform vec3 lpv_v3_bounds_max;\n\n\nconst float lpv_min_thickness = 0.03; // in Meters\nconst float lpv_max_thickness = 0.50; // in Meters\n\n\n#define SEARCH_STEP_LIMIT 32u\n#define INVALID_TET 1073741823u\n\n\n/** Slightly bump the location of the shadow test point away from the shadow casting surface.\n The shadow casting surface is the boundary for shadow, so the nearer an imprecise value is\n to it the more the light leaks.\n*/\n#define LPV_NORMAL_BIAS 0.0001f\n\nivec2 lpv_index_to_256_coordinate(uint index) {\n\n uint pixel_x = index % 256u;\n uint pixel_y = index / 256u;\n\n return ivec2(int(pixel_x), int(pixel_y));\n}\n\nuvec4 lpv_mesh_getVertices(uint tet_index) {\n ivec2 p = lpv_index_to_256_coordinate(tet_index);\n\n return texelFetch(lpv_t_mesh_vertices, p, 0);\n\n}\n\nuvec4 lpv_mesh_getNeighbours(uint tet_index) {\n ivec2 p = lpv_index_to_256_coordinate(tet_index);\n\n return texelFetch(lpv_t_mesh_neighbours, p, 0);\n}\n\nvec3[9] lpv_probe_getData(uint probe_index) {\n\n\n int slot = int(probe_index % 256u);\n int column = int(probe_index / 256u);\n\n int offset_x = int(slot * 9);\n\n vec3[9] result;\n\n for (int i = 0; i < 9; i++) {\n result[i] = texelFetch(lpv_t_probe_data, ivec2(offset_x + i, column), 0).rgb;\n }\n\n return result;\n}\n\nfloat sign_not_zero(float x) {\n return x >= 0.0 ? 
1.0 : -1.0;\n}\n\nvec2 sign_not_zero(vec2 x) {\n return vec2(\n sign_not_zero(x.x),\n sign_not_zero(x.y)\n );\n}\n\nvec3 sign_not_zero(vec3 x) {\n return vec3(\n sign_not_zero(x.x),\n sign_not_zero(x.y),\n sign_not_zero(x.z)\n );\n}\n\nvec4 quadBlendWieghts(vec2 coords)\n{\n vec4 res;\n/* 0 0 0\n 0 0 0\n 1 0 0 */\n res.x = min(1.0f - coords.x, 1.0f - coords.y);\n/* 1 0 0\n 0 0 0\n 0 0 1 */\n res.y = abs(coords.x - coords.y);\n/* 0 0 1\n 0 0 0\n 0 0 0 */\n res.z = min(coords.x, coords.y);\n/* 0 0 0\n 0 0 1\n 0 1 1 */\n res.w = ceil(coords.x - coords.y);\n //res.xyz /= (res.x + res.y + res.z);\n return res;\n}\n\nvec2 VecToSphereOct(vec3 v)\n{\n float l1norm = abs(v.x) + abs(v.y) + abs(v.z);\n\n vec2 result = v.xz / l1norm;\n\n if (v.y < 0.0) {\n result = (1.0 - abs(result.yx)) * sign_not_zero(result.xy);\n }\n\n return result;\n}\n\nfloat SampleBlended(sampler2D tex, vec2 uv0, vec2 uv1, vec2 uv2, vec4 weights) {\n\n float samp0 = textureLod(tex, uv0, 0.0).r;\n float samp1 = textureLod(tex, uv1, 0.0).r;\n float samp2 = textureLod(tex, uv2, 0.0).r;\n\n return samp0 * weights.x + samp1 * weights.y + samp2 * weights.z;\n}\n\nvec2 lpv_probe_getDepthTriangular(uint probe_index, vec3 direction) {\n // get offset\n uint depth_tile_resolution = lpv_u_probe_depth_resolution;\n uvec2 atlas_size = uvec2(4096u);\n\n uint tiles_per_row = atlas_size.x / depth_tile_resolution;\n\n uint tile_x = probe_index % tiles_per_row;\n uint tile_y = probe_index / tiles_per_row;\n\n vec2 tile_offset = vec2(\n tile_x * depth_tile_resolution,\n tile_y * depth_tile_resolution\n );\n\n // convert direction to UV\n vec2 octahedral_uv = clamp(VecToSphereOct(direction) * 0.5 + 0.5, 0.0, 1.0);\n vec2 grid = octahedral_uv * vec2(depth_tile_resolution - 1u);\n\n vec2 gridFrac = fract(grid);\n vec2 gridFloor = floor(grid);\n\n vec4 weights = quadBlendWieghts(gridFrac);\n\n //3 nearest frames\n vec2 frame0 = gridFloor;\n vec2 frame1 = gridFloor + mix(vec2(0, 1), vec2(1, 0), weights.w);\n vec2 frame2 = 
gridFloor + vec2(1.0, 1.0);\n\n // move frames to atlas space\n frame0 += tile_offset;\n frame1 += tile_offset;\n frame2 += tile_offset;\n\n vec2 samp0 = texelFetch(lpv_t_probe_depth, ivec2(frame0), 0).rg;\n vec2 samp1 = texelFetch(lpv_t_probe_depth, ivec2(frame1), 0).rg;\n vec2 samp2 = texelFetch(lpv_t_probe_depth, ivec2(frame2), 0).rg;\n\n vec2 d0 = samp0 * weights.x;\n vec2 d1 = samp1 * weights.y;\n vec2 d2 = samp2 * weights.z;\n\n return (d0 + d1 + d2);\n}\n\nfloat lpv_bilinear_lerp(float v00, float v01, float v10, float v11, vec2 fraction) {\n\n float x0 = mix(v00, v01, fraction.x);\n float x1 = mix(v10, v11, fraction.x);\n\n return mix(x0, x1, fraction.y);\n}\n\nvec2 lpv_bilinear_lerp(vec2 v00, vec2 v01, vec2 v10, vec2 v11, vec2 fraction) {\n\n vec2 x0 = mix(v00, v01, fraction.x);\n vec2 x1 = mix(v10, v11, fraction.x);\n\n return mix(x0, x1, fraction.y);\n}\n\nvec2 lpv_sample_bilinear(sampler2D tex, ivec2 texel_position, vec2 fraction) {\n\n float texel_00 = texelFetch(tex, texel_position, 0).r;\n float texel_01 = texelFetch(tex, texel_position + ivec2(1, 0), 0).r;\n float texel_10 = texelFetch(tex, texel_position + ivec2(0, 1), 0).r;\n float texel_11 = texelFetch(tex, texel_position + ivec2(1, 1), 0).r;\n\n return vec2(\n lpv_bilinear_lerp(\n texel_00, texel_01,\n texel_10, texel_11,\n fraction\n ),\n lpv_bilinear_lerp(\n texel_00 * texel_00, texel_01 * texel_01,\n texel_10 * texel_10, texel_11 * texel_11,\n fraction\n )\n );\n}\n\nivec2 wrapOctahedralTexelCoordinates(const in ivec2 texel, const in int texture_size) {\n ivec2 wrapped = ((texel % texture_size) + texture_size) % texture_size;\n\n int fx = (abs(texel.x / texture_size) + int(texel.x < 0));\n int fy = (abs(texel.y / texture_size) + int(texel.y < 0));\n\n if (((fx ^ fy) & 1) != 0) {\n return (texture_size - (wrapped + ivec2(1)));\n return wrapped;\n } else {\n return wrapped;\n }\n}\n\nvec2 lpv_probe_getDepthBilinear(uint probe_index, vec3 direction) {\n // get offset\n int depth_tile_resolution = 
int(lpv_u_probe_depth_resolution);\n ivec2 tile_resolution = ivec2(depth_tile_resolution);\n const ivec2 atlas_size = ivec2(4096);\n\n int tiles_per_row = atlas_size.x / depth_tile_resolution;\n\n int tile_x = int(probe_index) % tiles_per_row;\n int tile_y = int(probe_index) / tiles_per_row;\n\n ivec2 tile_offset = ivec2(\n tile_x * depth_tile_resolution,\n tile_y * depth_tile_resolution\n );\n\n // convert direction to UV\n vec2 octahedral_uv = clamp(VecToSphereOct(direction) * 0.5 + 0.5, 0.0, 1.0);\n vec2 grid = octahedral_uv * vec2(depth_tile_resolution) - 0.5;\n\n vec2 gridFrac = fract(grid);\n\n ivec2 texel_position = ivec2(floor(grid));\n\n ivec2 tile_p_00;\n ivec2 tile_p_01;\n ivec2 tile_p_10;\n ivec2 tile_p_11;\n\n tile_p_00 = wrapOctahedralTexelCoordinates(texel_position, depth_tile_resolution);\n tile_p_01 = wrapOctahedralTexelCoordinates(texel_position + ivec2(1, 0), depth_tile_resolution);\n tile_p_10 = wrapOctahedralTexelCoordinates(texel_position + ivec2(0, 1), depth_tile_resolution);\n tile_p_11 = wrapOctahedralTexelCoordinates(texel_position + ivec2(1, 1), depth_tile_resolution);\n\n vec2 texel_00 = texelFetch(lpv_t_probe_depth, tile_offset + tile_p_00, 0).rg;\n vec2 texel_01 = texelFetch(lpv_t_probe_depth, tile_offset + tile_p_01, 0).rg;\n vec2 texel_10 = texelFetch(lpv_t_probe_depth, tile_offset + tile_p_10, 0).rg;\n vec2 texel_11 = texelFetch(lpv_t_probe_depth, tile_offset + tile_p_11, 0).rg;\n\n return lpv_bilinear_lerp(\n texel_00, texel_01,\n texel_10, texel_11,\n gridFrac\n );\n}\n\nvec3 lpv_probe_getPosition(uint probe_index) {\n return texelFetch(lpv_t_probe_positions, lpv_index_to_256_coordinate(probe_index), 0).rgb;\n}\n\nmat3 lpv_mesh_makeMatrix(vec3 p0, vec3 p1, vec3 p2, vec3 p3) {\n\n return inverse(\n mat3(\n p0 - p3,\n p1 - p3,\n p2 - p3\n )\n );\n\n}\n\nvec4 lpv_mesh_getBarycentricCoordinates(uint tet_index, vec3 position) {\n uvec4 vertices = lpv_mesh_getVertices(tet_index);\n\n vec3 p0 = lpv_probe_getPosition(vertices[0]);\n vec3 
p1 = lpv_probe_getPosition(vertices[1]);\n vec3 p2 = lpv_probe_getPosition(vertices[2]);\n vec3 p3 = lpv_probe_getPosition(vertices[3]);\n\n mat3 matrix = lpv_mesh_makeMatrix(p0, p1, p2, p3);\n\n vec3 mult = matrix * (position - p3);\n\n return vec4(mult, 1.0 - mult.x - mult.y - mult.z);\n}\n\nvoid lpv_walk_to_tetrahedron(\nin vec3 position,\nin uint tet_guess,\nout uint tet_index,\nout vec4 weights\n) {\n tet_index = tet_guess;\n\n for (uint i = 0u; i < SEARCH_STEP_LIMIT; i++) {\n\n weights = lpv_mesh_getBarycentricCoordinates(tet_index, position);\n\n // Check if we're in the current \"best guess\" tetrahedron\n if (weights.x >= 0.0 && weights.y >= 0.0 && weights.z >= 0.0 && weights.w >= 0.0) {\n // success\n return;\n }\n\n uvec4 neighbors = lpv_mesh_getNeighbours(tet_index);\n\n uint next_tet;\n\n // Otherwise find the smallest barycentric coord and move in that direction\n if (weights.x < weights.y && weights.x < weights.z && weights.x < weights.w) {\n next_tet = neighbors[0];\n } else if (weights.y < weights.z && weights.y < weights.w) {\n next_tet = neighbors[1];\n } else if (weights.z < weights.w) {\n next_tet = neighbors[2];\n } else {\n next_tet = neighbors[3];\n }\n\n tet_index = next_tet;\n }\n\n tet_index = INVALID_TET;\n}\n\nfloat lpv_probe_getVisibilityMask(vec3 position, uint probe_index) {\n\n vec3 probe_position = lpv_probe_getPosition(probe_index);\n\n vec3 local_probe_offset = position - probe_position;\n\n float distToProbe = length(local_probe_offset);\n\n vec3 direction = local_probe_offset / distToProbe;\n\n vec2 temp = lpv_probe_getDepthBilinear(probe_index, direction);\n\n float mean = temp.x;\n float mean2 = temp.y; // mean of squared distances\n\n float variance = abs(mean * mean - mean2);\n\n // http://www.punkuser.net/vsm/vsm_paper.pdf; equation 5\n // Need the max in the denominator because biasing can cause a negative displacement\n float distance_delta = max(distToProbe - mean, 0.0);\n\n float chebyshevWeight = variance / (variance 
+ distance_delta * distance_delta);\n\n // Increase contrast in the weight\n chebyshevWeight = max(chebyshevWeight * chebyshevWeight * chebyshevWeight, 0.0);\n\n return (distToProbe <= mean) ? 1.0 : chebyshevWeight;\n}\n\nvec4 lvp_mask_weights_by_visibility_by_depth(vec3 position, uint tet_index, vec4 weights) {\n\n uvec4 vertices = lpv_mesh_getVertices(tet_index);\n\n vec4 visibility = vec4(\n lpv_probe_getVisibilityMask(position, vertices[0]),\n lpv_probe_getVisibilityMask(position, vertices[1]),\n lpv_probe_getVisibilityMask(position, vertices[2]),\n lpv_probe_getVisibilityMask(position, vertices[3])\n );\n\n return visibility * weights;\n}\n\n\nvec4 lvp_mask_weights_by_visibility_by_normal(vec3 position, vec3 normal, uint tet_index, vec4 weights) {\n\n uvec4 vertices = lpv_mesh_getVertices(tet_index);\n\n vec4 visibility;\n\n for (int i = 0; i < 4; i++) {\n vec3 probe_position = lpv_probe_getPosition(vertices[i]);\n\n vec3 direction_to_probe = position - probe_position;\n\n visibility[i] = step(dot(direction_to_probe, normal), 0.0);\n }\n\n return visibility * weights;\n}\n\n\nvec4 lvp_mask_weights_by_visibility(in vec3 position, in vec3 normal, in vec3 view_direction, in uint tet_index, in vec4 barycentric) {\n\n uvec4 vertices = lpv_mesh_getVertices(tet_index);\n\n vec4 visibility;\n\n // Bias the position at which visibility is computed; this\n // avoids performing a shadow test *at* a surface, which is a\n // dangerous location because that is exactly the line between\n // shadowed and unshadowed. If the normal bias is too small,\n // there will be light and dark leaks. 
If it is too large,\n // then samples can pass through thin occluders to the other\n // side (this can only happen if there are MULTIPLE occluders\n // near each other, a wall surface won't pass through itself.)\n vec3 lookup_position = position + (normal - view_direction * 3.0) * LPV_NORMAL_BIAS;\n\n float weight_sum = 0.0;\n\n for (uint i = 0u; i < 4u; i++) {\n\n float weight = 1.0;\n\n uint probe_index = vertices[i];\n\n vec3 probe_position = lpv_probe_getPosition(probe_index);\n\n\n // Smooth backface test\n {\n\n // Computed without the biasing applied to the \"dir\" variable.\n // This test can cause reflection-map looking errors in the image\n // (stuff looks shiny) if the transition is poor.\n vec3 direction_to_probe = normalize(probe_position - position);\n\n // The naive soft backface weight would ignore a probe when\n // it is behind the surface. That's good for walls. But for small details inside of a\n // room, the normals on the details might rule out all of the probes that have mutual\n // visibility to the point. So, we instead use a \"wrap shading\" test below inspired by\n // NPR work.\n// float backface_term = max(0.0001, dot(direction_to_probe, normal));\n\n // The small offset at the end reduces the \"going to zero\" impact\n // where this is really close to exactly opposite\n float backface_term = max(0.0001, (dot(direction_to_probe, normal) + 1.0) * 0.5);\n weight *= backface_term * backface_term + 0.05;\n// weight *= backface_term;\n\n }\n\n // Moment visibility test (depth)\n {\n\n// weight *= lpv_probe_getVisibilityMask(lookup_position, probe_index);\n\n }\n\n\n // A tiny bit of light is really visible due to log perception, so\n // crush tiny weights but keep the curve continuous. 
This must be done\n // before the trilinear weights, because those should be preserved.\n const float crushThreshold = 0.2;\n\n if (weight < crushThreshold) {\n weight *= weight * weight * (1.0 / (crushThreshold * crushThreshold));\n }\n\n // Avoid zero weight\n weight = max(0.000001, weight);\n\n weight *= barycentric[i];\n\n weight_sum += weight;\n\n visibility[i] = weight;\n }\n\n // normalize\n visibility /= weight_sum;\n\n return visibility;\n}\n\nvec3[9] lpv_interpolate_probes(vec4 weights, uint tet_index) {\n\n uvec4 vertices = lpv_mesh_getVertices(tet_index);\n\n vec3[9] probe0 = lpv_probe_getData(vertices[0]);\n vec3[9] probe1 = lpv_probe_getData(vertices[1]);\n vec3[9] probe2 = lpv_probe_getData(vertices[2]);\n vec3[9] probe3 = lpv_probe_getData(vertices[3]);\n\n vec3[9] result;\n\n for (int i = 0; i < 9; i++) {\n\n result[i] = probe0[i] * weights[0]\n + probe1[i] * weights[1]\n + probe2[i] * weights[2]\n + probe3[i] * weights[3];\n\n }\n\n return result;\n}\n\nuint lpv_guess_initial_tet(vec3 position) {\n\n vec3 lpv_mesh_bounds_min = lpv_v3_bounds_min;\n vec3 lpv_mesh_bounds_max = lpv_v3_bounds_max;\n\n vec3 lookup_coordinates = (position - lpv_mesh_bounds_min) / (lpv_mesh_bounds_max - lpv_mesh_bounds_min);\n\n return textureLod(lpv_t_mesh_lookup, lookup_coordinates, 0.0).r;\n\n}\n\n\nvec3 lpv_get_irradiance_at(in vec3 normal, in vec3 shCoefficients[9]) {\n // normal is assumed to have unit length\n float x = normal.x, y = normal.y, z = normal.z;\n\n // band 0\n vec3 result = shCoefficients[0] * 0.8862269254527579;\n\n // band 1\n result += shCoefficients[1] * 1.0233267079464885 * y;\n result += shCoefficients[2] * 1.0233267079464885 * z;\n result += shCoefficients[3] * 1.0233267079464885 * x;\n\n // band 2\n result += shCoefficients[4] * 0.8580855308097834 * x * y;\n result += shCoefficients[5] * 0.8580855308097834 * y * z;\n result += shCoefficients[6] * (0.7431238683011272 * z * z - 0.24770795610037571);\n result += shCoefficients[7] * 
0.8580855308097834 * x * z;\n result += shCoefficients[8] * 0.4290427654048917 * (x * x - y * y);\n\n return result;\n}\n\n\nvec4 lpv_renormalize_weights(in vec4 source) {\n\n float sum = source.x + source.y + source.z + source.w;\n\n if (sum <= 0.0001) {\n return vec4(0.0);\n }\n\n return source / sum;\n\n}\n\n\nvec3 lpv_mesh_interpolate_probe_irradiance(vec3 direction , vec4 weights, uint tet){\n\n uvec4 vertices = lpv_mesh_getVertices(tet);\n\n vec3[9] probe0 = lpv_probe_getData(vertices[0]);\n vec3[9] probe1 = lpv_probe_getData(vertices[1]);\n vec3[9] probe2 = lpv_probe_getData(vertices[2]);\n vec3[9] probe3 = lpv_probe_getData(vertices[3]);\n \n vec3 irradiance_0 = lpv_get_irradiance_at(direction, probe0);\n vec3 irradiance_1 = lpv_get_irradiance_at(direction, probe1);\n vec3 irradiance_2 = lpv_get_irradiance_at(direction, probe2);\n vec3 irradiance_3 = lpv_get_irradiance_at(direction, probe3);\n \n // move irradiance into preceptional space for interpolation\n vec3 irradiance_p_0 = sqrt(irradiance_0);\n vec3 irradiance_p_1 = sqrt(irradiance_1);\n vec3 irradiance_p_2 = sqrt(irradiance_2);\n vec3 irradiance_p_3 = sqrt(irradiance_3);\n\n vec3 interpolated_sqrt = irradiance_p_0 * weights[0]\n +irradiance_p_1 * weights[1]\n +irradiance_p_2 * weights[2]\n +irradiance_p_3 * weights[3]\n ;\n\n // convert back\n return interpolated_sqrt*interpolated_sqrt;\n}\n\nvec3 lpv_sample_irradiance(vec3 position, vec3 normal, vec3 view_direction) {\n\n // lookup nearby tet\n vec3 lpv_mesh_bounds_min = lpv_v3_bounds_min;\n vec3 lpv_mesh_bounds_max = lpv_v3_bounds_max;\n vec3 lookup_coordinates = (position - lpv_mesh_bounds_min) / (lpv_mesh_bounds_max - lpv_mesh_bounds_min);\n\n\n uint nearest_tet = lpv_guess_initial_tet(position);\n\n uint tet;\n vec4 barycentric_coordinates;\n lpv_walk_to_tetrahedron(position, nearest_tet, tet, barycentric_coordinates);\n\n // apply visibility term\n vec4 weights = lvp_mask_weights_by_visibility(position, normal, view_direction, tet, 
barycentric_coordinates);\n\n if (tet == INVALID_TET) {\n // do nothing\n return vec3(0.0);\n\n } else {\n\n vec3[9] lpv_values = lpv_interpolate_probes(weights, tet);\n\n vec3 irradiance = lpv_get_irradiance_at(normal, lpv_values);\n\n return irradiance;\n }\n}\n\n#endif\n"; export default _default; //# sourceMappingURL=common.d.ts.map