@azure/cosmos
Version:
Microsoft Azure Cosmos DB Service Node.js SDK for NoSQL API
393 lines (392 loc) • 11.2 kB
JavaScript
// ---------------------------------------------------------------------------
// Bundler-generated (esbuild-style) CommonJS interop helpers. They re-create
// ESM "live binding" export semantics on top of module.exports.
// ---------------------------------------------------------------------------
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Define every key of `all` on `target` as an enumerable getter, so each read
// re-evaluates the thunk and sees the current value (mirrors ESM bindings).
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copy own properties of `from` onto `to` as getters, skipping `except` and
// keys already present on `to`; preserves each property's enumerability.
// (`desc` is a scratch parameter, assigned inside the enumerable expression.)
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Wrap the module namespace object for CommonJS consumers (marks __esModule).
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
// Module export table: a default object plus the named `reverse` helper.
var murmurHash_exports = {};
__export(murmurHash_exports, {
  default: () => murmurHash_default,
  reverse: () => reverse
});
module.exports = __toCommonJS(murmurHash_exports);
var import_uint8 = require("../uint8.js");
/**
 * Multiplies two 32-bit operands modulo 2^32 without losing precision to
 * JavaScript's 53-bit floats: the low 16 bits of `m` multiply `n` in full,
 * while the high 16 bits contribute only their low 16 result bits, shifted
 * back into place.
 * @param {number} m - left operand (treated as a 32-bit integer)
 * @param {number} n - right operand
 * @returns {number} value congruent to (m * n) mod 2^32 once coerced to int32
 */
function _x86Multiply(m, n) {
  const lowProduct = (m & 65535) * n;
  const highProduct = ((m >>> 16) * n & 65535) << 16;
  return lowProduct + highProduct;
}
/**
 * Rotates a 32-bit integer left by `n` bits (callers pass 0 < n < 32).
 * @param {number} m - value to rotate
 * @param {number} n - rotation amount in bits
 * @returns {number} rotated value as a signed 32-bit integer
 */
function _x86Rotl(m, n) {
  const shiftedLeft = m << n;
  const wrappedAround = m >>> 32 - n;
  return shiftedLeft | wrappedAround;
}
/**
 * Final avalanche mix for the x86 MurmurHash3 variants: alternating
 * xor-shift and multiply rounds that diffuse every input bit across the
 * word. The multipliers 2246822507 (0x85ebca6b) and 3266489909 (0xc2b2ae35)
 * are the reference fmix32 constants.
 * @param {number} h - 32-bit intermediate hash state
 * @returns {number} avalanched state as a signed 32-bit integer
 */
function _x86Fmix(h) {
  let mixed = h;
  mixed ^= mixed >>> 16;
  mixed = _x86Multiply(mixed, 2246822507);
  mixed ^= mixed >>> 13;
  mixed = _x86Multiply(mixed, 3266489909);
  mixed ^= mixed >>> 16;
  return mixed;
}
/**
 * Adds two unsigned 64-bit values, each represented as a [hi32, lo32] pair.
 * The operands are split into four 16-bit limbs (index 0 = most significant)
 * so every partial sum stays well inside float53 precision; carries ripple
 * from the lowest limb upward, and overflow past bit 63 is discarded.
 * @param {number[]} m - [hi32, lo32] addend
 * @param {number[]} n - [hi32, lo32] addend
 * @returns {number[]} [hi32, lo32] of (m + n) mod 2^64 (halves are int32)
 */
function _x64Add(m, n) {
  const a = [m[0] >>> 16, m[0] & 65535, m[1] >>> 16, m[1] & 65535];
  const b = [n[0] >>> 16, n[0] & 65535, n[1] >>> 16, n[1] & 65535];
  const sum = [0, 0, 0, 0];
  sum[3] += a[3] + b[3];
  sum[2] += sum[3] >>> 16; // carry out of limb 3
  sum[3] &= 65535;
  sum[2] += a[2] + b[2];
  sum[1] += sum[2] >>> 16; // carry out of limb 2
  sum[2] &= 65535;
  sum[1] += a[1] + b[1];
  sum[0] += sum[1] >>> 16; // carry out of limb 1
  sum[1] &= 65535;
  sum[0] += a[0] + b[0];
  sum[0] &= 65535; // drop carry out of bit 63 (wrap mod 2^64)
  return [sum[0] << 16 | sum[1], sum[2] << 16 | sum[3]];
}
/**
 * Multiplies two unsigned 64-bit values ([hi32, lo32] pairs) modulo 2^64.
 * Operands are decomposed into 16-bit limbs (index 0 = most significant);
 * partial products are accumulated limb by limb with explicit carries so no
 * intermediate exceeds float53 precision. Limb products whose combined
 * weight reaches 2^64 or more are omitted entirely.
 * @param {number[]} m - [hi32, lo32] factor
 * @param {number[]} n - [hi32, lo32] factor
 * @returns {number[]} [hi32, lo32] of (m * n) mod 2^64 (halves are int32)
 */
function _x64Multiply(m, n) {
  const a = [m[0] >>> 16, m[0] & 65535, m[1] >>> 16, m[1] & 65535];
  const b = [n[0] >>> 16, n[0] & 65535, n[1] >>> 16, n[1] & 65535];
  const prod = [0, 0, 0, 0];
  prod[3] += a[3] * b[3];
  prod[2] += prod[3] >>> 16;
  prod[3] &= 65535;
  prod[2] += a[2] * b[3];
  prod[1] += prod[2] >>> 16;
  prod[2] &= 65535;
  prod[2] += a[3] * b[2];
  prod[1] += prod[2] >>> 16;
  prod[2] &= 65535;
  prod[1] += a[1] * b[3];
  prod[0] += prod[1] >>> 16;
  prod[1] &= 65535;
  prod[1] += a[2] * b[2];
  prod[0] += prod[1] >>> 16;
  prod[1] &= 65535;
  prod[1] += a[3] * b[1];
  prod[0] += prod[1] >>> 16;
  prod[1] &= 65535;
  // Highest limb: four partial products land here; any carry beyond 16 bits
  // would exceed 2^64 and is masked away.
  prod[0] += a[0] * b[3] + a[1] * b[2] + a[2] * b[1] + a[3] * b[0];
  prod[0] &= 65535;
  return [prod[0] << 16 | prod[1], prod[2] << 16 | prod[3]];
}
/**
 * Rotates a 64-bit value ([hi32, lo32]) left by n bits (n taken mod 64).
 * @param {number[]} m - [hi32, lo32] value to rotate
 * @param {number} n - rotation amount in bits
 * @returns {number[]} rotated [hi32, lo32] pair
 */
function _x64Rotl(m, n) {
  const bits = n % 64;
  if (bits === 32) {
    // Exactly half a word: just swap the 32-bit halves.
    return [m[1], m[0]];
  }
  if (bits < 32) {
    return [m[0] << bits | m[1] >>> 32 - bits, m[1] << bits | m[0] >>> 32 - bits];
  }
  // More than half a word: swap halves, then rotate by the remainder.
  const r = bits - 32;
  return [m[1] << r | m[0] >>> 32 - r, m[0] << r | m[1] >>> 32 - r];
}
/**
 * Logically shifts a 64-bit value ([hi32, lo32]) left by n bits (n mod 64),
 * zero-filling from the right.
 * Note: when the effective shift is 0 the input array itself is returned,
 * not a copy (matches how callers use the result read-only).
 * @param {number[]} m - [hi32, lo32] value to shift
 * @param {number} n - shift amount in bits
 * @returns {number[]} shifted [hi32, lo32] pair
 */
function _x64LeftShift(m, n) {
  const bits = n % 64;
  if (bits === 0) {
    return m;
  }
  if (bits < 32) {
    return [m[0] << bits | m[1] >>> 32 - bits, m[1] << bits];
  }
  // Shifting by >= 32 moves the low half into the high half.
  return [m[1] << bits - 32, 0];
}
/**
 * XORs two 64-bit values ([hi32, lo32]) half by half.
 * @param {number[]} m - [hi32, lo32] operand
 * @param {number[]} n - [hi32, lo32] operand
 * @returns {number[]} [hi32, lo32] of m ^ n
 */
function _x64Xor(m, n) {
  const hi = m[0] ^ n[0];
  const lo = m[1] ^ n[1];
  return [hi, lo];
}
/**
 * Final avalanche mix for the x64 128-bit variant. Each `[0, h[0] >>> 1]`
 * xor operand is the 64-bit `h >>> 33` of the reference implementation:
 * shifting right by 33 leaves only the top 31 bits, landing in the low word.
 * The multipliers are 0xff51afd7ed558ccd and 0xc4ceb9fe1a85ec53 split into
 * [hi32, lo32] halves.
 * @param {number[]} h - [hi32, lo32] intermediate hash state
 * @returns {number[]} avalanched [hi32, lo32] pair
 */
function _x64Fmix(h) {
  let mixed = h;
  mixed = _x64Xor(mixed, [0, mixed[0] >>> 1]);
  mixed = _x64Multiply(mixed, [4283543511, 3981806797]);
  mixed = _x64Xor(mixed, [0, mixed[0] >>> 1]);
  mixed = _x64Multiply(mixed, [3301882366, 444984403]);
  mixed = _x64Xor(mixed, [0, mixed[0] >>> 1]);
  return mixed;
}
/**
 * MurmurHash3, x86 32-bit variant.
 * Consumes the input in 4-byte little-endian blocks, folds the remaining
 * 1-3 tail bytes into one last block, then mixes in the length and applies
 * the final avalanche.
 * @param {Uint8Array|number[]} bytes - input bytes
 * @param {number} [seed] - 32-bit seed; any falsy value falls back to 0
 * @returns {number} unsigned 32-bit hash
 */
function x86Hash32(bytes, seed) {
  seed = seed || 0;
  const tailLength = bytes.length % 4;
  const blockEnd = bytes.length - tailLength;
  const c1 = 3432918353; // 0xcc9e2d51
  const c2 = 461845907; // 0x1b873593
  let h1 = seed;
  // Body: one mix round per aligned 4-byte block.
  for (let offset = 0; offset < blockEnd; offset += 4) {
    let block = bytes[offset] | bytes[offset + 1] << 8 | bytes[offset + 2] << 16 | bytes[offset + 3] << 24;
    block = _x86Multiply(block, c1);
    block = _x86Rotl(block, 15);
    block = _x86Multiply(block, c2);
    h1 ^= block;
    h1 = _x86Rotl(h1, 13);
    h1 = _x86Multiply(h1, 5) + 3864292196; // 0xe6546b64
  }
  // Tail: assemble the trailing 1-3 bytes little-endian and mix them in.
  let tail = 0;
  if (tailLength >= 3) {
    tail ^= bytes[blockEnd + 2] << 16;
  }
  if (tailLength >= 2) {
    tail ^= bytes[blockEnd + 1] << 8;
  }
  if (tailLength >= 1) {
    tail ^= bytes[blockEnd];
    tail = _x86Multiply(tail, c1);
    tail = _x86Rotl(tail, 15);
    tail = _x86Multiply(tail, c2);
    h1 ^= tail;
  }
  // Finalization: fold in the input length, then avalanche.
  h1 ^= bytes.length;
  h1 = _x86Fmix(h1);
  return h1 >>> 0;
}
/**
 * MurmurHash3, x86 128-bit variant.
 * Maintains four 32-bit lanes (h1-h4), all seeded identically; consumes the
 * input in 16-byte little-endian blocks (one 32-bit word per lane), mixes
 * the 1-15 byte tail, then finalizes with length folding, lane cross-mixing
 * and per-lane avalanche.
 * @param {Uint8Array|number[]} bytes - input bytes
 * @param {number} [seed] - 32-bit seed; any falsy value falls back to 0
 * @returns {string} 32-hex-character (128-bit) digest, h1..h4 concatenated
 */
function x86Hash128(bytes, seed) {
  seed = seed || 0;
  const remainder = bytes.length % 16;
  const blocks = bytes.length - remainder;
  let h1 = seed;
  let h2 = seed;
  let h3 = seed;
  let h4 = seed;
  let k1 = 0;
  let k2 = 0;
  let k3 = 0;
  let k4 = 0;
  // Round constants from the reference implementation.
  const c1 = 597399067; // 0x239b961b
  const c2 = 2869860233; // 0xab0e9789
  const c3 = 951274213; // 0x38b34ae5
  const c4 = 2716044179; // 0xa1e38b93
  // j ends up at the start offset of the tail once the body loop finishes.
  let j = 0;
  for (let i = 0; i < blocks; i = i + 16) {
    // Load four little-endian 32-bit words from the current block.
    k1 = bytes[i] | bytes[i + 1] << 8 | bytes[i + 2] << 16 | bytes[i + 3] << 24;
    k2 = bytes[i + 4] | bytes[i + 5] << 8 | bytes[i + 6] << 16 | bytes[i + 7] << 24;
    k3 = bytes[i + 8] | bytes[i + 9] << 8 | bytes[i + 10] << 16 | bytes[i + 11] << 24;
    k4 = bytes[i + 12] | bytes[i + 13] << 8 | bytes[i + 14] << 16 | bytes[i + 15] << 24;
    // Lane 1; each lane also absorbs its neighbor (h1 += h2, etc.).
    k1 = _x86Multiply(k1, c1);
    k1 = _x86Rotl(k1, 15);
    k1 = _x86Multiply(k1, c2);
    h1 ^= k1;
    h1 = _x86Rotl(h1, 19);
    h1 += h2;
    h1 = _x86Multiply(h1, 5) + 1444728091; // 0x561ccd1b
    // Lane 2.
    k2 = _x86Multiply(k2, c2);
    k2 = _x86Rotl(k2, 16);
    k2 = _x86Multiply(k2, c3);
    h2 ^= k2;
    h2 = _x86Rotl(h2, 17);
    h2 += h3;
    h2 = _x86Multiply(h2, 5) + 197830471; // 0x0bcaa747
    // Lane 3.
    k3 = _x86Multiply(k3, c3);
    k3 = _x86Rotl(k3, 17);
    k3 = _x86Multiply(k3, c4);
    h3 ^= k3;
    h3 = _x86Rotl(h3, 15);
    h3 += h4;
    h3 = _x86Multiply(h3, 5) + 2530024501; // 0x96cd1c35
    // Lane 4 wraps around to absorb lane 1.
    k4 = _x86Multiply(k4, c4);
    k4 = _x86Rotl(k4, 18);
    k4 = _x86Multiply(k4, c1);
    h4 ^= k4;
    h4 = _x86Rotl(h4, 13);
    h4 += h1;
    h4 = _x86Multiply(h4, 5) + 850148119; // 0x32ac3b17
    j = i + 16;
  }
  k1 = 0;
  k2 = 0;
  k3 = 0;
  k4 = 0;
  // Tail: intentional switch fall-through assembles the remaining 1-15 bytes
  // little-endian into partial words, mixing each word once it is complete.
  switch (remainder) {
    case 15:
      k4 ^= bytes[j + 14] << 16;
    case 14:
      k4 ^= bytes[j + 13] << 8;
    case 13:
      k4 ^= bytes[j + 12];
      k4 = _x86Multiply(k4, c4);
      k4 = _x86Rotl(k4, 18);
      k4 = _x86Multiply(k4, c1);
      h4 ^= k4;
    case 12:
      k3 ^= bytes[j + 11] << 24;
    case 11:
      k3 ^= bytes[j + 10] << 16;
    case 10:
      k3 ^= bytes[j + 9] << 8;
    case 9:
      k3 ^= bytes[j + 8];
      k3 = _x86Multiply(k3, c3);
      k3 = _x86Rotl(k3, 17);
      k3 = _x86Multiply(k3, c4);
      h3 ^= k3;
    case 8:
      k2 ^= bytes[j + 7] << 24;
    case 7:
      k2 ^= bytes[j + 6] << 16;
    case 6:
      k2 ^= bytes[j + 5] << 8;
    case 5:
      k2 ^= bytes[j + 4];
      k2 = _x86Multiply(k2, c2);
      k2 = _x86Rotl(k2, 16);
      k2 = _x86Multiply(k2, c3);
      h2 ^= k2;
    case 4:
      k1 ^= bytes[j + 3] << 24;
    case 3:
      k1 ^= bytes[j + 2] << 16;
    case 2:
      k1 ^= bytes[j + 1] << 8;
    case 1:
      k1 ^= bytes[j];
      k1 = _x86Multiply(k1, c1);
      k1 = _x86Rotl(k1, 15);
      k1 = _x86Multiply(k1, c2);
      h1 ^= k1;
  }
  // Finalization: fold in the length, cross-mix the lanes, avalanche each
  // lane, then cross-mix once more.
  h1 ^= bytes.length;
  h2 ^= bytes.length;
  h3 ^= bytes.length;
  h4 ^= bytes.length;
  h1 += h2;
  h1 += h3;
  h1 += h4;
  h2 += h1;
  h3 += h1;
  h4 += h1;
  h1 = _x86Fmix(h1);
  h2 = _x86Fmix(h2);
  h3 = _x86Fmix(h3);
  h4 = _x86Fmix(h4);
  h1 += h2;
  h1 += h3;
  h1 += h4;
  h2 += h1;
  h3 += h1;
  h4 += h1;
  // Emit each lane as 8 zero-padded hex digits.
  return ("00000000" + (h1 >>> 0).toString(16)).slice(-8) + ("00000000" + (h2 >>> 0).toString(16)).slice(-8) + ("00000000" + (h3 >>> 0).toString(16)).slice(-8) + ("00000000" + (h4 >>> 0).toString(16)).slice(-8);
}
/**
 * MurmurHash3, x64 128-bit variant, with modified output ordering: each
 * 64-bit half is hex-encoded and then byte-reversed before the halves are
 * concatenated. All 64-bit quantities are [hi32, lo32] pairs handled by the
 * _x64* helpers above.
 * NOTE(review): the byte reversal presumably matches Cosmos DB's effective
 * partition key hash format — confirm against the service documentation.
 * @param {Uint8Array|number[]} bytes - input bytes
 * @param {number} [seed] - 32-bit seed; any falsy value falls back to 0
 * @returns {string} 32-hex-character (128-bit) digest
 */
function x64Hash128(bytes, seed) {
  seed = seed || 0;
  const remainder = bytes.length % 16;
  const blocks = bytes.length - remainder;
  // Two 64-bit lanes, seeded in the low word.
  let h1 = [0, seed];
  let h2 = [0, seed];
  let k1 = [0, 0];
  let k2 = [0, 0];
  // Round constants 0x87c37b91114253d5 and 0x4cf5ad432745937f as [hi, lo].
  const c1 = [2277735313, 289559509];
  const c2 = [1291169091, 658871167];
  // j ends up at the start offset of the tail once the body loop finishes.
  let j = 0;
  for (let i = 0; i < blocks; i = i + 16) {
    // Each block yields two little-endian 64-bit words: bytes i..i+7 form
    // k1 (low word first), bytes i+8..i+15 form k2.
    k1 = [
      bytes[i + 4] | bytes[i + 5] << 8 | bytes[i + 6] << 16 | bytes[i + 7] << 24,
      bytes[i] | bytes[i + 1] << 8 | bytes[i + 2] << 16 | bytes[i + 3] << 24
    ];
    k2 = [
      bytes[i + 12] | bytes[i + 13] << 8 | bytes[i + 14] << 16 | bytes[i + 15] << 24,
      bytes[i + 8] | bytes[i + 9] << 8 | bytes[i + 10] << 16 | bytes[i + 11] << 24
    ];
    // Lane 1.
    k1 = _x64Multiply(k1, c1);
    k1 = _x64Rotl(k1, 31);
    k1 = _x64Multiply(k1, c2);
    h1 = _x64Xor(h1, k1);
    h1 = _x64Rotl(h1, 27);
    h1 = _x64Add(h1, h2);
    h1 = _x64Add(_x64Multiply(h1, [0, 5]), [0, 1390208809]); // + 0x52dce729
    // Lane 2.
    k2 = _x64Multiply(k2, c2);
    k2 = _x64Rotl(k2, 33);
    k2 = _x64Multiply(k2, c1);
    h2 = _x64Xor(h2, k2);
    h2 = _x64Rotl(h2, 31);
    h2 = _x64Add(h2, h1);
    h2 = _x64Add(_x64Multiply(h2, [0, 5]), [0, 944331445]); // + 0x38495ab5
    j = i + 16;
  }
  k1 = [0, 0];
  k2 = [0, 0];
  // Tail: intentional switch fall-through assembles the remaining 1-15
  // bytes little-endian into the two 64-bit words k2 (bytes 8-14) and k1
  // (bytes 0-7), mixing each word once it is complete.
  switch (remainder) {
    case 15:
      k2 = _x64Xor(k2, _x64LeftShift([0, bytes[j + 14]], 48));
    case 14:
      k2 = _x64Xor(k2, _x64LeftShift([0, bytes[j + 13]], 40));
    case 13:
      k2 = _x64Xor(k2, _x64LeftShift([0, bytes[j + 12]], 32));
    case 12:
      k2 = _x64Xor(k2, _x64LeftShift([0, bytes[j + 11]], 24));
    case 11:
      k2 = _x64Xor(k2, _x64LeftShift([0, bytes[j + 10]], 16));
    case 10:
      k2 = _x64Xor(k2, _x64LeftShift([0, bytes[j + 9]], 8));
    case 9:
      k2 = _x64Xor(k2, [0, bytes[j + 8]]);
      k2 = _x64Multiply(k2, c2);
      k2 = _x64Rotl(k2, 33);
      k2 = _x64Multiply(k2, c1);
      h2 = _x64Xor(h2, k2);
    case 8:
      k1 = _x64Xor(k1, _x64LeftShift([0, bytes[j + 7]], 56));
    case 7:
      k1 = _x64Xor(k1, _x64LeftShift([0, bytes[j + 6]], 48));
    case 6:
      k1 = _x64Xor(k1, _x64LeftShift([0, bytes[j + 5]], 40));
    case 5:
      k1 = _x64Xor(k1, _x64LeftShift([0, bytes[j + 4]], 32));
    case 4:
      k1 = _x64Xor(k1, _x64LeftShift([0, bytes[j + 3]], 24));
    case 3:
      k1 = _x64Xor(k1, _x64LeftShift([0, bytes[j + 2]], 16));
    case 2:
      k1 = _x64Xor(k1, _x64LeftShift([0, bytes[j + 1]], 8));
    case 1:
      k1 = _x64Xor(k1, [0, bytes[j]]);
      k1 = _x64Multiply(k1, c1);
      k1 = _x64Rotl(k1, 31);
      k1 = _x64Multiply(k1, c2);
      h1 = _x64Xor(h1, k1);
  }
  // Finalization: fold in the length, cross-mix the lanes, avalanche each
  // lane, then cross-mix once more.
  h1 = _x64Xor(h1, [0, bytes.length]);
  h2 = _x64Xor(h2, [0, bytes.length]);
  h1 = _x64Add(h1, h2);
  h2 = _x64Add(h2, h1);
  h1 = _x64Fmix(h1);
  h2 = _x64Fmix(h2);
  h1 = _x64Add(h1, h2);
  h2 = _x64Add(h2, h1);
  // Hex-encode each 64-bit half, then reverse its byte order (see note in
  // the function doc above).
  const h1Hex = ("00000000" + (h1[0] >>> 0).toString(16)).slice(-8) + ("00000000" + (h1[1] >>> 0).toString(16)).slice(-8);
  const h1Buff = (0, import_uint8.hexStringToUint8Array)(h1Hex);
  const h1Reversed = (0, import_uint8.uint8ArrayToHex)(reverse(h1Buff));
  const h2Hex = ("00000000" + (h2[0] >>> 0).toString(16)).slice(-8) + ("00000000" + (h2[1] >>> 0).toString(16)).slice(-8);
  const h2Buff = (0, import_uint8.hexStringToUint8Array)(h2Hex);
  const h2Reversed = (0, import_uint8.uint8ArrayToHex)(reverse(h2Buff));
  return h1Reversed + h2Reversed;
}
/**
 * Returns a new Uint8Array containing the bytes of `buff` in reverse order.
 * The input is not modified.
 * @param {Uint8Array} buff - source bytes
 * @returns {Uint8Array} reversed copy
 */
function reverse(buff) {
  const length = buff.length;
  const reversed = new Uint8Array(length);
  for (let idx = 0; idx < length; idx++) {
    reversed[idx] = buff[length - 1 - idx];
  }
  return reversed;
}
// Public surface of this module, exposed as the default export: the three
// MurmurHash3 variants implemented above, grouped by architecture flavor.
var murmurHash_default = {
  // MurmurHash algorithm version implemented here (not the package version).
  version: "3.0.0",
  x86: {
    hash32: x86Hash32,
    hash128: x86Hash128
  },
  x64: {
    hash128: x64Hash128
  },
  // NOTE(review): this flag is only declared here and never read in this
  // file — presumably consumed by callers; confirm against the importers.
  inputValidation: true
};
// Annotate the CommonJS export names for ESM import in node:
// The `0 &&` guard makes this assignment dead code at runtime; the literal
// exists solely so static analysis of the CJS source can discover the named
// export `reverse`.
0 && (module.exports = {
  reverse
});