@arcium-hq/client
Version:
Client SDK for interacting with encrypted Solana programs
1,263 lines (1,250 loc) • 186 kB
JavaScript
'use strict';
var crypto = require('crypto');
var ed25519 = require('@noble/curves/ed25519');
var sha3 = require('@noble/hashes/sha3');
var modular = require('@noble/curves/abstract/modular');
var anchor = require('@coral-xyz/anchor');
var utils = require('@noble/hashes/utils');
var edwards = require('@noble/curves/abstract/edwards');
var web3_js = require('@solana/web3.js');
/**
 * Wraps a CommonJS module in a frozen namespace object whose `default`
 * property is the module itself (bundler ESM-interop helper).
 * @param e - The CommonJS module (may be null/undefined).
 * @returns A frozen, prototype-less namespace object.
 */
function _interopNamespaceDefault(e) {
    const n = Object.create(null);
    if (e) {
        for (const k of Object.keys(e)) {
            if (k === 'default') {
                continue;
            }
            const d = Object.getOwnPropertyDescriptor(e, k);
            // preserve live getters; otherwise expose a lazy forwarding getter
            Object.defineProperty(n, k, d.get ? d : {
                enumerable: true,
                get: () => e[k],
            });
        }
    }
    n.default = e;
    return Object.freeze(n);
}
// Namespace-style wrapper around the CommonJS anchor module (built by the interop helper).
var anchor__namespace = /*#__PURE__*/_interopNamespaceDefault(anchor);
/**
 * Scalar field prime modulus for Curve25519: 2^252 + 27742317777372353535851937790883648493
 * (taken from the ed25519 curve order `n` exposed by @noble/curves).
 */
const CURVE25519_SCALAR_FIELD_MODULUS = ed25519.ed25519.CURVE.n;
/**
 * Generates a uniformly random value below q via rejection sampling.
 * @param q - The upper bound (exclusive) for the random value.
 * @returns A random bigint value between 0 and q-1.
 */
function generateRandomFieldElem(q) {
    // Sample exactly as many bytes as q's bit length requires; reject and
    // retry candidates >= q so the accepted result is uniform on [0, q).
    const numBytes = Math.ceil(q.toString(2).length / 8);
    for (;;) {
        const buffer = crypto.randomBytes(numBytes);
        const candidate = BigInt(`0x${buffer.toString('hex')}`);
        if (candidate < q) {
            return candidate;
        }
    }
}
/**
 * Computes the positive modulo of a over m.
 * Works for both number and bigint operands.
 * @param a - The dividend.
 * @param m - The modulus.
 * @returns The positive remainder of a mod m.
 */
function positiveModulo(a, m) {
    // shift the (possibly negative) native remainder up by m, then reduce again
    const shifted = (a % m) + m;
    return shifted % m;
}
/**
 * Serializes a non-negative bigint to a little-endian Uint8Array of the specified length.
 * @param val - The bigint value to serialize (must be >= 0).
 * @param lengthInBytes - The desired length of the output array.
 * @returns The serialized value as a Uint8Array.
 * @throws Error if the value is negative or too large for the specified length.
 */
function serializeLE(val, lengthInBytes) {
    // A negative value would never trip the overflow check below (the
    // arithmetic right-shift of a negative BigInt converges to -1n, not 0n)
    // and would silently serialize garbage bytes, so reject it up front.
    if (val < BigInt(0)) {
        throw new Error(`Value ${val} is negative and cannot be serialized as unsigned little-endian`);
    }
    const result = new Uint8Array(lengthInBytes);
    let tempVal = val;
    for (let i = 0; i < lengthInBytes; i++) {
        result[i] = Number(tempVal & BigInt(255));
        tempVal >>= BigInt(8);
    }
    // any bits left over means val did not fit into lengthInBytes bytes
    if (tempVal > BigInt(0)) {
        throw new Error(`Value ${val} is too large for the byte length ${lengthInBytes}`);
    }
    return result;
}
/**
 * Deserializes a little-endian Uint8Array to a bigint.
 * @param bytes - The Uint8Array to deserialize.
 * @returns The deserialized bigint value (0n for an empty array).
 */
function deserializeLE(bytes) {
    // accumulate from the most significant byte down so each step is a shift-and-or
    let value = BigInt(0);
    for (let i = bytes.length - 1; i >= 0; i--) {
        value = (value << BigInt(8)) | BigInt(bytes[i]);
    }
    return value;
}
// GENERAL
/**
 * Computes the SHA-256 hash of the concatenation of an array of Uint8Arrays.
 * @param byteArrays - The arrays to hash, fed to the hasher in order.
 * @returns The SHA-256 digest as a Buffer (32 bytes).
 */
function sha256(byteArrays) {
    const hasher = crypto.createHash('sha256');
    for (const chunk of byteArrays) {
        hasher.update(chunk);
    }
    return hasher.digest();
}
// PDA (program derived address) seeds and protocol size constants used throughout the client.
/**
 * Seed for ClockAccount PDA
 * @constant {string}
 */
const CLOCK_ACC_SEED = 'ClockAccount';
/**
 * Seed for FeePool PDA
 * @constant {string}
 */
const POOL_ACC_SEED = 'FeePool';
/**
 * Seed for ComputationAccount PDA
 * @constant {string}
 */
const COMPUTATION_ACC_SEED = 'ComputationAccount';
/**
 * Seed for Mempool PDA
 * @constant {string}
 */
const MEMPOOL_ACC_SEED = 'Mempool';
/**
 * Seed for ExecutingPoolAccount PDA
 * @constant {string}
 */
const EXEC_POOL_ACC_SEED = 'Execpool';
/**
 * Seed for ClusterAccount PDA
 * @constant {string}
 */
const CLUSTER_ACC_SEED = 'Cluster';
/**
 * Seed for ArxNodeAccount PDA
 * @constant {string}
 */
const ARX_NODE_ACC_SEED = 'ArxNode';
/**
 * Seed for MXE Account PDA
 * @constant {string}
 */
const MXE_ACCOUNT_SEED = 'MXEAccount';
/**
 * Seed for CompDefAccount PDA
 * @constant {string}
 */
const COMP_DEF_ACC_SEED = 'ComputationDefinitionAccount';
/**
 * Maximum number of bytes that can be reallocated per instruction.
 * @constant {number}
 */
const MAX_REALLOC_PER_IX = 10240;
/**
 * Maximum number of bytes that can be uploaded in a single transaction with the upload instruction.
 * @constant {number}
 */
const MAX_UPLOAD_PER_TX_BYTES = 814;
/**
 * Maximum size of an account in bytes (10MB = 10 * 1024 * 1024).
 * @constant {number}
 */
const MAX_ACCOUNT_SIZE = 10485760;
/**
 * Maximum number of arcium embiggen instructions allowed in a single transaction (due to compute unit limits).
 * @constant {number}
 */
const MAX_EMBIGGEN_IX_PER_TX = 18;
/**
 * Size of account discriminator in bytes.
 * @constant {number}
 */
const DISCRIMINATOR_SIZE = 8;
/**
 * Size of offset buffer in bytes (u32).
 * @constant {number}
 */
const OFFSET_BUFFER_SIZE = 4;
/**
 * Size of computation definition offset slice in bytes.
 * @constant {number}
 */
const COMP_DEF_OFFSET_SIZE = 4;
/**
 * Size of a uint128 in bytes; also the limb size used by compressUint128/decompressUint128.
 * @constant {number}
 */
const UINT128_BYTE_SIZE = 16;
/**
 * Converts a bigint to an array of bits (least significant to most significant, in 2's complement representation).
 * @param x - The bigint to convert.
 * @param binSize - The number of bits to use in the representation.
 * @returns An array of booleans representing the bits of x.
 */
function toBinLE(x, binSize) {
    const bits = [];
    // (x >> i) & 1 extracts bit i; BigInt's arithmetic shift sign-extends,
    // which yields the 2's complement bits for negative x.
    for (let i = 0n; i < binSize; ++i) {
        bits.push(((x >> i) & 1n) === 1n);
    }
    return bits;
}
/**
 * Converts an array of bits (least significant to most significant, in 2's complement representation) to a bigint.
 * @param xBin - The array of bits to convert.
 * @returns The bigint represented by the bit array.
 */
function fromBinLE(xBin) {
    const signIndex = xBin.length - 1;
    let magnitude = 0n;
    for (let i = 0; i < signIndex; ++i) {
        magnitude |= BigInt(xBin[i]) << BigInt(i);
    }
    // the top bit carries negative weight in 2's complement
    return magnitude - (BigInt(xBin[signIndex]) << BigInt(signIndex));
}
/**
 * Binary (ripple-carry full-adder) addition of two bit arrays.
 * Assumes xBin and yBin have the same length and are large enough to represent the sum.
 * @param xBin - The first operand as a bit array (LSB first).
 * @param yBin - The second operand as a bit array (LSB first).
 * @param carryIn - The initial carry-in value.
 * @param binSize - The number of bits to process.
 * @returns The sum as a bit array.
 */
function adder(xBin, yBin, carryIn, binSize) {
    const sumBits = [];
    let carry = carryIn;
    for (let i = 0; i < binSize; ++i) {
        const x = xBin[i];
        const y = yBin[i];
        // full-adder sum bit: x XOR y XOR carry (`!==` acts as XOR on booleans)
        sumBits.push(x !== (y !== carry));
        // full-adder carry-out: (x AND y) XOR ((x XOR y) AND carry)
        carry = (x && y) !== ((x !== y) && carry);
    }
    return sumBits;
}
/**
 * Constant-time addition of two bigints, using 2's complement representation.
 * @param x - The first operand.
 * @param y - The second operand.
 * @param binSize - The number of bits to use in the operation.
 * @returns The sum as a bigint.
 */
function ctAdd(x, y, binSize) {
    const xBits = toBinLE(x, binSize);
    const yBits = toBinLE(y, binSize);
    return fromBinLE(adder(xBits, yBits, false, binSize));
}
/**
 * Constant-time subtraction of two bigints, using 2's complement representation.
 * Implements x - y as x + NOT(y) + 1.
 * @param x - The first operand.
 * @param y - The second operand.
 * @param binSize - The number of bits to use in the operation.
 * @returns The difference as a bigint.
 */
function ctSub(x, y, binSize) {
    // bitwise complement of y; the carry-in of 1 completes the 2's complement negation
    const negatedY = toBinLE(y, binSize).map((bit) => bit === false);
    return fromBinLE(adder(toBinLE(x, binSize), negatedY, true, binSize));
}
/**
 * Returns the sign bit of a bigint in constant time.
 * @param x - The bigint to check.
 * @param binSize - The bit position to check (typically the highest bit).
 * @returns True if the sign bit is set, false otherwise.
 */
function ctSignBit(x, binSize) {
    return ((x >> binSize) & 1n) !== 0n;
}
/**
 * Constant-time less-than comparison for two bigints.
 * x < y exactly when x - y is negative, i.e. when the sign bit of the difference is set.
 * @param x - The first operand.
 * @param y - The second operand.
 * @param binSize - The number of bits to use in the operation.
 * @returns True if x < y, false otherwise.
 */
function ctLt(x, y, binSize) {
    const difference = ctSub(x, y, binSize);
    return ctSignBit(difference, binSize);
}
/**
 * Constant-time select between two bigints based on a boolean condition.
 * Computed branchlessly as y + b * (x - y).
 * @param b - The condition; if true, select x, otherwise select y.
 * @param x - The value to select if b is true.
 * @param y - The value to select if b is false.
 * @param binSize - The number of bits to use in the operation.
 * @returns The selected bigint.
 */
function ctSelect(b, x, y, binSize) {
    const delta = ctSub(x, y, binSize);
    return ctAdd(y, BigInt(b) * delta, binSize);
}
/**
 * Checks if a bigint fits in the range -2^binSize <= x < 2^binSize.
 * Not constant-time for arbitrary x, but is constant-time for all inputs for which the function returns true.
 * If you assert your inputs satisfy verifyBinSize(x, binSize), you need not care about the non constant-timeness of this function.
 * @param x - The bigint to check.
 * @param binSize - The number of bits to use in the check.
 * @returns True if x fits in the range, false otherwise.
 */
function verifyBinSize(x, binSize) {
    // after shifting out binSize bits, only all-zeros (non-negative in range)
    // or all-ones, i.e. -1n (negative in range), may remain
    const shifted = x >> binSize;
    return shifted === 0n || shifted === -1n;
}
/**
 * Checks if code is running in a browser environment.
 * @returns true if a window object exists and window.process has no own
 *          'type' property, false otherwise
 */
function isBrowser() {
    if (typeof window === 'undefined') {
        return false;
    }
    // NOTE(review): window.process?.type is presumably the Electron-renderer
    // marker being excluded here — confirm against the intended runtimes.
    // eslint-disable-next-line no-prototype-builtins
    return !window.process?.hasOwnProperty('type');
}
/**
 * Conditionally logs a message if logging is enabled.
 * @param log - Whether to output the log
 * @param args - Arguments to pass to console.log
 */
function optionalLog(log, ...args) {
    if (!log) {
        return;
    }
    // eslint-disable-next-line no-console
    console.log(...args);
}
/**
 * Calculates the minimum number of bits needed to represent a value.
 * Formula: floor(log2(max)) + 1 for unsigned, +1 for signed, +1 for diff of two negatives.
 * @param max - The bigint value to measure
 * @returns Number of bits required, as a bigint
 */
function getBinSize(max) {
    // floor(log2(max)) + 1 bits represent the unsigned value itself
    const unsignedBits = Math.floor(Math.log2(Number(max))) + 1;
    // +1 for a sign bit, +1 to absorb the difference of two negative elements
    return BigInt(unsignedBits) + 2n;
}
/**
 * Compresses an array of bytes into 128-bit bigints.
 *
 * Takes an array of bytes whose length is a multiple of 16 and compresses each consecutive 16 bytes into a single 128-bit bigint.
 *
 * @param bytes - The input byte array. Its length must be a multiple of 16.
 * @returns An array of 128-bit bigints, each representing 16 bytes from the input.
 * @throws Error if the input length is not a multiple of 16.
 */
function compressUint128(bytes) {
    if (bytes.length % UINT128_BYTE_SIZE !== 0) {
        throw Error(`bytes.length must be a multiple of ${UINT128_BYTE_SIZE} (found ${bytes.length})`);
    }
    const limbs = [];
    // interpret each consecutive 16-byte window as one little-endian u128
    for (let offset = 0; offset < bytes.length; offset += UINT128_BYTE_SIZE) {
        limbs.push(deserializeLE(bytes.slice(offset, offset + UINT128_BYTE_SIZE)));
    }
    return limbs;
}
/**
 * Decompresses an array of 128-bit bigints into a flattened byte array.
 *
 * Takes an array of 128-bit bigints and returns a Uint8Array containing the decompressed bytes (16 bytes per bigint).
 *
 * @param compressed - The input array of 128-bit bigints. Each bigint must be less than 2^128.
 * @returns A Uint8Array containing the decompressed bytes.
 * @throws Error if any bigint in the input is not less than 2^128.
 */
function decompressUint128(compressed) {
    const limit = 1n << 128n;
    compressed.forEach((c) => {
        if (c >= limit) {
            throw Error(`input must be less than 2^128 (found ${c})`);
        }
    });
    // write each value as 16 little-endian bytes into a preallocated buffer
    const out = new Uint8Array(compressed.length * UINT128_BYTE_SIZE);
    compressed.forEach((value, index) => {
        out.set(serializeLE(value, UINT128_BYTE_SIZE), index * UINT128_BYTE_SIZE);
    });
    return out;
}
/**
 * Checks if a computation reference is null (all zeros).
 * @param ref - The computation reference to check (BN-valued fields)
 * @returns true if both computationOffset and priorityFee are zero, false otherwise
 */
function isNullRef(ref) {
    // BN values must be compared by value: the previous `=== new BN(0)`
    // comparison was reference equality between distinct objects, so it was
    // always false even for zero-valued fields.
    return ref.computationOffset.isZero() && ref.priorityFee.isZero();
}
/**
 * Matrix class over FpField. Data is row-major.
 */
class Matrix {
    // The finite field (noble-curves IField-style object) all entries belong to.
    field;
    // Row-major entries; each entry has been canonicalized with field.create.
    data;
    /**
     * Constructs a matrix over `field` from row-major `data`.
     * Every entry is reduced into the field via `field.create`.
     * @param field - The finite field of the entries.
     * @param data - Row-major entries; all rows must have the same length.
     * @throws Error if the rows have differing lengths.
     */
    constructor(field, data) {
        this.field = field;
        const nrows = data.length;
        const ncols = data[0].length;
        for (let i = 1; i < nrows; ++i) {
            if (data[i].length !== ncols) {
                throw Error('All rows must have same number of columns.');
            }
        }
        this.data = data.map((row) => row.map((c) => field.create(c)));
    }
    /**
     * Matrix multiplication between `this` and `rhs`.
     * @param rhs - Right-hand operand; rhs.nrows must equal this.ncols.
     * @returns The (this.nrows x rhs.ncols) product matrix.
     * @throws Error on incompatible dimensions.
     */
    matMul(rhs) {
        const thisNrows = this.data.length;
        const thisNcols = this.data[0].length;
        const rhsNrows = rhs.data.length;
        const rhsNcols = rhs.data[0].length;
        if (thisNcols !== rhsNrows) {
            throw Error(`this.ncols must be equal to rhs.nrows (found ${thisNcols} and ${rhsNrows})`);
        }
        const data = [];
        for (let i = 0; i < thisNrows; ++i) {
            const row = [];
            for (let j = 0; j < rhsNcols; ++j) {
                // standard inner product of row i of `this` with column j of `rhs`
                let c = this.field.ZERO;
                for (let k = 0; k < thisNcols; ++k) {
                    c = this.field.add(c, this.field.mul(this.data[i][k], rhs.data[k][j]));
                }
                row.push(c);
            }
            data.push(row);
        }
        return new Matrix(this.field, data);
    }
    /**
     * Element-wise addition between `this` and `rhs`.
     * @param rhs - Matrix of the same shape as `this`.
     * @param ct - When true, uses the constant-time bigint helpers
     *             (ctAdd/ctLt/ctSub/ctSelect) instead of the field's native ops.
     * @returns The element-wise sum.
     * @throws Error if the shapes differ.
     */
    add(rhs, ct = false) {
        const thisNrows = this.data.length;
        const thisNcols = this.data[0].length;
        const rhsNrows = rhs.data.length;
        const rhsNcols = rhs.data[0].length;
        if (thisNrows !== rhsNrows) {
            throw Error(`this.nrows must be equal to rhs.nrows (found ${thisNrows} and ${rhsNrows})`);
        }
        if (thisNcols !== rhsNcols) {
            throw Error(`this.ncols must be equal to rhs.ncols (found ${thisNcols} and ${rhsNcols})`);
        }
        // bit width large enough for sums/differences of field elements
        const binSize = getBinSize(this.field.ORDER - 1n);
        const data = [];
        for (let i = 0; i < thisNrows; ++i) {
            const row = [];
            for (let j = 0; j < thisNcols; ++j) {
                if (ct) {
                    const sum = ctAdd(this.data[i][j], rhs.data[i][j], binSize);
                    // branchless reduction mod ORDER: keep sum if sum < ORDER, else sum - ORDER
                    row.push(ctSelect(ctLt(sum, this.field.ORDER, binSize), sum, ctSub(sum, this.field.ORDER, binSize), binSize));
                }
                else {
                    row.push(this.field.add(this.data[i][j], rhs.data[i][j]));
                }
            }
            data.push(row);
        }
        return new Matrix(this.field, data);
    }
    /**
     * Element-wise subtraction between `this` and `rhs`.
     * @param rhs - Matrix of the same shape as `this`.
     * @param ct - When true, uses the constant-time bigint helpers instead of
     *             the field's native ops.
     * @returns The element-wise difference.
     * @throws Error if the shapes differ.
     */
    sub(rhs, ct = false) {
        const thisNrows = this.data.length;
        const thisNcols = this.data[0].length;
        const rhsNrows = rhs.data.length;
        const rhsNcols = rhs.data[0].length;
        if (thisNrows !== rhsNrows) {
            throw Error(`this.nrows must be equal to rhs.nrows (found ${thisNrows} and ${rhsNrows})`);
        }
        if (thisNcols !== rhsNcols) {
            throw Error(`this.ncols must be equal to rhs.ncols (found ${thisNcols} and ${rhsNcols})`);
        }
        const binSize = getBinSize(this.field.ORDER - 1n);
        const data = [];
        for (let i = 0; i < thisNrows; ++i) {
            const row = [];
            for (let j = 0; j < thisNcols; ++j) {
                if (ct) {
                    const diff = ctSub(this.data[i][j], rhs.data[i][j], binSize);
                    // branchless wrap-around: add ORDER back when the difference is negative
                    row.push(ctSelect(ctSignBit(diff, binSize), ctAdd(diff, this.field.ORDER, binSize), diff, binSize));
                }
                else {
                    row.push(this.field.sub(this.data[i][j], rhs.data[i][j]));
                }
            }
            data.push(row);
        }
        return new Matrix(this.field, data);
    }
    /**
     * Raises each element of `this` to the power `e`.
     * @param e - The exponent (bigint).
     * @returns A new matrix of element-wise powers.
     */
    pow(e) {
        const data = [];
        for (let i = 0; i < this.data.length; ++i) {
            const row = [];
            for (let j = 0; j < this.data[0].length; ++j) {
                row.push(this.field.pow(this.data[i][j], e));
            }
            data.push(row);
        }
        return new Matrix(this.field, data);
    }
    /**
     * Computes the determinant using Gaussian elimination;
     * matches the determinant implementation in arcis.
     *
     * NOTE(review): rows are reordered (zero-leading rows moved behind the
     * pivot block) without tracking sign flips, so the result may differ from
     * the textbook determinant by sign. The only in-file caller uses it as an
     * invertibility (zero / non-zero) check, where the sign is irrelevant —
     * confirm against arcis before relying on the signed value.
     * @returns The determinant as a field element.
     * @throws Error if the matrix is empty or not square.
     */
    det() {
        // Ensure the matrix is square
        const n = this.data.length;
        if (n === 0 || !this.is_square()) {
            throw Error('Matrix must be square and non-empty to compute the determinant.');
        }
        let det = this.field.ONE;
        // Clone the data to avoid mutating the original matrix
        let rows = this.data.map((row) => [...row]);
        for (let i = 0; i < n; ++i) {
            // we partition into rows that have a leading zero and rows that don't
            const lzRows = rows.filter((r) => this.field.is0(r[0]));
            const nlzRows = rows.filter((r) => !this.field.is0(r[0]));
            // take pivot element
            const pivotRow = nlzRows.shift();
            if (pivotRow === undefined) {
                // no pivot row implies the rank is less than n i.e. the determinant is zero
                return this.field.ZERO;
            }
            const pivot = pivotRow[0];
            // multiply pivot onto the determinant
            det = this.field.mul(det, pivot);
            // subtract all leading non zero values with the pivot element (forward elimination).
            const pivotInverse = this.field.inv(pivot);
            // precomputing pivot row such that the leading value is one. This reduces the number of
            // multiplications in the forward elimination multiplications by 50%
            const normalizedPivotRow = pivotRow.map((v) => this.field.mul(pivotInverse, v));
            // forward elimination with normalized pivot row
            const nlzRowsProcessed = nlzRows.map((row) => {
                const lead = row[0];
                return row.map((value, index) => this.field.sub(value, this.field.mul(lead, normalizedPivotRow[index])));
            });
            // concat the remaining rows (without pivot row) and remove the pivot column (all first
            // elements (i.e. zeros) from the remaining rows).
            rows = nlzRowsProcessed.concat(lzRows).map((row) => row.slice(1));
        }
        return det;
    }
    /**
     * Returns true iff the matrix is square (every row length equals the row count).
     */
    is_square() {
        const n = this.data.length;
        for (let i = 1; i < n; ++i) {
            if (this.data[i].length !== n) {
                return false;
            }
        }
        return true;
    }
}
/**
 * Builds an (nrows x ncols) matrix of uniformly random elements of `field`.
 * @param field - The finite field to sample from (its ORDER bounds each sample).
 * @param nrows - Number of rows.
 * @param ncols - Number of columns.
 * @returns A Matrix filled with random field elements.
 */
function randMatrix(field, nrows, ncols) {
    const data = Array.from({ length: nrows }, () => Array.from({ length: ncols }, () => generateRandomFieldElem(field.ORDER)));
    return new Matrix(field, data);
}
/**
 * Curve25519 base field as an IField instance (from @noble/curves).
 */
const CURVE25519_BASE_FIELD = ed25519.ed25519.CURVE.Fp;
// Security level for the block cipher, in bits.
const SECURITY_LEVEL_BLOCK_CIPHER = 128;
// Security level for the hash function, in bits.
const SECURITY_LEVEL_HASH_FUNCTION = 256;
// We refer to https://tosc.iacr.org/index.php/ToSC/article/view/8695/8287 for more details.
/**
 * Description and parameters for the Rescue cipher or hash function, including round constants, MDS matrix, and key schedule.
 * See: https://tosc.iacr.org/index.php/ToSC/article/view/8695/8287
 */
class RescueDesc {
    // Operating mode: { kind: 'cipher', key } or { kind: 'hash', m, capacity }.
    mode;
    // The finite field the permutation operates over.
    field;
    // The smallest prime that does not divide p-1.
    alpha;
    // The inverse of alpha modulo p-1.
    alphaInverse;
    // Number of rounds; the full permutation performs 2 * nRounds steps.
    nRounds;
    // State size in field elements (key length in cipher mode, mode.m in hash mode).
    m;
    // A Maximum Distance Separable matrix.
    mdsMat;
    // Its inverse.
    mdsMatInverse;
    // The round keys, needed for encryption and decryption.
    roundKeys;
    /**
     * Constructs a RescueDesc for a given field and mode (cipher or hash).
     * Initializes round constants, MDS matrix, and key schedule.
     * @param field - The field to use (e.g., CURVE25519_BASE_FIELD).
     * @param mode - The mode: block cipher or hash function.
     * @throws Error in cipher mode if the key has fewer than 2 elements.
     */
    constructor(field, mode) {
        this.field = field;
        this.mode = mode;
        switch (this.mode.kind) {
            case 'cipher': {
                // the state size equals the key length in cipher mode
                this.m = this.mode.key.length;
                if (this.m < 2) {
                    throw Error(`parameter m must be at least 2 (found ${this.m})`);
                }
                break;
            }
            case 'hash': {
                this.m = this.mode.m;
                break;
            }
            default: {
                this.m = 0;
                break;
            }
        }
        const alphaAndInverse = getAlphaAndInverse(this.field.ORDER);
        this.alpha = alphaAndInverse[0];
        this.alphaInverse = alphaAndInverse[1];
        this.nRounds = getNRounds(this.field.ORDER, this.mode, this.alpha, this.m);
        const mdsMatrixAndInverse = getMdsMatrixAndInverse(this.field, this.m);
        this.mdsMat = mdsMatrixAndInverse[0];
        this.mdsMatInverse = mdsMatrixAndInverse[1];
        // generate the round constants using SHAKE256
        const roundConstants = this.sampleConstants(this.nRounds);
        switch (this.mode.kind) {
            case 'cipher': {
                // do the key schedule: run the Rescue permutation on the master key with
                // the sampled constants as subkeys; all intermediate states become round keys
                this.roundKeys = rescuePermutation(this.mode, this.alpha, this.alphaInverse, this.mdsMat, roundConstants, new Matrix(this.field, toVec(this.mode.key)));
                break;
            }
            case 'hash': {
                // hash mode is unkeyed: the sampled constants are used directly
                this.roundKeys = roundConstants;
                break;
            }
            default: {
                this.roundKeys = [];
                break;
            }
        }
    }
    /**
     * Samples round constants for the Rescue permutation, using SHAKE256.
     * @param nRounds - The number of rounds.
     * @returns An array of round constant matrices (column vectors of size m).
     */
    sampleConstants(nRounds) {
        const field = this.field;
        const m = this.m;
        // setup randomness
        // dkLen is the output length from the Keccak instance behind shake.
        // this is irrelevant for our extendable output function (xof), but still we use
        // the default value from one-time shake256 hashing, as defined in shake256's definition
        // in noble-hashes-sha3.
        const hasher = sha3.shake256.create({ dkLen: 256 / 8 });
        // buffer to create field elements from bytes
        // we add 16 bytes to get a distribution statistically close to uniform
        const bufferLen = Math.ceil(field.BITS / 8) + 16;
        switch (this.mode.kind) {
            case 'cipher': {
                // domain separator for cipher-mode constant generation
                hasher.update('encrypt everything, compute anything');
                const rFieldArray = Array.from({ length: m * m + 2 * m }, () => {
                    // create field element from the shake hash
                    const randomness = hasher.xof(bufferLen);
                    // we need not check whether the obtained field element f is in any subgroup,
                    // because we use only prime fields (i.e. there are no subgroups)
                    return field.create(deserializeLE(randomness));
                });
                // create matrix and vectors
                const matData = Array.from({ length: m }, () => rFieldArray.splice(0, m));
                let roundConstantMat = new Matrix(field, matData);
                const initData = Array.from({ length: m }, () => rFieldArray.splice(0, 1));
                const initialRoundConstant = new Matrix(field, initData);
                const roundData = Array.from({ length: m }, () => rFieldArray.splice(0, 1));
                const roundConstantAffineTerm = new Matrix(field, roundData);
                // check for invertibility
                while (field.is0(roundConstantMat.det())) {
                    // resample matrix
                    const resampleArray = Array.from({ length: m * m }, () => {
                        const randomness = hasher.xof(bufferLen);
                        return field.create(deserializeLE(randomness));
                    });
                    const resampleData = Array.from({ length: m }, () => resampleArray.splice(0, m));
                    roundConstantMat = new Matrix(field, resampleData);
                }
                // constants follow the affine recurrence c_{r+1} = M * c_r + b
                const roundConstants = [initialRoundConstant];
                for (let r = 0; r < 2 * this.nRounds; ++r) {
                    roundConstants.push(roundConstantMat.matMul(roundConstants[r]).add(roundConstantAffineTerm));
                }
                return roundConstants;
            }
            case 'hash': {
                // domain separator binding field order, state size, capacity and security level
                hasher.update(`Rescue-XLIX(${this.field.ORDER},${m},${this.mode.capacity},${SECURITY_LEVEL_HASH_FUNCTION})`);
                // this.permute requires an odd number of round keys
                // prepending a 0 matrix makes it equivalent to Algorithm 3 from https://eprint.iacr.org/2020/1143.pdf
                const zeros = [];
                for (let i = 0; i < m; ++i) {
                    zeros.push([0n]);
                }
                const roundConstants = [new Matrix(field, zeros)];
                const rFieldArray = Array.from({ length: 2 * m * nRounds }, () => {
                    // create field element from the shake hash
                    const randomness = hasher.xof(bufferLen);
                    // we need not check whether the obtained field element f is in any subgroup,
                    // because we use only prime fields (i.e. there are no subgroups)
                    return field.create(deserializeLE(randomness));
                });
                for (let r = 0; r < 2 * nRounds; ++r) {
                    const data = [];
                    for (let i = 0; i < m; ++i) {
                        data.push([rFieldArray[r * m + i]]);
                    }
                    roundConstants.push(new Matrix(field, data));
                }
                return roundConstants;
            }
            default: return [];
        }
    }
    /**
     * Applies the Rescue permutation to a state matrix.
     * @param state - The input state matrix (m x 1 column vector).
     * @returns The permuted state matrix (the final of the 2*nRounds steps).
     */
    permute(state) {
        return rescuePermutation(this.mode, this.alpha, this.alphaInverse, this.mdsMat, this.roundKeys, state)[2 * this.nRounds];
    }
    /**
     * Applies the inverse Rescue permutation to a state matrix.
     * @param state - The input state matrix (m x 1 column vector).
     * @returns The inverse-permuted state matrix.
     */
    permuteInverse(state) {
        return rescuePermutationInverse(this.mode, this.alpha, this.alphaInverse, this.mdsMatInverse, this.roundKeys, state)[2 * this.nRounds];
    }
}
/**
 * Finds the smallest prime alpha that does not divide p-1, and computes its inverse modulo p-1.
 * The alpha parameter is used in the Rescue permutation for exponentiation operations.
 * @param p - The field modulus (prime number)
 * @returns A tuple [alpha, alphaInverse] where alpha is the prime and alphaInverse is its modular inverse
 * @throws Error if no suitable prime alpha is found among the first 15 primes
 */
function getAlphaAndInverse(p) {
    const pMinusOne = p - 1n;
    const candidatePrimes = [2n, 3n, 5n, 7n, 11n, 13n, 17n, 19n, 23n, 29n, 31n, 37n, 41n, 43n, 47n];
    const alpha = candidatePrimes.find((a) => pMinusOne % a !== 0n);
    if (alpha === undefined) {
        throw Error('Could not find prime alpha that does not divide p-1.');
    }
    // alpha does not divide p-1, so it is invertible modulo p-1
    return [alpha, modular.invert(alpha, pMinusOne)];
}
/**
 * Calculates the number of rounds required for the Rescue permutation based on security analysis.
 * The number of rounds is determined by analyzing resistance to differential and algebraic attacks.
 * See: https://tosc.iacr.org/index.php/ToSC/article/view/8695/8287 for the security analysis.
 * @param p - The field modulus
 * @param mode - The Rescue mode (cipher or hash)
 * @param alpha - The prime alpha parameter
 * @param m - The state size (block size for cipher, total size for hash)
 * @returns The number of rounds (will be doubled for the full permutation)
 */
function getNRounds(p, mode, alpha, m) {
    // degree term used in the Groebner-basis cost estimate below (see the paper)
    function dcon(n) {
        return Math.floor(0.5 * (Number(alpha) - 1) * m * (n - 1) + 2.0);
    }
    // variable count after n rounds for a sponge with the given rate
    function v(n, rate) {
        return m * (n - 1) + rate;
    }
    // exact binomial coefficient over BigInt (inputs stay small here)
    function binomial(n, k) {
        function factorial(x) {
            if (x === 0n || x === 1n) {
                return 1n;
            }
            return x * factorial(x - 1n);
        }
        return factorial(BigInt(n)) / (factorial(BigInt(n - k)) * factorial(BigInt(k)));
    }
    switch (mode.kind) {
        case 'cipher': {
            // l0 and l1 are two round-count lower bounds from the paper's analysis
            const l0 = Math.ceil((2 * SECURITY_LEVEL_BLOCK_CIPHER) / ((m + 1) * (Math.log2(Number(p)) - Math.log2(Number(alpha) - 1))));
            let l1 = 0;
            if (alpha === 3n) {
                l1 = Math.ceil((SECURITY_LEVEL_BLOCK_CIPHER + 2) / (4 * m));
            }
            else {
                l1 = Math.ceil((SECURITY_LEVEL_BLOCK_CIPHER + 3) / (5.5 * m));
            }
            // at least 5 rounds for sanity; doubled because each round has two steps
            return 2 * Math.max(l0, l1, 5);
        }
        case 'hash': {
            // get number of rounds for Groebner basis attack
            const rate = m - mode.capacity;
            const target = 1n << BigInt(SECURITY_LEVEL_HASH_FUNCTION);
            let l1 = 1;
            let tmp = binomial(v(l1, rate) + dcon(l1), v(l1, rate));
            // grow l1 until the (squared) attack cost exceeds the target, capped at 24
            while (tmp * tmp <= target && l1 <= 23) {
                l1 += 1;
                tmp = binomial(v(l1, rate) + dcon(l1), v(l1, rate));
            }
            // set a minimum value for sanity and add 50%
            return Math.ceil(1.5 * Math.max(5, l1));
        }
        default: return 0;
    }
}
/**
 * Builds a Cauchy matrix for use as an MDS (Maximum Distance Separable) matrix.
 * A Cauchy matrix is guaranteed to be invertible and provides optimal diffusion properties.
 * The matrix is constructed using the formula: M[i][j] = 1/(i + j) for i, j in [1, size].
 * @param field - The finite field over which to construct the matrix
 * @param size - The size of the square matrix
 * @returns A Cauchy matrix of the specified size
 */
function buildCauchy(field, size) {
    // entry at zero-based (r, c) is 1 / ((r + 1) + (c + 1)) in the field
    const entries = Array.from({ length: size }, (_, r) => Array.from({ length: size }, (_, c) => field.inv(BigInt(r + c + 2))));
    return new Matrix(field, entries);
}
/**
 * Builds the inverse of a Cauchy matrix for use as the inverse MDS matrix.
 * The inverse is computed using a closed-form formula for Cauchy matrix inversion.
 * @param field - The finite field over which to construct the matrix
 * @param size - The size of the square matrix
 * @returns The inverse of the Cauchy matrix
 */
function buildInverseCauchy(field, size) {
    // product of all entries of `arr`, each reduced into the field
    function product(arr) {
        return arr.reduce((acc, curr) => field.mul(acc, field.create(curr)), field.ONE);
    }
    // product of (val - u) over all u in `arr` with u !== val
    // (the equal entry contributes a neutral factor of 1)
    function prime(arr, val) {
        return product(arr.map((u) => {
            if (u !== val) {
                return val - u;
            }
            return 1n;
        }));
    }
    const data = [];
    for (let i = 1n; i <= size; ++i) {
        const row = [];
        for (let j = 1n; j <= size; ++j) {
            // closed-form entry of the Cauchy inverse:
            // a * b / (aPrime * bPrime * (i + j)), with the nodes x_k = k and y_k = k
            // for k in [1, size] (note -i - j = -(i + j), inverted at the end)
            const a = product(Array.from({ length: size }, (_, key) => -i - BigInt(1 + key)));
            const aPrime = prime(Array.from({ length: size }, (_, key) => BigInt(1 + key)), j);
            const b = product(Array.from({ length: size }, (_, key) => j + BigInt(1 + key)));
            const bPrime = prime(Array.from({ length: size }, (_, key) => -BigInt(1 + key)), -i);
            row.push(field.mul(a, field.mul(b, field.mul(field.inv(aPrime), field.mul(field.inv(bPrime), field.inv(-i - j))))));
        }
        data.push(row);
    }
    return new Matrix(field, data);
}
/**
 * Builds the MDS (Cauchy) matrix and its inverse for a state of size m.
 * @param field - The finite field.
 * @param m - The state size.
 * @returns A tuple [mdsMat, mdsMatInverse].
 */
function getMdsMatrixAndInverse(field, m) {
    return [buildCauchy(field, m), buildInverseCauchy(field, m)];
}
/**
 * Selects the S-box exponent used in even permutation steps:
 * the inverse exponent in cipher mode, the forward exponent in hash mode.
 * @param mode - The Rescue mode.
 * @param alpha - The forward exponent.
 * @param alphaInverse - The inverse exponent.
 * @returns The exponent for even steps (0n for an unknown mode).
 */
function exponentForEven(mode, alpha, alphaInverse) {
    if (mode.kind === 'cipher') {
        return alphaInverse;
    }
    if (mode.kind === 'hash') {
        return alpha;
    }
    return 0n;
}
/**
 * Selects the S-box exponent used in odd permutation steps:
 * the forward exponent in cipher mode, the inverse exponent in hash mode.
 * @param mode - The Rescue mode.
 * @param alpha - The forward exponent.
 * @param alphaInverse - The inverse exponent.
 * @returns The exponent for odd steps (0n for an unknown mode).
 */
function exponentForOdd(mode, alpha, alphaInverse) {
    if (mode.kind === 'cipher') {
        return alpha;
    }
    if (mode.kind === 'hash') {
        return alphaInverse;
    }
    return 0n;
}
/**
 * Core Rescue permutation function implementing the cryptographic primitive.
 * Applies alternating rounds of exponentiation (S-box) and MDS matrix
 * multiplication with round-key addition; the exponent alternates between the
 * even-step and odd-step value depending on step parity and mode.
 * This is the fundamental building block for both Rescue cipher and Rescue-Prime hash.
 * @param mode - The Rescue mode (cipher or hash) determining exponent selection
 * @param alpha - The prime exponent
 * @param alphaInverse - The inverse exponent
 * @param mdsMat - The Maximum Distance Separable matrix for diffusion
 * @param subkeys - Array of round key matrices
 * @param state - The initial state matrix to permute
 * @returns Array of all intermediate states during the permutation
 */
function rescuePermutation(mode, alpha, alphaInverse, mdsMat, subkeys, state) {
    const evenExponent = exponentForEven(mode, alpha, alphaInverse);
    const oddExponent = exponentForOdd(mode, alpha, alphaInverse);
    // pre-whitening: add the first subkey before the first step
    const states = [state.add(subkeys[0])];
    for (let r = 0; r + 1 < subkeys.length; ++r) {
        const exponent = r % 2 === 0 ? evenExponent : oddExponent;
        // S-box (power map), then diffusion (MDS multiply), then key addition
        states.push(mdsMat.matMul(states[r].pow(exponent)).add(subkeys[r + 1]));
    }
    return states;
}
/**
 * Inverse of rescuePermutation: undoes the key additions and MDS mixes in
 * reverse subkey order, applying the same parity-based exponent schedule.
 * @param mode - The Rescue mode (cipher or hash)
 * @param alpha - The prime exponent
 * @param alphaInverse - The inverse exponent
 * @param mdsMatInverse - The inverse of the MDS matrix
 * @param subkeys - Array of round key matrices
 * @param state - The state matrix to invert
 * @returns Array of intermediate states (final entry is the fully inverted state)
 */
function rescuePermutationInverse(mode, alpha, alphaInverse, mdsMatInverse, subkeys, state) {
    const evenExponent = exponentForEven(mode, alpha, alphaInverse);
    const oddExponent = exponentForOdd(mode, alpha, alphaInverse);
    // the initial state will need to be removed afterwards
    const states = [state];
    for (let r = 0; r + 1 < subkeys.length; ++r) {
        // undo the key addition and MDS mix of the corresponding forward step,
        // then apply the (inverse-schedule) power map
        const unmixed = mdsMatInverse.matMul(states[r].sub(subkeys[subkeys.length - 1 - r]));
        states.push(unmixed.pow(r % 2 === 0 ? evenExponent : oddExponent));
    }
    // finally undo the pre-whitening key
    states.push(states[states.length - 1].sub(subkeys[0]));
    states.shift();
    return states;
}
/**
 * Wraps each element of `data` in a single-element row, producing the
 * row-major representation of an (n x 1) column vector.
 * @param data - Array of field elements.
 * @returns Column-vector data suitable for the Matrix constructor.
 */
function toVec(data) {
    return Array.from(data, (element) => [element]);
}
/**
 * The Rescue-Prime hash function, as described in https://eprint.iacr.org/2020/1143.pdf, offering 256 bits
 * of security against collision, preimage and second-preimage attacks for any field of size at least 102 bits.
 * We use the sponge construction with fixed rate = 7 and capacity = 5 (i.e., m = 12), and truncate the
 * output to 5 field elements.
 */
class RescuePrimeHash {
    // The underlying Rescue permutation description (hash mode, m = 12, capacity = 5).
    desc;
    // Sponge rate: number of message elements absorbed per permutation call.
    rate;
    // Number of field elements in the truncated digest.
    digestLength;
    /**
     * Constructs a RescuePrimeHash instance with rate = 7 and capacity = 5.
     */
    constructor() {
        this.desc = new RescueDesc(CURVE25519_BASE_FIELD, { kind: 'hash', m: 12, capacity: 5 });
        this.rate = 7;
        this.digestLength = 5;
    }
    // This is Algorithm 1 from https://eprint.iacr.org/2020/1143.pdf, though with the padding (see Algorithm 2).
    // The hash is truncated to digestLength elements.
    // According to Section 2.2, this offers min(log2(CURVE25519_BASE_FIELD.ORDER) / 2 * min(digestLength, capacity), s)
    // bits of security against collision, preimage and second-preimage attacks.
    // The security level is thus of the order of 256 bits for any field of size at least 102 bits.
    // The rate and capacity are chosen to achieve minimal number of rounds 8.
    /**
     * Computes the Rescue-Prime hash of a message, with padding as described in Algorithm 2 of the paper.
     * @param message - The input message as an array of bigints.
     * @returns The hash output as an array of bigints (length = digestLength).
     */
    digest(message) {
        // Create a copy and pad message to avoid mutating input parameter
        // (padding: append 1n, then 0n until the length is a multiple of the rate)
        const paddedMessage = [...message, 1n];
        while (paddedMessage.length % this.rate !== 0) {
            paddedMessage.push(0n);
        }
        // start the sponge from the all-zero state (m x 1 column vector)
        const zeros = [];
        for (let i = 0; i < this.desc.m; ++i) {
            zeros.push([0n]);
        }
        let state = new Matrix(this.desc.field, zeros);
        for (let r = 0; r < paddedMessage.length / this.rate; ++r) {
            // absorb the next rate-sized chunk into the first `rate` state elements;
            // the remaining capacity elements receive 0n
            const data = [];
            for (let i = 0; i < this.rate; ++i) {
                data[i] = [paddedMessage[r * this.rate + i]];
            }
            for (let i = this.rate; i < this.desc.m; ++i) {
                data[i] = [0n];
            }
            const s = new Matrix(this.desc.field, data);
            // constant-time addition of the chunk, then the permutation
            state = this.desc.permute(state.add(s, true));
        }
        // squeeze: truncate the final state to the first digestLength elements
        const res = [];
        for (let i = 0; i < this.digestLength; ++i) {
            res.push(state.data[i][0]);
        }
        return res;
    }
}
/**
* Block size m for Rescue cipher operations.
* The Rescue cipher operates on 5-element blocks of field elements, so
* counter blocks and keystream blocks each contain 5 elements.
*/
const RESCUE_CIPHER_BLOCK_SIZE = 5;
/**
* The Rescue cipher in Counter (CTR) mode, with a fixed block size m = 5.
* Each per-block counter [nonce, blockIndex, 0, 0, 0] is run through the keyed
* Rescue permutation to produce a keystream block, which is added to the
* plaintext (encrypt) or subtracted from the ciphertext (decrypt) element-wise
* modulo the field order.
* See: https://tosc.iacr.org/index.php/ToSC/article/view/8695/8287
*/
class RescueCipher {
// Keyed Rescue permutation descriptor derived from the shared secret.
desc;
/**
* Constructs a RescueCipher instance using a shared secret.
* The key is derived using RescuePrimeHash and used to initialize the RescueDesc.
* @param sharedSecret - The shared secret bytes to derive the cipher key from.
*/
constructor(sharedSecret) {
const hasher = new RescuePrimeHash();
// We follow [Section 4, Option 1.](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-56Cr2.pdf).
// For our choice of hash function, we have:
// - H_outputBits = hasher.digestLength = RESCUE_CIPHER_BLOCK_SIZE
// - max_H_inputBits = arbitrarily long, as the Rescue-Prime hash function is built upon the
//   sponge construction
// - L = RESCUE_CIPHER_BLOCK_SIZE.
// Build the vector `counter || Z || FixedInfo` (we only have i = 1, since reps = 1).
// For the FixedInfo we simply take L.
const counter = [1n, deserializeLE(sharedSecret), BigInt(RESCUE_CIPHER_BLOCK_SIZE)];
const rescueKey = hasher.digest(counter);
this.desc = new RescueDesc(CURVE25519_BASE_FIELD, { kind: 'cipher', key: rescueKey });
}
/**
* Encrypts the plaintext vector in Counter (CTR) mode (raw, returns bigints).
* Each plaintext element must lie in [0, field order).
* @param plaintext - The array of plaintext bigints to encrypt.
* @param nonce - A 16-byte nonce for CTR mode.
* @returns The ciphertext as an array of bigints (same length as plaintext).
* @throws Error if the nonce is not 16 bytes long or a plaintext element is out of range.
*/
encrypt_raw(plaintext, nonce) {
if (nonce.length !== 16) {
throw Error(`nonce must be of length 16 (found ${nonce.length})`);
}
// Bit width passed to the ct* bigint helpers below; presumably the bit
// length of the largest field element — confirm against getBinSize's definition.
const binSize = getBinSize(this.desc.field.ORDER - 1n);
// Encrypts one block: permute the counter block into a keystream block,
// then add it to the plaintext with a conditional modular reduction.
function encryptBatch(desc, ptxt, cntr) {
if (cntr.length !== RESCUE_CIPHER_BLOCK_SIZE) {
throw Error(`counter must be of length ${RESCUE_CIPHER_BLOCK_SIZE} (found ${cntr.length})`);
}
// Keystream block: Rescue permutation of the counter column vector.
const encryptedCounter = desc.permute(new Matrix(desc.field, toVec(cntr)));
const ciphertext = [];
for (let i = 0; i < ptxt.length; ++i) {
// Range check: reject negative values and values >= the field order.
if (!verifyBinSize(ptxt[i], binSize - 1n) || ctSignBit(ptxt[i], binSize) || !ctLt(ptxt[i], desc.field.ORDER, binSize)) {
throw Error(`plaintext must be non-negative and at most ${desc.field.ORDER}`);
}
const sum = ctAdd(ptxt[i], encryptedCounter.data[i][0], binSize);
// Modular reduction: keep sum if sum < ORDER, otherwise subtract ORDER.
ciphertext.push(ctSelect(ctLt(sum, desc.field.ORDER, binSize), sum, ctSub(sum, desc.field.ORDER, binSize), binSize));
}
return ciphertext;
}
const nBlocks = Math.ceil(plaintext.length / RESCUE_CIPHER_BLOCK_SIZE);
const counter = getCounter(deserializeLE(nonce), nBlocks);
const ciphertext = [];
for (let i = 0; i < nBlocks; ++i) {
const cnt = RESCUE_CIPHER_BLOCK_SIZE * i;
// The final block may be shorter than RESCUE_CIPHER_BLOCK_SIZE.
const newCiphertext = encryptBatch(this.desc, plaintext.slice(cnt, Math.min(cnt + RESCUE_CIPHER_BLOCK_SIZE, plaintext.length)), counter.slice(cnt, cnt + RESCUE_CIPHER_BLOCK_SIZE));
for (let j = 0; j < newCiphertext.length; ++j) {
ciphertext.push(newCiphertext[j]);
}
}
return ciphertext;
}
/**
* Encrypts the plaintext vector in Counter (CTR) mode and serializes each block.
* @param plaintext - The array of plaintext bigints to encrypt.
* @param nonce - A 16-byte nonce for CTR mode.
* @returns The ciphertext as an array of arrays of numbers (each 32 bytes, little-endian).
*/
encrypt(plaintext, nonce) {
return this.encrypt_raw(plaintext, nonce).map((c) => Array.from(serializeLE(c, 32)));
}
/**
* Decrypts the ciphertext vector in Counter (CTR) mode (raw, expects bigints).
* @param ciphertext - The array of ciphertext bigints to decrypt.
* @param nonce - A 16-byte nonce for CTR mode.
* @returns The decrypted plaintext as an array of bigints.
* @throws Error if the nonce is not 16 bytes long.
*/
decrypt_raw(ciphertext, nonce) {
if (nonce.length !== 16) {
throw Error(`nonce must be of length 16 (found ${nonce.length})`);
}
const binSize = getBinSize(this.desc.field.ORDER - 1n);
// Decrypts one block: regenerate the same keystream block and subtract it,
// adding the field order back when the difference underflows.
function decryptBatch(desc, ctxt, cntr) {
if (cntr.length !== RESCUE_CIPHER_BLOCK_SIZE) {
throw Error(`counter must be of length ${RESCUE_CIPHER_BLOCK_SIZE} (found ${cntr.length})`);
}
// Same keystream block as during encryption (CTR mode is symmetric).
const encryptedCounter = desc.permute(new Matrix(desc.field, toVec(cntr)));
const decrypted = [];
for (let i = 0; i < ctxt.length; ++i) {
const diff = ctSub(ctxt[i], encryptedCounter.data[i][0], binSize);
// If the subtraction went negative (sign bit set), wrap around by adding ORDER.
decrypted.push(ctSelect(ctSignBit(diff, binSize), ctAdd(diff, desc.field.ORDER, binSize), diff, binSize));
}
return decrypted;
}
const nBlocks = Math.ceil(ciphertext.length / RESCUE_CIPHER_BLOCK_SIZE);
const counter = getCounter(deserializeLE(nonce), nBlocks);
const decrypted = [];
for (let i = 0; i < nBlocks; ++i) {
const cnt = RESCUE_CIPHER_BLOCK_SIZE * i;
const newDecrypted = decryptBatch(this.desc, ciphertext.slice(cnt, Math.min(cnt + RESCUE_CIPHER_BLOCK_SIZE, ciphertext.length)), counter.slice(cnt, cnt + RESCUE_CIPHER_BLOCK_SIZE));
for (let j = 0; j < newDecrypted.length; ++j) {
decrypted.push(newDecrypted[j]);
}
}
return decrypted;
}
/**
* Deserializes and decrypts the ciphertext vector in Counter (CTR) mode.
* @param ciphertext - The array of arrays of numbers (each 32 bytes, little-endian) to decrypt.
* @param nonce - A 16-byte nonce for CTR mode.
* @returns The decrypted plaintext as an array of bigints.
* @throws Error if the nonce is not 16 bytes or any ciphertext entry is not 32 bytes.
*/
decrypt(ciphertext, nonce) {
return this.decrypt_raw(ciphertext.map((c) => {
if (c.length !== 32) {
throw Error(`ciphertext must be of length 32 (found ${c.length})`);
}
return deserializeLE(Uint8Array.from(c));
}), nonce);
}
}
/**
 * Builds the CTR-mode counter stream for the Rescue cipher. Every block
 * contributes RESCUE_CIPHER_BLOCK_SIZE elements: [nonce, blockIndex, 0, 0, 0].
 * @param nonce - The initial nonce as a bigint.
 * @param nBlocks - The number of blocks to generate counters for.
 * @returns An array of counter values as bigints.
 */
function getCounter(nonce, nBlocks) {
    const counter = [];
    for (let blockIdx = 0n; blockIdx < nBlocks; ++blockIdx) {
        counter.push(nonce, blockIdx);
        // Zero-pad the block up to RESCUE_CIPHER_BLOCK_SIZE elements.
        let padding = RESCUE_CIPHER_BLOCK_SIZE - 2;
        while (padding-- > 0) {
            counter.push(0n);
        }
    }
    return counter;
}
// The arcisEd25519 signature scheme. This is essentially ed25519 but we use the hash function
// SHA3-512 instead of SHA-512 since its multiplicative depth is much lower, which
// makes it much better suited to be evaluated in MPC.
// These are the parameters specified in RFC 8032, Section 5.1
// (https://datatracker.ietf.org/doc/html/rfc8032#section-5.1), except for the
// hash function (see above). The values below are copied from
// https://github.com/paulmillr/noble-curves/blob/main/src/ed25519.ts.
const arcisEd25519Defaults = (() => ({
// Curve coefficient a = -1 mod p (p = 2^255 - 19).
a: BigInt('57896044618658097711785492504343953926634992332820282019728792003956564819948'),
// Curve coefficient d.
d: BigInt('37095705934669439343138083508754565189542113879843219016388785533085940283555'),
// Base field Fp, reused from the standard ed25519 instance.
Fp: ed25519.ed25519.CURVE.Fp,
// Order of the prime-order subgroup.
n: BigInt('7237005577332262213973186563042994240857116359379907606001950938285454250989'),
// Cofactor.
h: BigInt(8),
// Base point (generator) coordinates.
Gx: BigInt('15112221349535400772501151409588531511454012693041857206046113283949847762202'),
Gy: BigInt('46316835694926478169428394003475163141307993866256225615783033603165251855960'),
// SHA3-512 in place of SHA-512 — the only deviation from RFC 8032.
hash: sha3.sha3_512,
randomBytes: utils.randomBytes,
adjustScalarBytes,
uvRatio,
}))();
/**
* Ed25519 curve instance using SHA3-512 for hashing, suitable for MPC (ArcisEd25519 signature scheme).
* This is essentially Ed25519 but with SHA3-512 instead of SHA-512 for lower multiplicative depth.
* Instantiated via twistedEdwards from the arcisEd25519Defaults parameters.
* See: https://datatracker.ietf.org/doc/html/rfc8032#section-5.1
*/
const arcisEd25519 = (() => edwards.twistedEdwards(arcisEd25519Defaults))();
/**
 * Helper for square roots in Fp: raises x to 2^252 - 3 = (p + 3) / 8 - ... via
 * a fixed addition chain (copied from noble-curves). Multiplying pow_p_5_8 by x
 * yields x^((p+3)/8), as noted by the caller.
 * @param x - The field element (bigint) to exponentiate.
 * @returns { pow_p_5_8, b2 } where b2 = x^3 is an intermediate reused by callers.
 */
function ed25519_pow_2_252_3(x) {
    const P = ed25519.ed25519.CURVE.Fp.ORDER;
    // pow2(v, k, P) squares v k times mod P; mul folds in one multiplication.
    const mul = (a, b) => (a * b) % P;
    const x2 = mul(x, x);                             // x^2
    const b2 = mul(x2, x);                            // x^3   (bits: 11)
    const b4 = mul(modular.pow2(b2, 2n, P), b2);      // x^15  (1111)
    const b5 = mul(modular.pow2(b4, 1n, P), x);       // x^31  (11111)
    const b10 = mul(modular.pow2(b5, 5n, P), b5);     // 10 one-bits
    const b20 = mul(modular.pow2(b10, 10n, P), b10);  // 20 one-bits
    const b40 = mul(modular.pow2(b20, 20n, P), b20);  // 40 one-bits
    const b80 = mul(modular.pow2(b40, 40n, P), b40);  // 80 one-bits
    const b160 = mul(modular.pow2(b80, 80n, P), b80); // 160 one-bits
    const b240 = mul(modular.pow2(b160, 80n, P), b80); // 240 one-bits
    const b250 = mul(modular.pow2(b240, 10n, P), b10); // 250 one-bits
    // Two final squarings then * x gives x^(2^252 - 3).
    const pow_p_5_8 = mul(modular.pow2(b250, 2n, P), x);
    return { pow_p_5_8, b2 };
}
// Fp.sqrt(Fp.neg(1)): a square root of -1 in the ed25519 base field, used in
// uvRatio to move between the two square-root candidates.
const ED25519_SQRT_M1 = /* @__PURE__ */ BigInt('19681161376707505956807079304988542015446066515923890162744021073123829784752');
/**
 * Clamps a 32-byte little-endian Ed25519 secret scalar in place, as required
 * by RFC 8032, Section 5.1.5: clear the lowest three bits (making the scalar a
 * multiple of the cofactor 8), clear the top bit, and set the second-highest bit.
 * NOTE: the input array is mutated and returned (the same object).
 * @param bytes - The 32-byte scalar to clamp.
 * @returns The clamped scalar (the same Uint8Array instance).
 */
function adjustScalarBytes(bytes) {
    bytes[0] &= 248;                     // 0b11111000: clear bits 0-2
    bytes[31] = (bytes[31] & 127) | 64;  // clear bit 255, set bit 254
    return bytes;
}
/**
 * Helper for point decompression: computes √(u/v) in the ed25519 base field.
 * @param u - Numerator (bigint).
 * @param v - Denominator (bigint).
 * @returns { isValid, value }: isValid is true iff u/v is a square; value is
 * the canonical (non-negative) root candidate either way, so callers can stay
 * branch-free.
 */
function uvRatio(u, v) {
    const P = ed25519.ed25519.CURVE.Fp.ORDER;
    const v3 = modular.mod(v * v * v, P);   // v^3
    const v7 = modular.mod(v3 * v3 * v, P); // v^7
    // (uv^7)^((p-5)/8) via the shared addition chain.
    const pow = ed25519_pow_2_252_3(u * v7).pow_p_5_8;
    let candidate = modular.mod(u * v3 * pow, P);            // (uv^3)(uv^7)^((p-5)/8)
    const check = modular.mod(v * candidate * candidate, P); // v * candidate^2
    const isFirstRoot = check === u;                              // candidate is already a root
    const isSecondRoot = check === modular.mod(-u, P);            // candidate * sqrt(-1) is a root
    const hasNoRoot = check === modular.mod(-u * ED25519_SQRT_M1, P); // u/v is not a square
    if (isSecondRoot || hasNoRoot) {
        // Switch to the second candidate; done even when no root exists,
        // mirroring the constant-time structure of the reference code.
        candidate = modular.mod(candidate * ED25519_SQRT_M1, P);
    }
    // Normalize to the non-negative (even) representative.
    if (modular.isNegativeLE(candidate, P)) {
        candidate = modular.mod(-candidate, P);
    }
    return { isValid: isFirstRoot || isSecondRoot, value: candidate };
}
/**
* Mapping from AES key size in bits (128/192/256) to key length in bytes.
*/
const KEY_BYTES = { 128: 16, 192: 24, 256: 32 };
/**
* Generic AES cipher in Counter (CTR) mode, using SHA3-256 to derive the key from a shared secret.
* See: https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a.pdf (Section 6.5) for details on CTR mode.
*/
class AesCtrCipher {
key;
keyBits;
/**
* Constructs an AES cipher instance using a shared secret.
* The key is derived using SHA3-256.
* @param sharedSecret - The shared secret to derive the AES key from.
* @param keyBits - The AES key size