ruvector-attention-wasm
High-performance attention mechanisms for WebAssembly - Transformer, Hyperbolic, Flash, MoE, and Graph attention
72 lines (71 loc) • 5.18 kB
TypeScript
/* tslint:disable */
/* eslint-disable */
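// Raw WebAssembly exports generated by wasm-bindgen. Every parameter and return
// value is a number: a pointer or length into the module's linear memory, or a
// scalar passed by value.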
export const memory: WebAssembly.Memory;
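// Finalizers that release the Rust-side structs behind the exported wrapper classes.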
export const __wbg_wasmadam_free: (a: number, b: number) => void;
export const __wbg_wasmadamw_free: (a: number, b: number) => void;
export const __wbg_wasmflashattention_free: (a: number, b: number) => void;
export const __wbg_wasmhyperbolicattention_free: (a: number, b: number) => void;
export const __wbg_wasminfonceloss_free: (a: number, b: number) => void;
export const __wbg_wasmlinearattention_free: (a: number, b: number) => void;
export const __wbg_wasmmoeattention_free: (a: number, b: number) => void;
export const __wbg_wasmmultiheadattention_free: (a: number, b: number) => void;
export const __wbg_wasmsgd_free: (a: number, b: number) => void;
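// Free functions: vector math (norms, normalization, similarity, distances),
// softmax, scaled dot-product attention, and logging/version helpers.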
export const attention_weights: (a: number, b: number, c: number, d: number) => void;
export const available_mechanisms: () => number;
export const batch_normalize: (a: number, b: number, c: number) => void;
export const cosine_similarity: (a: number, b: number, c: number, d: number, e: number) => void;
export const l2_norm: (a: number, b: number) => number;
export const log: (a: number, b: number) => void;
export const log_error: (a: number, b: number) => void;
export const normalize: (a: number, b: number, c: number, d: number) => void;
export const pairwise_distances: (a: number, b: number) => void;
export const random_orthogonal_matrix: (a: number, b: number) => void;
export const scaled_dot_attention: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const softmax: (a: number, b: number, c: number) => void;
export const version: (a: number) => void;
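// Adam and AdamW optimizer methods: construction, stepping, reset, and
// learning-rate / weight-decay access.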
export const wasmadam_learning_rate: (a: number) => number;
export const wasmadam_new: (a: number, b: number) => number;
export const wasmadam_reset: (a: number) => void;
export const wasmadam_set_learning_rate: (a: number, b: number) => void;
export const wasmadam_step: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const wasmadamw_new: (a: number, b: number, c: number) => number;
export const wasmadamw_reset: (a: number) => void;
export const wasmadamw_step: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const wasmadamw_weight_decay: (a: number) => number;
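// Attention mechanism methods (flash, hyperbolic, linear, local/global, MoE,
// multi-head), plus the InfoNCE loss and the learning-rate scheduler.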
export const wasmflashattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const wasmflashattention_new: (a: number, b: number) => number;
export const wasmhyperbolicattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const wasmhyperbolicattention_curvature: (a: number) => number;
export const wasmhyperbolicattention_new: (a: number, b: number) => number;
export const wasminfonceloss_compute: (a: number, b: number, c: number, d: number, e: number, f: number, g: number) => void;
export const wasminfonceloss_new: (a: number) => number;
export const wasmlinearattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const wasmlinearattention_new: (a: number, b: number) => number;
export const wasmlocalglobalattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const wasmlocalglobalattention_new: (a: number, b: number, c: number) => number;
export const wasmlrscheduler_get_lr: (a: number) => number;
export const wasmlrscheduler_new: (a: number, b: number, c: number) => number;
export const wasmlrscheduler_reset: (a: number) => void;
export const wasmlrscheduler_step: (a: number) => void;
export const wasmmoeattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const wasmmoeattention_new: (a: number, b: number, c: number) => number;
export const wasmmultiheadattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const wasmmultiheadattention_dim: (a: number) => number;
export const wasmmultiheadattention_new: (a: number, b: number, c: number) => void;
export const wasmmultiheadattention_num_heads: (a: number) => number;
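// SGD optimizer methods.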
export const wasmsgd_learning_rate: (a: number) => number;
export const wasmsgd_new: (a: number, b: number, c: number) => number;
export const wasmsgd_reset: (a: number) => void;
export const wasmsgd_set_learning_rate: (a: number, b: number) => void;
export const wasmsgd_step: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
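// Module initialization, remaining accessors and finalizers, and internal
// wasm-bindgen plumbing (exported helpers, stack pointer, start function).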
export const init: () => void;
export const wasmadamw_set_learning_rate: (a: number, b: number) => void;
export const wasmadamw_learning_rate: (a: number) => number;
export const __wbg_wasmlocalglobalattention_free: (a: number, b: number) => void;
export const __wbg_wasmlrscheduler_free: (a: number, b: number) => void;
export const __wbindgen_export: (a: number, b: number) => number;
export const __wbindgen_export2: (a: number, b: number, c: number, d: number) => number;
export const __wbindgen_export3: (a: number) => void;
export const __wbindgen_export4: (a: number, b: number, c: number) => void;
export const __wbindgen_add_to_stack_pointer: (a: number) => number;
export const __wbindgen_start: () => void;
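The declarations above are the raw linear-memory exports emitted by wasm-bindgen; application code normally goes through the generated JavaScript wrapper rather than calling these directly. The following is a minimal usage sketch under that assumption: the import path, the default init function, and the WasmMultiHeadAttention class with compute/free methods are inferred from the raw export names and are not confirmed by this file.

// Hypothetical usage sketch. The high-level wrapper API shown here (default
// init, WasmMultiHeadAttention, compute, free) is an assumption inferred from
// the raw export names above; consult the package's main .d.ts for the actual
// generated bindings.
import init, { WasmMultiHeadAttention } from "ruvector-attention-wasm";

async function main(): Promise<void> {
  await init(); // instantiate and start the WebAssembly module

  // wasmmultiheadattention_new / _dim / _num_heads suggest a constructor taking
  // the model dimension and the number of heads (argument order assumed here).
  const attn = new WasmMultiHeadAttention(64, 8);

  // At the ABI level, wasmmultiheadattention_compute receives pointer/length
  // pairs into linear memory; the wrapper presumably accepts Float32Arrays.
  const query = new Float32Array(64);
  const keys = new Float32Array(64 * 16);
  const values = new Float32Array(64 * 16);
  const output: Float32Array = attn.compute(query, keys, values);
  console.log("output length:", output.length);

  // Each wrapper class has a matching __wbg_*_free export, so explicit
  // disposal is available when the object is no longer needed.
  attn.free();
}

main();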