UNPKG

@electric-sql/client

Version:

Postgres everywhere - your data, in sync, wherever you need it.

946 lines (934 loc) 36.4 kB
/**
 * Default types for SQL but can be extended with additional types when using a custom parser.
 * @typeParam Extensions - Additional value types.
 */
type Value<Extensions = never> = string | number | boolean | bigint | null | Extensions | Value<Extensions>[] | {
  [key: string]: Value<Extensions>;
};
/** A row of data: column name → parsed value. */
type Row<Extensions = never> = Record<string, Value<Extensions>>;
/** Extracts the `Extensions` type parameter back out of a `Row` type. */
type GetExtensions<T> = [T] extends [Row<never>] ? never : [T] extends [Row<infer E>] ? E : never;
/** Position in the shape log: `-1` (start), `now`, or an `lsn_opindex` pair. */
type Offset = `-1` | `now` | `${number}_${number}` | `${bigint}_${number}`;
/**
 * Information about transaction visibility for a snapshot.
 * All fields are encoded as strings, but should be treated as uint64.
 */
type PostgresSnapshot = {
  xmin: `${bigint}`;
  xmax: `${bigint}`;
  xip_list: `${bigint}`[];
};
/** Same as {@link PostgresSnapshot} but with the string-encoded fields parsed to `bigint`. */
type NormalizedPgSnapshot = {
  xmin: bigint;
  xmax: bigint;
  xip_list: bigint[];
};
// NOTE(review): `Exclude<string, ...>` collapses back to plain `string`, so this index
// signature does not actually forbid the `operation`/`control`/`event` keys at the type
// level (known TypeScript limitation) — it documents intent only.
interface Header {
  [key: Exclude<string, `operation` | `control` | `event`>]: Value;
}
type Operation = `insert` | `update` | `delete`;
/**
 * A tag is a string identifying a reason for this row to be part of the shape.
 *
 * Tags can be composite, but they are always sent as a single string. Compound tags
 * are separated by `|`. It's up to the client to split the tag into its components
 * in order to react to move-outs correctly. Tag parts are guaranteed to not contain an
 * unescaped `|` character (escaped as `\\|`) or be a literal `*`.
 *
 * Composite tag width is guaranteed to be fixed for a given shape.
 */
type MoveTag = string;
/**
 * A move-out pattern is a position and a value. The position is the index of the column
 * that is being moved out. The value is the value of the column that is being moved out.
 *
 * Tag width and value order is fixed for a given shape, so the client can determine
 * which tags match this pattern.
 */
type MoveOutPattern = {
  pos: number;
  value: string;
};
/**
 * Serialized expression types for structured subset queries.
 * These allow Electric to properly apply columnMapper transformations
 * before generating the final SQL.
 */
type SerializedExpression = {
  type: `ref`;
  column: string;
} | {
  type: `val`;
  paramIndex: number;
} | {
  type: `func`;
  name: string;
  args: SerializedExpression[];
};
/**
 * Serialized ORDER BY clause for structured subset queries.
 */
type SerializedOrderByClause = {
  column: string;
  direction?: `asc` | `desc`;
  nulls?: `first` | `last`;
};
type SubsetParams = {
  /** Legacy string format WHERE clause */
  where?: string;
  /** Positional parameter values for WHERE clause */
  params?: Record<string, string | bigint | number>;
  /** Maximum number of rows to return */
  limit?: number;
  /** Number of rows to skip */
  offset?: number;
  /** Legacy string format ORDER BY clause */
  orderBy?: string;
  /** Structured WHERE expression (preferred when available) */
  whereExpr?: SerializedExpression;
  /** Structured ORDER BY clauses (preferred when available) */
  orderByExpr?: SerializedOrderByClause[];
  /**
   * HTTP method to use for the request. Overrides `subsetMethod` from ShapeStreamOptions.
   * - `GET` (default): Sends subset params as query parameters. May fail with 414 errors
   *   for large queries.
   * - `POST`: Sends subset params in request body as JSON. Recommended to avoid URL
   *   length limits with large WHERE clauses or many parameters.
   *
   * In Electric 2.0, GET will be deprecated and only POST will be supported.
   */
  method?: `GET` | `POST`;
};
/** Non-data protocol messages: up-to-date / refetch markers and snapshot boundaries. */
type ControlMessage = {
  headers: (Header & {
    control: `up-to-date` | `must-refetch`;
    global_last_seen_lsn?: string;
  }) | (Header & {
    control: `snapshot-end`;
  } & PostgresSnapshot) | (Header & {
    control: `subset-end`;
  } & SubsetParams);
};
/** Event messages, currently only `move-out` notifications carrying matching patterns. */
type EventMessage = {
  headers: Header & {
    event: `move-out`;
    patterns: MoveOutPattern[];
  };
};
/** A single row change (insert/update/delete) in the shape log. */
type ChangeMessage<T extends Row<unknown> = Row> = {
  key: string;
  value: T;
  old_value?: Partial<T>;
  headers: Header & {
    operation: Operation;
    txids?: number[];
    /** Tags will always be present for changes if the shape has a subquery in its where clause, and are omitted otherwise. */
    tags?: MoveTag[];
    removed_tags?: MoveTag[];
  };
};
type Message<T extends Row<unknown> = Row> = ControlMessage | EventMessage | ChangeMessage<T>;
/**
 * Common properties for all columns.
 * `dims` is the number of dimensions of the column. Only provided if the column is an array.
 * `not_null` is true if the column has a `NOT NULL` constraint and is omitted otherwise.
 */
type CommonColumnProps = {
  dims?: number;
  not_null?: boolean;
};
type RegularColumn = {
  type: string;
} & CommonColumnProps;
type VarcharColumn = {
  type: `varchar`;
  max_length?: number;
} & CommonColumnProps;
type BpcharColumn = {
  type: `bpchar`;
  length?: number;
} & CommonColumnProps;
type TimeColumn = {
  type: `time` | `timetz` | `timestamp` | `timestamptz`;
  precision?: number;
} & CommonColumnProps;
type IntervalColumn = {
  type: `interval`;
  fields?: `YEAR` | `MONTH` | `DAY` | `HOUR` | `MINUTE` | `YEAR TO MONTH` | `DAY TO HOUR` | `DAY TO MINUTE` | `DAY TO SECOND` | `HOUR TO MINUTE` | `HOUR TO SECOND` | `MINUTE TO SECOND`;
} & CommonColumnProps;
type IntervalColumnWithPrecision = {
  type: `interval`;
  precision?: 0 | 1 | 2 | 3 | 4 | 5 | 6;
  fields?: `SECOND`;
} & CommonColumnProps;
type BitColumn = {
  type: `bit`;
  length: number;
} & CommonColumnProps;
type NumericColumn = {
  type: `numeric`;
  precision?: number;
  scale?: number;
} & CommonColumnProps;
type ColumnInfo = RegularColumn | VarcharColumn | BpcharColumn | TimeColumn | IntervalColumn | IntervalColumnWithPrecision | BitColumn | NumericColumn;
/** Table schema: column name → column type information. */
type Schema = {
  [key: string]: ColumnInfo;
};
type TypedMessages<T extends Row<unknown> = Row> = {
  messages: Array<Message<T>>;
  // NOTE(review): `schema` is typed as a single ColumnInfo although a map type
  // `Schema` (column name → ColumnInfo) exists above — looks like it may be
  // meant to be `Schema`; confirm against the package source before changing.
  schema: ColumnInfo;
};
type MaybePromise<T> = T | Promise<T>;
/**
 * Metadata that allows the consumer to know which changes have been incorporated into this snapshot.
 *
 * For any data that has a known transaction ID `xid` (and e.g. a key that's part of the snapshot):
 * - if `xid` < `xmin` - included, change can be skipped
 * - if `xid` < `xmax` AND `xid` not in `xip` - included, change can be skipped
 * - if `xid` < `xmax` AND `xid` in `xip` - parallel, not included, change must be processed
 * - if `xid` >= `xmax` - not included, change must be processed, and we can stop filtering after we see this
 */
type SnapshotMetadata = {
  /** Random number that's reflected in the `snapshot_mark` header on the snapshot items. */
  snapshot_mark: number;
  database_lsn: string;
} & PostgresSnapshot;
/** Raw, unparsed value for a single column as received over the wire. */
type Token = string;
type ParseFunction<Extensions = never> = (value: Token, additionalInfo?: Omit<ColumnInfo, `type` | `dims`>) => Value<Extensions>;
/**
 * @typeParam Extensions - Additional types that can be parsed by this parser beyond the standard SQL types.
 * Defaults to no additional types.
 */
type Parser<Extensions = never> = {
  [key: string]: ParseFunction<Extensions>;
};
type TransformFunction<Extensions = never> = (message: Row<Extensions>) => Row<Extensions>;
type DbColumnName = string;
type AppColumnName = string;
/**
 * A bidirectional column mapper that handles transforming column **names**
 * between database format (e.g., snake_case) and application format (e.g., camelCase).
 *
 * **Important**: ColumnMapper only transforms column names, not column values or types.
 * For type conversions (e.g., string → Date), use the `parser` option.
 * For value transformations (e.g., encryption), use the `transformer` option.
 *
 * @example
 * ```typescript
 * const mapper = snakeCamelMapper()
 * mapper.decode('user_id') // 'userId'
 * mapper.encode('userId') // 'user_id'
 * ```
 */
interface ColumnMapper {
  /**
   * Transform a column name from database format to application format.
   * Applied to column names in query results.
   */
  decode: (dbColumnName: DbColumnName) => AppColumnName;
  /**
   * Transform a column name from application format to database format.
   * Applied to column names in WHERE clauses and other query parameters.
   */
  encode: (appColumnName: AppColumnName) => DbColumnName;
}
/**
 * Converts a snake_case string to camelCase.
 *
 * Handles edge cases:
 * - Preserves leading underscores: `_user_id` → `_userId`
 * - Preserves trailing underscores: `user_id_` → `userId_`
 * - Collapses multiple underscores: `user__id` → `userId`
 * - Normalizes to lowercase first: `user_Column` → `userColumn`
 *
 * @example
 * snakeToCamel('user_id') // 'userId'
 * snakeToCamel('project_id') // 'projectId'
 * snakeToCamel('created_at') // 'createdAt'
 * snakeToCamel('_private') // '_private'
 * snakeToCamel('user__id') // 'userId'
 * snakeToCamel('user_id_') // 'userId_'
 */
declare function snakeToCamel(str: string): string;
/**
 * Converts a camelCase string to snake_case.
 *
 * Handles consecutive capitals (acronyms) properly:
 * - `userID` → `user_id`
 * - `userHTTPSURL` → `user_https_url`
 *
 * @example
 * camelToSnake('userId') // 'user_id'
 * camelToSnake('projectId') // 'project_id'
 * camelToSnake('createdAt') // 'created_at'
 * camelToSnake('userID') // 'user_id'
 * camelToSnake('parseHTMLString') // 'parse_html_string'
 */
declare function camelToSnake(str: string): string;
/**
 * Creates a column mapper from an explicit mapping of database columns to application columns.
 *
 * @param mapping - Object mapping database column names (keys) to application column names (values)
 * @returns A ColumnMapper that can encode and decode column names bidirectionally
 *
 * @example
 * const mapper = createColumnMapper({
 *   user_id: 'userId',
 *   project_id: 'projectId',
 *   created_at: 'createdAt'
 * })
 *
 * // Use with ShapeStream
 * const stream = new ShapeStream({
 *   url: 'http://localhost:3000/v1/shape',
 *   params: { table: 'todos' },
 *   columnMapper: mapper
 * })
 */
declare function createColumnMapper(mapping: Record<string, string>): ColumnMapper;
/**
 * Creates a column mapper that automatically converts between snake_case and camelCase.
 * This is the most common use case for column mapping.
 *
 * When a schema is provided, it will only map columns that exist in the schema.
 * Otherwise, it will map any column name it encounters.
 *
 * **⚠️ Limitations and Edge Cases:**
 * - **WHERE clause encoding**: Uses regex-based parsing which may not handle all complex
 *   SQL expressions. Test thoroughly with your queries, especially those with:
 *   - Complex nested expressions
 *   - Custom operators or functions
 *   - Column names that conflict with SQL keywords
 *   - Quoted identifiers (e.g., `"$price"`, `"user-id"`) - not supported
 *   - Column names with special characters (non-alphanumeric except underscore)
 * - **Acronym ambiguity**: `userID` → `user_id` → `userId` (ID becomes Id after roundtrip)
 *   Use `createColumnMapper()` with explicit mapping if you need exact control
 * - **Type conversion**: This only renames columns, not values. Use `parser` for type conversion
 *
 * **When to use explicit mapping instead:**
 * - You have column names that don't follow snake_case/camelCase patterns
 * - You need exact control over mappings (e.g., `id` → `identifier`)
 * - Your WHERE clauses are complex and automatic encoding fails
 * - You have quoted identifiers or column names with special characters
 *
 * @param schema - Optional database schema to constrain mapping to known columns
 * @returns A ColumnMapper for snake_case ↔ camelCase conversion
 *
 * @example
 * // Basic usage
 * const mapper = snakeCamelMapper()
 *
 * // With schema - only maps columns in schema (recommended)
 * const mapper = snakeCamelMapper(schema)
 *
 * // Use with ShapeStream
 * const stream = new ShapeStream({
 *   url: 'http://localhost:3000/v1/shape',
 *   params: { table: 'todos' },
 *   columnMapper: snakeCamelMapper()
 * })
 *
 * @example
 * // If automatic encoding fails, fall back to manual column names in WHERE clauses:
 * stream.requestSnapshot({
 *   where: "user_id = $1", // Use database column names directly if needed
 *   params: { "1": "123" }
 * })
 */
declare function snakeCamelMapper(schema?: Schema): ColumnMapper;
/** Error thrown for non-OK HTTP responses; carries status, body (text/json) and headers. */
declare class FetchError extends Error {
  url: string;
  status: number;
  text?: string;
  json?: object;
  headers: Record<string, string>;
  constructor(status: number, text: string | undefined, json: object | undefined, headers: Record<string, string>, url: string, message?: string);
  /** Builds a FetchError from a fetch Response, reading its body and headers. */
  static fromResponse(response: Response, url: string): Promise<FetchError>;
}
interface BackoffOptions {
  /**
   * Initial delay before retrying in milliseconds
   */
  initialDelay: number;
  /**
   * Maximum retry delay in milliseconds
   * After reaching this, delay stays constant (e.g., retry every 60s)
   */
  maxDelay: number;
  multiplier: number;
  onFailedAttempt?: () => void;
  debug?: boolean;
  /**
   * Maximum number of retry attempts before giving up.
   * Set to Infinity (default) for indefinite retries - needed for offline scenarios
   * where clients may go offline and come back later.
   */
  maxRetries?: number;
}
declare const BackoffDefaults: {
  initialDelay: number;
  maxDelay: number;
  multiplier: number;
  maxRetries: number;
};
// Reserved protocol query-parameter names; users may not set these via `params`.
declare const LIVE_CACHE_BUSTER_QUERY_PARAM = "cursor";
declare const SHAPE_HANDLE_QUERY_PARAM = "handle";
declare const LIVE_QUERY_PARAM = "live";
declare const OFFSET_QUERY_PARAM = "offset";
declare const CACHE_BUSTER_QUERY_PARAM = "cache-buster";
declare const ELECTRIC_PROTOCOL_QUERY_PARAMS: Array<string>;
type Replica = `full` | `default`;
type LogMode = `changes_only` | `full`;
/**
 * PostgreSQL-specific shape parameters that can be provided externally
 */
interface PostgresParams<T extends Row<unknown> = Row> {
  /** The root table for the shape. Not required if you set the table in your proxy. */
  table?: string;
  /**
   * The columns to include in the shape.
   * Must include primary keys, and can only include valid columns.
   * Defaults to all columns of the type `T`. If provided, must include primary keys, and can only include valid columns.
   */
  columns?: (keyof T)[];
  /** The where clauses for the shape */
  where?: string;
  /**
   * Positional where clause parameter values. These will be passed to the server
   * and will substitute `$i` parameters in the where clause.
   *
   * It can be an array (note that positional arguments start at 1, the array will be mapped
   * accordingly), or an object with keys matching the used positional parameters in the where clause.
   *
   * If where clause is `id = $1 or id = $2`, params must have keys `"1"` and `"2"`, or be an array with length 2.
   */
  params?: Record<`${number}`, string> | string[];
  /**
   * If `replica` is `default` (the default) then Electric will only send the
   * changed columns in an update.
   *
   * If it's `full` Electric will send the entire row with both changed and
   * unchanged values. `old_value` will also be present on update messages,
   * containing the previous value for changed columns.
   *
   * Setting `replica` to `full` will result in higher bandwidth
   * usage and so is not generally recommended.
   */
  replica?: Replica;
}
type SerializableParamValue = string | string[] | Record<string, string>;
type ParamValue = SerializableParamValue | (() => SerializableParamValue | Promise<SerializableParamValue>);
/**
 * External params type - what users provide.
 * Excludes reserved parameters to prevent dynamic variations that could cause stream shape changes.
 */
type ExternalParamsRecord<T extends Row<unknown> = Row> = {
  [K in string]: ParamValue | undefined;
} & Partial<PostgresParams<T>> & {
  [K in ReservedParamKeys]?: never;
};
type ReservedParamKeys = typeof LIVE_CACHE_BUSTER_QUERY_PARAM | typeof SHAPE_HANDLE_QUERY_PARAM | typeof LIVE_QUERY_PARAM | typeof OFFSET_QUERY_PARAM | typeof CACHE_BUSTER_QUERY_PARAM | `subset__${string}`;
/**
 * External headers type - what users provide.
 * Allows string or function values for any header.
 */
type ExternalHeadersRecord = {
  [key: string]: string | (() => string | Promise<string>);
};
/**
 * Helper function to resolve a function or value to its final value
 */
declare function resolveValue<T>(value: T | (() => T | Promise<T>)): Promise<T>;
type RetryOpts = {
  params?: ExternalParamsRecord;
  headers?: ExternalHeadersRecord;
};
type ShapeStreamErrorHandler = (error: Error) => void | RetryOpts | Promise<void | RetryOpts>;
/**
 * Options for constructing a ShapeStream.
 */
interface ShapeStreamOptions<T = never> {
  /**
   * The full URL to where the Shape is served. This can either be the Electric server
   * directly or a proxy. E.g. for a local Electric instance, you might set `http://localhost:3000/v1/shape`
   */
  url: string;
  /**
   * The "offset" on the shape log. This is typically not set as the ShapeStream
   * will handle this automatically. A common scenario where you might pass an offset
   * is if you're maintaining a local cache of the log. If you've gone offline
   * and are re-starting a ShapeStream to catch-up to the latest state of the Shape,
   * you'd pass in the last offset and shapeHandle you'd seen from the Electric server
   * so it knows at what point in the shape to catch you up from.
   */
  offset?: Offset;
  /**
   * Similar to `offset`, this isn't typically used unless you're maintaining
   * a cache of the shape log.
   */
  handle?: string;
  /**
   * HTTP headers to attach to requests made by the client.
   * Values can be strings or functions (sync or async) that return strings.
   * Function values are resolved in parallel when needed, making this useful
   * for authentication tokens or other dynamic headers.
   */
  headers?: ExternalHeadersRecord;
  /**
   * Additional request parameters to attach to the URL.
   * Values can be strings, string arrays, or functions (sync or async) that return these types.
   * Function values are resolved in parallel when needed, making this useful
   * for user-specific parameters or dynamic filters.
   *
   * These will be merged with Electric's standard parameters.
   * Note: You cannot use Electric's reserved parameter names
   * (offset, handle, live, cursor).
   *
   * PostgreSQL-specific options like table, where, columns, and replica
   * should be specified here.
   */
  params?: ExternalParamsRecord;
  /**
   * Automatically fetch updates to the Shape. If you just want to sync the current
   * shape and stop, pass false.
   */
  subscribe?: boolean;
  /**
   * @deprecated No longer experimental, use {@link liveSse} instead.
   */
  experimentalLiveSse?: boolean;
  /**
   * Use Server-Sent Events (SSE) for live updates.
   */
  liveSse?: boolean;
  /**
   * Initial data loading mode
   */
  log?: LogMode;
  signal?: AbortSignal;
  fetchClient?: typeof fetch;
  backoffOptions?: BackoffOptions;
  parser?: Parser<T>;
  /**
   * Function to transform rows after parsing (e.g., for encryption, type coercion).
   * Applied to data received from Electric.
   *
   * **Note**: If you're using `transformer` solely for column name transformation
   * (e.g., snake_case → camelCase), consider using `columnMapper` instead, which
   * provides bidirectional transformation and automatically encodes WHERE clauses.
   *
   * **Execution order** when both are provided:
   * 1. `columnMapper.decode` runs first (renames columns)
   * 2. `transformer` runs second (transforms values)
   *
   * @example
   * ```typescript
   * // For column renaming only - use columnMapper
   * import { snakeCamelMapper } from '@electric-sql/client'
   * const stream = new ShapeStream({ columnMapper: snakeCamelMapper() })
   * ```
   *
   * @example
   * ```typescript
   * // For value transformation (encryption, etc.) - use transformer
   * const stream = new ShapeStream({
   *   transformer: (row) => ({
   *     ...row,
   *     encrypted_field: decrypt(row.encrypted_field)
   *   })
   * })
   * ```
   *
   * @example
   * ```typescript
   * // Use both together
   * const stream = new ShapeStream({
   *   columnMapper: snakeCamelMapper(), // Runs first: renames columns
   *   transformer: (row) => ({          // Runs second: transforms values
   *     ...row,
   *     encryptedData: decrypt(row.encryptedData)
   *   })
   * })
   * ```
   */
  transformer?: TransformFunction<T>;
  /**
   * Bidirectional column name mapper for transforming between database column names
   * (e.g., snake_case) and application column names (e.g., camelCase).
   *
   * The mapper handles both:
   * - **Decoding**: Database → Application (applied to query results)
   * - **Encoding**: Application → Database (applied to WHERE clauses)
   *
   * @example
   * ```typescript
   * // Most common case: snake_case ↔ camelCase
   * import { snakeCamelMapper } from '@electric-sql/client'
   *
   * const stream = new ShapeStream({
   *   url: 'http://localhost:3000/v1/shape',
   *   params: { table: 'todos' },
   *   columnMapper: snakeCamelMapper()
   * })
   * ```
   *
   * @example
   * ```typescript
   * // Custom mapping
   * import { createColumnMapper } from '@electric-sql/client'
   *
   * const stream = new ShapeStream({
   *   columnMapper: createColumnMapper({
   *     user_id: 'userId',
   *     project_id: 'projectId',
   *     created_at: 'createdAt'
   *   })
   * })
   * ```
   */
  columnMapper?: ColumnMapper;
  /**
   * A function for handling shapestream errors.
   *
   * **Automatic retries**: The client automatically retries 5xx server errors, network
   * errors, and 429 rate limits with exponential backoff. The `onError` callback is
   * only invoked after these automatic retries are exhausted, or for non-retryable
   * errors like 4xx client errors.
   *
   * When not provided, non-retryable errors will be thrown and syncing will stop.
   *
   * **Return value behavior**:
   * - Return an **object** (RetryOpts or empty `{}`) to retry syncing:
   *   - `{}` - Retry with the same params and headers
   *   - `{ params }` - Retry with modified params
   *   - `{ headers }` - Retry with modified headers (e.g., refreshed auth token)
   *   - `{ params, headers }` - Retry with both modified
   * - Return **void** or **undefined** to stop the stream permanently
   *
   * **Important**: If you want syncing to continue after an error (e.g., to retry
   * on network failures), you MUST return at least an empty object `{}`. Simply
   * logging the error and returning nothing will stop syncing.
   *
   * Supports async functions that return `Promise<void | RetryOpts>`.
   *
   * @example
   * ```typescript
   * // Retry on network errors, stop on others
   * onError: (error) => {
   *   console.error('Stream error:', error)
   *   if (error instanceof FetchError && error.status >= 500) {
   *     return {} // Retry with same params
   *   }
   *   // Return void to stop on other errors
   * }
   * ```
   *
   * @example
   * ```typescript
   * // Refresh auth token on 401
   * onError: async (error) => {
   *   if (error instanceof FetchError && error.status === 401) {
   *     const newToken = await refreshAuthToken()
   *     return { headers: { Authorization: `Bearer ${newToken}` } }
   *   }
   *   return {} // Retry other errors
   * }
   * ```
   */
  onError?: ShapeStreamErrorHandler;
  /**
   * HTTP method to use for subset snapshot requests (`requestSnapshot`/`fetchSnapshot`).
   *
   * - `'GET'` (default): Sends subset params as URL query parameters. May fail with
   *   HTTP 414 errors for large queries with many parameters.
   * - `'POST'`: Sends subset params in request body as JSON. Recommended for queries
   *   with large parameter lists (e.g., `WHERE id = ANY($1)` with hundreds of IDs).
   *
   * This can be overridden per-request by passing `method` in the subset params.
   *
   * @example
   * ```typescript
   * const stream = new ShapeStream({
   *   url: 'http://localhost:3000/v1/shape',
   *   params: { table: 'items' },
   *   subsetMethod: 'POST', // Use POST for all subset requests
   * })
   * ```
   */
  subsetMethod?: `GET` | `POST`;
}
interface ShapeStreamInterface<T extends Row<unknown> = Row> {
  // NOTE(review): this callback's return type union (`MaybePromise<void> |
  // { columns?: (keyof T)[] }`) differs from the `ShapeStream` class's
  // `subscribe`, which returns plain `MaybePromise<void>` — confirm whether the
  // object-returning variant is intentional or a generation artifact.
  subscribe(callback: (messages: Message<T>[]) => MaybePromise<void> | {
    columns?: (keyof T)[];
  }, onError?: (error: FetchError | Error) => void): () => void;
  unsubscribeAll(): void;
  isLoading(): boolean;
  lastSyncedAt(): number | undefined;
  lastSynced(): number;
  isConnected(): boolean;
  hasStarted(): boolean;
  isUpToDate: boolean;
  lastOffset: Offset;
  shapeHandle?: string;
  error?: unknown;
  mode: LogMode;
  forceDisconnectAndRefresh(): Promise<void>;
  requestSnapshot(params: SubsetParams): Promise<{
    metadata: SnapshotMetadata;
    data: Array<Message<T>>;
  }>;
  fetchSnapshot(opts: SubsetParams): Promise<{
    metadata: SnapshotMetadata;
    data: Array<ChangeMessage<T>>;
  }>;
}
/**
 * Reads updates to a shape from Electric using HTTP requests and long polling or
 * Server-Sent Events (SSE).
 * Notifies subscribers when new messages come in. Doesn't maintain any history of the
 * log but does keep track of the offset position and is the best way
 * to consume the HTTP `GET /v1/shape` api.
 *
 * @constructor
 * @param {ShapeStreamOptions} options - configure the shape stream
 * @example
 * Register a callback function to subscribe to the messages.
 * ```
 * const stream = new ShapeStream(options)
 * stream.subscribe(messages => {
 *   // messages is 1 or more row updates
 * })
 * ```
 *
 * To use Server-Sent Events (SSE) for real-time updates:
 * ```
 * const stream = new ShapeStream({
 *   url: `http://localhost:3000/v1/shape`,
 *   liveSse: true
 * })
 * ```
 *
 * To abort the stream, abort the `signal`
 * passed in via the `ShapeStreamOptions`.
 * ```
 * const aborter = new AbortController()
 * const issueStream = new ShapeStream({
 *   url: `${BASE_URL}/${table}`,
 *   subscribe: true,
 *   signal: aborter.signal,
 * })
 * // Later...
 * aborter.abort()
 * ```
 */
declare class ShapeStream<T extends Row<unknown> = Row> implements ShapeStreamInterface<T> {
  #private;
  static readonly Replica: {
    FULL: Replica;
    DEFAULT: Replica;
  };
  readonly options: ShapeStreamOptions<GetExtensions<T>>;
  constructor(options: ShapeStreamOptions<GetExtensions<T>>);
  get shapeHandle(): string | undefined;
  get error(): unknown;
  get isUpToDate(): boolean;
  get lastOffset(): Offset;
  get mode(): LogMode;
  subscribe(callback: (messages: Message<T>[]) => MaybePromise<void>, onError?: (error: Error) => void): () => void;
  unsubscribeAll(): void;
  /** Unix time at which we last synced. Undefined until first successful up-to-date. */
  lastSyncedAt(): number | undefined;
  /** Time elapsed since last sync (in ms). Infinity if we did not yet sync. */
  lastSynced(): number;
  /** Indicates if we are connected to the Electric sync service. */
  isConnected(): boolean;
  /** True during initial fetch. False afterwards. */
  isLoading(): boolean;
  hasStarted(): boolean;
  isPaused(): boolean;
  /**
   * Refreshes the shape stream.
   * This preemptively aborts any ongoing long poll and reconnects without
   * long polling, ensuring that the stream receives an up to date message with the
   * latest LSN from Postgres at that point in time.
   */
  forceDisconnectAndRefresh(): Promise<void>;
  /**
   * Request a snapshot for subset of data and inject it into the subscribed data stream.
   *
   * Only available when mode is `changes_only`.
   * Returns the insertion point & the data, but more importantly injects the data
   * into the subscribed data stream. Returned value is unlikely to be useful for the caller,
   * unless the caller has complicated additional logic.
   *
   * Data will be injected in a way that's also tracking further incoming changes, and it'll
   * skip the ones that are already in the snapshot.
   *
   * @param opts - The options for the snapshot request.
   * @returns The metadata and the data for the snapshot.
   */
  requestSnapshot(opts: SubsetParams): Promise<{
    metadata: SnapshotMetadata;
    data: Array<ChangeMessage<T>>;
  }>;
  /**
   * Fetch a snapshot for subset of data.
   * Returns the metadata and the data, but does not inject it into the subscribed data stream.
   *
   * By default, uses GET to send subset parameters as query parameters. This may hit URL length
   * limits (HTTP 414) with large WHERE clauses or many parameters. Set `method: 'POST'` or use
   * `subsetMethod: 'POST'` on the stream to send parameters in the request body instead.
   *
   * @param opts - The options for the snapshot request.
   * @returns The metadata, data, and the response's offset/handle for state advancement.
   */
  fetchSnapshot(opts: SubsetParams): Promise<{
    metadata: SnapshotMetadata;
    data: Array<ChangeMessage<T>>;
    responseOffset: Offset | null;
    responseHandle: string | null;
  }>;
}
type ShapeData<T extends Row<unknown> = Row> = Map<string, T>;
type ShapeChangedCallback<T extends Row<unknown> = Row> = (data: {
  value: ShapeData<T>;
  rows: T[];
}) => void;
/**
 * A Shape is an object that subscribes to a shape log,
 * keeps a materialised shape `.rows` in memory and
 * notifies subscribers when the value has changed.
 *
 * It can be used without a framework and as a primitive
 * to simplify developing framework hooks.
 *
 * @constructor
 * @param {ShapeStream<T extends Row>} - the underlying shape stream
 * @example
 * ```
 * const shapeStream = new ShapeStream<{ foo: number }>({
 *   url: `http://localhost:3000/v1/shape`,
 *   params: {
 *     table: `foo`
 *   }
 * })
 * const shape = new Shape(shapeStream)
 * ```
 *
 * `rows` returns a promise that resolves the Shape data once the Shape has been
 * fully loaded (and when resuming from being offline):
 *
 *     const rows = await shape.rows
 *
 * `currentRows` returns the current data synchronously:
 *
 *     const rows = shape.currentRows
 *
 * Subscribe to updates. Called whenever the shape updates in Postgres.
 *
 *     shape.subscribe(({ rows }) => {
 *       console.log(rows)
 *     })
 */
declare class Shape<T extends Row<unknown> = Row> {
  #private;
  readonly stream: ShapeStreamInterface<T>;
  constructor(stream: ShapeStreamInterface<T>);
  get isUpToDate(): boolean;
  get lastOffset(): Offset;
  get handle(): string | undefined;
  get rows(): Promise<T[]>;
  get currentRows(): T[];
  get value(): Promise<ShapeData<T>>;
  get currentValue(): ShapeData<T>;
  get error(): false | FetchError;
  /** Unix time at which we last synced. Undefined when `isLoading` is true. */
  lastSyncedAt(): number | undefined;
  /** Time elapsed since last sync (in ms). Infinity if we did not yet sync. */
  lastSynced(): number;
  /** True during initial fetch. False afterwards. */
  isLoading(): boolean;
  /** Indicates if we are connected to the Electric sync service. */
  isConnected(): boolean;
  /** Current log mode of the underlying stream */
  get mode(): LogMode;
  /**
   * Request a snapshot for subset of data. Only available when mode is changes_only.
   * Returns void; data will be emitted via the stream and processed by this Shape.
   */
  requestSnapshot(params: Parameters<ShapeStreamInterface<T>[`requestSnapshot`]>[0]): Promise<void>;
  subscribe(callback: ShapeChangedCallback<T>): () => void;
  unsubscribeAll(): void;
  get numSubscribers(): number;
}
/**
 * Type guard for checking {@link Message} is {@link ChangeMessage}.
 *
 * See [TS docs](https://www.typescriptlang.org/docs/handbook/advanced-types.html#user-defined-type-guards)
 * for information on how to use type guards.
 *
 * @param message - the message to check
 * @returns true if the message is a {@link ChangeMessage}
 *
 * @example
 * ```ts
 * if (isChangeMessage(message)) {
 *   const msgChng: ChangeMessage = message // Ok
 *   const msgCtrl: ControlMessage = message // Err, type mismatch
 * }
 * ```
 */
declare function isChangeMessage<T extends Row<unknown> = Row>(message: Message<T>): message is ChangeMessage<T>;
/**
 * Type guard for checking {@link Message} is {@link ControlMessage}.
 *
 * See [TS docs](https://www.typescriptlang.org/docs/handbook/advanced-types.html#user-defined-type-guards)
 * for information on how to use type guards.
 *
 * @param message - the message to check
 * @returns true if the message is a {@link ControlMessage}
 *
 * @example
 * ```ts
 * if (isControlMessage(message)) {
 *   const msgChng: ChangeMessage = message // Err, type mismatch
 *   const msgCtrl: ControlMessage = message // Ok
 * }
 * ```
 */
declare function isControlMessage<T extends Row<unknown> = Row>(message: Message<T>): message is ControlMessage;
/**
 * Checks if a transaction is visible in a snapshot.
 *
 * @param txid - the transaction id to check
 * @param snapshot - the information about the snapshot
 * @returns true if the transaction is visible in the snapshot
 */
declare function isVisibleInSnapshot(txid: number | bigint | `${bigint}`, snapshot: PostgresSnapshot | NormalizedPgSnapshot): boolean;
/**
 * Compiles a serialized expression into a SQL string.
 * Applies columnMapper transformations to column references.
 *
 * @param expr - The serialized expression to compile
 * @param columnMapper - Optional function to transform column names (e.g., camelCase to snake_case)
 * @returns The compiled SQL string
 *
 * @example
 * ```typescript
 * const expr = { type: 'ref', column: 'userId' }
 * compileExpression(expr, camelToSnake) // '"user_id"'
 * ```
 */
declare function compileExpression(expr: SerializedExpression, columnMapper?: (col: string) => string): string;
/**
 * Compiles serialized ORDER BY clauses into a SQL string.
 * Applies columnMapper transformations to column references.
 *
 * @param clauses - The serialized ORDER BY clauses to compile
 * @param columnMapper - Optional function to transform column names
 * @returns The compiled SQL ORDER BY string
 *
 * @example
 * ```typescript
 * const clauses = [{ column: 'createdAt', direction: 'desc', nulls: 'first' }]
 * compileOrderBy(clauses, camelToSnake) // '"created_at" DESC NULLS FIRST'
 * ```
 */
declare function compileOrderBy(clauses: SerializedOrderByClause[], columnMapper?: (col: string) => string): string;
// NOTE(review): `Parser`, `TransformFunction`, `RetryOpts` and `ShapeStreamErrorHandler`
// appear in the public `ShapeStreamOptions` surface but are not in this export list —
// confirm whether they are exported from another entry point.
export { BackoffDefaults, type BackoffOptions, type BitColumn, type BpcharColumn, type ChangeMessage, type ColumnInfo, type ColumnMapper, type CommonColumnProps, type ControlMessage, ELECTRIC_PROTOCOL_QUERY_PARAMS, type EventMessage, type ExternalHeadersRecord, type ExternalParamsRecord, FetchError, type GetExtensions, type IntervalColumn, type IntervalColumnWithPrecision, type LogMode, type MaybePromise, type Message, type MoveOutPattern, type MoveTag, type NormalizedPgSnapshot, type NumericColumn, type Offset, type Operation, type PostgresParams, type PostgresSnapshot, type RegularColumn, type Row, type Schema, type SerializedExpression, type SerializedOrderByClause, Shape, type ShapeChangedCallback, type ShapeData, ShapeStream, type ShapeStreamInterface, type ShapeStreamOptions, type SnapshotMetadata, type SubsetParams, type TimeColumn, type TypedMessages, type Value, type VarcharColumn, camelToSnake, compileExpression, compileOrderBy, createColumnMapper, isChangeMessage, isControlMessage, isVisibleInSnapshot, resolveValue, snakeCamelMapper, snakeToCamel };