@hotmeshio/hotmesh

Serverless Workflow

import { ILogger } from '../../../logger';
import { KeyType } from '../../../../modules/key';
import { StreamService } from '../../index';
import { KeyStoreParams, StringAnyType } from '../../../../types';
import { PostgresClientType } from '../../../../types/postgres';
import { PublishMessageConfig, StreamConfig, StreamMessage, StreamStats } from '../../../../types/stream';
import { ProviderClient, ProviderTransaction } from '../../../../types/provider';

declare class PostgresStreamService extends StreamService<PostgresClientType & ProviderClient, any> {
  namespace: string;
  appId: string;
  logger: ILogger;
  constructor(streamClient: PostgresClientType & ProviderClient, storeClient: ProviderClient, config?: StreamConfig);
  init(namespace: string, appId: string, logger: ILogger): Promise<void>;
  mintKey(type: KeyType, params: KeyStoreParams): string;
  transact(): ProviderTransaction;
  getTableName(): string;
  safeName(appId: string): string;
  createStream(streamName: string): Promise<boolean>;
  deleteStream(streamName: string): Promise<boolean>;
  createConsumerGroup(streamName: string, groupName: string): Promise<boolean>;
  deleteConsumerGroup(streamName: string, groupName: string): Promise<boolean>;
  /**
   * `publishMessages` can be roped into a transaction by the `store`
   * service. If so, it will add the SQL and params to the
   * transaction. [Process Overview]: The engine keeps a reference
   * to the `store` and `stream` providers; it asks the `store` to
   * create a transaction and then starts adding store commands to the
   * transaction. The engine then calls the router to publish a
   * message using the `stream` provider (which the router keeps
   * a reference to), and provides the transaction object.
   * The `stream` provider then calls this method to generate
   * the SQL and params for the transaction (but, of course, the SQL
   * is not executed until the engine calls the `exec` method on
   * the transaction object provided by `store`).
   *
   * NOTE: this strategy keeps `stream` and `store` operations separate but
   * allows calls to the stream to be roped into a single SQL transaction.
   */
  publishMessages(streamName: string, messages: string[], options?: PublishMessageConfig): Promise<string[] | ProviderTransaction>;
  _publishMessages(streamName: string, messages: string[]): {
    sql: string;
    params: any[];
  };
  consumeMessages(streamName: string, groupName: string, consumerName: string, options?: {
    batchSize?: number;
    blockTimeout?: number;
    autoAck?: boolean;
    reservationTimeout?: number;
    enableBackoff?: boolean;
    initialBackoff?: number;
    maxBackoff?: number;
    maxRetries?: number;
  }): Promise<StreamMessage[]>;
  ackAndDelete(streamName: string, groupName: string, messageIds: string[]): Promise<number>;
  acknowledgeMessages(streamName: string, groupName: string, messageIds: string[], options?: StringAnyType): Promise<number>;
  deleteMessages(streamName: string, groupName: string, messageIds: string[], options?: StringAnyType): Promise<number>;
  retryMessages(streamName: string, groupName: string, options?: {
    consumerName?: string;
    minIdleTime?: number;
    messageIds?: string[];
    delay?: number;
    maxRetries?: number;
    limit?: number;
  }): Promise<StreamMessage[]>;
  getStreamStats(streamName: string): Promise<StreamStats>;
  getStreamDepth(streamName: string): Promise<number>;
  getStreamDepths(streamNames: {
    stream: string;
  }[]): Promise<{
    stream: string;
    depth: number;
  }[]>;
  trimStream(streamName: string, options: {
    maxLen?: number;
    maxAge?: number;
    exactLimit?: boolean;
  }): Promise<number>;
  getProviderSpecificFeatures(): {
    supportsBatching: boolean;
    supportsDeadLetterQueue: boolean;
    supportsOrdering: boolean;
    supportsTrimming: boolean;
    supportsRetry: boolean;
    maxMessageSize: number;
    maxBatchSize: number;
  };
}

export { PostgresStreamService };
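
A minimal sketch of the transactional publish flow described in the doc comment on `publishMessages`. Only `transact()`, `publishMessages()`, and the `exec` call on the transaction come from the declaration and doc comment above; the import path and stream name are placeholders, and passing the transaction through the options object is an assumed shape (this declaration does not show the fields of `PublishMessageConfig`).

  import { PostgresStreamService } from '@hotmeshio/hotmesh'; // placeholder import path

  async function publishAtomically(
    stream: PostgresStreamService,
    messages: string[],
  ): Promise<void> {
    // Ask for a transaction object (backed by the `store` provider).
    const transaction = stream.transact();

    // Publishing with the transaction attached only *generates* the SQL and
    // params and adds them to the transaction; nothing executes yet.
    // NOTE: the `{ transaction }` option shape is assumed, hence the cast.
    await stream.publishMessages('my-stream', messages, { transaction } as any);

    // Per the doc comment, everything runs as a single SQL transaction
    // when `exec` is called on the transaction object provided by `store`.
    await transaction.exec();
  }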
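
And a consumer-side sketch using only signatures from this declaration. The stream, group, and consumer names are placeholders, the backoff units are assumed to be milliseconds, and `StreamMessage` is assumed to expose an `id` field (its actual shape lives in ../../../../types/stream).

  async function drainOnce(stream: PostgresStreamService): Promise<void> {
    // Reserve up to 10 messages for this consumer, with backoff between
    // empty polls (all options are optional per the signature above).
    const messages = await stream.consumeMessages('my-stream', 'my-group', 'worker-1', {
      batchSize: 10,
      enableBackoff: true,
      initialBackoff: 100, // assumed milliseconds
      maxBackoff: 5000,    // assumed milliseconds
    });

    if (messages.length > 0) {
      // Acknowledge and delete in one call; resolves to the affected count.
      const ids = messages.map((m) => (m as any).id as string); // `id` field assumed
      await stream.ackAndDelete('my-stream', 'my-group', ids);
    }
  }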