@upstash/qstash
Version:
Official Typescript client for QStash
1,711 lines (1,692 loc) • 67.4 kB
TypeScript
import { Ok, Err } from 'neverthrow';
/**
 * Necessary to verify the signature of a request.
 */
type ReceiverConfig = {
/**
 * The current signing key. Get it from `https://console.upstash.com/qstash`.
 *
 * If not provided, value will be inferred from environment variables based on QSTASH_REGION
 * and UPSTASH_REGION header.
 */
currentSigningKey?: string;
/**
 * The next signing key. Get it from `https://console.upstash.com/qstash`.
 *
 * If not provided, value will be inferred from environment variables based on QSTASH_REGION
 * and UPSTASH_REGION header.
 */
nextSigningKey?: string;
};
/**
 * Input for {@link Receiver.verify}.
 */
type VerifyRequest = {
/**
 * The signature from the `upstash-signature` header.
 */
signature: string;
/**
 * The raw request body.
 */
body: string;
/**
 * URL of the endpoint where the request was sent to.
 *
 * Omit to disable checking the url.
 */
url?: string;
/**
 * Number of seconds to tolerate when checking `nbf` and `exp` claims, to deal with small clock differences among different servers
 *
 * @default 0
 */
clockTolerance?: number;
/**
 * The region from the `upstash-region` header.
 *
 * Used to infer which signing keys to use for verification in multi-region setups.
 */
upstashRegion?: string;
};
/**
 * Error thrown when the signature of a request cannot be verified
 * (see {@link Receiver.verify}).
 */
declare class SignatureError extends Error {
constructor(message: string);
}
/**
 * Receiver offers a simple way to verify the signature of a request.
 */
declare class Receiver {
private readonly currentSigningKey?;
private readonly nextSigningKey?;
/**
 * @param config signing keys to verify with; when omitted they are
 * inferred from environment variables (see {@link ReceiverConfig}).
 */
constructor(config?: ReceiverConfig);
/**
 * Verify the signature of a request.
 *
 * Tries to verify the signature with the current signing key.
 * If that fails, maybe because you have rotated the keys recently, it will
 * try to verify the signature with the next signing key.
 *
 * If that fails, the signature is invalid and a `SignatureError` is thrown.
 */
verify(request: VerifyRequest): Promise<boolean>;
/**
 * Verify signature with a specific signing key
 */
private verifyWithKey;
private verifyBodyAndUrl;
}
/** Time units usable in a {@link Duration} string: seconds, minutes, hours, days. */
type Unit = "s" | "m" | "h" | "d";
/** A duration string such as `"10s"` or `"2d"` — an integer amount followed by a {@link Unit}. */
type Duration = `${bigint}${Unit}`;
/** Possible delivery states of a message. */
type State = "CREATED" | "ACTIVE" | "DELIVERED" | "ERROR" | "RETRY" | "FAILED" | "CANCELED" | "IN_PROGRESS";
/** HTTP methods supported for message delivery. */
type HTTPMethods = "GET" | "POST" | "PUT" | "DELETE" | "PATCH";
/**
 * A single delivery log entry.
 */
type Log = {
time: number;
state: State;
messageId: string;
nextDeliveryTime?: number;
error?: string;
url: string;
urlGroup?: string;
topicName?: string;
endpointName?: string;
header?: Record<string, string>;
body?: string;
label?: string;
};
/**
 * Deprecated. Use the `Log` type instead.
 *
 * @deprecated
 */
type Event = Log;
/**
 * Wire format of {@link Log}: the `urlGroup` field is transmitted as `topicName`.
 */
type LogPayload = Omit<Log, "urlGroup"> & {
topicName: string;
};
/**
 * Deprecated. Use the `LogPayload` type instead.
 *
 * @deprecated
 */
type EventPayload = LogPayload;
/** One page of logs, with an optional cursor for fetching the next page. */
type GetLogsPayload = {
cursor?: string;
events: LogPayload[];
};
/**
 * Deprecated. Use the `GetLogsPayload` type instead.
 *
 * @deprecated
 */
type GetEventsPayload = GetLogsPayload;
/** Adds an optional numeric pagination cursor to a payload type. */
type WithCursor<T> = T & {
cursor?: number;
};
/** Accepted request body types. */
type BodyInit = Blob | FormData | URLSearchParams | ReadableStream<Uint8Array> | string;
/** Accepted shapes for request headers. */
type HeadersInit = Headers | Record<string, string> | [string, string][] | IterableIterator<[string, string]>;
/** `fetch` options plus an optional backend selector. */
type RequestOptions = RequestInit & {
backend?: string;
};
/** Chat rate-limit fields (raw string values; presumably mirrored from response headers — confirm against the API). */
type ChatRateLimit = {
"limit-requests": string | null;
"limit-tokens": string | null;
"remaining-requests": string | null;
"remaining-tokens": string | null;
"reset-requests": string | null;
"reset-tokens": string | null;
};
/** Generic rate-limit fields (raw string values). */
type RateLimit = {
limit: string | null;
remaining: string | null;
reset: string | null;
};
/**
 * Flow control settings for a publish.
 *
 * At least one of `parallelism`, `ratePerSecond` or `rate` must be provided:
 * the union below has one branch per field in which that field is required.
 */
type FlowControl = {
/**
 * flow control key
 */
key: string;
} & ({
/**
 * number of requests which can be active with the same flow control key
 */
parallelism: number;
/**
 * number of requests to activate per second with the same flow control key
 *
 * @deprecated use rate instead
 */
ratePerSecond?: number;
/**
 * number of requests to activate within the period with the same flow control key.
 *
 * Default period is a second.
 */
rate?: number;
/**
 * The time interval for the `rate` limit.
 *
 * For example, if `rate` is 10 and `period` is "1s" (or 1), then 10 requests can be activated per second.
 * If `rate` is 5 and `period` is "1m" (or 60), then 5 requests can be activated per minute.
 *
 * Defaults to "1s" (one second) if not specified.
 *
 * Can be specified as a number (in seconds) or a duration string (e.g., "10s", "5m", "1h", "2d").
 */
period?: Duration | number;
} | {
/**
 * number of requests which can be active with the same flow control key
 */
parallelism?: number;
/**
 * number of requests to activate per second with the same flow control key
 *
 * @deprecated use rate instead
 */
ratePerSecond: number;
/**
 * number of requests to activate within the period with the same flow control key.
 * Default period is a second.
 */
rate?: number;
/**
 * The time interval for the `rate` limit.
 *
 * For example, if `rate` is 10 and `period` is "1s" (or 1), then 10 requests can be activated per second.
 * If `rate` is 5 and `period` is "1m" (or 60), then 5 requests can be activated per minute.
 *
 * Defaults to "1s" (one second) if not specified.
 *
 * Can be specified as a number (in seconds) or a duration string (e.g., "10s", "5m", "1h", "2d").
 */
period?: Duration | number;
} | {
/**
 * number of requests which can be active with the same flow control key
 */
parallelism?: number;
/**
 * number of requests to activate per second with the same flow control key
 *
 * @deprecated use rate instead
 */
ratePerSecond?: number;
/**
 * number of requests to activate within the period with the same flow control key.
 * Default period is a second.
 */
rate: number;
/**
 * The time interval for the `rate` limit.
 *
 * For example, if `rate` is 10 and `period` is "1s" (or 1), then 10 requests can be activated per second.
 * If `rate` is 5 and `period` is "1m" (or 60), then 5 requests can be activated per minute.
 *
 * Defaults to "1s" (one second) if not specified.
 *
 * Can be specified as a number (in seconds) or a duration string (e.g., "10s", "5m", "1h", "2d").
 */
period?: Duration | number;
});
/**
 * Resolved request details produced by a {@link BaseProvider}.
 */
type ProviderInfo = {
/**
 * full url used for request
 */
url: string;
/**
 * base url of the request
 */
baseUrl: string;
/**
 * route elements which will follow the baseUrl
 */
route: string[];
/**
 * headers to include in the request
 */
appendHeaders: Record<string, string>;
/**
 * provider owner
 */
owner: Owner;
/**
 * method to use in the request
 */
method: HTTPMethods;
};
/** The kinds of provider-backed APIs. */
type ApiKind = "llm" | "email";
/** Any provider owner, email or LLM. */
type Owner = EmailOwner | LLMOwner;
/** Selects an API kind and, optionally, the provider serving it. */
type PublishApi<TName extends ApiKind, TProvider extends BaseProvider<TName>> = {
name: TName;
provider?: TProvider;
};
/**
 * Email
 */
type EmailOwner = "resend";
type PublishEmailApi = Required<PublishApi<"email", BaseProvider<"email", EmailOwner>>>;
/**
 * LLM
 */
type LLMOwner = "upstash" | "openai" | "anthropic" | "custom";
/** Extra LLM publish options, e.g. analytics forwarding via Helicone. */
type LLMOptions = {
analytics?: {
name: "helicone";
token: string;
};
};
type PublishLLMApi = PublishApi<"llm", BaseProvider<"llm", LLMOwner>> & LLMOptions;
/**
 * Common base class for API providers (see {@link ApiKind}).
 *
 * Concrete providers supply the route, headers and a final
 * request-transforming hook ({@link BaseProvider.onFinish}).
 */
declare abstract class BaseProvider<TName extends ApiKind, TOwner = Owner> {
abstract readonly apiKind: TName;
abstract readonly method: HTTPMethods;
readonly baseUrl: string;
token: string;
readonly owner: TOwner;
constructor(baseUrl: string, token: string, owner: TOwner);
/**
 * called before returning the final request
 *
 * @param request
 */
abstract onFinish(request: ProviderInfo, options: unknown): ProviderInfo;
abstract getRoute(): string[];
abstract getHeaders(options: unknown): Record<string, string>;
getUrl(): string;
}
/** Provider implementation for LLM-backed publishes. */
declare class LLMProvider<TOwner extends LLMOwner> extends BaseProvider<"llm", LLMOwner> {
readonly apiKind = "llm";
readonly organization?: string;
readonly method = "POST";
constructor(baseUrl: string, token: string, owner: TOwner, organization?: string);
getRoute(): string[];
getHeaders(options: LLMOptions): Record<string, string>;
/**
 * Checks if callback exists and adds analytics in place if it's set.
 *
 * @param request
 * @param options
 */
onFinish(providerInfo: ProviderInfo, options: LLMOptions): ProviderInfo;
}
/**
 * @deprecated as of version 2.7.17. Will be removed in qstash-js 3.0.0.
 *
 * Please use an alternative LLM provider.
 *
 * openai: https://upstash.com/docs/qstash/integrations/llm
 * anthropic: https://upstash.com/docs/qstash/integrations/anthropic
 */
declare const upstash: () => LLMProvider<"upstash">;
/** Factory for an OpenAI-backed {@link LLMProvider}. */
declare const openai: ({ token, organization, }: {
token: string;
organization?: string;
}) => LLMProvider<"openai">;
/** Factory for an Anthropic-backed {@link LLMProvider}. */
declare const anthropic: ({ token }: {
token: string;
}) => LLMProvider<"anthropic">;
/** Factory for a custom OpenAI-compatible {@link LLMProvider} at `baseUrl`. */
declare const custom: ({ baseUrl, token, }: {
baseUrl: string;
token: string;
}) => LLMProvider<"custom">;
/**
 * OpenAI-style chat completion request/response types.
 */
type ChatCompletionMessage = {
role: "system" | "assistant" | "user";
content: string;
};
/** Models served by the (deprecated) Upstash-hosted LLM API. */
type ChatModel = "meta-llama/Meta-Llama-3-8B-Instruct" | "mistralai/Mistral-7B-Instruct-v0.2";
type ChatResponseFormat = {
type: "text" | "json_object";
};
type TopLogprob = {
token: string;
bytes: number[];
logprob: number;
};
type ChatCompletionTokenLogprob = {
token: string;
bytes: number[];
logprob: number;
top_logprobs: TopLogprob[];
};
type ChoiceLogprobs = {
content: ChatCompletionTokenLogprob[];
};
type Choice = {
finish_reason: "stop" | "length";
index: number;
logprobs: ChoiceLogprobs;
message: ChatCompletionMessage;
};
type CompletionUsage = {
completion_tokens: number;
prompt_tokens: number;
total_tokens: number;
};
/** A complete (non-streamed) chat completion response. */
type ChatCompletion = {
id: string;
choices: Choice[];
created: number;
model: string;
object: "chat.completion";
system_fingerprint: string;
usage: CompletionUsage;
};
type ChunkChoice = {
delta: ChatCompletionMessage;
finish_reason: "stop" | "length";
index: number;
logprobs: ChoiceLogprobs;
};
/** One chunk of a streamed chat completion response. */
type ChatCompletionChunk = {
id: string;
choices: ChunkChoice[];
created: number;
model: string;
object: "chat.completion.chunk";
system_fingerprint: string;
usage: CompletionUsage;
};
type StreamEnabled = {
stream: true;
};
type StreamDisabled = {
stream: false;
} | object;
/** Discriminates streamed vs. non-streamed requests (see {@link Chat.create}). */
type StreamParameter = StreamEnabled | StreamDisabled;
type OpenAIChatModel = "gpt-4-turbo" | "gpt-4-turbo-2024-04-09" | "gpt-4-0125-preview" | "gpt-4-turbo-preview" | "gpt-4-1106-preview" | "gpt-4-vision-preview" | "gpt-4" | "gpt-4-0314" | "gpt-4-0613" | "gpt-4-32k" | "gpt-4-32k-0314" | "gpt-4-32k-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-0301" | "gpt-3.5-turbo-0613" | "gpt-3.5-turbo-1106" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo-16k-0613";
/** Tuning fields shared by prompt- and message-based chat requests. */
type ChatRequestCommonFields = {
frequency_penalty?: number;
logit_bias?: Record<string, number>;
logprobs?: boolean;
top_logprobs?: number;
max_tokens?: number;
n?: number;
presence_penalty?: number;
response_format?: ChatResponseFormat;
seed?: number;
stop?: string | string[];
temperature?: number;
top_p?: number;
};
type PromptChatRequestFields = ChatRequestCommonFields & {
system: string;
user: string;
};
type ChatRequestFields = ChatRequestCommonFields & {
messages: ChatCompletionMessage[];
};
/** Pairs each provider with the model names it accepts. */
type ChatRequestProviders = {
provider: LLMProvider<"openai">;
model: OpenAIChatModel;
analytics?: {
name: "helicone";
token: string;
};
} | {
provider: LLMProvider<"custom">;
model: string;
analytics?: {
name: "helicone";
token: string;
};
} | {
provider: LLMProvider<"upstash">;
model: ChatModel;
analytics?: {
name: "helicone";
token: string;
};
};
type PromptChatRequest<TStream extends StreamParameter> = ChatRequestProviders & PromptChatRequestFields & TStream;
type ChatRequest<TStream extends StreamParameter> = ChatRequestProviders & ChatRequestFields & TStream;
/**
 * A request to the QStash HTTP API, as consumed by {@link Requester.request}.
 */
type UpstashRequest = {
/**
 * The path to the resource.
 */
path: string[];
/**
 * A BodyInit object or null to set request's body.
 */
body?: BodyInit | null;
/**
 * A Headers object, an object literal, or an array of two-item arrays to set
 * request's headers.
 */
headers?: HeadersInit;
/**
 * A boolean to set request's keepalive.
 */
keepalive?: boolean;
/**
 * A string to set request's method.
 */
method?: HTTPMethods;
query?: Record<string, string | number | boolean | undefined>;
/**
 * if enabled, call `res.json()`
 *
 * @default true
 */
parseResponseAsJson?: boolean;
/**
 * optionally overwrite the baseUrl of the http.
 *
 * default value of the http is base qstash url.
 */
baseUrl?: string;
};
/** The parsed response body, plus an `error` field when the API reports one. */
type UpstashResponse<TResult> = TResult & {
error?: string;
};
/** Minimal HTTP client interface used by all API classes in this module. */
type Requester = {
request: <TResult = unknown>(request: UpstashRequest) => Promise<UpstashResponse<TResult>>;
requestStream: (request: UpstashRequest) => AsyncIterable<ChatCompletionChunk>;
headers?: Headers;
telemetryHeaders?: Headers;
};
/** Retry behavior for failed HTTP requests; `false` disables retries. */
type RetryConfig = false | {
/**
 * The number of retries to attempt before giving up.
 *
 * @default 5
 */
retries?: number;
/**
 * A backoff function receives the current retry count and returns a number in milliseconds to wait before retrying.
 *
 * @default
 * ```ts
 * Math.exp(retryCount) * 50
 * ```
 */
backoff?: (retryCount: number) => number;
};
/**
 * A message as returned by the messages API.
 */
type Message = {
/**
 * A unique identifier for this message.
 */
messageId: string;
/**
 * The url group name if this message was sent to a urlGroup.
 */
urlGroup?: string;
/**
 * Deprecated. The topic name if this message was sent to a urlGroup. Use urlGroup instead
 */
topicName?: string;
/**
 * The url where this message is sent to.
 */
url: string;
/**
 * The endpoint name of the message if the endpoint is given a
 * name within the url group.
 */
endpointName?: string;
/**
 * The api name if this message was sent to an api
 */
api?: string;
/**
 * The http method used to deliver the message
 */
method?: HTTPMethods;
/**
 * The http headers sent along with the message to your API.
 */
header?: Record<string, string[]>;
/**
 * The http body sent to your API
 */
body?: string;
/**
 * The base64 encoded body if the body contains non-UTF-8 characters,
 * `None` otherwise.
 */
bodyBase64?: string;
/**
 * Maximum number of retries.
 */
maxRetries?: number;
/**
 * The retry delay expression for this message,
 * if retry_delay was set when publishing the message.
 */
retryDelayExpression?: PublishRequest["retryDelay"];
/**
 * A unix timestamp (milliseconds) after which this message may get delivered.
 */
notBefore?: number;
/**
 * A unix timestamp (milliseconds) when this message was created.
 */
createdAt: number;
/**
 * The callback url if configured.
 */
callback?: string;
/**
 * The failure callback url if configured.
 */
failureCallback?: string;
/**
 * The queue name if this message was sent to a queue.
 */
queueName?: string;
/**
 * The scheduleId of the message if the message is triggered by a schedule
 */
scheduleId?: string;
/**
 * IP address of the publisher of this message
 */
callerIp?: string;
/**
 * flow control key
 */
flowControlKey: string;
/**
 * number of requests which can be active with the same flow control key
 */
parallelism?: number;
/**
 * number of requests to activate per second with the same flow control key
 *
 * @deprecated use rate instead
 */
ratePerSecond?: number;
/**
 * number of requests to activate within the period with the same flow control key.
 * Default period is a second.
 */
rate?: number;
/**
 * The time interval during which the specified `rate` of requests can be activated
 * using the same flow control key.
 *
 * In seconds.
 */
period?: number;
/**
 * The label assigned to the message for filtering purposes.
 */
label?: string;
};
/**
 * Wire format of {@link Message}: the `urlGroup` field is transmitted as `topicName`.
 */
type MessagePayload = Omit<Message, "urlGroup"> & {
topicName: string;
};
/**
 * Client for the messages API.
 */
declare class Messages {
private readonly http;
constructor(http: Requester);
/**
 * Get a message
 */
get(messageId: string): Promise<Message>;
/**
 * Cancel a message
 */
delete(messageId: string): Promise<void>;
/**
 * Cancel multiple messages. Resolves with the number of messages deleted.
 */
deleteMany(messageIds: string[]): Promise<number>;
/**
 * Cancel all messages. Resolves with the number of messages deleted.
 */
deleteAll(): Promise<number>;
}
/**
 * A {@link Message} that ended up in the dead letter queue (DLQ),
 * enriched with details about the last failed delivery attempt.
 */
type DlqMessage = Message & {
/**
 * The unique id within the DLQ
 */
dlqId: string;
/**
 * The HTTP status code of the last failed delivery attempt
 */
responseStatus?: number;
/**
 * The response headers of the last failed delivery attempt
 */
responseHeader?: Record<string, string[]>;
/**
 * The response body of the last failed delivery attempt if it is
 * composed of UTF-8 characters only, `None` otherwise.
 */
responseBody?: string;
/**
 * The base64 encoded response body of the last failed delivery attempt
 * if the response body contains non-UTF-8 characters, `None` otherwise.
 */
responseBodyBase64?: string;
};
/**
 * Filters for {@link DLQ.listMessages}. All fields are optional and combined.
 */
type DLQFilter = {
/**
 * Filter DLQ entries by message id
 */
messageId?: string;
/**
 * Filter DLQ entries by url
 */
url?: string;
/**
 * Filter DLQ entries by url group name
 */
urlGroup?: string;
/**
 * Filter DLQ entries by api name
 */
api?: string;
/**
 * Filter DLQ entries by queue name
 */
queueName?: string;
/**
 * Filter DLQ entries by schedule id
 */
scheduleId?: string;
/**
 * Filter DLQ entries by starting time, in milliseconds
 */
fromDate?: number;
/**
 * Filter DLQ entries by ending time, in milliseconds
 */
toDate?: number;
/**
 * Filter DLQ entries by label
 */
label?: string;
/**
 * Filter DLQ entries by HTTP status of the response
 */
responseStatus?: number;
/**
 * Filter DLQ entries by IP address of the publisher of the message
 */
callerIp?: string;
};
/**
 * Client for the dead letter queue API.
 */
declare class DLQ {
private readonly http;
constructor(http: Requester);
/**
 * List messages in the dlq
 */
listMessages(options?: {
cursor?: string;
count?: number;
filter?: DLQFilter;
}): Promise<{
messages: DlqMessage[];
cursor?: string;
}>;
/**
 * Remove a message from the dlq using its `dlqId`
 */
delete(dlqMessageId: string): Promise<void>;
/**
 * Remove multiple messages from the dlq using their `dlqId`s
 */
deleteMany(request: {
dlqIds: string[];
}): Promise<{
deleted: number;
}>;
}
/**
 * Client for the chat completions API.
 */
declare class Chat {
private http;
private token;
constructor(http: Requester, token: string);
private static toChatRequest;
/**
 * Calls the Upstash completions api given a ChatRequest.
 *
 * Returns a ChatCompletion or a stream of ChatCompletionChunks
 * if stream is enabled.
 *
 * @param request ChatRequest with messages
 * @returns Chat completion or stream
 */
create: <TStream extends StreamParameter>(request: ChatRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
/**
 * Calls a third party completions api given a ChatRequest.
 *
 * Returns a ChatCompletion or a stream of ChatCompletionChunks
 * if stream is enabled.
 *
 * @param request ChatRequest with messages
 * @returns Chat completion or stream
 */
private createThirdParty;
private getAuthorizationToken;
/**
 * Calls the Upstash completions api given a PromptRequest.
 *
 * Returns a ChatCompletion or a stream of ChatCompletionChunks
 * if stream is enabled.
 *
 * @param request PromptRequest with system and user messages.
 * Note that system parameter shouldn't be passed in the case of
 * mistralai/Mistral-7B-Instruct-v0.2 model.
 * @returns Chat completion or stream
 */
prompt: <TStream extends StreamParameter>(request: PromptChatRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
}
/**
 * Queue details as returned by the API.
 */
type QueueResponse = {
createdAt: number;
updatedAt: number;
name: string;
parallelism: number;
lag: number;
paused: boolean;
};
/**
 * Options for {@link Queue.upsert}.
 */
type UpsertQueueRequest = {
/**
 * The number of parallel consumers consuming from the queue.
 *
 * @default 1
 */
parallelism?: number;
/**
 * Whether to pause the queue or not. A paused queue will not
 * deliver new messages until it is resumed.
 *
 * @default false
 */
paused?: boolean;
};
/**
 * Client for a single queue.
 */
declare class Queue {
private readonly http;
private readonly queueName;
constructor(http: Requester, queueName?: string);
/**
 * Create or update the queue
 */
upsert(request: UpsertQueueRequest): Promise<void>;
/**
 * Get the queue details
 */
get(): Promise<QueueResponse>;
/**
 * List queues
 */
list(): Promise<QueueResponse[]>;
/**
 * Delete the queue
 */
delete(): Promise<void>;
/**
 * Enqueue a message to a queue.
 */
enqueue<TRequest extends PublishRequest>(request: TRequest): Promise<PublishResponse<TRequest>>;
/**
 * Enqueue a message to a queue, serializing the body to JSON.
 */
enqueueJSON<TBody = unknown, TRequest extends PublishRequest<TBody> = PublishRequest<TBody>>(request: TRequest): Promise<PublishResponse<TRequest>>;
/**
 * Pauses the queue.
 *
 * A paused queue will not deliver messages until
 * it is resumed.
 */
pause(): Promise<void>;
/**
 * Resumes the queue.
 */
resume(): Promise<void>;
}
/**
 * A schedule as returned by the schedules API.
 */
type Schedule = {
scheduleId: string;
cron: string;
createdAt: number;
destination: string;
method: string;
header?: Record<string, string[]>;
body?: string;
bodyBase64?: string;
retries: number;
delay?: number;
callback?: string;
failureCallback?: string;
callerIp?: string;
isPaused: boolean;
queueName?: string;
flowControlKey?: string;
parallelism?: number;
rate?: number;
/**
 * @deprecated use rate instead
 */
ratePerSecond?: number;
/**
 * The time interval during which the specified `rate` of requests can be activated
 * using the same flow control key.
 *
 * In seconds.
 */
period?: number;
/**
 * The retry delay expression for this schedule,
 * if retry_delay was set when creating the schedule.
 */
retryDelayExpression?: PublishRequest["retryDelay"];
/**
 * The label assigned to the schedule for filtering purposes.
 */
label?: string;
/**
 * The timestamp of the last scheduled execution.
 */
lastScheduleTime?: number;
/**
 * The timestamp of the next scheduled execution.
 */
nextScheduleTime?: number;
/**
 * The states of the last scheduled messages.
 *
 * Maps message id to state
 */
lastScheduleStates?: Record<string, "IN_PROGRESS" | "SUCCESS" | "FAIL">;
/**
 * The IP address of the caller who created the schedule.
 *
 * NOTE(review): both `callerIp` (above) and `callerIP` are declared —
 * presumably the same value under two casings; confirm against the API.
 */
callerIP?: string;
};
/**
 * Options for {@link Schedules.create}.
 */
type CreateScheduleRequest = {
/**
 * Either a URL or urlGroup name
 */
destination: string;
/**
 * The message to send.
 *
 * This can be anything, but please set the `Content-Type` header accordingly.
 *
 * You can leave this empty if you want to send a message with no body.
 */
body?: BodyInit;
/**
 * Optionally send along headers with the message.
 * These headers will be sent to your destination.
 *
 * We highly recommend sending a `Content-Type` header along, as this will help your destination
 * server to understand the content of the message.
 */
headers?: HeadersInit;
/**
 * Optionally delay the delivery of this message.
 *
 * In seconds.
 *
 * @default undefined
 */
delay?: Duration | number;
/**
 * In case your destination server is unavailable or returns a status code outside of the 200-299
 * range, we will retry the request after a certain amount of time.
 *
 * Configure how many times you would like the delivery to be retried
 *
 * @default The maximum retry quota associated with your account.
 */
retries?: number;
/**
 * Use a callback url to forward the response of your destination server to your callback url.
 *
 * The callback url must be publicly accessible
 *
 * @default undefined
 */
callback?: string;
/**
 * Use a failure callback url to handle messages that could not be delivered.
 *
 * The failure callback url must be publicly accessible
 *
 * @default undefined
 */
failureCallback?: string;
/**
 * The method to use when sending a request to your API
 *
 * @default `POST`
 */
method?: HTTPMethods;
/**
 * Specify a cron expression to repeatedly send this message to the destination.
 */
cron: string;
/**
 * The HTTP timeout value to use while calling the destination URL.
 * When a timeout is specified, it will be used instead of the maximum timeout
 * value permitted by the QStash plan. It is useful in scenarios, where a message
 * should be delivered with a shorter timeout.
 *
 * In seconds.
 *
 * @default undefined
 */
timeout?: Duration | number;
/**
 * Schedule id to use.
 *
 * Can be used to update the settings of an existing schedule.
 *
 * @default undefined
 */
scheduleId?: string;
/**
 * Queue name to schedule the message over.
 */
queueName?: string;
/**
 * Settings for controlling the number of active requests
 * and number of requests per second with the same key.
 */
flowControl?: FlowControl;
/**
 * Assign a label to the schedule to filter logs later.
 *
 * @default undefined
 */
label?: string;
} & Pick<PublishRequest, "retryDelay">;
/**
 * Client for the schedules API.
 */
declare class Schedules {
private readonly http;
constructor(http: Requester);
/**
 * Create a schedule
 */
create(request: CreateScheduleRequest): Promise<{
scheduleId: string;
}>;
/**
 * Get a schedule
 */
get(scheduleId: string): Promise<Schedule>;
/**
 * List your schedules
 */
list(): Promise<Schedule[]>;
/**
 * Delete a schedule
 */
delete(scheduleId: string): Promise<void>;
/**
 * Pauses the schedule.
 *
 * A paused schedule will not deliver messages until
 * it is resumed.
 *
 * @param schedule the scheduleId of the schedule to pause
 */
pause({ schedule }: {
schedule: string;
}): Promise<void>;
/**
 * Resumes the schedule.
 *
 * @param schedule the scheduleId of the schedule to resume
 */
resume({ schedule }: {
schedule: string;
}): Promise<void>;
}
/**
 * A single endpoint inside a url group.
 */
type Endpoint = {
/**
 * The name of the endpoint (optional)
 */
name?: string;
/**
 * The url of the endpoint
 */
url: string;
};
/**
 * Options for {@link UrlGroups.addEndpoints}.
 */
type AddEndpointsRequest = {
/**
 * The name of the url group.
 * Must be unique and only contain alphanumeric, hyphen, underscore and periods.
 */
name: string;
endpoints: Endpoint[];
};
/**
 * Options for {@link UrlGroups.removeEndpoints}.
 * Each endpoint is identified by its name, its url, or both.
 */
type RemoveEndpointsRequest = {
/**
 * The name of the url group.
 * Must be unique and only contain alphanumeric, hyphen, underscore and periods.
 */
name: string;
endpoints: ({
name: string;
url?: string;
} | {
name?: string;
url: string;
})[];
};
/**
 * A url group with its subscribed endpoints.
 */
type UrlGroup = {
/**
 * A unix timestamp (milliseconds)
 */
createdAt: number;
/**
 * A unix timestamp (milliseconds)
 */
updatedAt: number;
/**
 * The name of this url group.
 */
name: string;
/**
 * A list of all subscribed endpoints
 */
endpoints: Endpoint[];
};
/**
 * Client for managing url groups.
 */
declare class UrlGroups {
private readonly http;
constructor(http: Requester);
/**
 * Create a new url group with the given name and endpoints
 */
addEndpoints(request: AddEndpointsRequest): Promise<void>;
/**
 * Remove endpoints from a url group.
 */
removeEndpoints(request: RemoveEndpointsRequest): Promise<void>;
/**
 * Get a list of all url groups.
 */
list(): Promise<UrlGroup[]>;
/**
 * Get a single url group
 */
get(name: string): Promise<UrlGroup>;
/**
 * Delete a url group
 */
delete(name: string): Promise<void>;
}
/**
 * Base class outlining steps. Basically, each step kind (run/sleep/sleepUntil)
 * should have two methods: getPlanStep & getResultStep.
 *
 * getPlanStep works the same way for all so it's implemented here.
 * The different step types will implement their own getResultStep method.
 *
 * NOTE: `StepType` and `Step` are declared elsewhere in this package.
 */
declare abstract class BaseLazyStep<TResult = unknown> {
readonly stepName: string;
abstract readonly stepType: StepType;
constructor(stepName: string);
/**
 * plan step to submit when step will run parallel with other
 * steps (parallel call state `first`)
 *
 * @param concurrent number of steps running parallel
 * @param targetStep target step id corresponding to this step
 * @returns
 */
abstract getPlanStep(concurrent: number, targetStep: number): Step<undefined>;
/**
 * result step to submit after the step executes. Used in single step executions
 * and when a plan step executes in parallel executions (parallel call state `partial`).
 *
 * @param concurrent
 * @param stepId
 */
abstract getResultStep(concurrent: number, stepId: number): Promise<Step<TResult>>;
}
/** Log levels recognized by {@link WorkflowLogger}, in declaration order. */
declare const LOG_LEVELS: readonly ["DEBUG", "INFO", "SUBMIT", "WARN", "ERROR"];
type LogLevel = (typeof LOG_LEVELS)[number];
/** A single structured log record emitted during a workflow run. */
type ChatLogEntry = {
timestamp: number;
workflowRunId: string;
logLevel: LogLevel;
eventType: "ENDPOINT_START" | "SUBMIT_THIRD_PARTY_RESULT" | "CREATE_CONTEXT" | "SUBMIT_FIRST_INVOCATION" | "RUN_SINGLE" | "RUN_PARALLEL" | "SUBMIT_STEP" | "SUBMIT_CLEANUP" | "RESPONSE_WORKFLOW" | "RESPONSE_DEFAULT" | "ERROR";
details: unknown;
};
type WorkflowLoggerOptions = {
logLevel: LogLevel;
logOutput: "console";
};
/**
 * Collects and writes workflow log entries (see {@link WorkflowLoggerOptions}).
 */
declare class WorkflowLogger {
private logs;
private options;
private workflowRunId?;
constructor(options: WorkflowLoggerOptions);
log(level: LogLevel, eventType: ChatLogEntry["eventType"], details?: unknown): Promise<void>;
setWorkflowRunId(workflowRunId: string): void;
private writeToConsole;
private shouldLog;
static getLogger(verbose?: boolean | WorkflowLogger): WorkflowLogger | undefined;
}
/**
 * Executes workflow steps, deciding between single and parallel execution
 * and submitting results to QStash.
 */
declare class AutoExecutor {
private context;
private promises;
private activeLazyStepList?;
private debug?;
private readonly nonPlanStepCount;
private readonly steps;
private indexInCurrentList;
stepCount: number;
planStepCount: number;
protected executingStep: string | false;
constructor(context: WorkflowContext, steps: Step[], debug?: WorkflowLogger);
/**
 * Adds the step function to the list of step functions to run in
 * parallel. After adding the function, defers the execution, so
 * that if there is another step function to be added, it's also
 * added.
 *
 * After all functions are added, list of functions are executed.
 * If there is a single function, it's executed by itself. If there
 * are multiple, they are run in parallel.
 *
 * If a function is already executing (this.executingStep), this
 * means that there is a nested step which is not allowed. In this
 * case, addStep throws QStashWorkflowError.
 *
 * @param stepInfo step plan to add
 * @returns result of the step function
 */
addStep<TResult>(stepInfo: BaseLazyStep<TResult>): Promise<TResult>;
/**
 * Wraps a step function to set this.executingStep to step name
 * before running and set this.executingStep to False after execution
 * ends.
 *
 * this.executingStep allows us to detect nested steps which are not
 * allowed.
 *
 * @param stepName name of the step being wrapped
 * @param stepFunction step function to wrap
 * @returns wrapped step function
 */
wrapStep<TResult = unknown>(stepName: string, stepFunction: StepFunction<TResult>): TResult | Promise<TResult>;
/**
 * Executes a step:
 * - If the step result is available in the steps, returns the result
 * - If the result is not available, runs the function
 * - Sends the result to QStash
 *
 * @param lazyStep lazy step to execute
 * @returns step result
 */
protected runSingle<TResult>(lazyStep: BaseLazyStep<TResult>): Promise<TResult>;
/**
 * Runs steps in parallel.
 *
 * @param stepName parallel step name
 * @param stepFunctions list of async functions to run in parallel
 * @returns results of the functions run in parallel
 */
protected runParallel<TResults extends unknown[]>(parallelSteps: {
[K in keyof TResults]: BaseLazyStep<TResults[K]>;
}): Promise<TResults>;
/**
 * Determines the parallel call state
 *
 * First filters the steps to get the steps which are after `initialStepCount` parameter.
 *
 * Depending on the remaining steps, decides the parallel state:
 * - "first": If there are no steps
 * - "last": If there are equal to or more than `2 * parallelStepCount`. We multiply by two
 *   because each step in a parallel execution will have 2 steps: a plan step and a result
 *   step.
 * - "partial": If the last step is a plan step
 * - "discard": If the last step is not a plan step. This means that the parallel execution
 *   is in progress (there are still steps to run) and one step has finished and submitted
 *   its result to QStash
 *
 * @param parallelStepCount number of steps to run in parallel
 * @param initialStepCount steps after the parallel invocation
 * @returns parallel call state
 */
protected getParallelCallState(parallelStepCount: number, initialStepCount: number): ParallelCallState;
/**
 * sends the steps to QStash as batch
 *
 * @param steps steps to send
 */
private submitStepsToQStash;
/**
 * Get the promise by executing the lazy steps list. If there is a single
 * step, we call `runSingle`. Otherwise `runParallel` is called.
 *
 * @param lazyStepList steps list to execute
 * @returns promise corresponding to the execution
 */
private getExecutionPromise;
/**
 * @param lazyStepList steps we executed
 * @param result result of the promise from `getExecutionPromise`
 * @param index index of the current step
 * @returns result[index] if lazyStepList > 1, otherwise result
 */
private static getResult;
private deferExecution;
}
/**
* Upstash Workflow context
*
* See the docs for fields and methods https://upstash.com/docs/qstash/workflows/basics/context
*/
declare class WorkflowContext<TInitialPayload = unknown> {
protected readonly executor: AutoExecutor;
protected readonly steps: Step[];
/**
* QStash client of the workflow
*
* Can be overwritten by passing `qstashClient` parameter in `serve`:
*
* ```ts
* import { Client } from "@upstash/qstash"
*
* export const POST = serve(
* async (context) => {
* ...
* },
* {
* qstashClient: new Client({...})
* }
* )
* ```
*/
readonly qstashClient: WorkflowClient;
/**
* Run id of the workflow
*/
readonly workflowRunId: string;
/**
* URL of the workflow
*
* Can be overwritten by passing a `url` parameter in `serve`:
*
* ```ts
* export const POST = serve(
* async (context) => {
* ...
* },
* {
* url: "new-url-value"
* }
* )
* ```
*/
readonly url: string;
/**
* URL to call in case of workflow failure with QStash failure callback
*
* https://upstash.com/docs/qstash/features/callbacks#what-is-a-failure-callback
*
* Can be overwritten by passing a `failureUrl` parameter in `serve`:
*
* ```ts
* export const POST = serve(
* async (context) => {
* ...
* },
* {
* failureUrl: "new-url-value"
* }
* )
* ```
*/
readonly failureUrl?: string;
/**
* Payload of the request which started the workflow.
*
* To specify its type, you can define `serve` as follows:
*
* ```ts
* // set requestPayload type to MyPayload:
* export const POST = serve<MyPayload>(
* async (context) => {
* ...
* }
* )
* ```
*
* By default, `serve` tries to apply `JSON.parse` to the request payload.
* If your payload is encoded in a format other than JSON, you can utilize
* the `initialPayloadParser` parameter:
*
* ```ts
* export const POST = serve<MyPayload>(
* async (context) => {
* ...
* },
* {
* initialPayloadParser: (initialPayload) => {return doSomething(initialPayload)}
* }
* )
* ```
*/
readonly requestPayload: TInitialPayload;
/**
* headers of the initial request
*/
readonly headers: Headers;
/**
* initial payload as a raw string
*/
readonly rawInitialPayload: string;
/**
* Map of environment variables and their values.
*
* Can be set using the `env` option of serve:
*
* ```ts
* export const POST = serve<MyPayload>(
* async (context) => {
* const key = context.env["API_KEY"];
* },
* {
* env: {
* "API_KEY": "*****";
* }
* }
* )
* ```
*
* Default value is set to `process.env`.
*/
readonly env: Record<string, string | undefined>;
/**
* Number of retries
*/
readonly retries: number;
constructor({ qstashClient, workflowRunId, headers, steps, url, failureUrl, debug, initialPayload, rawInitialPayload, env, retries, }: {
qstashClient: WorkflowClient;
workflowRunId: string;
headers: Headers;
steps: Step[];
url: string;
failureUrl?: string;
debug?: WorkflowLogger;
initialPayload: TInitialPayload;
rawInitialPayload?: string;
env?: Record<string, string | undefined>;
retries?: number;
});
/**
* Executes a workflow step
*
* ```typescript
* const result = await context.run("step 1", () => {
* return "result"
* })
* ```
*
* Can also be called in parallel and the steps will be executed
* simultaneously:
*
* ```typescript
* const [result1, result2] = await Promise.all([
* context.run("step 1", () => {
* return "result1"
* })
* context.run("step 2", async () => {
* return await fetchResults()
* })
* ])
* ```
*
* @param stepName name of the step
* @param stepFunction step function to be executed
* @returns result of the step function
*/
run<TResult>(stepName: string, stepFunction: StepFunction<TResult>): Promise<TResult>;
/**
* Stops the execution for the duration provided.
*
* @param stepName name of the step
* @param duration sleep duration in seconds
* @returns undefined
*/
sleep(stepName: string, duration: number): Promise<void>;
/**
* Stops the execution until the date time provided.
*
* @param stepName name of the step
* @param datetime time to sleep until. Can be provided as a number (in unix seconds),
* as a Date object or a string (passed to `new Date(datetimeString)`)
* @returns undefined
*/
sleepUntil(stepName: string, datetime: Date | string | number): Promise<void>;
/**
* Makes a third party call through QStash in order to make a
* network call without consuming any runtime.
*
* ```ts
* const postResult = await context.call<string>(
* "post call step",
* `https://www.some-endpoint.com/api`,
* "POST",
* "my-payload"
* );
* ```
*
* tries to parse the result of the request as JSON. If it's
* not a JSON which can be parsed, simply returns the response
* body as it is.
*
* @param stepName name of the step
* @param url url to call
* @param method call method
* @param body call body
* @param headers call headers
* @returns call result (parsed as JSON if possible)
*/
call<TResult = unknown, TBody = unknown>(stepName: string, url: string, method: HTTPMethods, body?: TBody, headers?: Record<string, string>): Promise<TResult>;
/**
* Adds steps to the executor. Needed so that it can be overwritten in
* DisabledWorkflowContext.
*/
protected addStep<TResult = unknown>(step: BaseLazyStep<TResult>): Promise<TResult>;
}
/**
* Workflow context which throws QStashWorkflowAbort before running the steps.
*
* Used for making a dry run before running any steps to check authentication.
*
* Consider an endpoint like this:
* ```ts
* export const POST = serve({
* routeFunction: context => {
* if (context.headers.get("authentication") !== "Bearer secretPassword") {
* console.error("Authentication failed.");
* return;
* }
*
* // ...
* }
* })
* ```
*
* the serve method will first call the routeFunction with a DisabledWorkflowContext.
* Here is the action we take in different cases
* - "step-found": we will run the workflow related sections of `serve`.
* - "run-ended": simply return success and end the workflow
* - error: returns 500.
*/
declare class DisabledWorkflowContext<TInitialPayload = unknown> extends WorkflowContext<TInitialPayload> {
private static readonly disabledMessage;
/**
* overwrite the WorkflowContext.addStep method to always raise QStashWorkflowAbort
* error in order to stop the execution whenever we encounter a step.
*
* @param _step step to (never) add; ignored apart from triggering the abort
*/
protected addStep<TResult = unknown>(_step: BaseLazyStep<TResult>): Promise<TResult>;
/**
* copies the passed context to create a DisabledWorkflowContext. Then, runs the
* route function with the new context.
*
* - returns "run-ended" if there are no steps found or
* if the auth failed and user called `return`
* - returns "step-found" if DisabledWorkflowContext.addStep is called.
* - if there is another error, returns the error.
*
* @param routeFunction user-provided workflow endpoint function to dry-run
* @param context workflow context to copy the fields from
*/
static tryAuthentication<TInitialPayload = unknown>(routeFunction: RouteFunction<TInitialPayload>, context: WorkflowContext<TInitialPayload>): Promise<Ok<"step-found" | "run-ended", never> | Err<never, Error>>;
}
/**
* Interface for Client with required methods
*
* Needed to resolve import issues
*/
type WorkflowClient = {
batchJSON: InstanceType<typeof Client>["batchJSON"];
publishJSON: InstanceType<typeof Client>["publishJSON"];
http: InstanceType<typeof Client>["http"];
};
/**
* Interface for Receiver with required methods
*
* Needed to resolve import issues
*/
type WorkflowReceiver = {
verify: InstanceType<typeof Receiver>["verify"];
};
/**
* All step kinds a workflow can record.
*/
declare const StepTypes: readonly ["Initial", "Run", "SleepFor", "SleepUntil", "Call"];
/**
* Union of the step kind literals ("Initial" | "Run" | "SleepFor" | "SleepUntil" | "Call").
*/
type StepType = (typeof StepTypes)[number];
/**
* Fields recorded on a step when `context.call` makes a third party call.
*/
type ThirdPartyCallFields<TBody = unknown> = {
/**
* Third party call URL. Set when context.call is used.
*/
callUrl: string;
/**
* Third party call method. Set when context.call is used.
*/
callMethod: HTTPMethods;
/**
* Third party call body. Set when context.call is used.
*/
callBody: TBody;
/**
* Third party call headers. Set when context.call is used.
*/
callHeaders: Record<string, string>;
};
/**
* A single recorded workflow step.
*
* The trailing intersection makes the `ThirdPartyCallFields` all-or-none:
* either every call field is present (context.call steps) or none of them
* may appear (all other step kinds).
*/
type Step<TResult = unknown, TBody = unknown> = {
/**
* index of the step
*/
stepId: number;
/**
* name of the step
*/
stepName: string;
/**
* type of the step (Initial/Run/SleepFor/SleepUntil/Call)
*/
stepType: StepType;
/**
* step result. Set if context.run or context.call are used.
*/
out?: TResult;
/**
* sleep duration in seconds. Set when context.sleep is used.
*/
sleepFor?: number;
/**
* unix timestamp (in seconds) to wait until. Set when context.sleepUntil is used.
*/
sleepUntil?: number;
/**
* number of steps running concurrently if the step is in a parallel run.
* Set to 1 if step is not parallel.
*/
concurrent: number;
/**
* target step of a plan step. In other words, the step to assign the
* result of a plan step.
*
* undefined if the step is not a plan step (of a parallel run). Otherwise,
* set to the target step.
*/
targetStep?: number;
} & (ThirdPartyCallFields<TBody> | {
[P in keyof ThirdPartyCallFields]?: never;
});
/**
* Raw step message before it is parsed into a `Step`.
* NOTE(review): field semantics inferred from names — confirm against the implementation.
*/
type RawStep = {
// QStash message id carrying this step
messageId: string;
// unparsed step payload
body: string;
// origin of the message: a regular step, or the outgoing/incoming leg of a context.call callback
callType: "step" | "toCallback" | "fromCallback";
};
/** Step function returning its result synchronously. */
type SyncStepFunction<TResult> = () => TResult;
/** Step function returning a promise of its result. */
type AsyncStepFunction<TResult> = () => Promise<TResult>;
/** Either a sync or an async step function, as accepted by `context.run`. */
type StepFunction<TResult> = AsyncStepFunction<TResult> | SyncStepFunction<TResult>;
/** State of a parallel execution, as decided by `getParallelCallState`. */
type ParallelCallState = "first" | "partial" | "discard" | "last";
/** User-provided workflow endpoint function receiving the workflow context. */
type RouteFunction<TInitialPayload> = (context: WorkflowContext<TInitialPayload>) => Promise<void>;
/** Reason a workflow request finished, passed to `onStepFinish`. */
type FinishCondition = "success" | "duplicate-step" | "fromCallback" | "auth-fail" | "failure-callback";
type WorkflowServeOptions<TResponse extends Response = Response, TInitialPayload = unknown> = {
/**
* QStash client
*/
qstashClient?: WorkflowClient;
/**
* Function called to return a response after each step execution
*
* @param workflowRunId id of the workflow run
* @param finishCondition reason the workflow request finished
* @returns response
*/
onStepFinish?: (workflowRunId: string, finishCondition: FinishCondition) => TResponse;
/**
* Function to parse the initial payload passed by the user
*/
initialPayloadParser?: (initialPayload: string) => TInitialPayload;
/**
* Url of the endpoint where the workflow is set up.
*
* If not set, url will be inferred from the request.
*/
url?: string;
/**
* Verbose mode
*
* Disabled if not set. If set to true, a logger is created automatically.
*
* Alternatively, a WorkflowLogger can be passed.
*/
verbose?: WorkflowLogger | true;
/**
* Receiver to verify *all* requests by checking if they come from QStash
*
* By default, a receiver is created from the env variables
* QSTASH_CURRENT_SIGNING_KEY and QSTASH_NEXT_SIGNING_KEY if they are set.
*/
receiver?: WorkflowReceiver;
/**
* Url to call if QStash retries are exhausted while executing the workflow
*/
failureUrl?: string;
/**
* Failure function called when QStash retries are exhausted while executing
* the workflow. Will overwrite `failureUrl` parameter with the workflow
* endpoint if passed.
*
* @param context workflow context at the moment of error
* @param failStatus error status
* @param failResponse error message
* @param failHeader headers of the failing response
* @returns void
*/
failureFunction?: (context: Omit<WorkflowContext, "run" | "sleepUntil" | "sleep" | "call">, failStatus: number, failResponse: string, failHeader: Record<string, string[]>) => Promise<void> | void;
/**
* Base Url of the workflow endpoint
*
* Can be used to set if there is a local tunnel or a proxy between
* QStash and the workflow endpoint.
*
* Will be set to the env variable UPSTASH_WORKFLOW_URL if not passed.
* If the env variable is not set, the url will be inferred as usual from
* the `request.url` or the `url` parameter in `serve` options.
*
* @default undefined
*/
baseUrl?: string;
/**
* Optionally, one can pass an env object mapping environment
* variables to their keys.
*
* Useful in cases like cloudflare with hono.
*/
env?: Record<string, string | undefined>;
/**
* Number of retries to use in workflow requests
*
* 3 by default
*/
retries?: number;
};
/**
* Payload passed as body in failureFunction
*/
type FailureFunctionPayload = {
/**