@upstash/workflow
Durable, Reliable and Performant Serverless Functions
import { R as RouteFunction, W as WorkflowServeOptions, E as ExclusiveValidationOptions, T as Telemetry, S as StepType, a as RawStep, N as NotifyResponse, b as Waiter } from './types-Q3dM0UlR.js';
export { A as AsyncStepFunction, C as CallResponse, w as CallSettings, D as DetailedFinishCondition, t as Duration, o as FailureFunctionPayload, F as FinishCondition, H as HeaderParams, y as InvokableWorkflow, x as InvokeStepResponse, I as InvokeWorkflowRequest, L as LazyInvokeStepParams, z as LogLevel, s as NotifyStepResponse, P as ParallelCallState, n as PublicServeOptions, p as RequiredExceptFields, k as Step, m as StepFunction, j as StepTypes, v as StringifyBody, l as SyncStepFunction, u as WaitEventOptions, q as WaitRequest, r as WaitStepResponse, d as WorkflowAbort, h as WorkflowClient, g as WorkflowContext, c as WorkflowError, G as WorkflowLogger, B as WorkflowLoggerOptions, e as WorkflowNonRetryableError, i as WorkflowReceiver, f as WorkflowTool } from './types-Q3dM0UlR.js';
import { FlowControl, PublishRequest, HTTPMethods, State, Client as Client$1 } from '@upstash/qstash';
import 'zod';
import 'ai';
import '@ai-sdk/openai';
/**
* Creates an async method that handles incoming requests and runs the provided
* route function as a workflow.
*
* Not exported from the package. Instead, it is used in framework-specific serve implementations.
*
* The only difference from the regular `serve` is the `useJSONContent` parameter.
*
* @param routeFunction - A function that uses WorkflowContext as a parameter and runs a workflow.
* @param options - Options including the client, onFinish callback, and initialPayloadParser.
* @returns An async method that consumes incoming requests and runs the workflow.
*/
declare const serveBase: <TInitialPayload = unknown, TRequest extends Request = Request, TResponse extends Response = Response, TResult = unknown>(routeFunction: RouteFunction<TInitialPayload, TResult>, telemetry?: Telemetry, options?: WorkflowServeOptions<TResponse, TInitialPayload>) => {
handler: (request: TRequest) => Promise<TResponse>;
};
/**
* Creates an async method that handles incoming requests and runs the provided
* route function as a workflow.
*
* @param routeFunction - A function that uses WorkflowContext as a parameter and runs a workflow.
* @param options - Options including the client, onFinish callback, and initialPayloadParser.
* @returns An async method that consumes incoming requests and runs the workflow.
*/
declare const serve: <TInitialPayload = unknown, TRequest extends Request = Request, TResponse extends Response = Response, TResult = unknown>(routeFunction: RouteFunction<TInitialPayload, TResult>, options?: Omit<WorkflowServeOptions<TResponse, TInitialPayload>, "useJSONContent" | "schema" | "initialPayloadParser"> & ExclusiveValidationOptions<TInitialPayload>) => ReturnType<typeof serveBase<TInitialPayload, TRequest, TResponse, TResult>>;
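/*
 * A minimal usage sketch for `serve` (the payload shape, step names and the
 * exported POST handler wiring are illustrative assumptions, not part of this
 * declaration file):
 *
 * ```ts
 * import { serve } from "@upstash/workflow";
 *
 * type OrderPayload = { orderId: string }; // assumed payload shape
 *
 * const { handler } = serve<OrderPayload>(async (context) => {
 *   // each context.run step is persisted, so completed steps are not re-executed on retries
 *   const order = await context.run("fetch-order", async () => {
 *     return { id: context.requestPayload.orderId, status: "pending" };
 *   });
 *
 *   await context.run("confirm-order", async () => {
 *     console.log("confirming order", order.id);
 *   });
 * });
 *
 * // `handler` consumes a standard Request and returns a Response
 * export const POST = (request: Request) => handler(request);
 * ```
 */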
type BaseStepLog = {
/**
* id of the step
*/
stepId?: number;
/**
* name of the step
*/
stepName: string;
/**
* type of the step (example: "call", "wait", "invoke")
*/
stepType: StepType;
/**
* call type of the step.
*
* in most cases it's `step`. For context.call, it will become `toCallback` and `fromCallback`
* as the step executes.
*/
callType: RawStep["callType"];
/**
* message id of the step
*/
messageId: string;
/**
* result of the step
*
* will be undefined for an unfinished parallel step.
*/
out: unknown;
/**
* number of retries for the step
*/
retries: number;
/**
* retry delay parameter for the step if it was set
*/
retryDelay?: string;
/**
* number of parallel steps
*
* if the step is sequential (non-parallel), will be 1.
*/
concurrent: number;
/**
* state of the step
*/
state: "STEP_PROGRESS" | "STEP_SUCCESS" | "STEP_RETRY" | "STEP_FAILED";
/**
* time when the step was created
*/
createdAt: number;
/**
* headers
*/
headers: Record<string, string[]>;
};
type CallUrlGroup = {
/**
* URL called in context.call
*/
callUrl: string;
/**
* Method used in context.call
*/
callMethod: HTTPMethods;
/**
* headers sent in context.call
*/
callHeaders: Record<string, string[]>;
/**
* Body sent in context.call
*/
callBody: unknown;
};
type CallResponseStatusGroup = {
/**
* Status code of the context.call response
*/
callResponseStatus: number;
/**
* Response body of the context.call response
*/
callResponseBody: unknown;
/**
* Headers received from the context.call response
*/
callResponseHeaders: Record<string, string[]>;
} & CallUrlGroup;
type InvokedWorkflowGroup = {
/**
* id of the workflow run invoked in context.invoke
*/
invokedWorkflowRunId: string;
/**
* URL of the workflow invoked in context.invoke
*/
invokedWorkflowUrl: string;
/**
* Time when the workflow was invoked
*/
invokedWorkflowCreatedAt: number;
/**
* Body sent in context.invoke
*/
invokedWorkflowRunBody: unknown;
/**
* Headers sent in context.invoke
*/
invokedWorkflowRunHeaders: Record<string, string[]>;
};
type WaitEventGroup = {
/**
 * id of the event waited for in context.waitForEvent
 */
waitEventId: string;
/**
 * Duration after which the wait step times out if the event is not received
 */
waitTimeoutDuration: string;
/**
 * Time when the wait step will time out if the event is not received
 */
waitTimeoutDeadline: number;
/**
 * Whether the wait step ended due to a timeout (the event was not received in time)
 */
waitTimeout: boolean;
};
type AsOptional<TType> = TType | {
[P in keyof TType]?: never;
};
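/*
 * `AsOptional` makes a group of fields all-or-nothing: either the full group is
 * present, or none of its fields may be set. A minimal sketch with a
 * hypothetical group type:
 *
 * ```ts
 * type Group = { a: string; b: number };
 *
 * const full: AsOptional<Group> = { a: "x", b: 1 }; // complete group: ok
 * const none: AsOptional<Group> = {};               // group omitted: ok
 * // const partial: AsOptional<Group> = { a: "x" }; // type error: partial group
 * ```
 *
 * On `StepLog` below, each group of extra fields (call, call response, invoke,
 * sleep or wait) is therefore either fully present or fully absent on top of
 * the base fields.
 */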
type StepLog = BaseStepLog & AsOptional<CallUrlGroup> & AsOptional<CallResponseStatusGroup> & AsOptional<InvokedWorkflowGroup> & AsOptional<{
sleepFor: number;
}> & AsOptional<{
sleepUntil: number;
}> & AsOptional<WaitEventGroup>;
type StepError = {
/**
* error message associated with the request
*
* example:
* ```
* detected a non-workflow destination for trigger/invoke.
* make sure you are sending the request to the correct endpoint
* ```
*/
error: string;
/**
* response body returned in the request which resulted in an error
*/
body: string;
headers: Record<string, string[]>;
status: number;
time: number;
};
type StepLogGroup = {
/**
* Log which belongs to a single step
*/
steps: [StepLog];
/**
* Log which belongs to a single step
*/
type: "sequential";
} | {
/**
* Log which belongs to parallel steps
*/
steps: StepLog[];
/**
* Log which belongs to parallel steps
*/
type: "parallel";
} | {
/**
* Log which belongs to the next step
*/
steps: {
messageId: string;
state: "STEP_PROGRESS" | "STEP_RETRY" | "STEP_FAILED" | "STEP_CANCELED";
/**
* retries
*/
retries: number;
/**
* retry delay parameter for the step if it was set
*/
retryDelay?: string;
/**
* errors which occurred in the step
*/
errors?: StepError[];
}[];
/**
* Log which belongs to the next step
*/
type: "next";
};
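/*
 * A sketch for consuming `StepLogGroup` entries by narrowing on the `type`
 * discriminator (assumes a `WorkflowRunLog` named `run`, e.g. obtained from
 * `client.logs()` below):
 *
 * ```ts
 * for (const group of run.steps) {
 *   switch (group.type) {
 *     case "sequential":
 *       // a single step
 *       console.log(group.steps[0].stepName, group.steps[0].state);
 *       break;
 *     case "parallel":
 *       // several steps executed in parallel
 *       console.log(group.steps.map((step) => step.stepName));
 *       break;
 *     case "next":
 *       // upcoming or in-progress step(s), possibly with errors
 *       console.log(group.steps[0]?.state, group.steps[0]?.errors);
 *       break;
 *   }
 * }
 * ```
 */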
type FailureFunctionLog = {
/**
* messageId of the message published for handling the failure
*/
messageId: string;
/**
* URL of the function that handles the failure
*/
url: string;
/**
* State of the message published for failure
*/
state: State;
/**
* Headers received from the step which caused the workflow to fail
*/
failHeaders: Record<string, string[]>;
/**
* Status code of the step which caused the workflow to fail
*/
failStatus: number;
/**
* Response body of the step which caused the workflow to fail
*/
failResponse: string;
/**
* @deprecated use dlqId field of the workflow run itself
*/
dlqId: string;
/**
* Errors received while running failure function
*/
errors?: StepError[];
/**
* String body returned from the failure function
*/
responseBody?: string;
/**
* Headers received from the failure function
*/
responseHeaders?: Record<string, string[]>;
/**
* Status code of the response from the failure function
*/
responseStatus?: number;
};
type WorkflowRunLog = {
/**
* Unique identifier for the workflow run
*/
workflowRunId: string;
/**
* URL of the workflow that was run
*/
workflowUrl: string;
/**
* State of the workflow run
*
* - RUN_STARTED: Workflow run has started and is in progress
* - RUN_SUCCESS: Workflow run has completed successfully
* - RUN_FAILED: Workflow run has failed
* - RUN_CANCELED: Workflow run has been canceled
*/
workflowState: "RUN_STARTED" | "RUN_SUCCESS" | "RUN_FAILED" | "RUN_CANCELED";
/**
* Time when the workflow run was created
*
* in unix milliseconds format
*/
workflowRunCreatedAt: number;
/**
* Time when the workflow run was completed
*
* in unix milliseconds format
*/
workflowRunCompletedAt?: number;
/**
* Message published when the workflow fails if failureUrl or failureFunction
* are set.
*/
failureFunction?: FailureFunctionLog;
/**
* Logs of the steps executed in the workflow run, grouped as sequential,
* parallel or next-step entries.
*/
steps: StepLogGroup[];
/**
* If the workflow returned a response, the stringified state of this
* response will be available in the workflowRunResponse field.
*
* To restore it to its original format, use JSON.parse.
*/
workflowRunResponse?: string;
/**
* Information on the invoker workflow run, if any
*/
invoker?: {
/**
* run id of the invoker workflow
*/
workflowRunId: string;
/**
* URL of the invoker workflow
*/
workflowUrl: string;
/**
* Time when the invoker workflow run was created
*
* in unix milliseconds format
*/
workflowRunCreatedAt: number;
};
/**
* If the workflow run has failed, id of the run in DLQ
*/
dlqId?: string;
/**
* Label of the workflow run
*/
label?: string;
};
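/*
 * A sketch for reading a run's return value from the logs (assumes a `Client`
 * instance named `client`, see below, and an illustrative run id). Since
 * `workflowRunResponse` is a stringified value, restore it with JSON.parse:
 *
 * ```ts
 * const { runs } = await client.logs({ workflowRunId: "wfr_my-workflow" });
 * const run = runs[0];
 *
 * if (run.workflowState === "RUN_SUCCESS" && run.workflowRunResponse) {
 *   const result = JSON.parse(run.workflowRunResponse);
 *   console.log("workflow returned:", result);
 * }
 * ```
 */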
type WorkflowRunLogs = {
cursor: string;
runs: WorkflowRunLog[];
};
type TriggerOptions = {
/**
* URL of the workflow to trigger
*/
url: string;
/**
* Body to send to the workflow
*/
body?: unknown;
/**
* Headers to send to the workflow
*/
headers?: Record<string, string>;
/**
* Workflow run id to use for the workflow run.
* If not provided, a random workflow run id will be generated.
*/
workflowRunId?: string;
/**
* Number of retries to perform if the request fails.
*
* @default 3
*/
retries?: number;
/**
* Delay between retries.
*
* By default, the retry delay follows an exponential backoff.
* More details can be found at https://upstash.com/docs/qstash/features/retry.
*
* The `retryDelay` option allows you to customize the delay (in milliseconds) between retry attempts when message delivery fails.
*
* You can use mathematical expressions and the following built-in functions to calculate the delay dynamically.
* The special variable `retried` represents the current retry attempt count (starting from 0).
*
* Supported functions:
* - `pow`
* - `sqrt`
* - `abs`
* - `exp`
* - `floor`
* - `ceil`
* - `round`
* - `min`
* - `max`
*
* Examples of valid `retryDelay` values:
* ```ts
* 1000 // 1 second
* 1000 * (1 + retried) // 1 second multiplied by the current retry attempt
* pow(2, retried) // 2 to the power of the current retry attempt
* max(10, pow(2, retried)) // The greater of 10 or 2^retried
* ```
*/
retryDelay?: string;
/**
* Flow control to use for the workflow run.
* If not provided, no flow control will be used.
*/
flowControl?: FlowControl;
/**
* Delay to apply before triggering the workflow.
*/
delay?: PublishRequest["delay"];
/**
* Optionally set the absolute delay of this message.
* This will override the delay option.
* The message will not be delivered until the specified time.
*
* Unix timestamp in seconds.
*
* @default undefined
*/
notBefore?: PublishRequest["notBefore"];
/**
* Label to apply to the workflow run.
*
* Can be used to filter the workflow run logs.
*/
label?: string;
} & ({
/**
* URL to call if the first request to the workflow endpoint fails
*/
failureUrl?: never;
/**
* Whether the workflow endpoint has a failure function
* defined to be invoked if the first request fails.
*
* If true, the failureUrl will be ignored.
*/
useFailureFunction?: true;
} | {
/**
* URL to call if the first request to the workflow endpoint fails
*/
failureUrl?: string;
/**
* Whether the workflow endpoint has a failure function
* defined to be invoked if the first request fails.
*
* If true, the failureUrl will be ignored.
*/
useFailureFunction?: never;
});
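/*
 * A sketch of the two failure-handling variants of `TriggerOptions` above
 * (the URLs are illustrative assumptions):
 *
 * ```ts
 * // Variant 1: the workflow endpoint defines a failureFunction in its serve options
 * const withFailureFunction: TriggerOptions = {
 *   url: "https://workflow-endpoint.com",
 *   useFailureFunction: true,
 * };
 *
 * // Variant 2: deliver failures of the first request to a dedicated URL instead
 * const withFailureUrl: TriggerOptions = {
 *   url: "https://workflow-endpoint.com",
 *   failureUrl: "https://workflow-endpoint.com/failure",
 * };
 *
 * // Setting both is a type error: the two options are mutually exclusive.
 * ```
 */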
type DLQResumeRestartOptions<TDLQId extends string | string[] = string | string[]> = {
dlqId: TDLQId;
} & Pick<TriggerOptions, "flowControl" | "retries">;
type DLQResumeRestartResponse = {
/**
* id of the workflow run created to resume or restart the DLQ message
*/
workflowRunId: string;
/**
* Time when the new workflow run was created
*/
workflowCreatedAt: string;
};
type QStashDLQFilterOptions = NonNullable<Required<Parameters<Client$1["dlq"]["listMessages"]>[0]>>["filter"];
type DLQFilterOptions = Pick<QStashDLQFilterOptions, "fromDate" | "toDate" | "url" | "responseStatus"> & {
workflowRunId?: string;
workflowCreatedAt?: string;
failureFunctionState?: FailureCallbackInfo["state"];
label?: string;
};
type FailureCallbackInfo = {
state?: "CALLBACK_FAIL" | "CALLBACK_SUCCESS" | "CALLBACK_INPROGRESS";
responseStatus?: number;
responseBody?: string;
responseHeaders?: Record<string, string[]>;
};
type DLQMessage = {
messageId: string;
/**
* URL of the workflow
*/
url: string;
method: string;
header: Record<string, string[]>;
body: string;
maxRetries: number;
notBefore: number;
createdAt: number;
callerIP: string;
workflowRunId: string;
workflowCreatedAt: number;
workflowUrl: string;
responseStatus: number;
responseHeader: Record<string, string[]>;
responseBody: string;
dlqId: string;
/**
* URL of the failure callback
*/
failureCallback?: string;
/**
* status of the failure callback
*/
failureCallbackInfo?: FailureCallbackInfo;
/**
* label passed when triggering workflow
*/
label?: string;
};
type PublicDLQMessage = Pick<DLQMessage, "header" | "body" | "maxRetries" | "notBefore" | "createdAt" | "callerIP" | "workflowRunId" | "workflowCreatedAt" | "workflowUrl" | "responseStatus" | "responseHeader" | "responseBody" | "dlqId" | "failureCallback" | "failureCallbackInfo" | "label">;
declare class DLQ {
private client;
constructor(client: Client$1);
/**
* list the items in the DLQ
*
* @param cursor - Optional cursor for pagination.
* @param count - Optional number of items to return.
* @param filter - Optional filter options to apply to the DLQ items.
* The available filter options are:
* - `fromDate`: Filter items which entered the DLQ after this date.
* - `toDate`: Filter items which entered the DLQ before this date.
* - `url`: Filter items by the URL they were sent to.
* - `responseStatus`: Filter items by the response status code.
* - `workflowRunId`: Filter items by workflow run id.
* - `workflowCreatedAt`: Filter items by workflow creation time.
* - `failureFunctionState`: Filter items by the state of the failure callback.
* - `label`: Filter items by the label passed when triggering the workflow.
* @returns DLQ messages and an optional cursor for pagination.
*/
list(parameters?: {
cursor?: string;
count?: number;
filter?: DLQFilterOptions;
}): Promise<{
messages: PublicDLQMessage[];
cursor?: string;
}>;
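/*
 * A usage sketch for `list` (assumes a `Client` instance named `client`;
 * filter values are illustrative):
 *
 * ```ts
 * const { messages, cursor } = await client.dlq.list({
 *   count: 10,
 *   filter: {
 *     url: "https://workflow-endpoint.com",
 *     responseStatus: 500,
 *   },
 * });
 *
 * for (const message of messages) {
 *   console.log(message.dlqId, message.workflowRunId, message.responseBody);
 * }
 * ```
 */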
/**
* Resumes the workflow run for the given DLQ message(s).
*
* Resuming means that the new workflow run will start executing from where
* the original workflow run failed, using the same input and context.
*
* If you want to restart the workflow run from the beginning, use
* `restart` method instead.
*
* Example with a single DLQ ID:
* ```ts
* const response = await client.dlq.resume({
* dlqId: "dlq-12345",
* flowControl: {
* key: "my-flow-control-key",
* value: "my-flow-control-value",
* },
* retries: 3,
* });
*
* console.log(response.workflowRunId); // ID of the new workflow run
* ```
*
* Example with multiple DLQ IDs:
* ```ts
* const response = await client.dlq.resume({
* dlqId: ["dlq-12345", "dlq-67890"],
* // other parameters...
* });
* console.log(response[0].workflowRunId); // ID of the first workflow run
* console.log(response[1].workflowRunId); // ID of the second workflow run
* ```
*
* If the dlqId is not found, an error is thrown.
*
* @param dlqId - The ID(s) of the DLQ message(s) to resume.
* @param flowControl - Optional flow control parameters. If not passed, flow
* control of the failing workflow will be used
* @param retries - Optional number of retries to perform if the request fails.
* If not passed, retries settings of the failing workflow will be used.
* @returns run id and creation time of the new workflow run(s).
*/
resume(parameters: DLQResumeRestartOptions<string>): Promise<DLQResumeRestartResponse>;
resume(parameters: DLQResumeRestartOptions<string[]>): Promise<DLQResumeRestartResponse[]>;
/**
* Restarts the workflow run for the given DLQ message(s).
*
* Restarting means that the new workflow run will start executing from the
* beginning with the same initial payload and configuration.
*
* If you want to resume the workflow run from where it failed, use
* `resume` method instead.
*
* Example with a single DLQ ID:
* ```ts
* const response = await client.dlq.restart({
* dlqId: "dlq-12345",
* flowControl: {
* key: "my-flow-control-key",
* value: "my-flow-control-value",
* },
* retries: 3,
* });
*
* console.log(response.workflowRunId); // ID of the new workflow run
* ```
*
* Example with multiple DLQ IDs:
* ```ts
* const response = await client.dlq.restart({
* dlqId: ["dlq-12345", "dlq-67890"],
* // other parameters...
* });
* console.log(response[0].workflowRunId); // ID of the first workflow run
* console.log(response[1].workflowRunId); // ID of the second workflow run
* ```
*
* If the dlqId is not found, an error is thrown.
*
* @param dlqId - The ID(s) of the DLQ message(s) to restart.
* @param flowControl - Optional flow control parameters. If not passed, flow
* control of the failing workflow will be used
* @param retries - Optional number of retries to perform if the request fails.
* If not passed, retries settings of the failing workflow will be used.
* @returns run id and creation time of the new workflow run(s).
*/
restart(parameters: DLQResumeRestartOptions<string>): Promise<DLQResumeRestartResponse>;
restart(parameters: DLQResumeRestartOptions<string[]>): Promise<DLQResumeRestartResponse[]>;
/**
* Retry the failure callback of a workflow run whose failureUrl/failureFunction
* request has failed.
*
* @param dlqId - The ID of the DLQ message to retry.
* @returns run id and creation time of the workflow run, along with an optional error message.
*/
retryFailureFunction({ dlqId }: Pick<DLQResumeRestartOptions<string>, "dlqId">): Promise<DLQResumeRestartResponse & {
error?: string;
}>;
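/*
 * A usage sketch for `retryFailureFunction` (assumes a `Client` instance named
 * `client`; the dlqId is illustrative):
 *
 * ```ts
 * const response = await client.dlq.retryFailureFunction({
 *   dlqId: "dlq-12345",
 * });
 *
 * if (response.error) {
 *   console.error("retry failed:", response.error);
 * } else {
 *   console.log("failure callback retried for run", response.workflowRunId);
 * }
 * ```
 */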
private static handleDLQOptions;
private static getDlqIdQueryParameter;
}
type ClientConfig = ConstructorParameters<typeof Client$1>[0];
/**
* Workflow client for canceling & notifying workflows and getting waiters of an
* event.
*
* ```ts
* import { Client } from "@upstash/workflow";
* const client = new Client({ token: "<QSTASH_TOKEN>" })
* ```
*/
declare class Client {
private client;
constructor(clientConfig: ClientConfig);
/**
* Cancel an ongoing workflow
*
* Returns the number of workflow runs canceled. Otherwise, throws an error.
*
* There are multiple ways you can cancel workflows:
* - pass one or more workflow run ids to cancel them
* - pass a workflow url to cancel all runs starting with this url
* - cancel all pending or active workflow runs
*
* ### Cancel a set of workflow runs
*
* ```ts
* // cancel a single workflow
* await client.cancel({ ids: "<WORKFLOW_RUN_ID>" })
*
* // cancel a set of workflow runs
* await client.cancel({ ids: [
* "<WORKFLOW_RUN_ID_1>",
* "<WORKFLOW_RUN_ID_2>",
* ]})
* ```
*
* ### Cancel workflows starting with a url
*
* If you have an endpoint called `https://your-endpoint.com` and you
* want to cancel all workflow runs on it, you can use `urlStartingWith`.
*
* Note that this will cancel workflows in all endpoints under
* `https://your-endpoint.com`.
*
* ```ts
* await client.cancel({ urlStartingWith: "https://your-endpoint.com" })
* ```
*
* ### Cancel *all* workflows
*
* To cancel all pending and currently running workflows, you can
* do it like this:
*
* ```ts
* await client.cancel({ all: true })
* ```
*
* @param ids run id(s) of the workflow run(s) to cancel
* @param urlStartingWith cancel workflows starting with this url. Will be ignored
* if `ids` parameter is set.
* @param all set to true in order to cancel all workflows. Will be ignored
* if `ids` or `urlStartingWith` parameters are set.
* @returns object containing the number of canceled workflow runs. Otherwise throws QStashError
*/
cancel({ ids, urlStartingWith, all, }: {
ids?: string | string[];
urlStartingWith?: string;
all?: true;
}): Promise<{
cancelled: number;
} & {
error?: string;
}>;
/**
* Notify a workflow run waiting for an event
*
* ```ts
* import { Client } from "@upstash/workflow";
*
* const client = new Client({ token: "<QSTASH_TOKEN>" })
* await client.notify({
* eventId: "my-event-id",
* eventData: "my-data" // data passed to the workflow run
* });
* ```
*
* @param eventId event id to notify
* @param eventData data to provide to the workflow
*/
notify({ eventId, eventData, }: {
eventId: string;
eventData?: unknown;
}): Promise<NotifyResponse[]>;
/**
* Check waiters of an event
*
* ```ts
* import { Client } from "@upstash/workflow";
*
* const client = new Client({ token: "<QSTASH_TOKEN>" })
* const result = await client.getWaiters({
* eventId: "my-event-id"
* })
* ```
*
* @param eventId event id to check
*/
getWaiters({ eventId }: {
eventId: string;
}): Promise<Required<Waiter>[]>;
/**
* Trigger a new workflow run (or multiple runs) and return the workflow run id(s)
*
* trigger a single workflow run:
* ```ts
* const { workflowRunId } = await client.trigger({
* url: "https://workflow-endpoint.com",
* body: "hello there!", // Optional body
* headers: { ... }, // Optional headers
* workflowRunId: "my-workflow", // Optional workflow run ID
* retries: 3, // Optional retries for the initial request
* retryDelay: "1000" // Optional delay between retries
* });
*
* console.log(workflowRunId)
* // wfr_my-workflow
* ```
* trigger multiple workflow runs:
* ```ts
* const result = await client.trigger([
* {
* url: "https://workflow-endpoint.com",
* body: "hello there!", // Optional body
* headers: { ... }, // Optional headers
* workflowRunId: "my-workflow", // Optional workflow run ID
* retries: 3, // Optional retries for the initial request
* retryDelay: "1000" // Optional delay between retries
* },
* {
* url: "https://workflow-endpoint-2.com",
* body: "hello world!", // Optional body
* headers: { ... }, // Optional headers
* workflowRunId: "my-workflow-2", // Optional workflow run ID
* retries: 5, // Optional retries for the initial request
* retryDelay: "1000" // Optional delay between retries
* },
* ]);
*
* console.log(result)
* // [
* // { workflowRunId: "wfr_my-workflow" },
* // { workflowRunId: "wfr_my-workflow-2" },
* // ]
* ```
*
* @param url URL of the workflow
* @param body body to start the workflow with
* @param headers headers to use in the request
* @param workflowRunId optional workflow run id to use. mind that
* you should pass different workflow run ids for different runs.
* The final workflowRunId will be `wfr_${workflowRunId}`, in
* other words: the workflow run id you pass will be prefixed
* with `wfr_`.
* @param retries retry to use in the initial request. in the rest of
* the workflow, `retries` option of the `serve` will be used.
* @param retryDelay delay between retries.
* @param flowControl Settings for controlling the number of active requests
* and number of requests per second with the same key.
* @param delay Delay for the workflow run. This is used to delay the
* execution of the workflow run. The delay is in seconds or can be passed
* as a string with a time unit (e.g. "1h", "30m", "15s").
* @returns workflow run id or an array of workflow run ids
*/
trigger(params: TriggerOptions): Promise<{
workflowRunId: string;
}>;
trigger(params: TriggerOptions[]): Promise<{
workflowRunId: string;
}[]>;
/**
* Fetches logs for workflow runs.
*
* @param workflowRunId - The ID of the workflow run to fetch logs for.
* @param cursor - The cursor for pagination.
* @param count - Number of runs to fetch. Default value is 10.
* @param state - The state of the workflow run.
* @param workflowUrl - The URL of the workflow. Should be an exact match.
* @param workflowCreatedAt - The creation time of the workflow. If you have two workflow runs with the same URL, you can use this to filter them.
* @param label - The label passed when triggering the workflow run, used to filter the runs.
* @returns A promise that resolves to a `WorkflowRunLogs` object containing the matching runs and a pagination cursor.
*
* @example
* Fetch logs for a specific workflow run:
* ```typescript
* const { runs } = await client.logs({ workflowRunId: '12345' });
* const steps = runs[0].steps; // access steps
* ```
*
* @example
* Fetch logs with pagination:
* ```typescript
* const { runs, cursor } = await client.logs();
* const steps = runs[0].steps // access steps
*
* const { runs: nextRuns, cursor: nextCursor } = await client.logs({ cursor, count: 2 });
* ```
*/
logs(params?: {
workflowRunId?: WorkflowRunLog["workflowRunId"];
cursor?: string;
count?: number;
state?: WorkflowRunLog["workflowState"];
workflowUrl?: WorkflowRunLog["workflowUrl"];
workflowCreatedAt?: WorkflowRunLog["workflowRunCreatedAt"];
label?: WorkflowRunLog["label"];
}): Promise<WorkflowRunLogs>;
get dlq(): DLQ;
}
export { Client, type DLQResumeRestartOptions, type DLQResumeRestartResponse, ExclusiveValidationOptions, NotifyResponse, RawStep, RouteFunction, type StepError, type StepLog, StepType, Telemetry, type TriggerOptions, Waiter, type WorkflowRunLog, type WorkflowRunLogs, WorkflowServeOptions, serve };