@muppet-kit/shared
Version:
Shared utilities and types for Muppet Kit.
1,466 lines (1,348 loc) • 80.7 kB
TypeScript
/// <reference types="node" />
import { EventEmitter } from 'events';
import { ServerResponse, IncomingMessage } from 'http';
import { WorkerOptions } from 'worker_threads';
import z from 'zod';
// ==================================================================================================
// JSON Schema Draft 07
// ==================================================================================================
// https://tools.ietf.org/html/draft-handrews-json-schema-validation-01
// --------------------------------------------------------------------------------------------------
/**
 * Primitive type
 * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1.1
 */
type JSONSchema7TypeName =
    | "string" //
    | "number"
    | "integer"
    | "boolean"
    | "object"
    | "array"
    | "null";
/**
 * Primitive type
 *
 * Any value that a draft-07 `enum`, `const`, `default`, or `examples`
 * keyword may hold.
 *
 * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1.1
 */
type JSONSchema7Type =
    | string //
    | number
    | boolean
    | JSONSchema7Object
    | JSONSchema7Array
    | null;
// Workaround for infinite type recursion: a named interface (rather than an
// inline mapped type) lets TypeScript defer resolution of the self-reference.
interface JSONSchema7Object {
    [key: string]: JSONSchema7Type;
}
// Workaround for infinite type recursion
// https://github.com/Microsoft/TypeScript/issues/3496#issuecomment-128553540
interface JSONSchema7Array extends Array<JSONSchema7Type> {}
/**
 * Meta schema
 *
 * Recommended values:
 * - 'http://json-schema.org/schema#'
 * - 'http://json-schema.org/hyper-schema#'
 * - 'http://json-schema.org/draft-07/schema#'
 * - 'http://json-schema.org/draft-07/hyper-schema#'
 *
 * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-5
 */
type JSONSchema7Version = string;
/**
 * JSON Schema v7
 *
 * A schema position may hold either a full schema object or a boolean
 * (`true` = always valid, `false` = always invalid).
 *
 * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01
 */
type JSONSchema7Definition = JSONSchema7 | boolean;
/**
 * A JSON Schema draft-07 document. Every keyword is optional; section
 * references below point into the draft-07 validation specification.
 */
interface JSONSchema7 {
    $id?: string | undefined;
    $ref?: string | undefined;
    $schema?: JSONSchema7Version | undefined;
    $comment?: string | undefined;
    /**
     * @see https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-00#section-8.2.4
     * @see https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-validation-00#appendix-A
     */
    $defs?: {
        [key: string]: JSONSchema7Definition;
    } | undefined;
    /**
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1
     */
    type?: JSONSchema7TypeName | JSONSchema7TypeName[] | undefined;
    enum?: JSONSchema7Type[] | undefined;
    const?: JSONSchema7Type | undefined;
    /**
     * Numeric validation keywords.
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.2
     */
    multipleOf?: number | undefined;
    maximum?: number | undefined;
    exclusiveMaximum?: number | undefined;
    minimum?: number | undefined;
    exclusiveMinimum?: number | undefined;
    /**
     * String validation keywords.
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.3
     */
    maxLength?: number | undefined;
    minLength?: number | undefined;
    pattern?: string | undefined;
    /**
     * Array validation keywords.
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.4
     */
    items?: JSONSchema7Definition | JSONSchema7Definition[] | undefined;
    additionalItems?: JSONSchema7Definition | undefined;
    maxItems?: number | undefined;
    minItems?: number | undefined;
    uniqueItems?: boolean | undefined;
    contains?: JSONSchema7Definition | undefined;
    /**
     * Object validation keywords.
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.5
     */
    maxProperties?: number | undefined;
    minProperties?: number | undefined;
    required?: string[] | undefined;
    properties?: {
        [key: string]: JSONSchema7Definition;
    } | undefined;
    patternProperties?: {
        [key: string]: JSONSchema7Definition;
    } | undefined;
    additionalProperties?: JSONSchema7Definition | undefined;
    dependencies?: {
        [key: string]: JSONSchema7Definition | string[];
    } | undefined;
    propertyNames?: JSONSchema7Definition | undefined;
    /**
     * Conditional application keywords.
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.6
     */
    if?: JSONSchema7Definition | undefined;
    then?: JSONSchema7Definition | undefined;
    else?: JSONSchema7Definition | undefined;
    /**
     * Boolean-logic combinators.
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.7
     */
    allOf?: JSONSchema7Definition[] | undefined;
    anyOf?: JSONSchema7Definition[] | undefined;
    oneOf?: JSONSchema7Definition[] | undefined;
    not?: JSONSchema7Definition | undefined;
    /**
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-7
     */
    format?: string | undefined;
    /**
     * Content keywords for string-encoded data.
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-8
     */
    contentMediaType?: string | undefined;
    contentEncoding?: string | undefined;
    /**
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-9
     */
    definitions?: {
        [key: string]: JSONSchema7Definition;
    } | undefined;
    /**
     * Annotation keywords (metadata, not validation).
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-10
     */
    title?: string | undefined;
    description?: string | undefined;
    default?: JSONSchema7Type | undefined;
    readOnly?: boolean | undefined;
    writeOnly?: boolean | undefined;
    examples?: JSONSchema7Type | undefined;
}
/**
 * Any value representable in JSON: a primitive, a string-keyed object,
 * or an array, nested to arbitrary depth.
 */
type JSONValue = string | number | boolean | JSONObject | JSONArray | null;
/** A string-keyed record of JSON values. */
type JSONObject = { [key: string]: JSONValue };
/** An ordered list of JSON values. */
type JSONArray = Array<JSONValue>;
/**
 * Additional provider-specific metadata. They are passed through
 * to the provider from the AI SDK and enable provider-specific
 * functionality that can be fully encapsulated in the provider.
 *
 * This enables us to quickly ship provider-specific functionality
 * without affecting the core AI SDK.
 *
 * The outer record is keyed by the provider name, and the inner
 * record is keyed by the provider-specific metadata key.
 *
 * ```ts
 * {
 *   "anthropic": {
 *     "cacheControl": { "type": "ephemeral" }
 *   }
 * }
 * ```
 */
type LanguageModelV1ProviderMetadata = Record<string, Record<string, JSONValue>>;
/**
 * A source that has been used as input to generate the response.
 */
type LanguageModelV1Source = {
    /**
     * A URL source. This is returned by web search RAG models.
     * (Currently the only supported `sourceType` discriminant.)
     */
    sourceType: 'url';
    /**
     * The ID of the source.
     */
    id: string;
    /**
     * The URL of the source.
     */
    url: string;
    /**
     * The title of the source.
     */
    title?: string;
    /**
     * Additional provider metadata for the source.
     */
    providerMetadata?: LanguageModelV1ProviderMetadata;
};
/**
 * Provider-independent sampling and transport settings shared by all
 * language-model calls. Every field is optional; providers may emit an
 * `unsupported-setting` warning for settings they cannot honor.
 */
type LanguageModelV1CallSettings = {
    /**
     * Maximum number of tokens to generate.
     */
    maxTokens?: number;
    /**
     * Temperature setting.
     * It is recommended to set either `temperature` or `topP`, but not both.
     */
    temperature?: number;
    /**
     * Stop sequences.
     * If set, the model will stop generating text when one of the stop sequences is generated.
     * Providers may have limits on the number of stop sequences.
     */
    stopSequences?: string[];
    /**
     * Nucleus sampling.
     * It is recommended to set either `temperature` or `topP`, but not both.
     */
    topP?: number;
    /**
     * Only sample from the top K options for each subsequent token.
     * Used to remove "long tail" low probability responses.
     * Recommended for advanced use cases only. You usually only need to use temperature.
     */
    topK?: number;
    /**
     * Presence penalty setting. It affects the likelihood of the model to
     * repeat information that is already in the prompt.
     */
    presencePenalty?: number;
    /**
     * Frequency penalty setting. It affects the likelihood of the model
     * to repeatedly use the same words or phrases.
     */
    frequencyPenalty?: number;
    /**
     * Response format. The output can either be text or JSON. Default is text.
     * If JSON is selected, a schema can optionally be provided to guide the LLM.
     */
    responseFormat?: {
        type: 'text';
    } | {
        type: 'json';
        /**
         * JSON schema that the generated output should conform to.
         */
        schema?: JSONSchema7;
        /**
         * Name of output that should be generated. Used by some providers for additional LLM guidance.
         */
        name?: string;
        /**
         * Description of the output that should be generated. Used by some providers for additional LLM guidance.
         */
        description?: string;
    };
    /**
     * The seed (integer) to use for random sampling. If set and supported
     * by the model, calls will generate deterministic results.
     */
    seed?: number;
    /**
     * Abort signal for cancelling the operation.
     */
    abortSignal?: AbortSignal;
    /**
     * Additional HTTP headers to be sent with the request.
     * Only applicable for HTTP-based providers.
     */
    headers?: Record<string, string | undefined>;
};
/**
 * A tool has a name, a description, and a set of parameters.
 *
 * Note: this is **not** the user-facing tool definition. The AI SDK methods will
 * map the user-facing tool definitions to this format.
 */
type LanguageModelV1FunctionTool = {
    /**
     * The type of the tool (always 'function').
     */
    type: 'function';
    /**
     * The name of the tool. Unique within this model call.
     */
    name: string;
    /**
     * A description of the tool. The language model uses this to understand the
     * tool's purpose and to provide better completion suggestions.
     */
    description?: string;
    /**
     * The parameters that the tool expects. The language model uses this to
     * understand the tool's input requirements and to provide matching suggestions.
     */
    parameters: JSONSchema7;
};
/**
 * A prompt is a list of messages.
 *
 * Note: Not all models and prompt formats support multi-modal inputs and
 * tool calls. The validation happens at runtime.
 *
 * Note: This is not a user-facing prompt. The AI SDK methods will map the
 * user-facing prompt types such as chat or instruction prompts to this format.
 */
type LanguageModelV1Prompt = Array<LanguageModelV1Message>;
/**
 * A single message, discriminated by `role`. The allowed content part
 * types differ per role; every role also carries optional provider metadata.
 */
type LanguageModelV1Message = ({
    role: 'system';
    content: string;
} | {
    role: 'user';
    content: Array<LanguageModelV1TextPart | LanguageModelV1ImagePart | LanguageModelV1FilePart>;
} | {
    role: 'assistant';
    content: Array<LanguageModelV1TextPart | LanguageModelV1FilePart | LanguageModelV1ReasoningPart | LanguageModelV1RedactedReasoningPart | LanguageModelV1ToolCallPart>;
} | {
    role: 'tool';
    content: Array<LanguageModelV1ToolResultPart>;
}) & {
    /**
     * Additional provider-specific metadata. They are passed through
     * to the provider from the AI SDK and enable provider-specific
     * functionality that can be fully encapsulated in the provider.
     */
    providerMetadata?: LanguageModelV1ProviderMetadata;
};
/**
 * Text content part of a prompt. It contains a string of text.
 */
interface LanguageModelV1TextPart {
    type: 'text';
    /**
     * The text content.
     */
    text: string;
    /**
     * Additional provider-specific metadata. They are passed through
     * to the provider from the AI SDK and enable provider-specific
     * functionality that can be fully encapsulated in the provider.
     */
    providerMetadata?: LanguageModelV1ProviderMetadata;
}
/**
 * Reasoning content part of a prompt. It contains a string of reasoning text.
 */
interface LanguageModelV1ReasoningPart {
    type: 'reasoning';
    /**
     * The reasoning text.
     */
    text: string;
    /**
     * An optional signature for verifying that the reasoning originated from the model.
     */
    signature?: string;
    /**
     * Additional provider-specific metadata. They are passed through
     * to the provider from the AI SDK and enable provider-specific
     * functionality that can be fully encapsulated in the provider.
     */
    providerMetadata?: LanguageModelV1ProviderMetadata;
}
/**
 * Redacted reasoning content part of a prompt.
 */
interface LanguageModelV1RedactedReasoningPart {
    type: 'redacted-reasoning';
    /**
     * Redacted reasoning data.
     */
    data: string;
    /**
     * Additional provider-specific metadata. They are passed through
     * to the provider from the AI SDK and enable provider-specific
     * functionality that can be fully encapsulated in the provider.
     */
    providerMetadata?: LanguageModelV1ProviderMetadata;
}
/**
 * Image content part of a prompt. It contains an image.
 */
interface LanguageModelV1ImagePart {
    type: 'image';
    /**
     * Image data as a Uint8Array (e.g. from a Blob or Buffer) or a URL.
     */
    image: Uint8Array | URL;
    /**
     * Optional mime type of the image.
     */
    mimeType?: string;
    /**
     * Additional provider-specific metadata. They are passed through
     * to the provider from the AI SDK and enable provider-specific
     * functionality that can be fully encapsulated in the provider.
     */
    providerMetadata?: LanguageModelV1ProviderMetadata;
}
/**
 * File content part of a prompt. It contains a file.
 */
interface LanguageModelV1FilePart {
    type: 'file';
    /**
     * Optional filename of the file.
     */
    filename?: string;
    /**
     * File data as base64 encoded string or as a URL.
     */
    data: string | URL;
    /**
     * Mime type of the file. (Required, unlike the image part.)
     */
    mimeType: string;
    /**
     * Additional provider-specific metadata. They are passed through
     * to the provider from the AI SDK and enable provider-specific
     * functionality that can be fully encapsulated in the provider.
     */
    providerMetadata?: LanguageModelV1ProviderMetadata;
}
/**
 * Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
 */
interface LanguageModelV1ToolCallPart {
    type: 'tool-call';
    /**
     * ID of the tool call. This ID is used to match the tool call with the tool result.
     */
    toolCallId: string;
    /**
     * Name of the tool that is being called.
     */
    toolName: string;
    /**
     * Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
     */
    args: unknown;
    /**
     * Additional provider-specific metadata. They are passed through
     * to the provider from the AI SDK and enable provider-specific
     * functionality that can be fully encapsulated in the provider.
     */
    providerMetadata?: LanguageModelV1ProviderMetadata;
}
/**
 * Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
 */
interface LanguageModelV1ToolResultPart {
    type: 'tool-result';
    /**
     * ID of the tool call that this result is associated with.
     */
    toolCallId: string;
    /**
     * Name of the tool that generated this result.
     */
    toolName: string;
    /**
     * Result of the tool call. This is a JSON-serializable object.
     */
    result: unknown;
    /**
     * Optional flag if the result is an error or an error message.
     */
    isError?: boolean;
    /**
     * Tool results as an array of parts. This enables advanced tool results including images.
     * When this is used, the `result` field should be ignored (if the provider supports content).
     */
    content?: Array<{
        type: 'text';
        /**
         * Text content.
         */
        text: string;
    } | {
        type: 'image';
        /**
         * base-64 encoded image data
         */
        data: string;
        /**
         * Mime type of the image.
         */
        mimeType?: string;
    }>;
    /**
     * Additional provider-specific metadata. They are passed through
     * to the provider from the AI SDK and enable provider-specific
     * functionality that can be fully encapsulated in the provider.
     */
    providerMetadata?: LanguageModelV1ProviderMetadata;
}
/**
 * The configuration of a tool that is defined by the provider.
 */
type LanguageModelV1ProviderDefinedTool = {
    /**
     * The type of the tool (always 'provider-defined').
     */
    type: 'provider-defined';
    /**
     * The ID of the tool. Should follow the format `<provider-name>.<tool-name>`.
     */
    id: `${string}.${string}`;
    /**
     * The name of the tool. Unique within this model call.
     */
    name: string;
    /**
     * The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
     */
    args: Record<string, unknown>;
};
/**
 * Specifies how tools should be selected for a call:
 * `auto` (model decides), `none` (no tool use), `required`
 * (a tool must be called), or `tool` (call the named tool).
 */
type LanguageModelV1ToolChoice = {
    type: 'auto';
} | {
    type: 'none';
} | {
    type: 'required';
} | {
    type: 'tool';
    toolName: string;
};
/**
 * Full options for a single model call: the shared call settings plus the
 * prompt, generation mode, and provider metadata.
 */
type LanguageModelV1CallOptions = LanguageModelV1CallSettings & {
    /**
     * Whether the user provided the input as messages or as
     * a prompt. This can help guide non-chat models in the
     * expansion, because different expansions can be needed for
     * chat/non-chat use cases.
     */
    inputFormat: 'messages' | 'prompt';
    /**
     * The mode affects the behavior of the language model. It is required to
     * support provider-independent streaming and generation of structured objects.
     * The model can take this information and e.g. configure json mode, the correct
     * low level grammar, etc. It can also be used to optimize the efficiency of the
     * streaming, e.g. tool-delta stream parts are only needed in the
     * object-tool mode.
     *
     * @deprecated mode will be removed in v2.
     * All necessary settings will be directly supported through the call settings,
     * in particular responseFormat, toolChoice, and tools.
     */
    mode: {
        type: 'regular';
        /**
         * The tools that are available for the model.
         */
        tools?: Array<LanguageModelV1FunctionTool | LanguageModelV1ProviderDefinedTool>;
        /**
         * Specifies how the tool should be selected. Defaults to 'auto'.
         */
        toolChoice?: LanguageModelV1ToolChoice;
    } | {
        type: 'object-json';
        /**
         * JSON schema that the generated output should conform to.
         */
        schema?: JSONSchema7;
        /**
         * Name of output that should be generated. Used by some providers for additional LLM guidance.
         */
        name?: string;
        /**
         * Description of the output that should be generated. Used by some providers for additional LLM guidance.
         */
        description?: string;
    } | {
        type: 'object-tool';
        tool: LanguageModelV1FunctionTool;
    };
    /**
     * A language mode prompt is a standardized prompt type.
     *
     * Note: This is **not** the user-facing prompt. The AI SDK methods will map the
     * user-facing prompt types such as chat or instruction prompts to this format.
     * That approach allows us to evolve the user facing prompts without breaking
     * the language model interface.
     */
    prompt: LanguageModelV1Prompt;
    /**
     * Additional provider-specific metadata.
     * The metadata is passed through to the provider from the AI SDK and enables
     * provider-specific functionality that can be fully encapsulated in the provider.
     */
    providerMetadata?: LanguageModelV1ProviderMetadata;
};
/**
 * Warning from the model provider for this call. The call will proceed, but e.g.
 * some settings might not be supported, which can lead to suboptimal results.
 */
type LanguageModelV1CallWarning = {
    type: 'unsupported-setting';
    setting: keyof LanguageModelV1CallSettings;
    details?: string;
} | {
    type: 'unsupported-tool';
    tool: LanguageModelV1FunctionTool | LanguageModelV1ProviderDefinedTool;
    details?: string;
} | {
    type: 'other';
    message: string;
};
/**
 * Reason why a language model finished generating a response.
 *
 * Can be one of the following:
 * - `stop`: model generated stop sequence
 * - `length`: model generated maximum number of tokens
 * - `content-filter`: content filter violation stopped the model
 * - `tool-calls`: model triggered tool calls
 * - `error`: model stopped because of an error
 * - `other`: model stopped for other reasons
 * - `unknown`: the model has not transmitted a finish reason
 */
type LanguageModelV1FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other' | 'unknown';
/**
 * A function tool call generated by the model. Unlike
 * {@link LanguageModelV1ToolCallPart}, the arguments here are still a
 * stringified JSON payload.
 */
type LanguageModelV1FunctionToolCall = {
    toolCallType: 'function';
    toolCallId: string;
    toolName: string;
    /**
     * Stringified JSON object with the tool call arguments. Must match the
     * parameters schema of the tool.
     */
    args: string;
};
/**
 * Log probabilities for each token and its top log probabilities.
 */
type LanguageModelV1LogProbs = Array<{
    token: string;
    logprob: number;
    topLogprobs: Array<{
        token: string;
        logprob: number;
    }>;
}>;
/**
 * Specification for a language model that implements the language model interface version 1.
 */
type LanguageModelV1 = {
    /**
     * The language model must specify which language model interface
     * version it implements. This will allow us to evolve the language
     * model interface and retain backwards compatibility. The different
     * implementation versions can be handled as a discriminated union
     * on our side.
     */
    readonly specificationVersion: 'v1';
    /**
     * Name of the provider for logging purposes.
     */
    readonly provider: string;
    /**
     * Provider-specific model ID for logging purposes.
     */
    readonly modelId: string;
    /**
     * Default object generation mode that should be used with this model when
     * no mode is specified. Should be the mode with the best results for this
     * model. `undefined` can be returned if object generation is not supported.
     *
     * This is needed to generate the best objects possible w/o requiring the
     * user to explicitly specify the object generation mode.
     */
    readonly defaultObjectGenerationMode: LanguageModelV1ObjectGenerationMode;
    /**
     * Flag whether this model supports image URLs. Default is `true`.
     *
     * When the flag is set to `false`, the AI SDK will download the image and
     * pass the image data to the model.
     */
    readonly supportsImageUrls?: boolean;
    /**
     * Flag whether this model supports grammar-guided generation,
     * i.e. follows JSON schemas for object generation
     * when the response format is set to 'json' or
     * when the `object-json` mode is used.
     *
     * This means that the model guarantees that the generated JSON
     * will be a valid JSON object AND that the object will match the
     * JSON schema.
     *
     * Please note that `generateObject` and `streamObject` will work
     * regardless of this flag, but might send different prompts and
     * use further optimizations if this flag is set to `true`.
     *
     * Defaults to `false`.
     */
    readonly supportsStructuredOutputs?: boolean;
    /**
     * Checks if the model supports the given URL for file parts natively.
     * If the model does not support the URL,
     * the AI SDK will download the file and pass the file data to the model.
     *
     * When undefined, the AI SDK will download the file.
     */
    supportsUrl?(url: URL): boolean;
    /**
     * Generates a language model output (non-streaming).
     *
     * Naming: "do" prefix to prevent accidental direct usage of the method
     * by the user.
     */
    doGenerate(options: LanguageModelV1CallOptions): PromiseLike<{
        /**
         * Text that the model has generated.
         * Can be undefined if the model did not generate any text.
         */
        text?: string;
        /**
         * Reasoning that the model has generated.
         * Can be undefined if the model does not support reasoning.
         */
        reasoning?: string | Array<{
            type: 'text';
            text: string;
            /**
             * An optional signature for verifying that the reasoning originated from the model.
             */
            signature?: string;
        } | {
            type: 'redacted';
            data: string;
        }>;
        /**
         * Generated files as base64 encoded strings or binary data.
         * The files should be returned without any unnecessary conversion.
         * If the API returns base64 encoded strings, the files should be returned
         * as base64 encoded strings. If the API returns binary data, the files should
         * be returned as binary data.
         */
        files?: Array<{
            data: string | Uint8Array;
            mimeType: string;
        }>;
        /**
         * Tool calls that the model has generated.
         * Can be undefined if the model did not generate any tool calls.
         */
        toolCalls?: Array<LanguageModelV1FunctionToolCall>;
        /**
         * Finish reason.
         */
        finishReason: LanguageModelV1FinishReason;
        /**
         * Usage information.
         */
        usage: {
            promptTokens: number;
            completionTokens: number;
        };
        /**
         * Raw prompt and setting information for observability provider integration.
         */
        rawCall: {
            /**
             * Raw prompt after expansion and conversion to the format that the
             * provider uses to send the information to their API.
             */
            rawPrompt: unknown;
            /**
             * Raw settings that are used for the API call. Includes provider-specific
             * settings.
             */
            rawSettings: Record<string, unknown>;
        };
        /**
         * Optional response information for telemetry and debugging purposes.
         */
        rawResponse?: {
            /**
             * Response headers.
             */
            headers?: Record<string, string>;
            /**
             * Response body.
             */
            body?: unknown;
        };
        /**
         * Optional request information for telemetry and debugging purposes.
         */
        request?: {
            /**
             * Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
             * Non-HTTP(s) providers should not set this.
             */
            body?: string;
        };
        /**
         * Optional response information for telemetry and debugging purposes.
         */
        response?: {
            /**
             * ID for the generated response, if the provider sends one.
             */
            id?: string;
            /**
             * Timestamp for the start of the generated response, if the provider sends one.
             */
            timestamp?: Date;
            /**
             * The ID of the response model that was used to generate the response, if the provider sends one.
             */
            modelId?: string;
        };
        /**
         * Warnings for the call, e.g. unsupported settings.
         */
        warnings?: LanguageModelV1CallWarning[];
        /**
         * Additional provider-specific metadata. They are passed through
         * from the provider to the AI SDK and enable provider-specific
         * results that can be fully encapsulated in the provider.
         */
        providerMetadata?: LanguageModelV1ProviderMetadata;
        /**
         * Sources that have been used as input to generate the response.
         */
        sources?: LanguageModelV1Source[];
        /**
         * Logprobs for the completion.
         * `undefined` if the mode does not support logprobs or if it was not enabled.
         *
         * @deprecated will be changed into a provider-specific extension in v2
         */
        logprobs?: LanguageModelV1LogProbs;
    }>;
    /**
     * Generates a language model output (streaming).
     *
     * Naming: "do" prefix to prevent accidental direct usage of the method
     * by the user.
     *
     * @return A stream of higher-level language model output parts.
     */
    doStream(options: LanguageModelV1CallOptions): PromiseLike<{
        stream: ReadableStream<LanguageModelV1StreamPart>;
        /**
         * Raw prompt and setting information for observability provider integration.
         */
        rawCall: {
            /**
             * Raw prompt after expansion and conversion to the format that the
             * provider uses to send the information to their API.
             */
            rawPrompt: unknown;
            /**
             * Raw settings that are used for the API call. Includes provider-specific
             * settings.
             */
            rawSettings: Record<string, unknown>;
        };
        /**
         * Optional raw response data.
         */
        rawResponse?: {
            /**
             * Response headers.
             */
            headers?: Record<string, string>;
        };
        /**
         * Optional request information for telemetry and debugging purposes.
         */
        request?: {
            /**
             * Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
             * Non-HTTP(s) providers should not set this.
             */
            body?: string;
        };
        /**
         * Warnings for the call, e.g. unsupported settings.
         */
        warnings?: Array<LanguageModelV1CallWarning>;
    }>;
};
/**
 * A single part of the streaming model output, discriminated by `type`.
 * Mirrors the fields of the non-streaming `doGenerate` result, delivered
 * incrementally (text/reasoning deltas, tool-call deltas, metadata, finish).
 */
type LanguageModelV1StreamPart = {
    type: 'text-delta';
    textDelta: string;
} | {
    type: 'reasoning';
    textDelta: string;
} | {
    type: 'reasoning-signature';
    signature: string;
} | {
    type: 'redacted-reasoning';
    data: string;
} | {
    type: 'source';
    source: LanguageModelV1Source;
} | {
    type: 'file';
    mimeType: string;
    /**
     * Generated file data as base64 encoded strings or binary data.
     * The file data should be returned without any unnecessary conversion.
     * If the API returns base64 encoded strings, the file data should be returned
     * as base64 encoded strings. If the API returns binary data, the file data should
     * be returned as binary data.
     */
    data: string | Uint8Array;
} | ({
    type: 'tool-call';
} & LanguageModelV1FunctionToolCall) | {
    type: 'tool-call-delta';
    toolCallType: 'function';
    toolCallId: string;
    toolName: string;
    argsTextDelta: string;
} | {
    type: 'response-metadata';
    id?: string;
    timestamp?: Date;
    modelId?: string;
} | {
    type: 'finish';
    finishReason: LanguageModelV1FinishReason;
    providerMetadata?: LanguageModelV1ProviderMetadata;
    usage: {
        promptTokens: number;
        completionTokens: number;
    };
    logprobs?: LanguageModelV1LogProbs;
} | {
    type: 'error';
    error: unknown;
};
/**
 * The object generation modes available for use with a model. `undefined`
 * represents no support for object generation.
 */
type LanguageModelV1ObjectGenerationMode = 'json' | 'tool' | undefined;
/**
 * Language model that is used by the AI SDK Core functions.
 * Currently an alias for the v1 specification.
 */
type LanguageModel = LanguageModelV1;
// Type definitions for pino-std-serializers 2.4
// Definitions by: Connor Fitzgerald <https://github.com/connorjayfitzgerald>
//                 Igor Savin <https://github.com/kibertoad>
// TypeScript Version: 2.7
/**
 * The plain-object form of an `Error` as produced by pino's standard
 * error serializer. Extra enumerable properties on the original error
 * are carried over via the index signatures.
 */
interface SerializedError {
    /**
     * The name of the object's constructor.
     */
    type: string;
    /**
     * The supplied error message.
     */
    message: string;
    /**
     * The stack when the error was generated.
     */
    stack: string;
    /**
     * Non-enumerable. The original Error object. This will not be included in the logged output.
     * This is available for subsequent serializers to use.
     */
    raw: Error;
    /**
     * `cause` is never included in the log output, if you need the `cause`, use {@link raw.cause}
     */
    cause?: never;
    /**
     * Any other extra properties that have been attached to the object will also be present on the serialized object.
     */
    [key: string]: any;
    [key: number]: any;
}
/**
 * Serializes an Error object. Does not serialize "err.cause" fields (will append the err.cause.message to err.message
 * and err.cause.stack to err.stack)
 */
declare function err(err: Error): SerializedError;
/**
 * Serializes an Error object, including full serialization for any err.cause fields recursively.
 */
declare function errWithCause(err: Error): SerializedError;
/**
 * The plain-object form of an HTTP request as produced by pino's
 * standard request serializer.
 */
interface SerializedRequest {
    /**
     * Defaults to `undefined`, unless there is an `id` property already attached to the `request` object or
     * to the `request.info` object. Attach a synchronous function to the `request.id` that returns an
     * identifier to have the value filled.
     */
    id: string | undefined;
    /**
     * HTTP method.
     */
    method: string;
    /**
     * Request pathname (as per req.url in core HTTP).
     */
    url: string;
    /**
     * Reference to the `headers` object from the request (as per req.headers in core HTTP).
     */
    headers: Record<string, string>;
    // NOTE(review): presumably taken from the request socket's remote
    // address/port — confirm against pino-std-serializers docs.
    remoteAddress: string;
    remotePort: number;
    // NOTE(review): route params and query string values — presumably
    // populated by frameworks (e.g. express) rather than core HTTP; verify.
    params: Record<string, string>;
    query: Record<string, string>;
    /**
     * Non-enumerable, i.e. will not be in the output, original request object. This is available for subsequent
     * serializers to use. In cases where the `request` input already has a `raw` property this will
     * replace the original `request.raw` property.
     */
    raw: IncomingMessage;
}
/**
 * Serializes a Request object.
 */
declare function req(req: IncomingMessage): SerializedRequest;
/**
 * Used internally by Pino for general request logging.
 */
declare function mapHttpRequest(req: IncomingMessage): {
    req: SerializedRequest
};
/**
 * The plain-object form of an HTTP response as produced by pino's
 * standard response serializer.
 */
interface SerializedResponse {
    /**
     * HTTP status code.
     */
    statusCode: number;
    /**
     * The headers to be sent in the response.
     */
    headers: Record<string, string>;
    /**
     * Non-enumerable, i.e. will not be in the output, original response object. This is available for subsequent serializers to use.
     */
    raw: ServerResponse;
}
/**
 * Serializes a Response object.
 */
declare function res(res: ServerResponse): SerializedResponse;
/**
 * Used internally by Pino for general response logging.
 */
declare function mapHttpResponse(res: ServerResponse): {
    res: SerializedResponse
};
/** Post-processor applied to an already-serialized error object. */
type CustomErrorSerializer = (err: SerializedError) => Record<string, any>;
/**
 * A utility method for wrapping the default error serializer.
 * This allows custom serializers to work with the already serialized object.
 * The customSerializer accepts one parameter — the newly serialized error object — and returns the new (or updated) error object.
 */
declare function wrapErrorSerializer(customSerializer: CustomErrorSerializer): (err: Error) => Record<string, any>;
/** Post-processor applied to an already-serialized request object. */
type CustomRequestSerializer = (req: SerializedRequest) => Record<string, any>;
/**
 * A utility method for wrapping the default request serializer.
 * This allows custom serializers to work with the already serialized object.
 * The customSerializer accepts one parameter — the newly serialized request object — and returns the new (or updated) request object.
 */
declare function wrapRequestSerializer(customSerializer: CustomRequestSerializer): (req: IncomingMessage) => Record<string, any>;
/** Post-processor applied to an already-serialized response object. */
type CustomResponseSerializer = (res: SerializedResponse) => Record<string, any>;
/**
 * A utility method for wrapping the default response serializer.
 * This allows custom serializers to work with the already serialized object.
 * The customSerializer accepts one parameter — the newly serialized response object — and returns the new (or updated) response object.
 */
declare function wrapResponseSerializer(customSerializer: CustomResponseSerializer): (res: ServerResponse) => Record<string, any>;
// Bundler-generated aliases: each top-level symbol is re-declared under a
// prefixed name so the `pinoStdSerializers` namespace below can re-export
// them without colliding with the originals.
type pinoStdSerializers_CustomErrorSerializer = CustomErrorSerializer;
type pinoStdSerializers_CustomRequestSerializer = CustomRequestSerializer;
type pinoStdSerializers_CustomResponseSerializer = CustomResponseSerializer;
type pinoStdSerializers_SerializedError = SerializedError;
type pinoStdSerializers_SerializedRequest = SerializedRequest;
type pinoStdSerializers_SerializedResponse = SerializedResponse;
declare const pinoStdSerializers_err: typeof err;
declare const pinoStdSerializers_errWithCause: typeof errWithCause;
declare const pinoStdSerializers_mapHttpRequest: typeof mapHttpRequest;
declare const pinoStdSerializers_mapHttpResponse: typeof mapHttpResponse;
declare const pinoStdSerializers_req: typeof req;
declare const pinoStdSerializers_res: typeof res;
declare const pinoStdSerializers_wrapErrorSerializer: typeof wrapErrorSerializer;
declare const pinoStdSerializers_wrapRequestSerializer: typeof wrapRequestSerializer;
declare const pinoStdSerializers_wrapResponseSerializer: typeof wrapResponseSerializer;
/** Namespace mirroring the public surface of `pino-std-serializers`. */
declare namespace pinoStdSerializers {
    export { type pinoStdSerializers_CustomErrorSerializer as CustomErrorSerializer, type pinoStdSerializers_CustomRequestSerializer as CustomRequestSerializer, type pinoStdSerializers_CustomResponseSerializer as CustomResponseSerializer, type pinoStdSerializers_SerializedError as SerializedError, type pinoStdSerializers_SerializedRequest as SerializedRequest, type pinoStdSerializers_SerializedResponse as SerializedResponse, pinoStdSerializers_err as err, pinoStdSerializers_errWithCause as errWithCause, pinoStdSerializers_mapHttpRequest as mapHttpRequest, pinoStdSerializers_mapHttpResponse as mapHttpResponse, pinoStdSerializers_req as req, pinoStdSerializers_res as res, pinoStdSerializers_wrapErrorSerializer as wrapErrorSerializer, pinoStdSerializers_wrapRequestSerializer as wrapRequestSerializer, pinoStdSerializers_wrapResponseSerializer as wrapResponseSerializer };
}
// Type definitions for sonic-boom 0.7
// Definitions by: Alex Ferrando <https://github.com/alferpal>
// Igor Savin <https://github.com/kibertoad>
/**
 * Constructor options for {@link SonicBoom}.
 * NOTE(review): field semantics below follow the sonic-boom README — confirm
 * against the installed sonic-boom version.
 */
type SonicBoomOpts = {
// File descriptor to write to (alternative to `dest`).
fd?: number | string | symbol
// Destination file path or numerical file descriptor.
dest?: string | number
// Presumably the maximum buffered length before writes are dropped — verify.
maxLength?: number
// Minimum buffered length before a flush is attempted — verify.
minLength?: number
maxWrite?: number
// Interval-based automatic flushing — verify units (ms) against docs.
periodicFlush?: number
// Perform writes synchronously.
sync?: boolean
// Issue an fsync after writes — verify.
fsync?: boolean
// Open the destination in append mode.
append?: boolean
// File mode passed to the underlying open call.
mode?: string | number
// Create the destination directory if it does not exist.
mkdir?: boolean
contentMode?: 'buffer' | 'utf8'
// Invoked on transient write errors; return true to retry the write.
retryEAGAIN?: (err: Error, writeBufferLen: number, remainingBufferLen: number) => boolean
}
/**
 * Typings for the sonic-boom writer bundled with pino, used as a
 * high-throughput log destination stream.
 */
declare class SonicBoom extends EventEmitter {
/**
 * @param opts Configuration options; the destination is selected via
 * `opts.fd`/`opts.dest` (file path or numerical file descriptor).
 * Default destination: process.stdout.
 * @returns a new sonic-boom instance
 */
constructor(opts: SonicBoomOpts)
/**
 * Writes the string to the file. It will return false to signal the producer to slow down.
 */
write(string: string): boolean;
/**
 * Writes the current buffer to the file if a write was not in progress.
 * Do nothing if minLength is zero or if it is already writing.
 * @param cb Called once the flush completes (or fails, with an Error).
 */
flush(cb?: (err?: Error) => unknown): void;
/**
 * Reopen the file in place, useful for log rotation.
 * @param fileDescriptor Optional new destination path or descriptor.
 */
reopen(fileDescriptor?: string | number): void;
/**
 * Flushes the buffered data synchronously. This is a costly operation.
 */
flushSync(): void;
/**
 * Closes the stream, the data will be flushed down asynchronously
 */
end(): void;
/**
 * Closes the stream immediately, the data is not flushed.
 */
destroy(): void;
}
// Project: https://github.com/pinojs/pino.git, http://getpino.io
// Definitions by: Peter Snider <https://github.com/psnider>
//                 BendingBender <https://github.com/BendingBender>
//                 Christian Rackerseder <https://github.com/screendriver>
//                 GP <https://github.com/paambaati>
//                 Alex Ferrando <https://github.com/alferpal>
//                 Oleksandr Sidko <https://github.com/mortiy>
//                 Harris Lummis <https://github.com/lummish>
//                 Raoul Jaeckel <https://github.com/raoulus>
//                 Cory Donkin <https://github.com/Cooryd>
//                 Adam Vigneaux <https://github.com/AdamVig>
//                 Austin Beer <https://github.com/austin-beer>
//                 Michel Nemnom <https://github.com/Pegase745>
//                 Igor Savin <https://github.com/kibertoad>
//                 James Bromwell <https://github.com/thw0rted>
// TypeScript Version: 4.4
//// Non-exported types and interfaces
// ToDo https://github.com/pinojs/thread-stream/issues/24
// Placeholder until thread-stream publishes its own typings (see ToDo above).
type ThreadStream = any
// Produces the timestamp fragment of a log line as a string.
type TimeFn = () => string;
// Mixin hook: returns an object merged into every log line. Receives the
// per-call merge object, the numeric level being logged, and the logger.
type MixinFn<CustomLevels extends string = never> = (mergeObject: object, level: number, logger:pino.Logger<CustomLevels>) => object;
// Combines the per-call merge object with the mixin's result into one object.
type MixinMergeStrategyFn = (mergeObject: object, mixinObject: object) => object;
/**
 * Adds custom-level log methods to the logger type, together with the
 * `customLevels` / `useOnlyCustomLevels` metadata properties.
 */
type CustomLevelLogger<CustomLevels extends string, UseOnlyCustomLevels extends boolean = boolean> = {
/**
 * Define additional logging levels.
 */
customLevels: { [level in CustomLevels]: number };
/**
 * Use only defined `customLevels` and omit Pino's levels.
 */
useOnlyCustomLevels: UseOnlyCustomLevels;
} & {
// This will override default log methods: when only custom levels are in
// use, the built-in level methods are typed `never` so calling them is a
// compile-time error; otherwise they remain ordinary LogFns.
[K in Exclude<pino.Level, CustomLevels>]: UseOnlyCustomLevels extends true ? never : pino.LogFn;
} & {
// One LogFn per declared custom level.
[level in CustomLevels]: pino.LogFn;
};
/**
 * A synchronous callback that will run on each creation of a new child.
 * @param child: The newly created child logger instance.
 */
type OnChildCallback<CustomLevels extends string = never> = (child: pino.Logger<CustomLevels>) => void
/** Options for pino's log-redaction feature. */
interface redactOptions {
// Key paths whose values should be redacted from log lines.
paths: string[];
// Replacement value, or a function computing one from the value and its path.
censor?: string | ((value: any, path: string[]) => any);
// When true, remove the matched keys entirely instead of censoring them.
remove?: boolean;
}
/**
 * Instance API mixed into every pino logger on top of the level log methods:
 * child creation, bindings management, flushing, and typed events.
 */
interface LoggerExtras<CustomLevels extends string = never, UseOnlyCustomLevels extends boolean = boolean> extends EventEmitter {
/**
 * Exposes the Pino package version. Also available on the exported pino function.
 */
readonly version: string;
// Label/value mappings for this logger's levels (see pino.LevelMapping).
levels: pino.LevelMapping;
/**
 * Outputs the level as a string instead of integer.
 */
useLevelLabels: boolean;
/**
 * Returns the integer value for the logger instance's logging level.
 */
levelVal: number;
/**
 * Creates a child logger, setting all key-value pairs in `bindings` as properties in the log lines. All serializers will be applied to the given pair.
 * Child loggers use the same output stream as the parent and inherit the current log level of the parent at the time they are spawned.
 * From v2.x.x the log level of a child is mutable (whereas in v1.x.x it was immutable), and can be set independently of the parent.
 * If a `level` property is present in the object passed to `child` it will override the child logger level.
 *
 * @param bindings: an object of key-value pairs to include in log lines as properties.
 * @param options: an options object that will override child logger inherited options.
 * @returns a child logger instance.
 */
child<ChildCustomLevels extends string = never>(bindings: pino.Bindings, options?: ChildLoggerOptions<ChildCustomLevels>): pino.Logger<CustomLevels | ChildCustomLevels>;
/**
 * This can be used to modify the callback function on creation of a new child.
 */
onChild: OnChildCallback<CustomLevels>;
/**
 * Registers a listener function that is triggered when the level is changed.
 * Note: When browserified, this functionality will only be available if the `events` module has been required elsewhere
 * (e.g. if you're using streams in the browser). This allows for a trade-off between bundle size and functionality.
 *
 * @param event: only ever fires the `'level-change'` event
 * @param listener: The listener is passed four arguments: `levelLabel`, `levelValue`, `previousLevelLabel`, `previousLevelValue`.
 */
on(event: "level-change", listener: pino.LevelChangeEventListener<CustomLevels, UseOnlyCustomLevels>): this;
// The remaining EventEmitter registration methods share the same
// `'level-change'` typing as `on` above.
addListener(event: "level-change", listener: pino.LevelChangeEventListener<CustomLevels, UseOnlyCustomLevels>): this;
once(event: "level-change", listener: pino.LevelChangeEventListener<CustomLevels, UseOnlyCustomLevels>): this;
prependListener(event: "level-change", listener: pino.LevelChangeEventListener<CustomLevels, UseOnlyCustomLevels>): this;
prependOnceListener(event: "level-change", listener: pino.LevelChangeEventListener<CustomLevels, UseOnlyCustomLevels>): this;
removeListener(event: "level-change", listener: pino.LevelChangeEventListener<CustomLevels, UseOnlyCustomLevels>): this;
/**
 * A utility method for determining if a given log level will write to the destination.
 */
isLevelEnabled(level: pino.LevelWithSilentOrString): boolean;
/**
 * Returns an object containing all the current bindings, cloned from the ones passed in via logger.child().
 */
bindings(): pino.Bindings;
/**
 * Adds to the bindings of this logger instance.
 * Note: Does not overwrite bindings. Can potentially result in duplicate keys in log lines.
 *
 * @param bindings: an object of key-value pairs to include in log lines as properties.
 */
setBindings(bindings: pino.Bindings): void;
/**
 * Flushes the content of the buffer when using pino.destination({ sync: false }).
 * call the callback when finished
 */
flush(cb?: (err?: Error) => void): void;
}
declare namespace pino {
//// Exported types and interfaces
/**
 * The six standard level log methods plus the `silent` no-op, and the
 * mutable minimum-`level` property. Combined with LoggerExtras and
 * CustomLevelLogger to form the full `pino.Logger` type.
 */
interface BaseLogger {
/**
 * Set this property to the desired logging level. In order of priority, available levels are:
 *
 * - 'fatal'
 * - 'error'
 * - 'warn'
 * - 'info'
 * - 'debug'
 * - 'trace'
 *
 * The logging level is a __minimum__ level. For instance if `logger.level` is `'info'` then all `'fatal'`, `'error'`, `'warn'`,
 * and `'info'` logs will be enabled.
 *
 * You can pass `'silent'` to disable logging.
 */
level: pino.LevelWithSilentOrString;
/**
 * Log at `'fatal'` level the given msg. If the first argument is an object, all its properties will be included in the JSON line.
 * If more args follows `msg`, these will be used to format `msg` using `util.format`.
 *
 * @typeParam T: the interface of the object being serialized. Default is object.
 * @param obj: object to be serialized
 * @param msg: the log message to write
 * @param ...args: format string values when `msg` is a format string
 */
fatal: pino.LogFn;
/**
 * Log at `'error'` level the given msg. If the first argument is an object, all its properties will be included in the JSON line.
 * If more args follows `msg`, these will be used to format `msg` using `util.format`.
 *
 * @typeParam T: the interface of the object being serialized. Default is object.
 * @param obj: object to be serialized
 * @param msg: the log message to write
 * @param ...args: format string values when `msg` is a format string
 */
error: pino.LogFn;
/**
 * Log at `'warn'` level the given msg. If the first argument is an object, all its properties will be included in the JSON line.
 * If more args follows `msg`, these will be used to format `msg` using `util.format`.
 *
 * @typeParam T: the interface of the object being serialized. Default is object.
 * @param obj: object to be serialized
 * @param msg: the log message to write
 * @param ...args: format string values when `msg` is a format string
 */
warn: pino.LogFn;
/**
 * Log at `'info'` level the given msg. If the first argument is an object, all its properties will be included in the JSON line.
 * If more args follows `msg`, these will be used to format `msg` using `util.format`.
 *
 * @typeParam T: the interface of the object being serialized. Default is object.
 * @param obj: object to be serialized
 * @param msg: the log message to write
 * @param ...args: format string values when `msg` is a format string
 */
info: pino.LogFn;
/**
 * Log at `'debug'` level the given msg. If the first argument is an object, all its properties will be included in the JSON line.
 * If more args follows `msg`, these will be used to format `msg` using `util.format`.
 *
 * @typeParam T: the interface of the object being serialized. Default is object.
 * @param obj: object to be serialized
 * @param msg: the log message to write
 * @param ...args: format string values when `msg` is a format string
 */
debug: pino.LogFn;
/**
 * Log at `'trace'` level the given msg. If the first argument is an object, all its properties will be included in the JSON line.
 * If more args follows `msg`, these will be used to format `msg` using `util.format`.
 *
 * @typeParam T: the interface of the object being serialized. Default is object.
 * @param obj: object to be serialized
 * @param msg: the log message to write
 * @param ...args: format string values when `msg` is a format string
 */
trace: pino.LogFn;
/**
 * Noop function.
 */
silent: pino.LogFn;
}
// Arbitrary key-value pairs attached to every line written by a (child) logger.
type Bindings = Record<string, any>;
// The six built-in level labels.
type Level = "fatal" | "error" | "warn" | "info" | "debug" | "trace";
// A built-in level, or any other string (`string & {}` keeps literal
// completion for Level while still accepting custom level names).
type LevelOrString = Level | (string & {});
type LevelWithSilent = pino.Level | "silent";
type LevelWithSilentOrString = LevelWithSilent | (string & {});
// Transforms a value before it is written to the log line.
type SerializerFn = (value: any) => any;
type WriteFn = (o: object) => void;
// Listener signature for the `'level-change'` event (see LoggerExtras.on):
// new level label/value, previous label/value, and the logger itself.
type LevelChangeEventListener<CustomLevels extends string = never, UseOnlyCustomLevels extends boolean = boolean> = (
lvl: LevelWithSilentOrString,
val: number,
prevLvl: LevelWithSilentOrString,
prevVal: number,
logger: Logger<CustomLevels, UseOnlyCustomLevels>
) => void;
// Shape of a parsed log line.
type LogDescriptor = Record<string, any>;
type Logger<CustomLevels extends string = never, UseOnlyCustomLevels extends boolean = boolean> = BaseLogger & LoggerExtras<CustomLevels> & CustomLevelLogger<CustomLevels, UseOnlyCustomLevels>;
type SerializedError = SerializedError;
type SerializedResponse = SerializedResponse;
type SerializedRequest = SerializedRequest;
/** One entry in a multi-target pino transport configuration. */
interface TransportTargetOptions<TransportOptions = Record<string, any>> {
// Module specifier of the transport to load.
target: string
// Options passed through to the transport module.
options?: TransportOptions
// Minimum level routed to this target — presumably; confirm against pino docs.
level?: LevelWithSilentOrString
}
interface TransportBaseOptions<TransportOptions = Record<string, any>> {
options?: TransportOptions
worker?: Wor