@axflow/models
Zero-dependency, modular SDK for building robust natural language applications
TypeScript
type SharedRequestOptions = {
apiKey?: string;
apiUrl?: string;
fetch?: typeof fetch;
headers?: Record<string, string>;
signal?: AbortSignal;
};
declare namespace OpenAIChatTypes {
type RequestOptions = SharedRequestOptions;
type Function = {
name: string;
parameters: Record<string, unknown>;
description?: string;
};
type Request = {
model: string;
messages: Message[];
functions?: Array<Function>;
tools?: Array<{
type: 'function';
function: Function;
}>;
tool_choice?: 'none' | 'auto' | {
type: 'function';
name: string;
};
function_call?: 'none' | 'auto' | {
name: string;
};
response_format?: {
type: 'text' | 'json_object';
};
seed?: number | null;
temperature?: number | null;
top_p?: number | null;
n?: number | null;
stop?: string | null | Array<string>;
max_tokens?: number;
presence_penalty?: number | null;
frequency_penalty?: number | null;
logit_bias?: Record<string, number> | null;
user?: string;
logprobs?: boolean | null;
top_logprobs?: number | null;
};
type SystemMessage = {
role: 'system';
name?: string;
content: string | null;
};
type UserMessage = {
role: 'user';
name?: string;
content: string | null;
};
type AssistantMessage = {
role: 'assistant';
name?: string;
content?: string | null;
tool_calls?: Array<{
id: string;
type: 'function';
function: {
name: string;
arguments: string;
};
}>;
function_call?: {
name: string;
arguments: string;
};
};
type ToolMessage = {
role: 'tool';
name?: string;
content: string | null;
tool_call_id: string;
};
type FunctionMessage = {
role: 'function';
name: string;
content: string | null;
};
type Message = SystemMessage | UserMessage | AssistantMessage | ToolMessage | FunctionMessage;
type Response = {
id: string;
object: string;
created: number;
model: string;
system_fingerprint: string;
choices: Array<{
index: number;
finish_reason: 'stop' | 'length' | 'function_call' | 'content_filter' | 'tool_calls' | null;
message: Message;
logprobs: {
content: any[] | null;
} | null;
}>;
usage?: {
completion_tokens: number;
prompt_tokens: number;
total_tokens: number;
};
};
type Chunk = {
id: string;
object: string;
created: number;
model: string;
system_fingerprint: string;
choices: Array<{
index: number;
delta: Delta;
finish_reason: 'stop' | 'length' | 'function_call' | 'content_filter' | 'tool_calls' | null;
logprobs: {
content: any[] | null;
} | null;
}>;
};
type Delta = {
role?: 'system' | 'user' | 'assistant' | 'function';
content?: string | null;
function_call?: {
name?: string;
arguments?: string;
};
tool_calls?: Array<{
id?: string;
index: number;
type?: 'function';
function: {
name?: string;
arguments: string;
};
}>;
};
}
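// Example (a hedged sketch, not part of the declarations above): building a typed request
// value. The subpath import '@axflow/models/openai/chat', the model name, and the weather
// tool are illustrative assumptions, not values required by this SDK.
import type { OpenAIChatTypes } from '@axflow/models/openai/chat';

const request: OpenAIChatTypes.Request = {
  model: 'gpt-4-1106-preview',
  messages: [
    { role: 'system', content: 'You are a concise assistant.' },
    { role: 'user', content: 'What is the weather in Berlin today?' },
  ],
  tools: [
    {
      type: 'function',
      function: {
        name: 'get_weather',
        description: 'Look up the current weather for a city',
        parameters: {
          type: 'object',
          properties: { city: { type: 'string' } },
          required: ['city'],
        },
      },
    },
  ],
  temperature: 0,
};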
/**
* Run a chat completion against the OpenAI API.
*
* @see https://platform.openai.com/docs/api-reference/chat
*
* @param request The request body sent to OpenAI. See OpenAI's documentation for /v1/chat/completions for supported parameters.
* @param options
* @param options.apiKey OpenAI API key.
* @param options.apiUrl The URL of the OpenAI (or OpenAI-compatible) API. Defaults to https://api.openai.com/v1/chat/completions.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @param options.headers Additional HTTP headers to send with the request.
* @param options.signal An AbortSignal that can be used to abort the fetch request.
* @returns OpenAI chat completion. See OpenAI's documentation for /v1/chat/completions.
*/
declare function run(request: OpenAIChatTypes.Request, options: OpenAIChatTypes.RequestOptions): Promise<OpenAIChatTypes.Response>;
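// Example (a hedged sketch): a single, non-streaming completion via OpenAIChat.run. The
// '@axflow/models/openai/chat' subpath, the model name, and the OPENAI_API_KEY environment
// variable are assumptions for illustration.
import { OpenAIChat } from '@axflow/models/openai/chat';

const response = await OpenAIChat.run(
  { model: 'gpt-3.5-turbo', messages: [{ role: 'user', content: 'Say hello.' }] },
  { apiKey: process.env.OPENAI_API_KEY },
);
console.log(response.choices[0].message);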
/**
* Run a streaming chat completion against the OpenAI API. The resulting stream contains the raw, unmodified bytes from the API.
*
* @see https://platform.openai.com/docs/api-reference/chat
*
* @param request The request body sent to OpenAI. See OpenAI's documentation for /v1/chat/completions for supported parameters.
* @param options
* @param options.apiKey OpenAI API key.
* @param options.apiUrl The URL of the OpenAI (or OpenAI-compatible) API. Defaults to https://api.openai.com/v1/chat/completions.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @param options.headers Additional HTTP headers to send with the request.
* @param options.signal An AbortSignal that can be used to abort the fetch request.
* @returns A stream of bytes directly from the API.
*/
declare function streamBytes(request: OpenAIChatTypes.Request, options: OpenAIChatTypes.RequestOptions): Promise<ReadableStream<Uint8Array>>;
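// Example (a hedged sketch): because streamBytes returns the unmodified bytes from the API,
// the stream can be proxied straight through to an HTTP response. The fetch-style POST
// handler shape is an assumption about the hosting environment, not part of this SDK.
import { OpenAIChat } from '@axflow/models/openai/chat';

export async function POST(req: Request): Promise<Response> {
  const { messages } = await req.json();
  const byteStream = await OpenAIChat.streamBytes(
    { model: 'gpt-3.5-turbo', messages },
    { apiKey: process.env.OPENAI_API_KEY },
  );
  return new Response(byteStream, {
    headers: { 'Content-Type': 'text/event-stream' },
  });
}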
/**
* Run a streaming chat completion against the OpenAI API. The resulting stream emits the parsed stream data as JavaScript objects.
*
* @see https://platform.openai.com/docs/api-reference/chat
*
* @param request The request body sent to OpenAI. See OpenAI's documentation for /v1/chat/completions for supported parameters.
* @param options
* @param options.apiKey OpenAI API key.
* @param options.apiUrl The URL of the OpenAI (or OpenAI-compatible) API. Defaults to https://api.openai.com/v1/chat/completions.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @param options.headers Additional HTTP headers to send with the request.
* @param options.signal An AbortSignal that can be used to abort the fetch request.
* @returns A stream of objects representing each chunk from the API.
*/
declare function stream(request: OpenAIChatTypes.Request, options: OpenAIChatTypes.RequestOptions): Promise<ReadableStream<OpenAIChatTypes.Chunk>>;
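// Example (a hedged sketch): consuming parsed chunks and accumulating the assistant's reply.
// A reader loop is used so the example does not depend on async iteration of ReadableStream;
// the model name and prompt are illustrative.
import { OpenAIChat } from '@axflow/models/openai/chat';

const chunkStream = await OpenAIChat.stream(
  { model: 'gpt-3.5-turbo', messages: [{ role: 'user', content: 'Write a haiku about rain.' }] },
  { apiKey: process.env.OPENAI_API_KEY },
);

let text = '';
const reader = chunkStream.getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  // Each chunk's token text, if any, lives on choices[n].delta.content.
  text += value.choices[0]?.delta.content ?? '';
}
console.log(text);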
/**
* Run a streaming chat completion against the OpenAI API. The resulting stream emits only the string tokens.
*
* @see https://platform.openai.com/docs/api-reference/chat
*
* @param request The request body sent to OpenAI. See OpenAI's documentation for /v1/chat/completions for supported parameters.
* @param options
* @param options.apiKey OpenAI API key.
* @param options.apiUrl The URL of the OpenAI (or OpenAI-compatible) API. Defaults to https://api.openai.com/v1/chat/completions.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @param options.headers Additional HTTP headers to send with the request.
* @param options.signal An AbortSignal that can be used to abort the fetch request.
* @returns A stream of tokens from the API.
*/
declare function streamTokens(request: OpenAIChatTypes.Request, options: OpenAIChatTypes.RequestOptions): Promise<ReadableStream<string>>;
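// Example (a hedged sketch): streamTokens already unwraps the deltas, so the stream yields
// plain strings that can be written out as they arrive. Writing to process.stdout assumes
// a Node.js runtime.
import { OpenAIChat } from '@axflow/models/openai/chat';

const tokenStream = await OpenAIChat.streamTokens(
  { model: 'gpt-3.5-turbo', messages: [{ role: 'user', content: 'Count to five.' }] },
  { apiKey: process.env.OPENAI_API_KEY },
);

const reader = tokenStream.getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  process.stdout.write(value);
}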
/**
* A class that encapsulates methods for calling the OpenAI Chat Completion API.
*/
declare class OpenAIChat {
static run: typeof run;
static stream: typeof stream;
static streamBytes: typeof streamBytes;
static streamTokens: typeof streamTokens;
}
export { OpenAIChat, OpenAIChatTypes };
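// Example (a hedged sketch): the shared request options work the same for every method, so a
// compatible-API URL, extra headers, and an AbortSignal can be combined on one call. The proxy
// URL and header name below are placeholders, not values this SDK requires.
import { OpenAIChat } from '@axflow/models/openai/chat';

const controller = new AbortController();
const timeout = setTimeout(() => controller.abort(), 10_000); // give up after 10 seconds

const result = await OpenAIChat.run(
  { model: 'gpt-3.5-turbo', messages: [{ role: 'user', content: 'Ping' }] },
  {
    apiKey: process.env.OPENAI_API_KEY,
    apiUrl: 'https://my-openai-proxy.example.com/v1/chat/completions',
    headers: { 'X-Request-Source': 'docs-example' },
    signal: controller.signal,
  },
);
clearTimeout(timeout);
console.log(result.choices[0].finish_reason);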