// ai-utils.js — Build AI applications, chatbots, and agents with JavaScript and TypeScript.
// TypeScript declaration file (.d.ts). Original listing metadata: 255 lines (254 loc), 8.91 kB.
import z from "zod";
import { AbstractModel } from "../../model-function/AbstractModel.js";
import { FunctionOptions } from "../../model-function/FunctionOptions.js";
import { DeltaEvent } from "../../model-function/generate-text/DeltaEvent.js";
import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
import { PromptMapping } from "../../prompt/PromptMapping.js";
import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
import { RetryFunction } from "../../util/api/RetryFunction.js";
import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
import { ResponseHandler } from "../../util/api/postToApi.js";
import { OpenAIImageGenerationCallSettings } from "./OpenAIImageGenerationModel.js";
import { OpenAIModelSettings } from "./OpenAIModelSettings.js";
import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
/**
 * Metadata table for the supported OpenAI text-completion models.
 *
 * Per model:
 * - `contextWindowSize` — the model's context window size in tokens.
 * - `tokenCostInMillicents` — price per token in millicents (1/1000 of a US cent).
 *
 * @see https://platform.openai.com/docs/models/
 * @see https://openai.com/pricing
 */
export declare const OPENAI_TEXT_GENERATION_MODELS: {
    "text-davinci-003": {
        contextWindowSize: number;
        tokenCostInMillicents: number;
    };
    "text-davinci-002": {
        contextWindowSize: number;
        tokenCostInMillicents: number;
    };
    "code-davinci-002": {
        contextWindowSize: number;
        tokenCostInMillicents: number;
    };
    davinci: {
        contextWindowSize: number;
        tokenCostInMillicents: number;
    };
    "text-curie-001": {
        contextWindowSize: number;
        tokenCostInMillicents: number;
    };
    curie: {
        contextWindowSize: number;
        tokenCostInMillicents: number;
    };
    "text-babbage-001": {
        contextWindowSize: number;
        tokenCostInMillicents: number;
    };
    babbage: {
        contextWindowSize: number;
        tokenCostInMillicents: number;
    };
    "text-ada-001": {
        contextWindowSize: number;
        tokenCostInMillicents: number;
    };
    ada: {
        contextWindowSize: number;
        tokenCostInMillicents: number;
    };
};
export type OpenAITextGenerationModelType = keyof typeof OPENAI_TEXT_GENERATION_MODELS;
export declare const isOpenAITextGenerationModel: (model: string) => model is "text-davinci-003" | "text-davinci-002" | "code-davinci-002" | "davinci" | "text-curie-001" | "curie" | "text-babbage-001" | "babbage" | "text-ada-001" | "ada";
export declare const calculateOpenAITextGenerationCostInMillicents: ({ model, response, }: {
model: OpenAITextGenerationModelType;
response: OpenAITextGenerationResponse;
}) => number;
/**
 * Settings for {@link OpenAITextGenerationModel}.
 *
 * Most optional fields mirror parameters of the OpenAI completions API:
 * https://platform.openai.com/docs/api-reference/completions/create
 */
export interface OpenAITextGenerationModelSettings extends TextGenerationModelSettings {
/** The OpenAI model to call (required). */
model: OpenAITextGenerationModelType;
/** Override for the API base URL. */
baseUrl?: string;
/** OpenAI API key. NOTE(review): presumably falls back to an environment variable when omitted — confirm in the implementation. */
apiKey?: string;
/** Retry strategy for failed API calls. */
retry?: RetryFunction;
/** Throttling strategy applied to API calls. */
throttle?: ThrottleFunction;
/** When true, a caller-supplied user id is forwarded to OpenAI (see the `user` option of `callAPI`). */
isUserIdForwardingEnabled?: boolean;
// The fields below are passed through to the OpenAI completions API
// (names camelCased; e.g. topP -> top_p, presencePenalty -> presence_penalty).
suffix?: string;
maxTokens?: number;
temperature?: number;
topP?: number;
n?: number;
logprobs?: number;
echo?: boolean;
stop?: string | string[];
presencePenalty?: number;
frequencyPenalty?: number;
bestOf?: number;
}
/**
* Create a text generation model that calls the OpenAI text completion API.
*
* @see https://platform.openai.com/docs/api-reference/completions/create
*
* @example
* const model = new OpenAITextGenerationModel({
* model: "text-davinci-003",
* temperature: 0.7,
* maxTokens: 500,
* retry: retryWithExponentialBackoff({ maxTries: 5 }),
* });
*
* const { text } = await generateText(
* model,
* "Write a short story about a robot learning to love:\n\n"
* );
*/
export declare class OpenAITextGenerationModel extends AbstractModel<OpenAITextGenerationModelSettings> implements TextGenerationModel<string, OpenAITextGenerationResponse, OpenAITextGenerationDelta, OpenAITextGenerationModelSettings> {
constructor(settings: OpenAITextGenerationModelSettings);
readonly provider: "openai";
get modelName(): "text-davinci-003" | "text-davinci-002" | "code-davinci-002" | "davinci" | "text-curie-001" | "curie" | "text-babbage-001" | "babbage" | "text-ada-001" | "ada";
readonly contextWindowSize: number;
readonly tokenizer: TikTokenTokenizer;
private get apiKey();
countPromptTokens(input: string): Promise<number>;
callAPI<RESULT>(prompt: string, options: {
responseFormat: OpenAITextResponseFormatType<RESULT>;
} & FunctionOptions<Partial<OpenAIImageGenerationCallSettings & OpenAIModelSettings & {
user?: string;
}>>): Promise<RESULT>;
generateTextResponse(prompt: string, options?: FunctionOptions<OpenAITextGenerationModelSettings>): Promise<{
object: "text_completion";
model: string;
id: string;
created: number;
usage: {
prompt_tokens: number;
total_tokens: number;
completion_tokens: number;
};
choices: {
text: string;
finish_reason: string;
index: number;
logprobs?: any;
}[];
}>;
extractText(response: OpenAITextGenerationResponse): string;
generateDeltaStreamResponse(prompt: string, options?: FunctionOptions<OpenAITextGenerationModelSettings>): Promise<AsyncIterable<DeltaEvent<OpenAITextGenerationDelta>>>;
extractTextDelta(fullDelta: OpenAITextGenerationDelta): string | undefined;
mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, string>): PromptMappingTextGenerationModel<INPUT_PROMPT, string, OpenAITextGenerationResponse, OpenAITextGenerationDelta, OpenAITextGenerationModelSettings, this>;
withSettings(additionalSettings: Partial<OpenAITextGenerationModelSettings>): this;
get maxCompletionTokens(): number | undefined;
withMaxCompletionTokens(maxCompletionTokens: number): this;
withStopTokens(stopTokens: string[]): this;
}
/**
 * Zod schema validating the JSON body returned by the OpenAI completions
 * endpoint. {@link OpenAITextGenerationResponse} is inferred from this schema.
 * (The repeated input/output object types below are compiler-emitted for
 * `z.ZodObject` and intentionally identical.)
 */
declare const openAITextGenerationResponseSchema: z.ZodObject<{
id: z.ZodString;
object: z.ZodLiteral<"text_completion">;
created: z.ZodNumber;
model: z.ZodString;
choices: z.ZodArray<z.ZodObject<{
text: z.ZodString;
index: z.ZodNumber;
logprobs: z.ZodNullable<z.ZodAny>;
finish_reason: z.ZodString;
}, "strip", z.ZodTypeAny, {
text: string;
finish_reason: string;
index: number;
logprobs?: any;
}, {
text: string;
finish_reason: string;
index: number;
logprobs?: any;
}>, "many">;
usage: z.ZodObject<{
prompt_tokens: z.ZodNumber;
completion_tokens: z.ZodNumber;
total_tokens: z.ZodNumber;
}, "strip", z.ZodTypeAny, {
prompt_tokens: number;
total_tokens: number;
completion_tokens: number;
}, {
prompt_tokens: number;
total_tokens: number;
completion_tokens: number;
}>;
}, "strip", z.ZodTypeAny, {
object: "text_completion";
model: string;
id: string;
created: number;
usage: {
prompt_tokens: number;
total_tokens: number;
completion_tokens: number;
};
choices: {
text: string;
finish_reason: string;
index: number;
logprobs?: any;
}[];
}, {
object: "text_completion";
model: string;
id: string;
created: number;
usage: {
prompt_tokens: number;
total_tokens: number;
completion_tokens: number;
};
choices: {
text: string;
finish_reason: string;
index: number;
logprobs?: any;
}[];
}>;
/** Parsed response of the OpenAI completions endpoint (inferred from the zod schema above). */
export type OpenAITextGenerationResponse = z.infer<typeof openAITextGenerationResponseSchema>;
/**
 * Describes how a raw API response is consumed: `stream` selects streaming
 * vs. single-response mode, and `handler` turns the response into a `T`.
 */
export type OpenAITextResponseFormatType<T> = {
stream: boolean;
handler: ResponseHandler<T>;
};
/**
 * Built-in response formats for the OpenAI text completion API, for use as
 * the `responseFormat` option of `OpenAITextGenerationModel.callAPI`.
 */
export declare const OpenAITextResponseFormat: {
/**
 * Returns the response as a JSON object.
 */
json: {
stream: false;
handler: ResponseHandler<{
object: "text_completion";
model: string;
id: string;
created: number;
usage: {
prompt_tokens: number;
total_tokens: number;
completion_tokens: number;
};
choices: {
text: string;
finish_reason: string;
index: number;
logprobs?: any;
}[];
}>;
};
/**
 * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
 * of the response stream.
 */
deltaIterable: {
stream: true;
handler: ({ response }: {
response: Response;
}) => Promise<AsyncIterable<DeltaEvent<OpenAITextGenerationDelta>>>;
};
};
/**
 * Streaming delta payload: one entry per choice, carrying the full current
 * state at the time of the event (per the `deltaIterable` format above).
 */
export type OpenAITextGenerationDelta = Array<{
// Accumulated text generated so far for this choice.
content: string;
// Whether this choice has finished streaming.
isComplete: boolean;
// Presumably the incremental text added by this event — confirm in implementation.
delta: string;
}>;
export {};