// ai-utils.js — Build AI applications, chatbots, and agents with JavaScript and TypeScript.
// TypeScript declaration file for the Cohere text generation model.
import { z } from "zod";
import { AbstractModel } from "../../model-function/AbstractModel.js";
import { FunctionOptions } from "../../model-function/FunctionOptions.js";
import { DeltaEvent } from "../../model-function/generate-text/DeltaEvent.js";
import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
import { PromptMapping } from "../../prompt/PromptMapping.js";
import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
import { RetryFunction } from "../../util/api/RetryFunction.js";
import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
import { ResponseHandler } from "../../util/api/postToApi.js";
import { CohereTokenizer } from "./CohereTokenizer.js";
/**
 * Known Cohere text generation models and their context window sizes
 * (in tokens). The concrete sizes are assigned in the implementation
 * file; only the shape is visible in this declaration.
 */
export declare const COHERE_TEXT_GENERATION_MODELS: {
    command: {
        contextWindowSize: number;
    };
    "command-nightly": {
        contextWindowSize: number;
    };
    "command-light": {
        contextWindowSize: number;
    };
    "command-light-nightly": {
        contextWindowSize: number;
    };
};
export type CohereTextGenerationModelType = keyof typeof COHERE_TEXT_GENERATION_MODELS;
/**
 * Settings for {@link CohereTextGenerationModel}: client configuration
 * (base URL, API key, retry/throttle) plus generation parameters that
 * mirror the Cohere Co.Generate API request body.
 *
 * NOTE(review): per-parameter semantics below follow the Cohere API docs
 * (https://docs.cohere.com/reference/generate) — confirm against the
 * implementation's request mapping.
 */
export interface CohereTextGenerationModelSettings extends TextGenerationModelSettings {
    /** Cohere model to call. */
    model: CohereTextGenerationModelType;
    /** Override for the API base URL. */
    baseUrl?: string;
    /** API key; presumably read from an environment variable when omitted — see implementation. */
    apiKey?: string;
    /** Retry strategy for API calls. */
    retry?: RetryFunction;
    /** Throttle strategy for API calls. */
    throttle?: ThrottleFunction;
    /** Separate retry/throttle configuration for the tokenizer endpoint. */
    tokenizerSettings?: {
        retry?: RetryFunction;
        throttle?: ThrottleFunction;
    };
    /** Number of generations to request per call. */
    numGenerations?: number;
    /** Maximum number of tokens to generate. */
    maxTokens?: number;
    /** Sampling temperature. */
    temperature?: number;
    /** Top-k sampling parameter. */
    k?: number;
    /** Nucleus (top-p) sampling parameter. */
    p?: number;
    frequencyPenalty?: number;
    presencePenalty?: number;
    /** Sequences at which generation ends (end sequence included in the output, per Cohere docs). */
    endSequences?: string[];
    /** Sequences at which generation stops (sequence excluded from the output, per Cohere docs). */
    stopSequences?: string[];
    /** Whether token likelihoods are returned with the generations. */
    returnLikelihoods?: "GENERATION" | "ALL" | "NONE";
    /** Per-token logit bias map. */
    logitBias?: Record<string, number>;
    /** How prompts longer than the context window are truncated. */
    truncate?: "NONE" | "START" | "END";
}
/**
* Create a text generation model that calls the Cohere Co.Generate API.
*
* @see https://docs.cohere.com/reference/generate
*
* @example
* const model = new CohereTextGenerationModel({
* model: "command-nightly",
* temperature: 0.7,
* maxTokens: 500,
* });
*
* const { text } = await generateText(
* model,
* "Write a short story about a robot learning to love:\n\n"
* );
*/
export declare class CohereTextGenerationModel extends AbstractModel<CohereTextGenerationModelSettings> implements TextGenerationModel<string, CohereTextGenerationResponse, CohereTextGenerationDelta, CohereTextGenerationModelSettings> {
    constructor(settings: CohereTextGenerationModelSettings);
    /** Provider discriminator used by the model-function layer. */
    readonly provider: "cohere";
    /** The configured Cohere model identifier. */
    get modelName(): "command" | "command-nightly" | "command-light" | "command-light-nightly";
    /** Context window size (in tokens) of the configured model. */
    readonly contextWindowSize: number;
    /** Tokenizer matching the configured model. */
    readonly tokenizer: CohereTokenizer;
    private get apiKey();
    /** Counts the tokens of `input` via the Cohere tokenizer endpoint. */
    countPromptTokens(input: string): Promise<number>;
    /**
     * Low-level API call. The `responseFormat` selects between the JSON
     * (complete response) and delta-stream handlers and determines RESPONSE.
     */
    callAPI<RESPONSE>(prompt: string, options: {
        responseFormat: CohereTextGenerationResponseFormatType<RESPONSE>;
    } & FunctionOptions<CohereTextGenerationModelSettings>): Promise<RESPONSE>;
    /**
     * Non-streaming generation call.
     *
     * NOTE(review): the return type previously spelled out the structural
     * expansion of `CohereTextGenerationResponse` (the z.infer output type);
     * the named alias is type-identical and easier to read.
     */
    generateTextResponse(prompt: string, options?: FunctionOptions<CohereTextGenerationModelSettings>): Promise<CohereTextGenerationResponse>;
    /** Extracts the generated text from a full API response. */
    extractText(response: CohereTextGenerationResponse): string;
    /** Streaming generation call; yields delta events as they arrive. */
    generateDeltaStreamResponse(prompt: string, options?: FunctionOptions<CohereTextGenerationModelSettings>): Promise<AsyncIterable<DeltaEvent<CohereTextGenerationDelta>>>;
    /** Extracts the incremental text from a delta event, if any. */
    extractTextDelta(fullDelta: CohereTextGenerationDelta): string | undefined;
    /** Wraps this model so it accepts INPUT_PROMPT via the given prompt mapping. */
    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, string>): PromptMappingTextGenerationModel<INPUT_PROMPT, string, CohereTextGenerationResponse, CohereTextGenerationDelta, CohereTextGenerationModelSettings, this>;
    /** Returns a copy of this model with the additional settings merged in. */
    withSettings(additionalSettings: Partial<CohereTextGenerationModelSettings>): this;
    /** Configured completion-token limit; presumably maps to the `maxTokens` setting — confirm in implementation. */
    get maxCompletionTokens(): number | undefined;
    withMaxCompletionTokens(maxCompletionTokens: number): this;
    withStopTokens(stopTokens: string[]): this;
}
/**
 * Zod schema validating the Co.Generate API response.
 *
 * The verbose `ZodObject` type parameters below are compiler-generated:
 * <shape, unknown-key policy ("strip"), catchall, output type, input type>.
 * The output type is what {@link CohereTextGenerationResponse} infers to.
 */
declare const cohereTextGenerationResponseSchema: z.ZodObject<{
    id: z.ZodString;
    generations: z.ZodArray<z.ZodObject<{
        id: z.ZodString;
        text: z.ZodString;
        finish_reason: z.ZodOptional<z.ZodString>;
    }, "strip", z.ZodTypeAny, {
        text: string;
        id: string;
        finish_reason?: string | undefined;
    }, {
        text: string;
        id: string;
        finish_reason?: string | undefined;
    }>, "many">;
    prompt: z.ZodString;
    meta: z.ZodOptional<z.ZodObject<{
        api_version: z.ZodObject<{
            version: z.ZodString;
        }, "strip", z.ZodTypeAny, {
            version: string;
        }, {
            version: string;
        }>;
    }, "strip", z.ZodTypeAny, {
        api_version: {
            version: string;
        };
    }, {
        api_version: {
            version: string;
        };
    }>>;
}, "strip", z.ZodTypeAny, {
    prompt: string;
    id: string;
    generations: {
        text: string;
        id: string;
        finish_reason?: string | undefined;
    }[];
    meta?: {
        api_version: {
            version: string;
        };
    } | undefined;
}, {
    prompt: string;
    id: string;
    generations: {
        text: string;
        id: string;
        finish_reason?: string | undefined;
    }[];
    meta?: {
        api_version: {
            version: string;
        };
    } | undefined;
}>;
export type CohereTextGenerationResponse = z.infer<typeof cohereTextGenerationResponseSchema>;
/**
 * One event of the generation stream.
 * NOTE(review): presumably `content` is the full text accumulated so far and
 * `delta` the most recent chunk — confirm against the stream handler.
 */
export type CohereTextGenerationDelta = {
    content: string;
    /** True once the stream has finished. */
    isComplete: boolean;
    delta: string;
};
/**
 * Pairs the `stream` flag sent to the API with the handler that parses
 * the matching response body into T.
 */
export type CohereTextGenerationResponseFormatType<T> = {
    stream: boolean;
    handler: ResponseHandler<T>;
};
export declare const CohereTextGenerationResponseFormat: {
/**
* Returns the response as a JSON object.
*/
json: {
stream: false;
handler: ResponseHandler<{
prompt: string;
id: string;
generations: {
text: string;
id: string;
finish_reason?: string | undefined;
}[];
meta?: {
api_version: {
version: string;
};
} | undefined;
}>;
};
/**
* Returns an async iterable over the full deltas (all choices, including full current state at time of event)
* of the response stream.
*/
deltaIterable: {
stream: true;
handler: ({ response }: {
response: Response;
}) => Promise<AsyncIterable<DeltaEvent<CohereTextGenerationDelta>>>;
};
};
export {};