/**
 * @llamaindex/core: LlamaIndex Core Module (TypeScript type declarations)
 */
import { JSONObject, JSONValue } from '../../global/dist/index.cjs';
import { MessageContentDetail as MessageContentDetail$1, MessageContent as MessageContent$1, ChatMessage as ChatMessage$1, ToolMetadata as ToolMetadata$1 } from '../../llms/dist/index.cjs';
import { QueryType } from '../../query-engine/dist/index.cjs';
import { ImageType } from '../../schema/dist/index.cjs';
import { Tokenizers } from '@llamaindex/env/tokenizers';
import { JSONSchemaType } from 'ajv';
import { z } from 'zod';
/**
 * Extracts just the text, whether from a multi-modal message,
 * a single text message, or a query.
 *
 * @param message The message to extract text from.
 * @returns The extracted text.
 */
declare function extractText(message: MessageContent$1 | QueryType): string;
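/*
 * Usage sketch (illustrative; the multi-modal shape follows the
 * `MessageContentDetail` union declared below):
 *
 *   const text = extractText([
 *     { type: "text", text: "Describe this image:" },
 *     { type: "image_url", image_url: { url: "https://example.com/cat.png" } },
 *   ]);
 *   // expected to yield only the text parts, e.g. "Describe this image:"
 */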
/**
 * Extracts a single text value from a multi-modal message content detail.
 *
 * @param message The message detail to extract text from.
 * @returns The extracted text, or `null` if the detail is not text.
 */
declare function extractSingleText(message: MessageContentDetail$1): string | null;
/**
 * Extracts an image from a multi-modal message content detail.
 *
 * @param message The message detail to extract an image from.
 * @returns The extracted image, or `null` if the detail is not an image.
 */
declare function extractImage(message: MessageContentDetail$1): ImageType | null;
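/*
 * Usage sketch for the single-detail extractors (illustrative values):
 *
 *   extractSingleText({ type: "text", text: "hi" }); // "hi"
 *   extractSingleText({ type: "image_url", image_url: { url: "https://example.com/a.png" } }); // null
 *   extractImage({ type: "image_url", image_url: { url: "https://example.com/a.png" } });
 *   // expected to return an ImageType for image details, or null for text details
 */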
declare const extractDataUrlComponents: (dataUrl: string) => {
mimeType: string;
base64: string;
};
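/*
 * Usage sketch (expects a data URL of the form `data:<mime>;base64,<payload>`):
 *
 *   const { mimeType, base64 } = extractDataUrlComponents(
 *     "data:image/png;base64,iVBORw0KGgo=",
 *   );
 *   // mimeType === "image/png", base64 === "iVBORw0KGgo="
 */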
declare function messagesToHistory(messages: ChatMessage$1[]): string;
declare function toToolDescriptions(tools: ToolMetadata$1[]): string;
declare function imageToDataUrl(input: ImageType | Uint8Array): Promise<string>;
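/*
 * Usage sketch (illustrative; raw bytes are accepted alongside ImageType):
 *
 *   const dataUrl = await imageToDataUrl(new Uint8Array([0x89, 0x50, 0x4e, 0x47]));
 *   // expected to resolve to a `data:<mime>;base64,...` string
 */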
/**
* @internal
*/
interface LLMChat<AdditionalChatOptions extends object = object, AdditionalMessageOptions extends object = object> {
chat(params: LLMChatParamsStreaming<AdditionalChatOptions> | LLMChatParamsNonStreaming<AdditionalChatOptions>): Promise<ChatResponse<AdditionalMessageOptions> | AsyncIterable<ChatResponseChunk<AdditionalMessageOptions>>>;
}
/**
* Unified language model interface
*/
interface LLM<AdditionalChatOptions extends object = object, AdditionalMessageOptions extends object = object> extends LLMChat<AdditionalChatOptions> {
metadata: LLMMetadata;
/**
* Get a chat response from the LLM
*/
chat(params: LLMChatParamsStreaming<AdditionalChatOptions, AdditionalMessageOptions>): Promise<AsyncIterable<ChatResponseChunk>>;
chat(params: LLMChatParamsNonStreaming<AdditionalChatOptions, AdditionalMessageOptions>): Promise<ChatResponse<AdditionalMessageOptions>>;
/**
* Get a prompt completion from the LLM
*/
complete(params: LLMCompletionParamsStreaming): Promise<AsyncIterable<CompletionResponse>>;
complete(params: LLMCompletionParamsNonStreaming): Promise<CompletionResponse>;
}
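/*
 * Usage sketch for the chat overloads (assumes `llm` is any concrete `LLM`
 * implementation; the return type is selected by the `stream` flag):
 *
 *   const res = await llm.chat({ messages: [{ role: "user", content: "Hi" }] });
 *   console.log(res.message.content);
 *
 *   const stream = await llm.chat({
 *     messages: [{ role: "user", content: "Hi" }],
 *     stream: true,
 *   });
 *   for await (const chunk of stream) process.stdout.write(chunk.delta);
 */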
type MessageType = "user" | "assistant" | "system" | "memory" | "developer";
type ChatMessage<AdditionalMessageOptions extends object = object> = {
content: MessageContent;
role: MessageType;
options?: undefined | AdditionalMessageOptions;
};
interface ChatResponse<AdditionalMessageOptions extends object = object> {
message: ChatMessage<AdditionalMessageOptions>;
/**
 * Raw response from the LLM
 *
 * If the LLM responded with an iterable of chunks, this will be an array of those chunks
 */
raw: object | null;
}
type ChatResponseChunk<AdditionalMessageOptions extends object = object> = {
raw: object | null;
delta: string;
options?: undefined | AdditionalMessageOptions;
};
interface CompletionResponse {
text: string;
/**
 * Raw response from the LLM
 *
 * This may be `null` if the LLM responded with an iterable of chunks
 */
raw: object | null;
}
type LLMMetadata = {
model: string;
temperature: number;
topP: number;
maxTokens?: number | undefined;
contextWindow: number;
tokenizer: Tokenizers | undefined;
structuredOutput: boolean;
};
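/*
 * Example metadata value (illustrative model name and numbers only):
 *
 *   const metadata: LLMMetadata = {
 *     model: "gpt-4o-mini",
 *     temperature: 0.7,
 *     topP: 1,
 *     contextWindow: 128000,
 *     tokenizer: undefined,
 *     structuredOutput: false,
 *   };
 */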
interface LLMChatParamsBase<AdditionalChatOptions extends object = object, AdditionalMessageOptions extends object = object> {
messages: ChatMessage<AdditionalMessageOptions>[];
additionalChatOptions?: AdditionalChatOptions;
tools?: BaseTool[];
responseFormat?: z.ZodType | object;
}
interface LLMChatParamsStreaming<AdditionalChatOptions extends object = object, AdditionalMessageOptions extends object = object> extends LLMChatParamsBase<AdditionalChatOptions, AdditionalMessageOptions> {
stream: true;
}
interface LLMChatParamsNonStreaming<AdditionalChatOptions extends object = object, AdditionalMessageOptions extends object = object> extends LLMChatParamsBase<AdditionalChatOptions, AdditionalMessageOptions> {
stream?: false;
}
interface LLMCompletionParamsBase {
prompt: MessageContent;
responseFormat?: z.ZodType | object;
}
interface LLMCompletionParamsStreaming extends LLMCompletionParamsBase {
stream: true;
}
interface LLMCompletionParamsNonStreaming extends LLMCompletionParamsBase {
stream?: false | null | undefined;
}
type MessageContentTextDetail = {
type: "text";
text: string;
};
type MessageContentImageDetail = {
type: "image_url";
image_url: {
url: string;
};
detail?: "high" | "low" | "auto";
};
type MessageContentDetail = MessageContentTextDetail | MessageContentImageDetail;
/**
* Extended type for the content of a message that allows for multi-modal messages.
*/
type MessageContent = string | MessageContentDetail[];
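/*
 * Both forms are valid `MessageContent` (illustrative):
 *
 *   const plain: MessageContent = "What is in this picture?";
 *   const multiModal: MessageContent = [
 *     { type: "text", text: "What is in this picture?" },
 *     { type: "image_url", image_url: { url: "https://example.com/cat.png" }, detail: "low" },
 *   ];
 */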
type ToolCall = {
name: string;
input: JSONObject;
id: string;
};
type PartialToolCall = {
name: string;
id: string;
input: string;
};
type ToolResult = {
id: string;
result: string;
isError: boolean;
};
type ToolCallOptions = {
toolCall: (ToolCall | PartialToolCall)[];
};
type ToolResultOptions = {
toolResult: ToolResult;
};
type ToolCallLLMMessageOptions = ToolResultOptions | ToolCallOptions | object;
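/*
 * Sketch of a tool-call round trip expressed with these options (the role
 * used for the result message is an assumption, not fixed by these types):
 *
 *   const assistantMsg: ChatMessage<ToolCallLLMMessageOptions> = {
 *     role: "assistant",
 *     content: "",
 *     options: { toolCall: [{ id: "call_1", name: "add", input: { a: 1, b: 2 } }] },
 *   };
 *   const resultMsg: ChatMessage<ToolCallLLMMessageOptions> = {
 *     role: "user", // assumed convention for carrying a toolResult back
 *     content: "3",
 *     options: { toolResult: { id: "call_1", result: "3", isError: false } },
 *   };
 */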
type Known = {
[key: string]: Known;
} | [Known, ...Known[]] | Known[] | number | string | boolean | null;
type ToolMetadata<Parameters extends Record<string, unknown> = Record<string, unknown>> = {
description: string;
name: string;
/**
* OpenAI uses JSON Schema to describe the parameters that a tool can take.
* @link https://json-schema.org/understanding-json-schema
*/
parameters?: Parameters;
/**
* Whether the tool requires workflow context to be passed in.
*/
requireContext?: boolean;
};
/**
* Simple Tool interface. Likely to change.
*/
interface BaseTool<Input = any> {
/**
 * This could be undefined if the implementation is not provided,
 * which might be the case when communicating with an LLM.
 *
 * @return {JSONValue | Promise<JSONValue>} The output of the tool.
 */
call?: (input: Input) => JSONValue | Promise<JSONValue>;
metadata: Input extends Known ? ToolMetadata<JSONSchemaType<Input>> : ToolMetadata;
}
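/*
 * Minimal sketch of a `BaseTool`. Because `{ a: number; b: number }` extends
 * `Known`, `metadata.parameters` is typed as ajv's `JSONSchemaType` for that
 * input shape:
 *
 *   const addTool: BaseTool<{ a: number; b: number }> = {
 *     metadata: {
 *       name: "add",
 *       description: "Add two numbers",
 *       parameters: {
 *         type: "object",
 *         properties: { a: { type: "number" }, b: { type: "number" } },
 *         required: ["a", "b"],
 *         additionalProperties: false,
 *       },
 *     },
 *     call: ({ a, b }) => a + b,
 *   };
 */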
declare abstract class BaseLLM<AdditionalChatOptions extends object = object, AdditionalMessageOptions extends object = object> implements LLM<AdditionalChatOptions> {
abstract metadata: LLMMetadata;
complete(params: LLMCompletionParamsStreaming): Promise<AsyncIterable<CompletionResponse>>;
complete(params: LLMCompletionParamsNonStreaming): Promise<CompletionResponse>;
abstract chat(params: LLMChatParamsStreaming<AdditionalChatOptions, AdditionalMessageOptions>): Promise<AsyncIterable<ChatResponseChunk>>;
abstract chat(params: LLMChatParamsNonStreaming<AdditionalChatOptions, AdditionalMessageOptions>): Promise<ChatResponse<AdditionalMessageOptions>>;
}
declare abstract class ToolCallLLM<AdditionalChatOptions extends object = object, AdditionalMessageOptions extends ToolCallLLMMessageOptions = ToolCallLLMMessageOptions> extends BaseLLM<AdditionalChatOptions, AdditionalMessageOptions> {
abstract supportToolCall: boolean;
}
declare class MockLLM extends ToolCallLLM {
metadata: LLMMetadata;
options: {
timeBetweenToken: number;
responseMessage: string;
};
supportToolCall: boolean;
constructor(options?: {
timeBetweenToken?: number;
responseMessage?: string;
metadata?: LLMMetadata;
});
chat(params: LLMChatParamsStreaming<object, object>): Promise<AsyncIterable<ChatResponseChunk>>;
chat(params: LLMChatParamsNonStreaming<object, object>): Promise<ChatResponse<object>>;
complete(params: LLMCompletionParamsStreaming): Promise<AsyncIterable<CompletionResponse>>;
complete(params: LLMCompletionParamsNonStreaming): Promise<CompletionResponse>;
}
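/*
 * Usage sketch for `MockLLM` (useful in tests; presumably streams
 * `responseMessage` with `timeBetweenToken` ms between chunks):
 *
 *   const llm = new MockLLM({ timeBetweenToken: 0, responseMessage: "ok" });
 *   const res = await llm.complete({ prompt: "ping" });
 *   // res.text should equal the configured response message, "ok"
 */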
type ObjectEntries<T extends Record<string, any>> = {
[K in keyof T]: [K, T[K]];
}[keyof T][];
/**
* Type safe version of `Object.entries`
*/
declare function objectEntries<T extends Record<string, any>>(obj: T): ObjectEntries<{
[K in keyof T]-?: NonNullable<T[K]>;
}>;
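/*
 * Usage sketch (preserves key/value types, unlike plain `Object.entries`):
 *
 *   const entries = objectEntries({ a: 1, b: "x" });
 *   // typed as (["a", number] | ["b", string])[] rather than [string, unknown][]
 */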
declare const isPromise: <T>(obj: unknown) => obj is Promise<T>;
declare const isAsyncIterable: (obj: unknown) => obj is AsyncIterable<unknown>;
declare const isIterable: (obj: unknown) => obj is Iterable<unknown>;
declare function streamConverter<S, D>(stream: AsyncIterable<S>, converter: (s: S) => D | null): AsyncIterable<D>;
declare function streamCallbacks<S>(stream: AsyncIterable<S>, callbacks: {
finished?: (value?: S) => void;
}): AsyncIterable<S>;
declare function streamReducer<S, D>(params: {
stream: AsyncIterable<S>;
reducer: (previousValue: D, currentValue: S) => D;
initialValue: D;
finished?: (value: D) => void;
}): AsyncIterable<S>;
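/*
 * Sketch: fold a chunk stream into a final string while re-yielding each
 * chunk unchanged (assumes `chunks` is an AsyncIterable<ChatResponseChunk>):
 *
 *   const piped = streamReducer({
 *     stream: chunks,
 *     initialValue: "",
 *     reducer: (acc, chunk) => acc + chunk.delta,
 *     finished: (full) => console.log("final:", full),
 *   });
 *   for await (const chunk of piped) process.stdout.write(chunk.delta);
 */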
/**
 * Prettify an error into a message that is easier for an LLM to read
 */
declare function prettifyError(error: unknown): string;
declare function stringifyJSONToMessageContent(value: JSONValue): string;
export { MockLLM, extractDataUrlComponents, extractImage, extractSingleText, extractText, imageToDataUrl, isAsyncIterable, isIterable, isPromise, messagesToHistory, objectEntries, prettifyError, streamCallbacks, streamConverter, streamReducer, stringifyJSONToMessageContent, toToolDescriptions };