// llamaindex: LlamaIndex.TS, a data framework for your LLM application (TypeScript type declarations)
import { CallbackManager } from '@llamaindex/core/global';
export { CallbackManager, DEFAULT_BASE_URL, DEFAULT_CHUNK_OVERLAP, DEFAULT_CHUNK_OVERLAP_RATIO, DEFAULT_CHUNK_SIZE, DEFAULT_COLLECTION, DEFAULT_CONTEXT_WINDOW, DEFAULT_DOC_STORE_PERSIST_FILENAME, DEFAULT_GRAPH_STORE_PERSIST_FILENAME, DEFAULT_INDEX_STORE_PERSIST_FILENAME, DEFAULT_NAMESPACE, DEFAULT_NUM_OUTPUTS, DEFAULT_PADDING, DEFAULT_PERSIST_DIR, DEFAULT_PROJECT_NAME, DEFAULT_VECTOR_STORE_PERSIST_FILENAME, JSONArray, JSONObject, JSONValue, LLMEndEvent, LLMStartEvent, LLMStreamEvent, LLMToolCallEvent, LLMToolResultEvent, LlamaIndexEventMaps } from '@llamaindex/core/global';
import { PromptHelper } from '@llamaindex/core/indices';
export * from '@llamaindex/core/indices';
import { BaseEmbedding } from '@llamaindex/core/embeddings';
export * from '@llamaindex/core/embeddings';
import { LLM, ToolMetadata } from '@llamaindex/core/llms';
export * from '@llamaindex/core/llms';
import { NodeParser } from '@llamaindex/core/node-parser';
export { Language, LlamaParseReader, ResultType } from '@llamaindex/cloud/reader';
export * from '@llamaindex/core/agent';
export * from '@llamaindex/core/chat-engine';
export * from '@llamaindex/core/data-structs';
export * from '@llamaindex/core/memory';
export * from '@llamaindex/core/postprocessor';
import { PromptMixin, SubQuestionPrompt, ModuleRecord } from '@llamaindex/core/prompts';
export * from '@llamaindex/core/prompts';
import { QueryType } from '@llamaindex/core/query-engine';
export * from '@llamaindex/core/query-engine';
export * from '@llamaindex/core/response-synthesizers';
export * from '@llamaindex/core/retriever';
import { BaseOutputParser } from '@llamaindex/core/schema';
export * from '@llamaindex/core/schema';
export * from '@llamaindex/core/storage/chat-store';
import { BaseDocumentStore } from '@llamaindex/core/storage/doc-store';
export * from '@llamaindex/core/storage/doc-store';
export { BaseDocumentStore } from '@llamaindex/core/storage/doc-store';
import { BaseIndexStore } from '@llamaindex/core/storage/index-store';
export * from '@llamaindex/core/storage/index-store';
export * from '@llamaindex/core/storage/kv-store';
export * from '@llamaindex/core/utils';
export * from '../../agent/dist/index.js';
export * from '../../cloud/dist/index.js';
export * from '../../engines/dist/index.js';
export * from '../../evaluation/dist/index.js';
export * from '../../extractors/dist/index.js';
export * from '../../indices/dist/index.js';
export * from '../../ingestion/dist/index.js';
export * from '../../node-parser/dist/index.js';
export * from '../../objects/dist/index.js';
export * from '../../postprocessors/dist/index.js';
export * from '../../selectors/dist/index.js';
import { VectorStoreByType, BaseVectorStore } from '@llamaindex/core/vector-store';
export * from '../../tools/dist/index.js';
type PromptConfig = {
llm?: string;
lang?: string;
};
interface Config {
prompt: PromptConfig;
promptHelper: PromptHelper | null;
embedModel: BaseEmbedding | null;
nodeParser: NodeParser | null;
callbackManager: CallbackManager | null;
chunkSize: number | undefined;
chunkOverlap: number | undefined;
}
/**
* @internal
*/
declare class GlobalSettings implements Config {
#private;
get debug(): boolean;
get llm(): LLM;
set llm(llm: LLM);
withLLM<Result>(llm: LLM, fn: () => Result): Result;
get promptHelper(): PromptHelper;
set promptHelper(promptHelper: PromptHelper);
withPromptHelper<Result>(promptHelper: PromptHelper, fn: () => Result): Result;
get embedModel(): BaseEmbedding;
set embedModel(embedModel: BaseEmbedding);
withEmbedModel<Result>(embedModel: BaseEmbedding, fn: () => Result): Result;
get nodeParser(): NodeParser;
set nodeParser(nodeParser: NodeParser);
withNodeParser<Result>(nodeParser: NodeParser, fn: () => Result): Result;
get callbackManager(): CallbackManager;
set callbackManager(callbackManager: CallbackManager);
withCallbackManager<Result>(callbackManager: CallbackManager, fn: () => Result): Result;
set chunkSize(chunkSize: number | undefined);
get chunkSize(): number | undefined;
withChunkSize<Result>(chunkSize: number, fn: () => Result): Result;
get chunkOverlap(): number | undefined;
set chunkOverlap(chunkOverlap: number | undefined);
withChunkOverlap<Result>(chunkOverlap: number, fn: () => Result): Result;
get prompt(): PromptConfig;
set prompt(prompt: PromptConfig);
withPrompt<Result>(prompt: PromptConfig, fn: () => Result): Result;
}
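/**
 * Global configuration singleton. Values assigned here (llm, embedModel,
 * nodeParser, chunkSize, ...) become the framework-wide defaults, and the
 * `with*` helpers apply a value only for the duration of the callback before
 * restoring the previous one.
 *
 * @example
 * A minimal sketch; the `@llamaindex/openai` import and model name are
 * assumptions, any LLM implementation works:
 *
 *   import { Settings } from "llamaindex";
 *   import { OpenAI } from "@llamaindex/openai";
 *
 *   Settings.llm = new OpenAI({ model: "gpt-4o-mini" });
 *   Settings.chunkSize = 512;
 *
 *   // Scoped override: chunkSize is 1024 only inside the callback.
 *   Settings.withChunkSize(1024, () => {
 *     // build an index here with the temporary chunk size
 *   });
 */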
declare const Settings: GlobalSettings;
/**
 * Question generators take the available tools and a user query and produce
 * sub-questions for the LLM to answer.
 */
interface BaseQuestionGenerator {
generate(tools: ToolMetadata[], query: QueryType): Promise<SubQuestion[]>;
}
interface SubQuestion {
subQuestion: string;
toolName: string;
}
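/*
 * A minimal sketch of a custom question generator. BaseQuestionGenerator and
 * SubQuestion are not re-exported from this entry point, so an implementation
 * only needs to be structurally compatible; the fan-out strategy below (one
 * sub-question per tool, string queries only) is purely illustrative.
 *
 *   import type { ToolMetadata } from "llamaindex";
 *
 *   const fanOutGenerator = {
 *     async generate(tools: ToolMetadata[], query: string) {
 *       // Reuse the original query verbatim for every tool.
 *       return tools.map((tool) => ({ subQuestion: query, toolName: tool.name }));
 *     },
 *   };
 */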
/**
 * Top-level types, declared here to avoid circular dependencies.
 */
/**
 * StructuredOutput pairs the raw LLM output string with its parsed form.
 */
interface StructuredOutput<T> {
rawOutput: string;
parsedOutput: T;
}
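/*
 * Example shape (illustrative values): rawOutput keeps the LLM text exactly as
 * received, parsedOutput holds whatever the output parser extracted from it.
 *
 *   import type { StructuredOutput } from "llamaindex";
 *
 *   const out: StructuredOutput<number[]> = {
 *     rawOutput: "[1, 2, 3]",
 *     parsedOutput: [1, 2, 3],
 *   };
 */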
type ToolMetadataOnlyDescription = Pick<ToolMetadata, "description">;
type UUID = `${string}-${string}-${string}-${string}-${string}`;
/**
 * Parses a JSON payload embedded in a markdown code block.
 *
 * @param text Markdown text containing a JSON block
 * @returns The parsed JSON value
 */
declare function parseJsonMarkdown(text: string): any;
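/*
 * A minimal sketch of parseJsonMarkdown; the fenced ```json input below is an
 * illustrative assumption about what the surrounding LLM typically returns.
 *
 *   import { parseJsonMarkdown } from "llamaindex";
 *
 *   const raw = "```json\n{ \"answer\": 42 }\n```";
 *   const value = parseJsonMarkdown(raw); // { answer: 42 }
 */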
/**
 * SubQuestionOutputParser parses the raw output of a question generator (such
 * as LLMQuestionGenerator) into a structured list of sub-questions.
 */
declare class SubQuestionOutputParser implements BaseOutputParser<StructuredOutput<SubQuestion[]>> {
parse(output: string): StructuredOutput<SubQuestion[]>;
format(output: string): string;
}
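/*
 * A sketch of parsing sub-questions from a raw LLM response. The JSON payload
 * below is illustrative and mirrors the SubQuestion shape declared above;
 * parse() returns both the raw string and the parsed array.
 *
 *   import { SubQuestionOutputParser } from "llamaindex";
 *
 *   const parser = new SubQuestionOutputParser();
 *   const { rawOutput, parsedOutput } = parser.parse(
 *     '```json\n[{ "subQuestion": "What was Q3 revenue?", "toolName": "sept_filing" }]\n```',
 *   );
 *   // parsedOutput: [{ subQuestion: "What was Q3 revenue?", toolName: "sept_filing" }]
 */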
/**
 * LLMQuestionGenerator uses an LLM, together with the available tools and a
 * user query, to generate new sub-questions for the LLM to answer.
 */
declare class LLMQuestionGenerator extends PromptMixin implements BaseQuestionGenerator {
llm: LLM;
prompt: SubQuestionPrompt;
outputParser: BaseOutputParser<StructuredOutput<SubQuestion[]>>;
constructor(init?: Partial<LLMQuestionGenerator>);
protected _getPrompts(): {
[x: string]: SubQuestionPrompt;
};
protected _updatePrompts(promptsDict: {
subQuestion: SubQuestionPrompt;
}): void;
generate(tools: ToolMetadata[], query: QueryType): Promise<SubQuestion[]>;
protected _getPromptModules(): ModuleRecord;
}
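/*
 * A sketch of generating sub-questions for a set of tools. With no init
 * argument the generator falls back to the globally configured LLM (see
 * Settings above); the tool metadata and query below are illustrative.
 *
 *   import { LLMQuestionGenerator } from "llamaindex";
 *
 *   const generator = new LLMQuestionGenerator();
 *   const subQuestions = await generator.generate(
 *     [
 *       { name: "sept_filing", description: "September quarterly filing" },
 *       { name: "june_filing", description: "June quarterly filing" },
 *     ],
 *     "Compare revenue growth between the June and September quarters",
 *   );
 *   // e.g. [{ subQuestion: "...", toolName: "sept_filing" }, ...]
 */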
interface StorageContext {
docStore: BaseDocumentStore;
indexStore: BaseIndexStore;
vectorStores: VectorStoreByType;
}
type BuilderParams = {
docStore: BaseDocumentStore;
indexStore: BaseIndexStore;
vectorStore: BaseVectorStore;
vectorStores: VectorStoreByType;
persistDir: string;
};
declare function storageContextFromDefaults({ docStore, indexStore, vectorStore, vectorStores, persistDir, }: Partial<BuilderParams>): Promise<StorageContext>;
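/*
 * A sketch of building a StorageContext with the defaults, persisted under a
 * local directory (assumes a Node.js runtime with filesystem access). Every
 * field of BuilderParams is optional; pass your own stores to override the
 * defaults.
 *
 *   import { storageContextFromDefaults } from "llamaindex";
 *
 *   const storageContext = await storageContextFromDefaults({
 *     persistDir: "./storage",
 *   });
 *   // storageContext.docStore / indexStore / vectorStores can now be passed
 *   // to an index builder, e.g. VectorStoreIndex.fromDocuments(docs, { storageContext }).
 */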
export { LLMQuestionGenerator, Settings, SubQuestionOutputParser, parseJsonMarkdown, storageContextFromDefaults };
export type { StorageContext, StructuredOutput, ToolMetadataOnlyDescription, UUID };