import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
import type { AvailableModel } from '../../execution/AvailableModel';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
/**
 * Multiple LLM Execution Tools is a proxy that wraps multiple execution tools internally and exposes the single `LlmExecutionTools` executor interface externally.
 *
 * Note: This is an internal utility of `joinLlmExecutionTools`, but the type is exposed
* @public exported from `@promptbook/core`
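 *
 * Example (a minimal sketch; `OpenAiExecutionTools`, `AnthropicClaudeExecutionTools` and their
 * option shapes are assumptions for illustration, coming from other `@promptbook/*` packages,
 * not from this declaration):
 *
 * @example
 * ```ts
 * const llmTools = new MultipleLlmExecutionTools(
 *     'My LLM tools',
 *     new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY }),
 *     new AnthropicClaudeExecutionTools({ apiKey: process.env.ANTHROPIC_API_KEY }),
 * );
 * await llmTools.checkConfiguration();
 * // `chatPrompt` is an assumed, already-prepared `ChatPrompt`;
 * // the call falls through the tools in priority order until one succeeds:
 * const chatResult = await llmTools.callChatModel(chatPrompt);
 * ```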
*/
export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
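/**
 * Human-readable title of this composite of execution tools
 */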
readonly title: string_title & string_markdown_text;
/**
* Array of execution tools in order of priority
*/
readonly llmExecutionTools: ReadonlyArray<LlmExecutionTools>;
/**
 * Creates a composite of the given execution tools, tried in order of priority
 */
constructor(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
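/**
 * Markdown description of this collection of execution tools
 */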
get description(): string_markdown;
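/**
 * Chat participant profile representing these tools, for example in chat UIs
 */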
get profile(): ChatParticipant;
/**
* Check the configuration of all execution tools
*/
checkConfiguration(): Promise<void>;
/**
* List all available models that can be used
 * This list is the combination of all available models from all execution tools
*/
listModels(): Promise<ReadonlyArray<AvailableModel>>;
/**
* Calls the best available chat model
*/
callChatModel(prompt: ChatPrompt): Promise<ChatPromptResult>;
/**
* Calls the best available completion model
*/
callCompletionModel(prompt: CompletionPrompt): Promise<CompletionPromptResult>;
/**
* Calls the best available embedding model
*/
callEmbeddingModel(prompt: EmbeddingPrompt): Promise<EmbeddingPromptResult>;
/**
* Calls the best available model
*
* Note: This should be private or protected but is public to be usable with duck typing
*/
callCommonModel(prompt: Prompt): Promise<PromptResult>;
}
/**
 * TODO: [🧠][🎛] Aggregating multiple models - return the result not only from the first available model BUT from all of them
 * TODO: [🏖] If none of the llmTools define, for example, `callCompletionModel`, this will still return an object with a defined `callCompletionModel` which just throws `PipelineExecutionError`; make it undefined instead
 *       Look at how `countTotalUsage` (and `cacheLlmTools`) implement it
*/