UNPKG

@promptbook/remote-client

Version:

Promptbook: Turn your company's scattered knowledge into AI-ready books

80 lines (79 loc) 3.76 kB
import type { Promisable } from 'type-fest';
import type { string_book } from '../../book-2.0/agent-source/string_book';
import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
import type { AvailableModel } from '../../execution/AvailableModel';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
import type { ChatPromptResult } from '../../execution/PromptResult';
import type { Prompt } from '../../types/Prompt';
import type { string_markdown, string_markdown_text, string_model_name, string_title } from '../../types/typeAliases';
import type { CreateAgentLlmExecutionToolsOptions } from './CreateAgentLlmExecutionToolsOptions';
/**
 * Execution Tools for calling LLM models with a predefined agent "soul"
 * This wraps underlying LLM execution tools and applies agent-specific system prompts and requirements
 *
 * Note: [🦖] There are several different things in Promptbook:
 * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
 * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
 * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
 * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
 * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
 *
 * @public exported from `@promptbook/core`
 */
export declare class AgentLlmExecutionTools implements LlmExecutionTools {
    // Construction options (wrapped LLM tools + agent source); see CreateAgentLlmExecutionToolsOptions
    private readonly options;
    /**
     * Cache of OpenAI assistants to avoid creating duplicates
     */
    private static assistantCache;
    /**
     * Cached model requirements to avoid re-parsing the agent source
     */
    private _cachedModelRequirements;
    /**
     * Cached parsed agent information
     */
    private _cachedAgentInfo;
    /**
     * Creates new AgentLlmExecutionTools
     *
     * @param options Options bundling the underlying LLM execution tools to wrap
     *                and the agent source string that defines the agent's behavior
     */
    constructor(options: CreateAgentLlmExecutionToolsOptions);
    /**
     * Updates the agent source and clears the cache
     *
     * @param agentSource The new agent source string
     */
    protected updateAgentSource(agentSource: string_book): void;
    /**
     * Get cached or parse agent information
     */
    private getAgentInfo;
    /**
     * Get cached or create agent model requirements
     */
    private getAgentModelRequirements;
    // Human-readable title of the agent, usable as markdown text
    get title(): string_title & string_markdown_text;
    // Markdown description of the agent
    get description(): string_markdown;
    // Chat participant descriptor for the agent, if available
    get profile(): ChatParticipant | undefined;
    // Verifies that the tools are configured correctly; may be sync or async
    checkConfiguration(): Promisable<void>;
    /**
     * Returns a virtual model name representing the agent behavior
     */
    get modelName(): string_model_name;
    // Lists the models available through the wrapped LLM tools
    listModels(): Promisable<ReadonlyArray<AvailableModel>>;
    /**
     * Calls the chat model with agent-specific system prompt and requirements
     */
    callChatModel(prompt: Prompt): Promise<ChatPromptResult>;
    /**
     * Calls the chat model with agent-specific system prompt and requirements with streaming
     */
    callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
}
/**
 * TODO: [🍚] Implement Destroyable pattern to free resources
 * TODO: [🧠] Adding parameter substitution support (here or should be responsibility of the underlying LLM Tools)
 */