@promptbook/vercel
Promptbook: Turn your company's scattered knowledge into AI-ready books
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
import type { ChatPromptResult } from '../../execution/PromptResult';
import type { Prompt } from '../../types/Prompt';
import type { string_markdown, string_markdown_text, string_title, string_token } from '../../types/typeAliases';
import type { OpenAiAssistantExecutionToolsOptions } from './OpenAiAssistantExecutionToolsOptions';
import { OpenAiExecutionTools } from './OpenAiExecutionTools';
/**
* Execution Tools for calling OpenAI API Assistants
*
 * This is useful for calling the OpenAI API with a single assistant; for broader usage, use `OpenAiExecutionTools`.
*
* Note: [🦖] There are several different things in Promptbook:
 * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction that internally uses:
* - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
* - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
* - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
*
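 * A minimal sketch of that layering (assuming `assistantTools` is an already constructed
 * `OpenAiAssistantExecutionTools` instance):
 *
 * @example
 * ```ts
 * // Because the class implements `LlmExecutionTools`, it can be used wherever generic
 * // LLM tools are expected, for example inside an `Agent` or `AgentLlmExecutionTools`.
 * const llmTools: LlmExecutionTools = assistantTools;
 * ```
 *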
* @public exported from `@promptbook/openai`
*/
export declare class OpenAiAssistantExecutionTools extends OpenAiExecutionTools implements LlmExecutionTools {
readonly assistantId: string_token;
private readonly isCreatingNewAssistantsAllowed;
/**
 * Creates OpenAI Assistant Execution Tools.
 *
 * @param options Relevant options are passed directly to the OpenAI client
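 *
 * A minimal construction sketch (the option names below are assumptions, check
 * `OpenAiAssistantExecutionToolsOptions` for the exact shape):
 *
 * @example
 * ```ts
 * const tools = new OpenAiAssistantExecutionTools({
 *     apiKey: process.env.OPENAI_API_KEY!, // <- Assumed option name, passed through to the OpenAI client
 *     assistantId: 'asst_...', // <- Assumed option name, ID of an existing OpenAI assistant
 * });
 * ```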
*/
constructor(options: OpenAiAssistantExecutionToolsOptions);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**
 * Calls the OpenAI API to use a chat model.
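 *
 * A minimal sketch of a call (assuming `tools` is a constructed instance; the
 * `modelRequirements` shape and the `content` field of the result are assumptions,
 * check `Prompt` and `ChatPromptResult` for the real shapes; `format` is omitted here,
 * assuming it is optional):
 *
 * @example
 * ```ts
 * const result = await tools.callChatModel({
 *     content: 'Summarize {topic} in one sentence.',
 *     parameters: { topic: 'vector stores' }, // <- Assuming parameters are substituted into the content template
 *     modelRequirements: { modelVariant: 'CHAT' }, // <- Assumed shape
 * });
 * console.info(result.content); // <- Assumed result field
 * ```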
*/
callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'format'>): Promise<ChatPromptResult>;
/**
 * Calls the OpenAI API to use a chat model with streaming.
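 *
 * A minimal streaming sketch (assuming each progress `chunk` carries the partial text in
 * its `content` field, which this declaration does not guarantee):
 *
 * @example
 * ```ts
 * const finalResult = await tools.callChatModelStream(
 *     {
 *         content: 'Write a short poem about {topic}.',
 *         parameters: { topic: 'books' },
 *         modelRequirements: { modelVariant: 'CHAT' }, // <- Assumed shape
 *     },
 *     (chunk) => {
 *         process.stdout.write(chunk.content); // <- Assumed chunk field
 *     },
 * );
 * ```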
*/
callChatModelStream(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'format'>, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
/**
 * Gets execution tools wrapping an existing assistant
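 *
 * A minimal sketch (the `'asst_...'` value is a placeholder for a real assistant ID):
 *
 * @example
 * ```ts
 * const supportTools = tools.getAssistant('asst_...');
 * // `supportTools` is a separate `OpenAiAssistantExecutionTools` bound to that assistant
 * ```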
*/
getAssistant(assistantId: string_token): OpenAiAssistantExecutionTools;
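/**
 * Creates a new assistant and returns an `OpenAiAssistantExecutionTools` wrapper for it
 *
 * A minimal sketch (assuming `tools` is a constructed instance and the values are
 * illustrative; creation is presumably governed by the `isCreatingNewAssistantsAllowed`
 * option above):
 *
 * @example
 * ```ts
 * const onboardingTools = await tools.createNewAssistant({
 *     name: 'Onboarding assistant',
 *     instructions: 'Answer questions about the employee handbook.',
 *     knowledgeSources: ['https://example.com/handbook.pdf'], // <- Attached via a vector store
 * });
 * ```
 */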
createNewAssistant(options: {
/**
* Name of the new assistant
*/
readonly name: string_title;
/**
* Instructions for the new assistant
*/
readonly instructions: string_markdown;
/**
* Optional list of knowledge source links (URLs or file paths) to attach to the assistant via vector store
*/
readonly knowledgeSources?: ReadonlyArray<string>;
}): Promise<OpenAiAssistantExecutionTools>;
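/**
 * Updates an existing assistant and returns an `OpenAiAssistantExecutionTools` wrapper for it
 *
 * A minimal sketch (the values are illustrative):
 *
 * @example
 * ```ts
 * const updatedTools = await tools.updateAssistant({
 *     assistantId: 'asst_...',
 *     instructions: 'Answer questions about the updated employee handbook.',
 *     knowledgeSources: ['https://example.com/handbook-v2.pdf'],
 * });
 * ```
 */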
updateAssistant(options: {
/**
* ID of the assistant to update
*/
readonly assistantId: string_token;
/**
* Name of the assistant
*/
readonly name?: string_title;
/**
* Instructions for the assistant
*/
readonly instructions?: string_markdown;
/**
* Optional list of knowledge source links (URLs or file paths) to attach to the assistant via vector store
*/
readonly knowledgeSources?: ReadonlyArray<string>;
}): Promise<OpenAiAssistantExecutionTools>;
/**
* Discriminant for type guards
*/
protected get discriminant(): string;
/**
* Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAssistantExecutionTools`
*
 * Note: This is useful when multiple versions of `@promptbook/openai` may be installed
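 *
 * A minimal sketch of the narrowing (assuming `llmTools` is typed as plain `LlmExecutionTools`):
 *
 * @example
 * ```ts
 * if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(llmTools)) {
 *     console.info(llmTools.assistantId); // <- Narrowed, so `assistantId` is accessible here
 * }
 * ```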
*/
static isOpenAiAssistantExecutionTools(llmExecutionTools: LlmExecutionTools): llmExecutionTools is OpenAiAssistantExecutionTools;
}
/**
 * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
* TODO: Maybe make custom OpenAiError
* TODO: [🧠][🈁] Maybe use `isDeterministic` from options
* TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
*/