@promptbook/vercel
Promptbook: Turn your company's scattered knowledge into AI-ready books
import { BehaviorSubject } from 'rxjs';
import type { AgentBasicInformation, BookParameter } from '../../book-2.0/agent-source/AgentBasicInformation';
import type { string_book } from '../../book-2.0/agent-source/string_book';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
import type { ChatPromptResult } from '../../execution/PromptResult';
import type { Prompt } from '../../types/Prompt';
import type { string_agent_hash, string_agent_name, string_agent_url, string_url_image } from '../../types/typeAliases';
import { AgentLlmExecutionTools } from './AgentLlmExecutionTools';
import type { AgentOptions } from './AgentOptions';
/**
* Represents one AI Agent
*
* Note: [🦖] There are several different things in Promptbook:
 * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which internally uses:
 * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
 * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another `LlmExecutionTools` and applies agent-specific system prompts and requirements
 * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for use within `Agent` or `AgentLlmExecutionTools`
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
*
* @public exported from `@promptbook/core`
*/
export declare class Agent extends AgentLlmExecutionTools implements LlmExecutionTools, AgentBasicInformation {
private _agentName;
/**
* Name of the agent
*/
get agentName(): string_agent_name;
/**
* Description of the agent
*/
personaDescription: string | null;
/**
* The initial message shown to the user when the chat starts
*/
initialMessage: string | null;
/**
* Links found in the agent source
*/
links: Array<string_agent_url>;
/**
* Computed hash of the agent source for integrity verification
*/
get agentHash(): string_agent_hash;
/**
* Metadata like image or color
*/
meta: {
fullname?: string;
image?: string_url_image;
link?: string;
title?: string;
description?: string;
[key: string]: string | undefined;
};
/**
 * Not used in `Agent`; always returns an empty array
*/
get parameters(): BookParameter[];
readonly agentSource: BehaviorSubject<string_book>;
constructor(options: AgentOptions);
/**
 * Calls the chat model with the agent-specific system prompt and requirements, streaming partial results via `onProgress`
*
* Note: This method also implements the learning mechanism
*/
callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
}
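/**
 * Usage sketch (not part of the generated declaration): one hedged way to consume the
 * streaming call above. Only the `callChatModelStream(prompt, onProgress)` signature is
 * taken from this file; how `agent` and `prompt` are obtained, and the `content` field
 * read from `ChatPromptResult`, are assumptions.
 */
// async function chatOnce(agent: Agent, prompt: Prompt): Promise<ChatPromptResult> {
//     const finalResult = await agent.callChatModelStream(prompt, (chunk) => {
//         // Each chunk is a partial ChatPromptResult; render it incrementally
//         console.info(chunk.content);
//     });
//     // The resolved value is the complete ChatPromptResult for the whole reply
//     return finalResult;
// }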
/**
 * TODO: [🧠][😰] `Agent` is not working with parameters; should it be?
*/
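/**
 * Sketch (an assumption, not confirmed by this declaration): because `agentSource` is an
 * rxjs `BehaviorSubject`, the current book source can be observed and a new one pushed
 * using only the standard rxjs API.
 */
// declare const agent: Agent;
//
// const subscription = agent.agentSource.subscribe((book) => {
//     console.info(`Source of agent "${agent.agentName}" changed`);
// });
//
// // Push an updated book source and stop observing when done:
// // agent.agentSource.next(updatedBook);
// // subscription.unsubscribe();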