@promptbook/openai
Version:
Promptbook: Turn your company's scattered knowledge into AI ready books
155 lines (154 loc) • 6.19 kB
TypeScript
import { Agent as AgentFromKit } from '@openai/agents';
import OpenAI from 'openai';
import { TODO_any } from '../../_packages/types.index';
import type { CallChatModelStreamOptions, LlmExecutionTools } from '../../execution/LlmExecutionTools';
import type { ChatPromptResult } from '../../execution/PromptResult';
import type { ModelRequirements } from '../../types/ModelRequirements';
import type { Prompt } from '../../types/Prompt';
import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
import type { OpenAiAgentKitExecutionToolsOptions } from './OpenAiAgentKitExecutionToolsOptions';
import { OpenAiVectorStoreHandler } from './OpenAiVectorStoreHandler';
/**
 * Represents the AgentKit output configuration used to match OpenAI `response_format` expectations.
 *
 * `'text'` requests plain (unstructured) text output; a `JsonSchemaDefinition` requests
 * structured JSON output conforming to the given schema.
 *
 * @private utility of Open AI
 */
export type AgentOutputType = 'text' | JsonSchemaDefinition;
/**
 * A single JSON Schema node used inside a `JsonSchemaDefinition`.
 *
 * Mirrors the subset of JSON Schema keywords visible below; the index signature permits
 * additional keywords but is typed `TODO_any`, which disables checking on those extra keys.
 */
type JsonSchemaDefinitionEntry = {
    /** JSON Schema `type` keyword — presumably `'string'`, `'object'`, `'array'`, etc.; TODO confirm accepted values */
    type?: string;
    /** Human-readable description of this schema node */
    description?: string;
    /** Child property schemas when this node describes an object */
    properties?: Record<string, JsonSchemaDefinitionEntry>;
    /** Names of required child properties */
    required?: Array<string>;
    /** Item schema when this node describes an array */
    items?: JsonSchemaDefinitionEntry;
    /** Any additional JSON Schema keywords (unchecked) */
    [key: string]: TODO_any;
};
/**
 * Structured-output schema definition in the shape OpenAI expects for
 * `response_format: { type: 'json_schema', ... }`.
 */
type JsonSchemaDefinition = {
    /** Discriminant — always the literal `'json_schema'` */
    type: 'json_schema';
    /** Name identifying the schema to the OpenAI API */
    name: string;
    /** Whether the model must adhere strictly to the schema */
    strict: boolean;
    /** Root schema — constrained to an object schema */
    schema: {
        type: 'object';
        properties: Record<string, JsonSchemaDefinitionEntry>;
        required: Array<string>;
        additionalProperties: boolean;
        description?: string;
    };
};
/**
 * The `response_format` parameter type of the OpenAI Chat Completions API
 * (non-streaming request variant), derived via indexed access so it tracks the SDK's types.
 */
type OpenAiChatResponseFormat = OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming['response_format'];
/**
 * Maps OpenAI `response_format` payloads to AgentKit output types so the runner can forward
 * structured-output preferences to OpenAI while still reusing the same AgentKit agent instance.
 *
 * @param responseFormat - The OpenAI `response_format` payload from the user request; may be omitted.
 * @returns An Agent output type compatible with the requested schema, or `undefined` when no mapping is required.
 * @private utility of Open AI
 */
export declare function mapResponseFormatToAgentOutputType(responseFormat?: OpenAiChatResponseFormat): AgentOutputType | undefined;
/**
 * Alias for OpenAI AgentKit agent to avoid naming confusion with Promptbook agents.
 * Refers to the `Agent` class from the `@openai/agents` SDK.
 */
type OpenAiAgentKitAgent = AgentFromKit;
/**
 * Prepared AgentKit agent details, bundling the agent instance with the vector store
 * (if any) that was attached to it during preparation.
 */
type OpenAiAgentKitPreparedAgent = {
    /** The prepared AgentKit agent instance */
    readonly agent: OpenAiAgentKitAgent;
    /** Identifier of the OpenAI vector store attached for file search, when one was created/used */
    readonly vectorStoreId?: string;
};
/**
 * Execution tools for OpenAI AgentKit (Agents SDK).
 *
 * Extends `OpenAiVectorStoreHandler` for vector-store management and implements the generic
 * `LlmExecutionTools` contract so Promptbook can drive AgentKit agents like any other LLM provider.
 *
 * @public exported from `@promptbook/openai`
 */
export declare class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler implements LlmExecutionTools {
    // Cached prepared agent stored via `setPreparedAgentKitAgent`; read back by `getPreparedAgentKitAgent`
    private preparedAgentKitAgent;
    // Model name used when constructing AgentKit agents
    private readonly agentKitModelName;
    /**
     * Creates OpenAI AgentKit execution tools.
     *
     * @param options - AgentKit-specific configuration (see `OpenAiAgentKitExecutionToolsOptions`)
     */
    constructor(options: OpenAiAgentKitExecutionToolsOptions);
    /** Human-readable title of this provider */
    get title(): string_title & string_markdown_text;
    /** Markdown description of this provider */
    get description(): string_markdown;
    /**
     * Calls OpenAI AgentKit with a chat prompt (non-streaming).
     *
     * @param prompt - The chat prompt to execute
     * @returns The completed chat result
     */
    callChatModel(prompt: Prompt): Promise<ChatPromptResult>;
    /**
     * Calls OpenAI AgentKit with a chat prompt (streaming).
     *
     * @param prompt - The chat prompt to execute
     * @param onProgress - Invoked with partial results as chunks arrive
     * @param options - Optional streaming options
     * @returns The final, complete chat result
     */
    callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void, options?: CallChatModelStreamOptions): Promise<ChatPromptResult>;
    /**
     * Returns a prepared AgentKit agent when the server wants to manage caching externally.
     *
     * @returns The stored prepared agent, or `null` when none has been stored yet
     */
    getPreparedAgentKitAgent(): OpenAiAgentKitPreparedAgent | null;
    /**
     * Stores a prepared AgentKit agent for later reuse by external cache managers.
     */
    setPreparedAgentKitAgent(preparedAgent: OpenAiAgentKitPreparedAgent): void;
    /**
     * Creates a new tools instance bound to a prepared AgentKit agent.
     */
    getPreparedAgentTools(preparedAgent: OpenAiAgentKitPreparedAgent): OpenAiAgentKitExecutionTools;
    /**
     * Prepares an AgentKit agent with optional knowledge sources and tool definitions.
     *
     * @param options.name - Display name of the agent
     * @param options.instructions - System instructions for the agent
     * @param options.knowledgeSources - Optional knowledge sources — presumably file/URL references; TODO confirm against implementation
     * @param options.tools - Optional tool definitions forwarded from `ModelRequirements`
     * @param options.vectorStoreId - Optional existing vector store to attach
     * @param options.storeAsPrepared - NOTE(review): looks like it also caches the result on this instance — confirm
     * @returns The prepared agent together with its vector store id, if any
     */
    prepareAgentKitAgent(options: {
        readonly name: string_title;
        readonly instructions: string_markdown;
        readonly knowledgeSources?: ReadonlyArray<string>;
        readonly tools?: ModelRequirements['tools'];
        readonly vectorStoreId?: string;
        readonly storeAsPrepared?: boolean;
    }): Promise<OpenAiAgentKitPreparedAgent>;
    /**
     * Ensures the AgentKit SDK is wired to the OpenAI client and API key.
     */
    private ensureAgentKitDefaults;
    /**
     * Builds the tool list for AgentKit, including hosted file search when applicable.
     */
    private buildAgentKitTools;
    /**
     * Resolves the configured script tools for tool execution.
     */
    private resolveScriptTools;
    /**
     * Runs a prepared AgentKit agent and streams results back to the caller.
     *
     * @param options.openAiAgentKitAgent - The prepared agent to run
     * @param options.prompt - The chat prompt being executed
     * @param options.rawPromptContent - Optional raw prompt text overriding the prompt's content — TODO confirm precedence
     * @param options.onProgress - Invoked with partial results as the stream progresses
     * @param options.responseFormatOutputType - Structured-output preference mapped via `mapResponseFormatToAgentOutputType`
     * @returns The final, complete chat result
     */
    callChatModelStreamWithPreparedAgent(options: {
        readonly openAiAgentKitAgent: OpenAiAgentKitAgent;
        readonly prompt: Prompt;
        readonly rawPromptContent?: string;
        readonly onProgress: (chunk: ChatPromptResult) => void;
        readonly responseFormatOutputType?: AgentOutputType;
        /**
         * Optional abort signal propagated from chat surfaces so stream generation can be cancelled.
         */
        readonly signal?: AbortSignal;
    }): Promise<ChatPromptResult>;
    /**
     * Builds AgentKit input items from the prompt and optional thread.
     */
    private buildAgentKitInputItems;
    /**
     * Builds the user message content for AgentKit runs, including file inputs when provided.
     */
    private buildAgentKitUserContent;
    /**
     * Normalizes AgentKit tool outputs into a string for Promptbook tool call results.
     */
    private formatAgentKitToolOutput;
    /**
     * Returns AgentKit-specific options.
     */
    private get agentKitOptions();
    /**
     * Discriminant for type guards.
     */
    protected get discriminant(): string;
    /**
     * Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAgentKitExecutionTools`.
     */
    static isOpenAiAgentKitExecutionTools(llmExecutionTools: LlmExecutionTools): llmExecutionTools is OpenAiAgentKitExecutionTools;
}
export {};