@promptbook/vercel
Promptbook: Turn your company's scattered knowledge into AI-ready books
import OpenAI from 'openai';
import type { AvailableModel } from '../../execution/AvailableModel';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
import type { Usage } from '../../execution/Usage';
import type { Prompt } from '../../types/Prompt';
import type { string_markdown, string_markdown_text, string_model_name, string_title } from '../../types/typeAliases';
import { computeOpenAiUsage } from './computeOpenAiUsage';
import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from './OpenAiCompatibleExecutionToolsOptions';
/**
* Execution Tools for calling the OpenAI API or another OpenAI-compatible provider
*
* @public exported from `@promptbook/openai`
*/
export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecutionTools {
protected readonly options: OpenAiCompatibleExecutionToolsNonProxiedOptions;
/**
* OpenAI API client.
*/
private client;
/**
* Rate limiter instance
*/
private limiter;
/**
* Creates OpenAI-compatible Execution Tools.
*
* @param options Relevant options are passed directly to the OpenAI-compatible client
*/
constructor(options: OpenAiCompatibleExecutionToolsNonProxiedOptions);
abstract get title(): string_title & string_markdown_text;
abstract get description(): string_markdown;
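/**
* Gets the underlying OpenAI API client (presumably cached in the private `client` field)
*/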
getClient(): Promise<OpenAI>;
/**
* Checks the `options` passed to the `constructor`
*/
checkConfiguration(): Promise<void>;
/**
* Lists all available OpenAI-compatible models that can be used
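*
* @example
* // A sketch, assuming `tools` is an instance of a concrete subclass
* // and that `AvailableModel` exposes a `modelName` field:
* const models = await tools.listModels();
* console.info(models.map(({ modelName }) => modelName));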
*/
listModels(): Promise<ReadonlyArray<AvailableModel>>;
/**
* Calls the OpenAI-compatible API to use a chat model.
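*
* @example
* // A sketch: `tools` is a concrete subclass instance; the exact set of
* // required `Prompt` fields and the result's `content` field are assumptions:
* const result = await tools.callChatModel({
*     title: 'Greeting',
*     content: 'Hello!',
*     parameters: {},
*     modelRequirements: { modelVariant: 'CHAT' },
* } as Prompt);
* console.info(result.content);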
*/
callChatModel(prompt: Prompt): Promise<ChatPromptResult>;
/**
* Internal method that handles parameter retry for chat model calls
*/
private callChatModelWithRetry;
/**
* Calls the OpenAI-compatible API to use a completion model.
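*
* @example
* // A sketch; per the signature, only `content`, `parameters` and
* // `modelRequirements` are required ('COMPLETION' is the assumed variant name):
* const result = await tools.callCompletionModel({
*     content: 'Once upon a time',
*     parameters: {},
*     modelRequirements: { modelVariant: 'COMPLETION' },
* });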
*/
callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
/**
* Internal method that handles parameter retry for completion model calls
*/
private callCompletionModelWithRetry;
/**
* Calls the OpenAI-compatible API to use an embedding model.
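*
* @example
* // A sketch; 'EMBEDDING' as the variant name and the result shape are assumptions:
* const result = await tools.callEmbeddingModel({
*     content: 'Text to embed',
*     parameters: {},
*     modelRequirements: { modelVariant: 'EMBEDDING' },
* });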
*/
callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<EmbeddingPromptResult>;
/**
* Internal method that handles parameter retry for embedding model calls
*/
private callEmbeddingModelWithRetry;
/**
* Gets the model that should be used as the default
*/
protected getDefaultModel(defaultModelName: string_model_name): AvailableModel;
/**
* Lists all available models (statically, not fetched from the API)
*
* Note: The purpose of this is to provide more information about the models than the standard listing from the API
*/
protected abstract get HARDCODED_MODELS(): ReadonlyArray<AvailableModel>;
/**
* Computes the usage of the OpenAI API based on the response from the OpenAI-compatible API
*/
protected abstract computeUsage(...args: Parameters<typeof computeOpenAiUsage>): Usage;
/**
* Default model for chat variant.
*/
protected abstract getDefaultChatModel(): AvailableModel;
/**
* Default model for completion variant.
*/
protected abstract getDefaultCompletionModel(): AvailableModel;
/**
* Default model for embedding variant.
*/
protected abstract getDefaultEmbeddingModel(): AvailableModel;
/**
* Makes a request with retry logic for network errors like ECONNRESET
*/
private makeRequestWithNetworkRetry;
/**
* Determines if an error is retryable (network-related errors)
*/
private isRetryableNetworkError;
}
/**
* TODO: Some way to re-wrap the errors from `OpenAiCompatibleExecutionTools`
* TODO: Maybe make a custom `OpenAiCompatibleError`
* TODO: [🧠] Maybe use `isDeterministic` from options
* TODO: [🧠] Allow passing a `title` for tracking purposes
* TODO: [🧠][🦢] Make a reverse adapter from LlmExecutionTools to OpenAI-compatible
*/
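For orientation, below is a minimal sketch of how a concrete provider could extend this abstract class. It is not the official implementation: the `apiKey`/`baseURL` option fields, the `@promptbook/types` entry point, the `AvailableModel` field names, and the casts that stand in for Promptbook's branded string types are all assumptions to verify against your Promptbook version.

// Hypothetical subclass of OpenAiCompatibleExecutionTools; a sketch, not the
// official implementation. Field names and option shapes are assumptions.
import { OpenAiCompatibleExecutionTools } from '@promptbook/openai';
import type { AvailableModel, Usage } from '@promptbook/types'; // assumed type entry point

class ExampleGatewayExecutionTools extends OpenAiCompatibleExecutionTools {
    get title() {
        // Casts stand in for Promptbook's branded string types
        return 'Example Gateway' as any;
    }

    get description() {
        return 'An OpenAI-compatible gateway (illustrative only)' as any;
    }

    // Static model metadata, richer than what a live API listing returns
    protected get HARDCODED_MODELS(): ReadonlyArray<AvailableModel> {
        return [
            {
                modelVariant: 'CHAT',
                modelName: 'example-chat-1', // hypothetical model name
                modelTitle: 'Example Chat 1',
            } as AvailableModel,
        ];
    }

    protected computeUsage(...args: any[]): Usage {
        // A real subclass would price token usage here, typically by delegating
        // to a usage helper; omitted because pricing tables are provider-specific
        throw new Error('Pricing is not implemented in this sketch');
    }

    protected getDefaultChatModel(): AvailableModel {
        return this.getDefaultModel('example-chat-1' as any);
    }

    protected getDefaultCompletionModel(): AvailableModel {
        return this.getDefaultModel('example-completion-1' as any);
    }

    protected getDefaultEmbeddingModel(): AvailableModel {
        return this.getDefaultModel('example-embedding-1' as any);
    }
}

// Usage sketch (assumes `apiKey` and `baseURL` are accepted options and an
// ESM context that allows top-level await):
const tools = new ExampleGatewayExecutionTools({
    apiKey: process.env.EXAMPLE_GATEWAY_API_KEY,
    baseURL: 'https://api.example.com/v1',
} as any);
await tools.checkConfiguration();
console.info(await tools.listModels());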