// @promptbook/azure-openai
// Promptbook: Run AI apps in plain human language across multiple models and platforms
import type { AvailableModel } from '../../execution/AvailableModel';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
import type { RemoteClientOptions } from '../../remote-server/types/RemoteClientOptions';
import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt } from '../../types/Prompt';
import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
/**
* The remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
*
* You can simply use `RemoteLlmExecutionTools` in client-side JavaScript and connect to your remote server.
* This is useful when you want to keep all logic on the browser side without exposing your API keys, or when you do not want to rely on the customer's GPU.
*
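* @example
* // A minimal usage sketch; the option names below (`remoteServerUrl`, `isAnonymous`)
* // are assumptions for illustration - consult `RemoteClientOptions` for the exact shape.
* const llmTools = new RemoteLlmExecutionTools({
*     remoteServerUrl: 'https://your-promptbook-server.example.com',
*     isAnonymous: true,
* });
* await llmTools.checkConfiguration();
* const models = await llmTools.listModels();
*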
* @see https://github.com/webgptorg/promptbook#remote-server
* @public exported from `@promptbook/remote-client`
*/
export declare class RemoteLlmExecutionTools<TCustomOptions = undefined> implements LlmExecutionTools {
protected readonly options: RemoteClientOptions<TCustomOptions>;
constructor(options: RemoteClientOptions<TCustomOptions>);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**
* Check the configuration of all execution tools
*/
checkConfiguration(): Promise<void>;
/**
* List all available models that can be used
*/
listModels(): Promise<ReadonlyArray<AvailableModel>>;
/**
* Calls remote proxy server to use a chat model
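*
* @example
* // A hedged sketch (continuing from the constructor example above); the prompt fields
* // shown (`content`, `modelRequirements`) are assumptions for illustration - see the
* // `ChatPrompt` type for the authoritative shape.
* const chatResult = await llmTools.callChatModel({
*     content: 'Write a haiku about TypeScript',
*     modelRequirements: { modelVariant: 'CHAT' },
* } as ChatPrompt);
* console.info(chatResult.content);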
*/
callChatModel(prompt: ChatPrompt): Promise<ChatPromptResult>;
/**
* Calls remote proxy server to use a completion model
*/
callCompletionModel(prompt: CompletionPrompt): Promise<CompletionPromptResult>;
/**
* Calls remote proxy server to use an embedding model
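*
* @example
* // A hedged sketch; the field names are assumptions for illustration - see the
* // `EmbeddingPrompt` and `EmbeddingPromptResult` types for the authoritative shape.
* const embeddingResult = await llmTools.callEmbeddingModel({
*     content: 'Text to embed',
*     modelRequirements: { modelVariant: 'EMBEDDING' },
* } as EmbeddingPrompt);
* // The result is expected to carry the embedding vector in its `content`.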
*/
callEmbeddingModel(prompt: EmbeddingPrompt): Promise<EmbeddingPromptResult>;
/**
* Calls remote proxy server to use either a completion or a chat model
*/
private callCommonModel;
}
/**
* TODO: Maybe use `$exportJson`
* TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
* TODO: [🍓] Allow to list compatible models with each variant
* TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
* TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
* TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
*/