UNPKG

@promptbook/ollama

Version:

Promptbook: Run AI apps in plain human language across multiple models and platforms

45 lines (44 loc) 1.87 kB
import type { AvailableModel } from '../../execution/AvailableModel';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
import type { Usage } from '../../execution/Usage';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
import { computeOpenAiUsage } from '../openai/computeOpenAiUsage';
import { OpenAiCompatibleExecutionTools } from '../openai/OpenAiCompatibleExecutionTools';
import type { OllamaExecutionToolsOptions } from './OllamaExecutionToolsOptions';
/**
 * Execution Tools for calling Ollama API
 *
 * Extends `OpenAiCompatibleExecutionTools`, reusing its OpenAI-compatible
 * request handling while overriding titles, model listing, and defaults
 * for Ollama.
 *
 * @public exported from `@promptbook/ollama`
 */
export declare class OllamaExecutionTools extends OpenAiCompatibleExecutionTools implements LlmExecutionTools {
    /**
     * Creates the execution tools configured by the given Ollama options
     *
     * @param ollamaOptions Configuration for connecting to Ollama
     */
    constructor(ollamaOptions: OllamaExecutionToolsOptions);
    /**
     * Short human-readable title of these execution tools
     */
    get title(): string_title & string_markdown_text;
    /**
     * Markdown description of these execution tools
     */
    get description(): string_markdown;
    /**
     * List all available models (non dynamically)
     *
     * Note: Purpose of this is to provide more information about models than standard listing from API
     */
    protected get HARDCODED_MODELS(): ReadonlyArray<AvailableModel>;
    /**
     * Computes the usage of the Ollama API based on the response from Ollama
     *
     * Note: Delegates to the same signature as `computeOpenAiUsage`, since the
     * Ollama API is OpenAI-compatible.
     */
    protected computeUsage(...args: Parameters<typeof computeOpenAiUsage>): Usage;
    /**
     * Default model for chat variant.
     */
    protected getDefaultChatModel(): AvailableModel;
    /**
     * Default model for completion variant.
     */
    protected getDefaultCompletionModel(): AvailableModel;
    /**
     * Default model for embedding variant.
     */
    protected getDefaultEmbeddingModel(): AvailableModel;
}
/**
 * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
 */