@promptbook/azure-openai
Version:
Promptbook: Run AI apps in plain human language across multiple models and platforms
37 lines (36 loc) • 1.5 kB
TypeScript
import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
import type { Usage } from '../../../../execution/Usage';
import type { PromptbookStorage } from '../../../../storage/_common/PromptbookStorage';
import type { TODO_any } from '../../../../utils/organization/TODO_any';
import type { LlmExecutionToolsWithTotalUsage } from './LlmExecutionToolsWithTotalUsage';
/**
 * Options for the `limitTotalUsage` function.
 *
 * All fields are optional at the call site — `limitTotalUsage` accepts
 * `Partial<LimitTotalUsageOptions>`, so each field documents its own default.
 */
type LimitTotalUsageOptions = {
/**
 * The usage limits to apply.
 *
 * NOTE(review): presumably execution is blocked or rejected once cumulative
 * usage reaches these limits — confirm against the implementation, which is
 * not visible in this declaration file.
 *
 * @default ZERO_USAGE
 */
maxTotalUsage: Usage;
/**
 * The storage mechanism to use for tracking usage across multiple executions or instances.
 *
 * Supplying a persistent `PromptbookStorage` implementation allows usage
 * accounting to survive process restarts.
 *
 * @default MemoryStorage which will not persist when the process ends
 */
storage: PromptbookStorage<TODO_any>;
};
/**
 * Wraps LlmExecutionTools to limit the total usage based on provided limits.
 *
 * The returned tools expose the same `LlmExecutionTools` interface plus
 * total-usage tracking (see `LlmExecutionToolsWithTotalUsage`).
 * NOTE(review): what happens when the limit is exceeded (throw vs. refuse)
 * is not visible from this declaration — confirm in the implementation.
 *
 * @param llmTools - The underlying execution tools to wrap; their interface is preserved
 * @param options - Optional usage limit and storage backend; unspecified fields fall back to the defaults documented on `LimitTotalUsageOptions`
 * @returns The wrapped tools with cumulative usage accounting
 * @public exported from `@promptbook/core`
 */
export declare function limitTotalUsage(llmTools: LlmExecutionTools, options?: Partial<LimitTotalUsageOptions>): LlmExecutionToolsWithTotalUsage;
export {};
/**
* TODO: Maybe internally use `countTotalUsage`
* TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
* TODO: [🧠] Is there a meaningful way to test this util?
* TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
* TODO: [👷♂️] @@@ Manual about construction of llmTools
*/