@promptbook/azure-openai
Promptbook: Run AI apps in plain human language across multiple models and platforms
TypeScript
import type Anthropic from '@anthropic-ai/sdk';
import type { PartialDeep } from 'type-fest';
import type { Usage } from '../../execution/Usage';
import type { Prompt } from '../../types/Prompt';
/**
* Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
*
* @param promptContent The content of the prompt
 * @param resultContent The content of the result (pass an empty string for embedding prompts or failed prompts)
 * @param rawResponse The raw response from the Anthropic Claude API
* @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
* @private internal utility of `AnthropicClaudeExecutionTools`
*/
export declare function computeAnthropicClaudeUsage(
    promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
    resultContent: string,
    rawResponse: PartialDeep<Pick<Anthropic.Messages.Message, 'model' | 'usage'>>,
): Usage;
/**
* TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
*/
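Below is a minimal usage sketch. The import path `@promptbook/anthropic-claude` and the public availability of the function are assumptions made for illustration; the declaration above marks it as a `@private` utility of `AnthropicClaudeExecutionTools`, so in practice it is invoked internally rather than directly by consumers.

import Anthropic from '@anthropic-ai/sdk';
// <- Note: Assumed import path; `computeAnthropicClaudeUsage` is a @private utility and may not be part of the public entry point
import { computeAnthropicClaudeUsage } from '@promptbook/anthropic-claude';

const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

async function main() {
    const promptContent = 'Write a haiku about the sea.';

    // Call the Anthropic Messages API; the raw response carries the `model` and `usage` fields
    // that `computeAnthropicClaudeUsage` reads
    const rawResponse = await anthropic.messages.create({
        model: 'claude-3-5-sonnet-20240620',
        max_tokens: 100,
        messages: [{ role: 'user', content: promptContent }],
    });

    // Extract the text of the first content block (empty string for non-text or failed results)
    const firstBlock = rawResponse.content[0];
    const resultContent = firstBlock && firstBlock.type === 'text' ? firstBlock.text : '';

    // Derive Promptbook `Usage` (token counts, price, ...) from the raw response;
    // per the JSDoc above, this throws `PipelineExecutionError` if `usage` is missing
    const usage = computeAnthropicClaudeUsage(promptContent, resultContent, rawResponse);
    console.log(usage);
}

main();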