@promptbook/azure-openai
Promptbook: Run AI apps in plain human language across multiple models and platforms
import type { FormatCommand } from '../commands/FORMAT/FormatCommand';
import type { Expectations } from '../pipeline/PipelineJson/Expectations';
import type { ChatModelRequirements } from './ModelRequirements';
import type { CompletionModelRequirements } from './ModelRequirements';
import type { EmbeddingModelRequirements } from './ModelRequirements';
import type { ModelRequirements } from './ModelRequirements';
import type { Parameters } from './typeAliases';
import type { string_pipeline_url_with_task_hash } from './typeAliases';
import type { string_postprocessing_function_name } from './typeAliases';
import type { string_prompt } from './typeAliases';
import type { string_template } from './typeAliases';
import type { string_title } from './typeAliases';
/**
 * Prompt as text together with model requirements, but without any execution or templating logic.
 *
 * Note: This is fully serializable as JSON
* @see https://github.com/webgptorg/promptbook#prompt
*/
export type Prompt = CompletionPrompt | ChatPrompt | EmbeddingPrompt;
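/**
 * Usage sketch (belongs in consuming code, not in this declaration file): telling the three
 * prompt kinds apart. It assumes that every `ModelRequirements` variant carries a `modelVariant`
 * discriminant ('COMPLETION' | 'CHAT' | 'EMBEDDING'); see `./ModelRequirements` for the real shape.
 */
function isChatPrompt(prompt: Prompt): prompt is ChatPrompt {
    return prompt.modelRequirements.modelVariant === 'CHAT';
}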
/**
* Completion prompt
*
 * Note: This is fully serializable as JSON
*/
export type CompletionPrompt = CommonPrompt & {
/**
* Requirements for completion model
*/
modelRequirements: CompletionModelRequirements;
};
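/**
 * Usage sketch (consuming code): a minimal `CompletionPrompt` value. The
 * `{ modelVariant: 'COMPLETION' }` requirements, the plain-string parameter values and the
 * prompt text are illustrative assumptions, not values shipped with the package.
 */
const completionPromptExample: CompletionPrompt = {
    title: 'Bakery slogan',
    content: 'Write a short slogan for a bakery named {name}.',
    modelRequirements: {
        modelVariant: 'COMPLETION',
    },
    parameters: {
        name: 'Sweet Crumbs',
    },
};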
/**
* Chat prompt
*
 * Note: This is fully serializable as JSON
*/
export type ChatPrompt = CommonPrompt & {
/**
* Requirements for chat model
*/
modelRequirements: ChatModelRequirements;
};
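/**
 * Usage sketch (consuming code): a `ChatPrompt` that also states expectations about the answer.
 * The `{ modelVariant: 'CHAT' }` requirements and the word-count shape of `Expectations` are
 * assumptions here; check the imported `Expectations` type for the exact contract.
 */
const chatPromptExample: ChatPrompt = {
    title: 'Capital city',
    content: 'What is the capital of {country}? Answer with the city name only.',
    modelRequirements: {
        modelVariant: 'CHAT',
    },
    expectations: {
        words: { min: 1, max: 3 },
    },
    parameters: {
        country: 'France',
    },
};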
/**
* Embedding prompt
*
 * Note: This is fully serializable as JSON
*/
export type EmbeddingPrompt = CommonPrompt & {
/**
 * Requirements for embedding model
*/
modelRequirements: EmbeddingModelRequirements;
};
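/**
 * Usage sketch (consuming code): an `EmbeddingPrompt`. Embedding prompts typically carry plain
 * text to be vectorized and no answer expectations; `{ modelVariant: 'EMBEDDING' }` is an
 * assumed minimal requirements shape.
 */
const embeddingPromptExample: EmbeddingPrompt = {
    title: 'Index product description',
    content: 'Handmade ceramic mug, 350 ml, dishwasher safe.',
    modelRequirements: {
        modelVariant: 'EMBEDDING',
    },
    parameters: {},
};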
/**
 * Common properties of all prompts
 *
 * Note: This is fully serializable as JSON
 *
 * @private just an abstraction of the common properties of the prompts
*/
export type CommonPrompt = {
/**
* The title of the prompt
*
 * Note: This has no effect on the model; it is just for reporting
*/
readonly title: string_title;
/**
* The text of the prompt with placeholders for parameters
*
* @example "What is the capital of {country}?"
*/
readonly content: string_prompt & string_template;
/**
* Requirements for the model
*/
readonly modelRequirements: ModelRequirements;
/**
 * List of postprocessing functions that are applied to the answer after the prompt is executed
*/
readonly postprocessingFunctionNames?: ReadonlyArray<string_postprocessing_function_name>;
/**
* Expectations for the answer
*
* For example 5 words, 3 sentences, 2 paragraphs, ...
* If not set, nothing is expected from the answer
*/
readonly expectations?: Expectations;
/**
* Expect this format of the answer
*
* Note: Expectations are performed after all postprocessing steps
 * @deprecated
*/
readonly format?: FormatCommand['format'];
/**
* Unique identifier of the pipeline with specific task name as hash
*
* @example https://promptbook.studio/webgpt/write-website-content-cs.book#keywords
*/
readonly pipelineUrl?: string_pipeline_url_with_task_hash;
/**
* Parameters used in the `content`
*/
readonly parameters: Parameters;
};
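/**
 * Illustrative sketch only: Promptbook resolves templates itself, but this shows how the
 * `content` placeholders and the `parameters` record of a `CommonPrompt` relate. It assumes
 * the `string_*` aliases are plain strings and `Parameters` maps parameter names to values.
 */
function previewPrompt(prompt: Prompt): string {
    return prompt.content.replace(/\{(\w+)\}/g, (placeholder, parameterName: string) => {
        const value = prompt.parameters[parameterName];
        return value === undefined ? placeholder : String(value);
    });
}
// For the chat example above, previewPrompt(chatPromptExample) yields
// 'What is the capital of France? Answer with the city name only.'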
/**
 * TODO: Replace all "github.com/webgptorg/promptbook#xxx" with "ptbk.io/xxx"
 * TODO: Check ModelRequirements at runtime
 * TODO: Add options for translation - maybe create `TranslationPrompt`
 * TODO: Maybe allow overriding of `userId` for each prompt
*/