@promptbook/azure-openai
Promptbook: Run AI apps in plain human language across multiple models and platforms
TypeScript
import type { ExecutionTools } from '../execution/ExecutionTools';
import type { PipelineJson } from '../pipeline/PipelineJson/PipelineJson';
import type { TaskJson } from '../pipeline/PipelineJson/TaskJson';
import type { PrepareAndScrapeOptions } from './PrepareAndScrapeOptions';
type PrepareTaskInput = Pick<PipelineJson, 'tasks' | 'parameters'> & {
/**
* The number of knowledge pieces available for the pipeline.
*/
readonly knowledgePiecesCount: number;
};
type PreparedTasks = {
/**
* The sequence of tasks after preparation.
*/
readonly tasksPrepared: ReadonlyArray<TaskJson>;
};
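/**
 * Illustrative sketch (not part of the original declaration): what a `PrepareTaskInput`
 * and the resulting `PreparedTasks` value might look like. The concrete values below are
 * assumptions for illustration only.
 */
// const input: PrepareTaskInput = {
//     tasks: pipeline.tasks,           // tasks of an already-parsed `PipelineJson`
//     parameters: pipeline.parameters, // its parameters
//     knowledgePiecesCount: 3,         // knowledge pieces available to inject into prompts
// };
// const result: PreparedTasks = { tasksPrepared: [/* TaskJson items after preparation */] };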
/**
* Prepares tasks by adding knowledge to the prompt and ensuring all necessary parameters are included.
*
 * @param pipeline Tasks and parameters of the pipeline, together with the number of available knowledge pieces
 * @param tools Execution tools (`llm`, `fs`, `scrapers`) used during the preparation
 * @param options Options for preparation and scraping
* @returns A promise that resolves to the prepared tasks.
*
* @private internal utility of `preparePipeline`
*/
export declare function prepareTasks(pipeline: PrepareTaskInput, tools: Pick<ExecutionTools, 'llm' | 'fs' | 'scrapers'>, options: PrepareAndScrapeOptions): Promise<PreparedTasks>;
export {};
/**
 * TODO: [😂] Adding knowledge should be converted to async high-level abstractions; similarly, expectations should be converted to sync high-level abstractions
* TODO: [🧠] Add context to each task (if missing)
 * TODO: [🧠] Which is the better name, `prepareTask` or `prepareTaskAndParameters`?
 * TODO: [♨][main] !!3 Prepare and index the examples and maybe also the tasks
* TODO: Write tests for `preparePipeline`
* TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
* TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
* TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
*/
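/**
 * Illustrative usage sketch (an assumption, not part of this declaration file): how
 * `prepareTasks` might be called from `preparePipeline`. The `llm`, `fs`, `scrapers`
 * and `options` values are placeholders; consult `ExecutionTools` and
 * `PrepareAndScrapeOptions` for their actual shapes.
 */
// const { tasksPrepared } = await prepareTasks(
//     input,                 // the `PrepareTaskInput` sketched above
//     { llm, fs, scrapers }, // subset of `ExecutionTools` required for preparation
//     options,               // a `PrepareAndScrapeOptions` object supplied by the caller
// );
// const pipelinePrepared: PipelineJson = { ...pipeline, tasks: [...tasksPrepared] };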