
@promptbook/azure-openai

Promptbook: Run AI apps in plain human language across multiple models and platforms

import type { KnowledgePiecePreparedJson } from '../../pipeline/PipelineJson/KnowledgePieceJson';
import type { ExecutionTools } from '../../execution/ExecutionTools';
import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
import type { Converter } from '../_common/Converter';
import type { ScraperAndConverterMetadata } from '../_common/register/ScraperAndConverterMetadata';
import type { Scraper } from '../_common/Scraper';
import type { ScraperSourceHandler } from '../_common/Scraper';
import type { ScraperIntermediateSource } from '../_common/ScraperIntermediateSource';
/**
 * Scraper for old document files (like .doc and .rtf)
 *
 * @see `documentationUrl` for more details
 * @public exported from `@promptbook/legacy-documents`
 */
export declare class LegacyDocumentScraper implements Converter, Scraper {
    private readonly tools;
    private readonly options;
    /**
     * Metadata of the scraper, which includes title, mime types, etc.
     */
    get metadata(): ScraperAndConverterMetadata;
    /**
     * Document scraper which is used internally
     */
    private readonly documentScraper;
    constructor(tools: Pick<ExecutionTools, 'fs' | 'llm' | 'executables'>, options: PrepareAndScrapeOptions);
    /**
     * Converts the `.doc` or `.rtf` file to a `.docx` file and returns the intermediate source
     *
     * Note: `$` indicates that this function is not pure - it leaves files on the disk and you are responsible for cleaning them up by calling the `destroy` method of the returned object
     */
    $convert(source: ScraperSourceHandler): Promise<ScraperIntermediateSource>;
    /**
     * Scrapes the `.doc` or `.rtf` file and returns the knowledge pieces, or `null` if it can't scrape it
     */
    scrape(source: ScraperSourceHandler): Promise<ReadonlyArray<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>> | null>;
}
/**
 * TODO: [👣] Converted documents can act as cached items - there is no need to run conversion each time
 * TODO: [🪂] Do it in parallel 11:11
 * Note: No need to aggregate usage here, it is done by intercepting the llmTools
 * Note: [🟢] Code in this file should never be released in packages that could be imported into a browser environment
 */
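A minimal usage sketch follows, assuming an `ExecutionTools` instance providing `fs`, `llm`, and `executables` is available from the host (Node.js) environment. The `tools` and `sourceHandler` values below are placeholders, not Promptbook exports, and passing `{}` for the options assumes `PrepareAndScrapeOptions` accepts defaults:

import { LegacyDocumentScraper } from '@promptbook/legacy-documents';

// Placeholders (not Promptbook exports): `tools` must provide `fs`, `llm` and `executables`,
// and `sourceHandler` must be a `ScraperSourceHandler` pointing at a `.doc` or `.rtf` file.
declare const tools: ConstructorParameters<typeof LegacyDocumentScraper>[0];
declare const sourceHandler: Parameters<LegacyDocumentScraper['scrape']>[0];

async function scrapeLegacyDocument(): Promise<void> {
    const scraper = new LegacyDocumentScraper(tools, {} /* PrepareAndScrapeOptions; assuming defaults suffice */);

    // Scrape knowledge pieces directly; resolves to `null` when the source cannot be handled
    const pieces = await scraper.scrape(sourceHandler);
    console.info(pieces);

    // Or run only the conversion step; `$` marks an impure operation that leaves files on disk,
    // so the intermediate source must be cleaned up, per the doc comment above
    const intermediate = await scraper.$convert(sourceHandler);
    try {
        // ... work with the converted file ...
    } finally {
        await intermediate.destroy(); // assuming `destroy` may be awaited, as documented
    }
}

Note that `scrape` handles conversion internally, so `$convert` is only needed when you want the converted `.docx` file itself rather than the extracted knowledge pieces.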