@langchain/core
Core LangChain.js abstractions and schemas
1 line • 6.17 kB
Source Map (JSON)
{"version":3,"file":"base.d.cts","names":["Runnable","RunnableConfig","BasePromptValueInterface","BaseMessage","ContentBlock","Callbacks","Generation","ChatGeneration","FormatInstructionsOptions","BaseLLMOutputParser","T","Promise","BaseOutputParser","OutputParserException","Error"],"sources":["../../src/output_parsers/base.d.ts"],"sourcesContent":["import { Runnable } from \"../runnables/index.js\";\nimport type { RunnableConfig } from \"../runnables/config.js\";\nimport type { BasePromptValueInterface } from \"../prompt_values.js\";\nimport type { BaseMessage, ContentBlock } from \"../messages/index.js\";\nimport type { Callbacks } from \"../callbacks/manager.js\";\nimport type { Generation, ChatGeneration } from \"../outputs.js\";\n/**\n * Options for formatting instructions.\n */\nexport interface FormatInstructionsOptions {\n}\n/**\n * Abstract base class for parsing the output of a Large Language Model\n * (LLM) call. It provides methods for parsing the result of an LLM call\n * and invoking the parser with a given input.\n */\nexport declare abstract class BaseLLMOutputParser<T = unknown> extends Runnable<string | BaseMessage, T> {\n /**\n * Parses the result of an LLM call. This method is meant to be\n * implemented by subclasses to define how the output from the LLM should\n * be parsed.\n * @param generations The generations from an LLM call.\n * @param callbacks Optional callbacks.\n * @returns A promise of the parsed output.\n */\n abstract parseResult(generations: Generation[] | ChatGeneration[], callbacks?: Callbacks): Promise<T>;\n /**\n * Parses the result of an LLM call with a given prompt. By default, it\n * simply calls `parseResult`.\n * @param generations The generations from an LLM call.\n * @param _prompt The prompt used in the LLM call.\n * @param callbacks Optional callbacks.\n * @returns A promise of the parsed output.\n */\n parseResultWithPrompt(generations: Generation[] | ChatGeneration[], _prompt: BasePromptValueInterface, callbacks?: Callbacks): Promise<T>;\n protected _baseMessageToString(message: BaseMessage): string;\n protected _baseMessageContentToString(content: ContentBlock[]): string;\n /**\n * Calls the parser with a given input and optional configuration options.\n * If the input is a string, it creates a generation with the input as\n * text and calls `parseResult`. 
If the input is a `BaseMessage`, it\n * creates a generation with the input as a message and the content of the\n * input as text, and then calls `parseResult`.\n * @param input The input to the parser, which can be a string or a `BaseMessage`.\n * @param options Optional configuration options.\n * @returns A promise of the parsed output.\n */\n invoke(input: string | BaseMessage, options?: RunnableConfig): Promise<T>;\n}\n/**\n * Class to parse the output of an LLM call.\n */\nexport declare abstract class BaseOutputParser<T = unknown> extends BaseLLMOutputParser<T> {\n parseResult(generations: Generation[] | ChatGeneration[], callbacks?: Callbacks): Promise<T>;\n /**\n * Parse the output of an LLM call.\n *\n * @param text - LLM output to parse.\n * @returns Parsed output.\n */\n abstract parse(text: string, callbacks?: Callbacks): Promise<T>;\n parseWithPrompt(text: string, _prompt: BasePromptValueInterface, callbacks?: Callbacks): Promise<T>;\n /**\n * Return a string describing the format of the output.\n * @returns Format instructions.\n * @param options - Options for formatting instructions.\n * @example\n * ```json\n * {\n * \"foo\": \"bar\"\n * }\n * ```\n */\n abstract getFormatInstructions(options?: FormatInstructionsOptions): string;\n /**\n * Return the string type key uniquely identifying this class of parser\n */\n _type(): string;\n}\n/**\n * Exception that output parsers should raise to signify a parsing error.\n *\n * This exists to differentiate parsing errors from other code or execution errors\n * that also may arise inside the output parser. OutputParserExceptions will be\n * available to catch and handle in ways to fix the parsing error, while other\n * errors will be raised.\n *\n * @param message - The error that's being re-raised or an error message.\n * @param llmOutput - String model output which is error-ing.\n * @param observation - String explanation of error which can be passed to a\n * model to try and remediate the issue.\n * @param sendToLLM - Whether to send the observation and llm_output back to an Agent\n * after an OutputParserException has been raised. 
This gives the underlying\n * model driving the agent the context that the previous output was improperly\n * structured, in the hopes that it will update the output to the correct\n * format.\n */\nexport declare class OutputParserException extends Error {\n llmOutput?: string;\n observation?: string;\n sendToLLM: boolean;\n constructor(message: string, llmOutput?: string, observation?: string, sendToLLM?: boolean);\n}\n//# sourceMappingURL=base.d.ts.map"],"mappings":";;;;;;;;;;;;AASiBQ,UAAAA,yBAAAA,CAAyB,CAO1C;;;;;;AASuGE,uBATzED,mBASyEC,CAAAA,IAAAA,OAAAA,CAAAA,SAThCV,QASgCU,CAAAA,MAAAA,GATdP,WAScO,EATDA,CASCA,CAAAA,CAAAA;EAARC;;;;;;;;EAW5CP,SAAAA,WAAAA,CAAAA,WAAAA,EAXbE,UAWaF,EAAAA,GAXEG,cAWFH,EAAAA,EAAAA,SAAAA,CAAAA,EAXgCC,SAWhCD,CAAAA,EAX4CO,OAW5CP,CAXoDM,CAWpDN,CAAAA;EAWxBD;;;;;AA/BoD;AAoC/E;;EAC6BG,qBAAAA,CAAAA,WAAAA,EAnBUA,UAmBVA,EAAAA,GAnByBC,cAmBzBD,EAAAA,EAAAA,OAAAA,EAnBoDJ,wBAmBpDI,EAAAA,SAAAA,CAAAA,EAnB0FD,SAmB1FC,CAAAA,EAnBsGK,OAmBtGL,CAnB8GI,CAmB9GJ,CAAAA;EAAeC,UAAAA,oBAAAA,CAAAA,OAAAA,EAlBAJ,WAkBAI,CAAAA,EAAAA,MAAAA;EAA8BF,UAAAA,2BAAAA,CAAAA,OAAAA,EAjBvBD,YAiBuBC,EAAAA,CAAAA,EAAAA,MAAAA;EAAoBK;;;;;;;;;;EAD1BD,MAAAA,CAAAA,KAAAA,EAAAA,MAAAA,GALzCN,WAKyCM,EAAAA,OAAAA,CAAAA,EALlBR,cAKkBQ,CAAAA,EALDE,OAKCF,CALOC,CAKPD,CAAAA;AAAmB;AA6CvF;;;uBA7C8BG,sCAAsCH,oBAAoBC;2BAC3DJ,eAAeC,8BAA8BF,YAAYM,QAAQD;;;;;;;2CAOjDL,YAAYM,QAAQD;yCACtBR,sCAAsCG,YAAYM,QAAQD;;;;;;;;;;;;2CAYxDF;;;;;;;;;;;;;;;;;;;;;;;;cAwBxBK,qBAAAA,SAA8BC,KAAK"}
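Per the declarations above, the only abstract members a concrete parser must supply are `parse` and `getFormatInstructions` (plus the `lc_namespace` key that `Runnable`'s `Serializable` base requires). A minimal sketch, assuming the public re-export path `@langchain/core/output_parsers`; the class name `CommaSeparatedParser` is illustrative, not part of the package:

```ts
import { BaseOutputParser } from "@langchain/core/output_parsers";

// Illustrative parser: splits model output on commas into a trimmed string array.
class CommaSeparatedParser extends BaseOutputParser<string[]> {
  // Required by the Serializable base class in @langchain/core.
  lc_namespace = ["custom", "output_parsers"];

  async parse(text: string): Promise<string[]> {
    return text
      .split(",")
      .map((item) => item.trim())
      .filter((item) => item.length > 0);
  }

  getFormatInstructions(): string {
    return "Respond with a comma-separated list, e.g. `foo, bar, baz`.";
  }
}
```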
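Because both classes extend `Runnable<string | BaseMessage, T>`, a parser can be invoked directly or composed into a chain; per the `invoke` doc comment above, a string input is wrapped in a generation, while a `BaseMessage`'s content is used as the generation text. Continuing the sketch above (`HumanMessage` comes from `@langchain/core/messages`):

```ts
import { HumanMessage } from "@langchain/core/messages";

const parser = new CommaSeparatedParser();

// Both inputs funnel through parseResult -> parse.
const fromString = await parser.invoke("red, green, blue");
// -> ["red", "green", "blue"]
const fromMessage = await parser.invoke(new HumanMessage("cyan, magenta"));
// -> ["cyan", "magenta"]
```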
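`OutputParserException` is the channel for recoverable parsing failures: per the constructor declared above, `llmOutput` carries the raw model output, `observation` carries feedback that can be passed back to the model, and `sendToLLM` asks an agent loop to do exactly that. A sketch of raising and catching it; the `StrictJsonParser` class is illustrative:

```ts
import {
  BaseOutputParser,
  OutputParserException,
} from "@langchain/core/output_parsers";

// Illustrative parser that signals malformed output via OutputParserException.
class StrictJsonParser extends BaseOutputParser<Record<string, unknown>> {
  lc_namespace = ["custom", "output_parsers"];

  async parse(text: string): Promise<Record<string, unknown>> {
    try {
      return JSON.parse(text);
    } catch (e) {
      throw new OutputParserException(
        `Failed to parse JSON: ${(e as Error).message}`,
        text, // llmOutput: the raw output that failed to parse
        "Output must be a single valid JSON object.", // observation
        true // sendToLLM: let an agent feed this back to the model
      );
    }
  }

  getFormatInstructions(): string {
    return "Respond with a single JSON object.";
  }
}

// Callers can separate parsing failures from genuine bugs:
try {
  await new StrictJsonParser().invoke("not json");
} catch (err) {
  if (err instanceof OutputParserException) {
    console.log(err.observation, err.llmOutput); // recoverable: retry or re-prompt
  } else {
    throw err; // anything else is a real error
  }
}
```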