// @langchain/core: Core LangChain.js abstractions and schemas
import { BaseTransformOutputParser } from "./transform.js";
import { MessageContentComplex, MessageContentImageUrl, MessageContentText } from "../messages/index.js";
/**
* OutputParser that parses LLMResult into the most likely string.
* @example
* ```typescript
* const promptTemplate = PromptTemplate.fromTemplate(
*   "Tell me a joke about {topic}",
* );
*
* const chain = RunnableSequence.from([
*   promptTemplate,
*   new ChatOpenAI({}),
*   new StringOutputParser(),
* ]);
*
* const result = await chain.invoke({ topic: "bears" });
* console.log(result); // e.g. "What do you call a bear with no teeth? A gummy bear!"
* ```
*/
export declare class StringOutputParser extends BaseTransformOutputParser<string> {
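/** Name used to identify this class when it is serialized. */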
static lc_name(): string;
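/** Namespace path for this class in LangChain's serialization registry. */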
lc_namespace: string[];
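/** Whether instances of this class can be serialized. */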
lc_serializable: boolean;
/**
* Parses the string output from an LLM call. For StringOutputParser the
* text is returned unchanged, since no further parsing is required.
* @param text The string output from an LLM call.
* @returns A promise that resolves to the same string.
*/
parse(text: string): Promise<string>;
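/** Returns instructions describing how the LLM output should be formatted; a plain string needs no special format. */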
getFormatInstructions(): string;
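/** Extracts the string value from a `text` content part of a message. */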
protected _textContentToString(content: MessageContentText): string;
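/** Handles an `image_url` content part, which has no plain-text representation. */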
protected _imageUrlContentToString(_content: MessageContentImageUrl): string;
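/** Converts a single complex (multi-part) message content entry into a string. */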
protected _messageContentComplexToString(content: MessageContentComplex): string;
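/** Converts an array of complex message content parts into a single string. */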
protected _baseMessageContentToString(content: MessageContentComplex[]): string;
}
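Because StringOutputParser extends BaseTransformOutputParser, it also works in streaming pipelines, passing string chunks through as they arrive. A minimal sketch of that usage, assuming the ChatOpenAI model from the example above (provided by the separate @langchain/openai package) and an OPENAI_API_KEY in the environment:

```typescript
import { ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";

const prompt = PromptTemplate.fromTemplate("Tell me a joke about {topic}");
const model = new ChatOpenAI({});
const parser = new StringOutputParser();

// pipe() composes the prompt, model, and parser into a RunnableSequence,
// equivalent to the RunnableSequence.from([...]) form used above.
const chain = prompt.pipe(model).pipe(parser);

// Each streamed chunk is already a plain string, so it can be printed as it arrives.
const stream = await chain.stream({ topic: "bears" });
for await (const chunk of stream) {
  process.stdout.write(chunk);
}
```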
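The protected `*ContentToString` helpers handle message content that arrives as an array of parts rather than a plain string. A minimal sketch of that path, assuming the text parts are concatenated in order (the AIMessage below is a hand-built stand-in for a model response):

```typescript
import { AIMessage } from "@langchain/core/messages";
import { StringOutputParser } from "@langchain/core/output_parsers";

const parser = new StringOutputParser();

// Hand-built message whose content is an array of text parts,
// similar to what some chat models return for multimodal requests.
const message = new AIMessage({
  content: [
    { type: "text", text: "What do you call a bear with no teeth?" },
    { type: "text", text: " A gummy bear!" },
  ],
});

// Assumed behavior: the text parts are flattened into a single string.
const text = await parser.invoke(message);
console.log(text); // "What do you call a bear with no teeth? A gummy bear!"
```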