@langchain/core
Core LangChain.js abstractions and schemas
import { Generation, GenerationChunk, LLMResult } from "../outputs.js";
import { BaseCache } from "../caches/index.js";
import { BaseCallbackConfig, CallbackManagerForLLMRun, Callbacks } from "../callbacks/manager.js";
import { RunnableConfig } from "../runnables/types.js";
import "../runnables/config.js";
import { BasePromptValueInterface } from "../prompt_values.js";
import { BaseLanguageModel, BaseLanguageModelCallOptions, BaseLanguageModelInput, BaseLanguageModelParams } from "./base.js";
//#region src/language_models/llms.d.ts
type SerializedLLM = {
  _model: string;
  _type: string;
} & Record<string, any>;
interface BaseLLMParams extends BaseLanguageModelParams {}
interface BaseLLMCallOptions extends BaseLanguageModelCallOptions {}
/**
 * LLM Wrapper. Takes in a prompt (or prompts) and returns a string.
 */
declare abstract class BaseLLM<CallOptions extends BaseLLMCallOptions = BaseLLMCallOptions> extends BaseLanguageModel<string, CallOptions> {
  ParsedCallOptions: Omit<CallOptions, Exclude<keyof RunnableConfig, "signal" | "timeout" | "maxConcurrency">>;
  lc_namespace: string[];
  /**
   * This method takes an input and options, and returns a string. It
   * converts the input to a prompt value and generates a result based on
   * the prompt.
   * @param input Input for the LLM.
   * @param options Options for the LLM call.
   * @returns A string result based on the prompt.
   */
  invoke(input: BaseLanguageModelInput, options?: CallOptions): Promise<string>;
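  /*
   * Usage sketch (editorial example, not part of the generated declarations):
   * `invoke` accepts a string, a message list, or a PromptValue and resolves to
   * the model's text completion. It assumes a concrete subclass such as the
   * CustomLLM sketched after the LLM class below, or a provider class like
   * OpenAI from @langchain/openai.
   *
   *   const model = new CustomLLM({ n: 32 });
   *   const text = await model.invoke("Write a haiku about type declarations.");
   *   // `text` is a plain string; per-call options such as `stop` or `signal`
   *   // go in the second argument:
   *   const capped = await model.invoke("Count to ten:", { stop: ["5"] });
   */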
  _streamResponseChunks(_input: string, _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
  protected _separateRunnableConfigFromCallOptionsCompat(options?: Partial<CallOptions>): [RunnableConfig, this["ParsedCallOptions"]];
  _streamIterator(input: BaseLanguageModelInput, options?: CallOptions): AsyncGenerator<string>;
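  /*
   * Streaming sketch (editorial example): callers normally consume tokens
   * through the public `stream` method inherited from Runnable, which drives
   * `_streamIterator` / `_streamResponseChunks` internally. Assumes a subclass
   * that overrides `_streamResponseChunks`; models that do not stream fall back
   * to yielding the full completion as a single chunk.
   *
   *   const stream = await model.stream("Tell me a short story.");
   *   for await (const chunk of stream) {
   *     process.stdout.write(chunk); // each chunk is a string segment
   *   }
   */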
  /**
   * This method takes prompt values, options, and callbacks, and generates
   * a result based on the prompts.
   * @param promptValues Prompt values for the LLM.
   * @param options Options for the LLM call.
   * @param callbacks Callbacks for the LLM call.
   * @returns An LLMResult based on the prompts.
   */
  generatePrompt(promptValues: BasePromptValueInterface[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
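  /*
   * Sketch of `generatePrompt` (editorial example, reusing the `model` from the
   * invoke sketch above): prompt values typically come from a prompt template's
   * `formatPromptValue`; the "@langchain/core/prompts" import path is assumed
   * here rather than stated in this file.
   *
   *   import { PromptTemplate } from "@langchain/core/prompts";
   *
   *   const promptValue = await PromptTemplate
   *     .fromTemplate("Summarize in one line: {text}")
   *     .formatPromptValue({ text: "LangChain exposes LLMs as Runnables." });
   *   const result = await model.generatePrompt([promptValue]);
   *   // result.generations[0][0].text holds the completion for the first prompt
   */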
  /**
   * Run the LLM on the given prompts and input.
   */
  abstract _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
  /**
   * Get the parameters used to invoke the model
   */
  invocationParams(_options?: this["ParsedCallOptions"]): any;
  _flattenLLMResult(llmResult: LLMResult): LLMResult[];
  /** @ignore */
  _generateUncached(prompts: string[], parsedOptions: this["ParsedCallOptions"], handledOptions: BaseCallbackConfig, startedRunManagers?: CallbackManagerForLLMRun[]): Promise<LLMResult>;
  _generateCached({
    prompts,
    cache,
    llmStringKey,
    parsedOptions,
    handledOptions,
    runId
  }: {
    prompts: string[];
    cache: BaseCache<Generation[]>;
    llmStringKey: string;
    parsedOptions: any;
    handledOptions: RunnableConfig;
    runId?: string;
  }): Promise<LLMResult & {
    missingPromptIndices: number[];
    startedRunManagers?: CallbackManagerForLLMRun[];
  }>;
  /**
   * Run the LLM on the given prompts and input, handling caching.
   */
  generate(prompts: string[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
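  /*
   * Sketch of `generate` (editorial example): batches several string prompts in
   * one call and returns an LLMResult whose `generations` array lines up with
   * the input prompts. When a cache is configured on the model, cached prompts
   * are served from it and only the misses reach `_generate`.
   *
   *   const result = await model.generate([
   *     "Translate 'hello' to German.",
   *     "Translate 'hello' to Spanish."
   *   ]);
   *   for (const [i, gens] of result.generations.entries()) {
   *     console.log(i, gens[0].text);
   *   }
   *   // result.llmOutput may carry provider-specific metadata such as token usage
   */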
  /**
   * Get the identifying parameters of the LLM.
   */
  _identifyingParams(): Record<string, any>;
  /**
   * Return the string type key uniquely identifying this class of LLM.
   */
  abstract _llmType(): string;
  _modelType(): string;
}
/**
 * LLM class that provides a simpler interface to subclass than {@link BaseLLM}.
 *
 * Requires only implementing a simpler {@link _call} method instead of {@link _generate}.
 *
 * @augments BaseLLM
 */
declare abstract class LLM<CallOptions extends BaseLLMCallOptions = BaseLLMCallOptions> extends BaseLLM<CallOptions> {
  /**
   * Run the LLM on the given prompt and input.
   */
  abstract _call(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
  _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
}
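/*
 * Minimal subclass sketch (editorial example): the LLM base class only requires
 * `_call` and `_llmType`; BaseLLM subclasses would implement `_generate` instead.
 * The class name, the `n` field, and the truncation behaviour are illustrative
 * assumptions, loosely following the custom-LLM pattern from the LangChain.js docs.
 *
 *   import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
 *
 *   interface CustomLLMFields extends BaseLLMParams {
 *     n: number;
 *   }
 *
 *   class CustomLLM extends LLM {
 *     n: number;
 *
 *     constructor(fields: CustomLLMFields) {
 *       super(fields);
 *       this.n = fields.n;
 *     }
 *
 *     _llmType(): string {
 *       return "custom";
 *     }
 *
 *     // Echo back the first `n` characters of the prompt.
 *     async _call(prompt: string, _options: this["ParsedCallOptions"]): Promise<string> {
 *       return prompt.slice(0, this.n);
 *     }
 *   }
 *
 *   const model = new CustomLLM({ n: 32 });
 */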
//#endregion
export { BaseLLM, BaseLLMCallOptions, BaseLLMParams, LLM, SerializedLLM };
//# sourceMappingURL=llms.d.ts.map