UNPKG

@langchain/openai

Version:
1 lines 19.1 kB
{"version":3,"file":"base.d.ts","names":["_langchain_core_utils_stream0","OpenAI","ClientOptions","OpenAIClient","AIMessageChunk","BaseMessage","ChatGeneration","BaseChatModel","LangSmithParams","BaseChatModelParams","BaseChatModelCallOptions","BaseFunctionCallOptions","BaseLanguageModelInput","StructuredOutputMethodOptions","ModelProfile","Runnable","InteropZodType","OpenAICallOptions","OpenAIChatInput","OpenAICoreRequestOptions","ChatOpenAIResponseFormat","ResponseFormatConfiguration","OpenAIVerbosityParam","OpenAIApiKey","OpenAICacheRetentionParam","OpenAIToolChoice","ChatOpenAIToolType","ResponsesToolChoice","OpenAILLMOutput","BaseChatOpenAICallOptions","Chat","ChatCompletionStreamOptions","ChatCompletionModality","Array","ChatCompletionAudioParam","ChatCompletionPredictionContent","Reasoning","ChatCompletionCreateParams","BaseChatOpenAIFields","Partial","BaseChatOpenAI","RunOutput","CallOptions","Record","Omit","ChatCompletionTool","_langchain_core_messages0","MessageStructure","IterableReadableStream","Promise","Function","ChatCompletionFunctionCallOption","ModerationModel","ModerationCreateResponse"],"sources":["../../src/chat_models/base.d.ts"],"sourcesContent":["import OpenAI, { type ClientOptions, OpenAI as OpenAIClient } from \"openai\";\nimport { AIMessageChunk, type BaseMessage } from \"@langchain/core/messages\";\nimport { type ChatGeneration } from \"@langchain/core/outputs\";\nimport { BaseChatModel, type LangSmithParams, type BaseChatModelParams, BaseChatModelCallOptions } from \"@langchain/core/language_models/chat_models\";\nimport { type BaseFunctionCallOptions, type BaseLanguageModelInput, type StructuredOutputMethodOptions } from \"@langchain/core/language_models/base\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport { type OpenAICallOptions, type OpenAIChatInput, type OpenAICoreRequestOptions, type ChatOpenAIResponseFormat, ResponseFormatConfiguration, OpenAIVerbosityParam, type OpenAIApiKey, OpenAICacheRetentionParam } from \"../types.js\";\nimport { OpenAIToolChoice, ChatOpenAIToolType, ResponsesToolChoice } from \"../utils/tools.js\";\ninterface OpenAILLMOutput {\n tokenUsage: {\n completionTokens?: number;\n promptTokens?: number;\n totalTokens?: number;\n };\n}\nexport type { OpenAICallOptions, OpenAIChatInput };\nexport interface BaseChatOpenAICallOptions extends BaseChatModelCallOptions, BaseFunctionCallOptions {\n /**\n * Additional options to pass to the underlying axios request.\n */\n options?: OpenAICoreRequestOptions;\n /**\n * A list of tools that the model may use to generate responses.\n * Each tool can be a function, a built-in tool, or a custom tool definition.\n * If not provided, the model will not use any tools.\n */\n tools?: ChatOpenAIToolType[];\n /**\n * Specifies which tool the model should use to respond.\n * Can be an {@link OpenAIToolChoice} or a {@link ResponsesToolChoice}.\n * If not set, the model will decide which tool to use automatically.\n */\n tool_choice?: OpenAIToolChoice | ResponsesToolChoice;\n /**\n * Adds a prompt index to prompts passed to the model to track\n * what prompt is being used for a given generation.\n */\n promptIndex?: number;\n /**\n * An object specifying the format that the model must output.\n */\n response_format?: ChatOpenAIResponseFormat;\n /**\n * When provided, the completions API will make a best effort to sample\n * deterministically, such that repeated requests with the same `seed`\n * and parameters should return the same result.\n */\n seed?: number;\n /**\n * Additional options to pass to streamed completions.\n * If provided, this takes precedence over \"streamUsage\" set at\n * initialization time.\n */\n stream_options?: OpenAIClient.Chat.ChatCompletionStreamOptions;\n /**\n * The model may choose to call multiple functions in a single turn. You can\n * set parallel_tool_calls to false which ensures only one tool is called at most.\n * [Learn more](https://platform.openai.com/docs/guides/function-calling#parallel-function-calling)\n */\n parallel_tool_calls?: boolean;\n /**\n * If `true`, model output is guaranteed to exactly match the JSON Schema\n * provided in the tool definition. If `true`, the input schema will also be\n * validated according to\n * https://platform.openai.com/docs/guides/structured-outputs/supported-schemas.\n *\n * If `false`, input schema will not be validated and model output will not\n * be validated.\n *\n * If `undefined`, `strict` argument will not be passed to the model.\n */\n strict?: boolean;\n /**\n * Output types that you would like the model to generate for this request. Most\n * models are capable of generating text, which is the default:\n *\n * `[\"text\"]`\n *\n * The `gpt-4o-audio-preview` model can also be used to\n * [generate audio](https://platform.openai.com/docs/guides/audio). To request that\n * this model generate both text and audio responses, you can use:\n *\n * `[\"text\", \"audio\"]`\n */\n modalities?: Array<OpenAIClient.Chat.ChatCompletionModality>;\n /**\n * Parameters for audio output. Required when audio output is requested with\n * `modalities: [\"audio\"]`.\n * [Learn more](https://platform.openai.com/docs/guides/audio).\n */\n audio?: OpenAIClient.Chat.ChatCompletionAudioParam;\n /**\n * Static predicted output content, such as the content of a text file that is being regenerated.\n * [Learn more](https://platform.openai.com/docs/guides/latency-optimization#use-predicted-outputs).\n */\n prediction?: OpenAIClient.ChatCompletionPredictionContent;\n /**\n * Options for reasoning models.\n *\n * Note that some options, like reasoning summaries, are only available when using the responses\n * API. If these options are set, the responses API will be used to fulfill the request.\n *\n * These options will be ignored when not using a reasoning model.\n */\n reasoning?: OpenAIClient.Reasoning;\n /**\n * Service tier to use for this request. Can be \"auto\", \"default\", or \"flex\"\n * Specifies the service tier for prioritization and latency optimization.\n */\n service_tier?: OpenAIClient.Chat.ChatCompletionCreateParams[\"service_tier\"];\n /**\n * Used by OpenAI to cache responses for similar requests to optimize your cache\n * hit rates. Replaces the `user` field.\n * [Learn more](https://platform.openai.com/docs/guides/prompt-caching).\n */\n promptCacheKey?: string;\n /**\n * Used by OpenAI to set cache retention time\n */\n promptCacheRetention?: OpenAICacheRetentionParam;\n /**\n * The verbosity of the model's response.\n */\n verbosity?: OpenAIVerbosityParam;\n}\nexport interface BaseChatOpenAIFields extends Partial<OpenAIChatInput>, BaseChatModelParams {\n /**\n * Optional configuration options for the OpenAI client.\n */\n configuration?: ClientOptions;\n}\n/** @internal */\nexport declare abstract class BaseChatOpenAI<CallOptions extends BaseChatOpenAICallOptions> extends BaseChatModel<CallOptions, AIMessageChunk> implements Partial<OpenAIChatInput> {\n temperature?: number;\n topP?: number;\n frequencyPenalty?: number;\n presencePenalty?: number;\n n?: number;\n logitBias?: Record<string, number>;\n model: string;\n modelKwargs?: OpenAIChatInput[\"modelKwargs\"];\n stop?: string[];\n stopSequences?: string[];\n user?: string;\n timeout?: number;\n streaming: boolean;\n streamUsage: boolean;\n maxTokens?: number;\n logprobs?: boolean;\n topLogprobs?: number;\n apiKey?: OpenAIApiKey;\n organization?: string;\n __includeRawResponse?: boolean;\n /** @internal */\n client: OpenAIClient;\n /** @internal */\n clientConfig: ClientOptions;\n /**\n * Whether the model supports the `strict` argument when passing in tools.\n * If `undefined` the `strict` argument will not be passed to OpenAI.\n */\n supportsStrictToolCalling?: boolean;\n audio?: OpenAIClient.Chat.ChatCompletionAudioParam;\n modalities?: Array<OpenAIClient.Chat.ChatCompletionModality>;\n reasoning?: OpenAIClient.Reasoning;\n /**\n * Must be set to `true` in tenancies with Zero Data Retention. Setting to `true` will disable\n * output storage in the Responses API, but this DOES NOT enable Zero Data Retention in your\n * OpenAI organization or project. This must be configured directly with OpenAI.\n *\n * See:\n * https://platform.openai.com/docs/guides/your-data\n * https://platform.openai.com/docs/api-reference/responses/create#responses-create-store\n *\n * @default false\n */\n zdrEnabled?: boolean | undefined;\n /**\n * Service tier to use for this request. Can be \"auto\", \"default\", or \"flex\" or \"priority\".\n * Specifies the service tier for prioritization and latency optimization.\n */\n service_tier?: OpenAIClient.Chat.ChatCompletionCreateParams[\"service_tier\"];\n /**\n * Used by OpenAI to cache responses for similar requests to optimize your cache\n * hit rates.\n * [Learn more](https://platform.openai.com/docs/guides/prompt-caching).\n */\n promptCacheKey: string;\n /**\n * Used by OpenAI to set cache retention time\n */\n promptCacheRetention?: OpenAICacheRetentionParam;\n /**\n * The verbosity of the model's response.\n */\n verbosity?: OpenAIVerbosityParam;\n protected defaultOptions: CallOptions;\n _llmType(): string;\n static lc_name(): string;\n get callKeys(): string[];\n lc_serializable: boolean;\n get lc_secrets(): {\n [key: string]: string;\n } | undefined;\n get lc_aliases(): Record<string, string>;\n get lc_serializable_keys(): string[];\n getLsParams(options: this[\"ParsedCallOptions\"]): LangSmithParams;\n /** @ignore */\n _identifyingParams(): Omit<OpenAIClient.Chat.ChatCompletionCreateParams, \"messages\"> & {\n model_name: string;\n } & ClientOptions;\n /**\n * Get the identifying parameters for the model\n */\n identifyingParams(): Omit<OpenAI.ChatCompletionCreateParams, \"messages\"> & {\n model_name: string;\n } & ClientOptions;\n constructor(fields?: BaseChatOpenAIFields);\n /**\n * Returns backwards compatible reasoning parameters from constructor params and call options\n * @internal\n */\n protected _getReasoningParams(options?: this[\"ParsedCallOptions\"]): OpenAIClient.Reasoning | undefined;\n /**\n * Returns an openai compatible response format from a set of options\n * @internal\n */\n protected _getResponseFormat(resFormat?: CallOptions[\"response_format\"]): ResponseFormatConfiguration | undefined;\n protected _combineCallOptions(additionalOptions?: this[\"ParsedCallOptions\"]): this[\"ParsedCallOptions\"];\n /** @internal */\n _getClientOptions(options: OpenAICoreRequestOptions | undefined): OpenAICoreRequestOptions;\n protected _convertChatOpenAIToolToCompletionsTool(tool: ChatOpenAIToolType, fields?: {\n strict?: boolean;\n }): OpenAIClient.ChatCompletionTool;\n bindTools(tools: ChatOpenAIToolType[], kwargs?: Partial<CallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, CallOptions>;\n stream(input: BaseLanguageModelInput, options?: CallOptions): Promise<import(\"@langchain/core/utils/stream\").IterableReadableStream<AIMessageChunk<import(\"@langchain/core/messages\").MessageStructure>>>;\n invoke(input: BaseLanguageModelInput, options?: CallOptions): Promise<AIMessageChunk<import(\"@langchain/core/messages\").MessageStructure>>;\n /** @ignore */\n _combineLLMOutput(...llmOutputs: OpenAILLMOutput[]): OpenAILLMOutput;\n getNumTokensFromMessages(messages: BaseMessage[]): Promise<{\n totalCount: number;\n countPerMessage: number[];\n }>;\n /** @internal */\n protected _getNumTokensFromGenerations(generations: ChatGeneration[]): Promise<number>;\n /** @internal */\n protected _getEstimatedTokenCountFromPrompt(messages: BaseMessage[], functions?: OpenAIClient.Chat.ChatCompletionCreateParams.Function[], function_call?: \"none\" | \"auto\" | OpenAIClient.Chat.ChatCompletionFunctionCallOption): Promise<number>;\n /**\n * Moderate content using OpenAI's Moderation API.\n *\n * This method checks whether content violates OpenAI's content policy by\n * analyzing text for categories such as hate, harassment, self-harm,\n * sexual content, violence, and more.\n *\n * @param input - The text or array of texts to moderate\n * @param params - Optional parameters for the moderation request\n * @param params.model - The moderation model to use. Defaults to \"omni-moderation-latest\".\n * @param params.options - Additional options to pass to the underlying request\n * @returns A promise that resolves to the moderation response containing results for each input\n *\n * @example\n * ```typescript\n * const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n *\n * // Moderate a single text\n * const result = await model.moderateContent(\"This is a test message\");\n * console.log(result.results[0].flagged); // false\n * console.log(result.results[0].categories); // { hate: false, harassment: false, ... }\n *\n * // Moderate multiple texts\n * const results = await model.moderateContent([\n * \"Hello, how are you?\",\n * \"This is inappropriate content\"\n * ]);\n * results.results.forEach((result, index) => {\n * console.log(`Text ${index + 1} flagged:`, result.flagged);\n * });\n *\n * // Use a specific moderation model\n * const stableResult = await model.moderateContent(\n * \"Test content\",\n * { model: \"omni-moderation-latest\" }\n * );\n * ```\n */\n moderateContent(input: string | string[], params?: {\n model?: OpenAI.ModerationModel;\n options?: OpenAICoreRequestOptions;\n }): Promise<OpenAIClient.ModerationCreateResponse>;\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatOpenAI({ model: \"gpt-4o\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 128000\n * console.log(profile.imageInputs); // true\n * ```\n */\n get profile(): ModelProfile;\n /** @internal */\n protected _getStructuredOutputMethod(config: StructuredOutputMethodOptions<boolean>): string | undefined;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<boolean>): Runnable<BaseLanguageModelInput, RunOutput> | Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n}\n//# sourceMappingURL=base.d.ts.map"],"mappings":";;;;;;;;;;;;;;UAUU4B,eAAAA;;;;IAAAA,WAAAA,CAAAA,EAAe,MAAA;EAQRC,CAAAA;;AAgBCJ,UAhBDI,yBAAAA,SAAkCnB,wBAgBjCe,EAhB2Dd,uBAgB3Dc,CAAAA;EAAmBE;;;EAoDdxB,OAAa2B,CAAAA,EAhEtBX,wBAgE2Ba;EAAxBC;;;;;EAmCUT,KAAAA,CAAAA,EA7FfE,kBA6FeF,EAAAA;EAIXF;;;AA3GoF;AA6GpG;EAAsDJ,WAAAA,CAAAA,EA7FpCO,gBA6FoCP,GA7FjBS,mBA6FiBT;EAIlChB;;;AAJuE;EAO7DsC,WAAAA,CAAAA,EAAAA,MAAc;EAAqBX;;;EAAiGX,eAAAA,CAAAA,EA3F5IE,wBA2F4IF;EAMlJyB;;;;;EAwBJxC,IAAAA,CAAAA,EAAa2B,MAAKI;EACP/B;;;;;EAgCPmB,cAAAA,CAAAA,EA9IKnB,QAAAA,CAAa2B,IAAAA,CAAKC,2BA8IvBT;EACcoB;;;;;EActBxC,mBAAAA,CAAAA,EAAAA,OAAAA;EAIsBD;;;;;;;;;;;EAoBTyB,MAAAA,CAAAA,EAAAA,OAAAA;EAAuCgB;;;;;;;;;;;;EAE1C9B,UAAAA,CAAAA,EAxJDqB,KAwJCrB,CAxJKT,QAAAA,CAAa2B,IAAAA,CAAKE,sBAwJvBpB,CAAAA;EAAkC8B;;;;;EAEKd,KAAAA,CAAAA,EApJ7CzB,QAAAA,CAAa2B,IAAAA,CAAKI,wBAoJ2BN;EAClBvB;;;;EAOmBA,UAAAA,CAAAA,EAvJzCF,QAAAA,CAAagC,+BAuJ4B9B;EAA2BF;;;;;;;;EA8DpCU,SAAAA,CAAAA,EA5MjCV,QAAAA,CAAaiC,SA4MoBvB;EACN8B;;;;EAAqFA,YAAAA,CAAAA,EAxM7GxC,QAAAA,CAAa2B,IAAAA,CAAKO,0BAwM2FM,CAAAA,cAAAA,CAAAA;EAA8B9B;;;;;EAC7F8B,cAAAA,CAAAA,EAAAA,MAAAA;EAAkDF;;;EAA2C5B,oBAAAA,CAAAA,EA/LnIW,yBA+LmIX;EAA+CD;;;EAATG,SAAAA,CAAAA,EA3LpLO,oBA2LoLP;;AAInI4B,UA7LhDL,oBAAAA,SAA6BC,OA6LmBI,CA7LXzB,eA6LWyB,CAAAA,EA7LOlC,mBA6LPkC,CAAAA;EAAkDF;;;EAA2C5B,aAAAA,CAAAA,EAzL1IX,aAyL0IW;;;AAAyCE,uBAtLzKyB,cAsLyKzB,CAAAA,oBAtLtIc,yBAsLsId,CAAAA,SAtLnGR,aAsLmGQ,CAtLrF2B,WAsLqF3B,EAtLxEX,cAsLwEW,CAAAA,YAtL7CwB,OAsL6CxB,CAtLrCG,eAsLqCH,CAAAA,CAAAA;EAAuDH,WAAAA,CAAAA,EAAAA,MAAAA;EACjPP,IAAAA,CAAAA,EAAAA,MAAAA;EACGoC,gBAAAA,CAAAA,EAAAA,MAAAA;EAFqO1B,eAAAA,CAAAA,EAAAA,MAAAA;EAtLjJR,CAAAA,CAAAA,EAAAA,MAAAA;EAAsDgC,SAAAA,CAAAA,EAM1II,MAN0IJ,CAAAA,MAAAA,EAAAA,MAAAA,CAAAA;EAAO,KAAA,EAAA,MAAA;gBAQ/IrB;;;;;;;;;;WAULK;;;;UAIDpB;;gBAEMD;;;;;;UAMNC,QAAAA,CAAa2B,IAAAA,CAAKI;eACbD,MAAM9B,QAAAA,CAAa2B,IAAAA,CAAKE;cACzB7B,QAAAA,CAAaiC;;;;;;;;;;;;;;;;;iBAiBVjC,QAAAA,CAAa2B,IAAAA,CAAKO;;;;;;;;;;yBAUVb;;;;cAIXF;4BACcoB;;;;;;;;oBAQRC;;mDAE+BnC;;wBAE3BoC,KAAKzC,QAAAA,CAAa2B,IAAAA,CAAKO;;MAEzCnC;;;;uBAIiB0C,KAAK3C,MAAAA,CAAOoC;;MAE7BnC;uBACiBoC;;;;;sEAK+CnC,QAAAA,CAAaiC;;;;;2CAKxCM,iCAAiCrB;;;6BAG/CF,uCAAuCA;0DACVO;;MAEpDvB,QAAAA,CAAa0C;mBACAnB,+BAA+Ba,QAAQG,eAAe3B,SAASH,wBAAwBR,gBAAgBsC;gBAC1G9B,kCAAkC8B,cAAcO,QAAoFjD,6BAAAA,CAArCgD,uBAAuB5C,eAAzE0C,yBAAAA,CAA2HC,gBAAAA;gBACxKnC,kCAAkC8B,cAAcO,QAAQ7C,eAAX0C,yBAAAA,CAA6DC,gBAAAA;;mCAEvFnB,oBAAoBA;qCAClBvB,gBAAgB4C;;;;;sDAKC3C,mBAAmB2C;;wDAEjB5C,2BAA2BF,QAAAA,CAAa2B,IAAAA,CAAKO,0BAAAA,CAA2Ba,8CAA8C/C,QAAAA,CAAa2B,IAAAA,CAAKqB,mCAAmCF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;YAwCrNhD,MAAAA,CAAOmD;cACLjC;MACV8B,QAAQ9C,QAAAA,CAAakD;;;;;;;;;;;;;;;;;;iBAkBVvC;;+CAE8BD;yCACN8B,sBAAsBA,mCAAmC3B,eAAeyB,aAAaE,8BAA8B9B,uCAAuCE,SAASH,wBAAwB6B;yCAC3LE,sBAAsBA,mCAAmC3B,eAAeyB,aAAaE,8BAA8B9B,sCAAsCE,SAASH;SAChMP;YACGoC;;yCAE2BE,sBAAsBA,mCAAmC3B,eAAeyB,aAAaE,8BAA8B9B,yCAAyCE,SAASH,wBAAwB6B,aAAa1B,SAASH;SACjPP;YACGoC"}