UNPKG

@sap-ai-sdk/langchain

Version:

SAP Cloud SDK for AI is the official Software Development Kit (SDK) for **SAP AI Core**, **SAP Generative AI Hub**, and **Orchestration Service**.

48 lines 3.14 kB
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatGenerationChunk } from '@langchain/core/outputs';
import { type Runnable } from '@langchain/core/runnables';
import type { InteropZodType } from '@langchain/core/utils/types';
import type { BaseLanguageModelInput, StructuredOutputMethodOptions } from '@langchain/core/language_models/base';
import type { AIMessageChunk, BaseMessage } from '@langchain/core/messages';
import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
import type { ChatResult } from '@langchain/core/outputs';
import type { AzureOpenAiChatCallOptions, AzureOpenAiChatModelParams, ChatAzureOpenAIToolType } from './types.js';
import type { HttpDestinationOrFetchOptions } from '@sap-cloud-sdk/connectivity';
/**
 * LangChain chat client for Azure OpenAI consumption on SAP BTP.
 *
 * Declaration-only file: implementations live in the compiled module.
 * The snake_case option fields below match the names of Azure OpenAI
 * chat-completions request parameters — presumably they are forwarded
 * verbatim to the service (not verifiable from this declaration alone).
 */
export declare class AzureOpenAiChatClient extends BaseChatModel<AzureOpenAiChatCallOptions> {
    /** Sampling temperature; name matches the Azure OpenAI `temperature` parameter. */
    temperature?: number | null;
    /** Nucleus-sampling probability mass; name matches the Azure OpenAI `top_p` parameter. */
    top_p?: number | null;
    /** Per-token bias map; name matches the Azure OpenAI `logit_bias` parameter. */
    logit_bias?: Record<string, any> | null | undefined;
    /** End-user identifier; name matches the Azure OpenAI `user` parameter. */
    user?: string;
    /** Name matches the Azure OpenAI `presence_penalty` parameter. */
    presence_penalty?: number;
    /** Name matches the Azure OpenAI `frequency_penalty` parameter. */
    frequency_penalty?: number;
    /** Stop sequence(s); name matches the Azure OpenAI `stop` parameter. */
    stop?: string | string[];
    /** Completion-length cap; name matches the Azure OpenAI `max_tokens` parameter. */
    max_tokens?: number;
    /** Whether strict tool calling is supported — semantics defined by the implementation, not visible here. */
    supportsStrictToolCalling?: boolean;
    /** Identifier of the deployed model this client talks to. */
    modelName: string;
    /** Underlying SAP AI SDK chat client; concrete type is private to the implementation. */
    private openAiChatClient;
    /**
     * @param fields - Model parameters (see {@link AzureOpenAiChatModelParams}).
     * @param destination - Optional SAP Cloud SDK destination or fetch options used to reach the service.
     */
    constructor(fields: AzureOpenAiChatModelParams, destination?: HttpDestinationOrFetchOptions);
    /** LangChain model-type discriminator (standard `BaseChatModel` hook). */
    _llmType(): string;
    /**
     * Core LangChain generation hook: produce a {@link ChatResult} for the given messages.
     * @param messages - The messages to send to the model.
     * @param options - The parsed call options.
     * @param runManager - The callback manager for the run.
     */
    _generate(messages: BaseMessage[], options: typeof this.ParsedCallOptions, runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
    /**
     * Return a runnable with the given tools bound for tool calling.
     * @param tools - Tools to make available to the model.
     * @param kwargs - Additional call options merged into each invocation.
     */
    bindTools(tools: ChatAzureOpenAIToolType[], kwargs?: Partial<AzureOpenAiChatCallOptions> | undefined): Runnable<BaseLanguageModelInput, AIMessageChunk, AzureOpenAiChatCallOptions>;
    /**
     * Return a runnable that parses model output against the given schema.
     * Overload: `includeRaw` false (or omitted) — yields only the parsed output.
     */
    withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;
    /**
     * Overload: `includeRaw` true — yields both the raw message and the parsed output.
     */
    withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {
        raw: BaseMessage;
        parsed: RunOutput;
    }>;
    /**
     * Overload: `includeRaw` unknown at the call site — union of the two shapes above.
     */
    withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<boolean>): Runnable<BaseLanguageModelInput, RunOutput> | Runnable<BaseLanguageModelInput, {
        raw: BaseMessage;
        parsed: RunOutput;
    }>;
    /**
     * Stream response chunks from the Azure OpenAI client.
     * @param messages - The messages to send to the model.
     * @param options - The call options.
     * @param runManager - The callback manager for the run.
     * @returns An async generator of chat generation chunks.
     */
    _streamResponseChunks(messages: BaseMessage[], options: typeof this.ParsedCallOptions, runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
}
//# sourceMappingURL=chat.d.ts.map