@sap-ai-sdk/langchain
Version:
SAP Cloud SDK for AI is the official Software Development Kit (SDK) for **SAP AI Core**, **SAP Generative AI Hub**, and **Orchestration Service**.
42 lines • 2.94 kB
TypeScript
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatGenerationChunk } from '@langchain/core/outputs';
import { type BaseMessage } from '@langchain/core/messages';
import type { OrchestrationMessageChunk } from './orchestration-message-chunk.js';
import type { BaseLanguageModelInput } from '@langchain/core/language_models/base';
import type { Runnable, RunnableLike } from '@langchain/core/runnables';
import type { ChatResult } from '@langchain/core/outputs';
import type { BaseChatModelParams } from '@langchain/core/language_models/chat_models';
import type { ResourceGroupConfig } from '@sap-ai-sdk/ai-api';
import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
import type { OrchestrationCallOptions, LangChainOrchestrationModuleConfig, ChatOrchestrationToolType } from './types.js';
import type { HttpDestinationOrFetchOptions } from '@sap-cloud-sdk/connectivity';
/**
 * The Orchestration client.
 *
 * A LangChain-compatible chat model declaration backed by the SAP
 * Orchestration Service. Extends LangChain's `BaseChatModel`, using
 * `OrchestrationCallOptions` as the per-call options type and
 * `OrchestrationMessageChunk` as the output message type.
 */
export declare class OrchestrationClient extends BaseChatModel<OrchestrationCallOptions, OrchestrationMessageChunk> {
/** Configuration for the orchestration modules applied to requests made by this client. */
orchestrationConfig: LangChainOrchestrationModuleConfig;
/** LangChain base-model parameters (callbacks, caching, etc.) forwarded to `BaseChatModel`. */
langchainOptions: BaseChatModelParams;
/** Optional AI API resource-group/deployment configuration; undefined means the default is used. */
deploymentConfig?: ResourceGroupConfig | undefined;
/** Optional SAP Cloud SDK destination (or fetch options) used to reach the service; undefined means destination lookup is left to the SDK. */
destination?: HttpDestinationOrFetchOptions | undefined;
/**
 * Create a new Orchestration client.
 * @param orchestrationConfig - Configuration of the orchestration modules.
 * @param langchainOptions - Optional LangChain base chat model parameters.
 * @param deploymentConfig - Optional resource-group/deployment configuration.
 * @param destination - Optional destination or fetch options for connecting to the service.
 */
constructor(orchestrationConfig: LangChainOrchestrationModuleConfig, langchainOptions?: BaseChatModelParams, deploymentConfig?: ResourceGroupConfig | undefined, destination?: HttpDestinationOrFetchOptions | undefined);
/** Returns the LLM type identifier used by LangChain for serialization and tracing. */
_llmType(): string;
/**
 * Create a new runnable sequence that runs each individual runnable in series,
 * piping the output of one runnable into another runnable or runnable-like.
 * @param coerceable - A runnable, function, or object whose values are functions or runnables.
 * @returns A new runnable sequence.
 */
pipe<NewRunOutput>(coerceable: RunnableLike<OrchestrationMessageChunk, NewRunOutput>): Runnable<BaseLanguageModelInput, Exclude<NewRunOutput, Error>, OrchestrationCallOptions>;
/**
 * Generate a chat result for the given messages (LangChain `BaseChatModel` hook).
 * @param messages - The messages to send to the model.
 * @param options - The parsed call options for this invocation.
 * @param runManager - The callback manager for the run.
 * @returns A promise resolving to the chat result.
 */
_generate(messages: BaseMessage[], options: typeof this.ParsedCallOptions, runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
/**
 * Bind tool definitions to the model, returning a runnable that includes them on every call.
 * @param tools - The orchestration tool definitions to bind.
 * @param kwargs - Optional additional call options to bind alongside the tools.
 * @returns A runnable with the tools (and options) pre-bound.
 */
bindTools(tools: ChatOrchestrationToolType[], kwargs?: Partial<OrchestrationCallOptions> | undefined): Runnable<BaseLanguageModelInput, OrchestrationMessageChunk, OrchestrationCallOptions>;
/**
 * Stream response chunks from the Orchestration client.
 * @param messages - The messages to send to the model.
 * @param options - The call options.
 * @param runManager - The callback manager for the run.
 * @returns An async generator of chat generation chunks.
 */
_streamResponseChunks(messages: BaseMessage[], options: typeof this.ParsedCallOptions, runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
// Private helper; presumably merges per-call options into the base orchestration config — implementation not visible here.
private mergeOrchestrationConfig;
}
//# sourceMappingURL=client.d.ts.map