// Package: @langchain/azure-openai
// Azure SDK for OpenAI integrations for LangChain.js.
// (TypeScript type declarations; this package is deprecated — import from "@langchain/openai" instead.)
import { type OpenAIClientOptions as AzureOpenAIClientOptions } from "@azure/openai";
import { BaseLLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { GenerationChunk, type LLMResult } from "@langchain/core/outputs";
import { AzureOpenAIInput, OpenAICallOptions, OpenAIInput } from "./types.js";
/**
* Interface for tracking token usage in OpenAI calls.
*/
export interface TokenUsage {
/** Number of tokens generated in the completion. */
completionTokens?: number;
/** Number of tokens consumed by the prompt. */
promptTokens?: number;
/** Total tokens for the call — presumably promptTokens + completionTokens; not computed here, confirm against the provider response. */
totalTokens?: number;
}
/** @deprecated Import from "@langchain/openai" instead. */
/** @deprecated Import from "@langchain/openai" instead. */
export declare class AzureOpenAI<CallOptions extends OpenAICallOptions = OpenAICallOptions> extends BaseLLM<CallOptions> implements OpenAIInput, AzureOpenAIInput {
/** Stable class name used by LangChain's serialization/registry machinery. */
static lc_name(): string;
/** Keys recognized in per-invocation call options (merged over constructor settings). */
get callKeys(): string[];
/** Whether instances of this class participate in LangChain serialization. */
lc_serializable: boolean;
/** Map of constructor attribute names to secret identifiers so credentials are redacted when serialized — NOTE(review): exact mapping not visible here; confirm against the implementation. */
get lc_secrets(): {
[key: string]: string;
} | undefined;
/** Map of attribute names to their aliases in the serialized form. */
get lc_aliases(): Record<string, string>;
/** Sampling temperature passed to the completions API (see OpenAIInput). */
temperature: number;
/** Maximum number of tokens to generate per completion. */
maxTokens: number;
/** Nucleus-sampling probability mass. */
topP: number;
/** Penalty applied to tokens based on their frequency so far. */
frequencyPenalty: number;
/** Penalty applied to tokens based on whether they have appeared so far. */
presencePenalty: number;
/** Number of completions to generate per prompt. */
n: number;
/** Generate `bestOf` completions server-side and return the best — optional. */
bestOf?: number;
/** Per-token-id logit bias adjustments. */
logitBias?: Record<string, number>;
/** Model name — presumably kept in sync with `model` (legacy/current alias pair); confirm in the implementation. */
modelName: string;
/** Model (deployment) identifier sent to the API. */
model: string;
/** Extra model parameters forwarded verbatim to the API (see OpenAIInput["modelKwargs"]). */
modelKwargs?: OpenAIInput["modelKwargs"];
/** Number of prompts sent per underlying API request when batching. */
batchSize: number;
/** Request timeout in milliseconds — TODO confirm unit; not visible here. */
timeout?: number;
/** Stop sequences — legacy field; see also `stopSequences`. */
stop?: string[];
/** Stop sequences at which generation halts. */
stopSequences?: string[];
/** End-user identifier forwarded to the API for abuse monitoring. */
user?: string;
/** Whether to stream tokens as they are generated. */
streaming: boolean;
/** Azure OpenAI API key — legacy field; see also `apiKey`. */
azureOpenAIApiKey?: string;
/** API key used to authenticate against the Azure OpenAI endpoint. */
apiKey?: string;
/** Base URL of the Azure OpenAI resource endpoint. */
azureOpenAIEndpoint?: string;
/** Name of the Azure OpenAI deployment to call. */
azureOpenAIApiDeploymentName?: string;
/** Include log probabilities of the `logprobs` most likely tokens. */
logprobs?: number;
/** Echo the prompt back in addition to the completion. */
echo?: boolean;
/** Underlying client instance — presumably an @azure/openai OpenAIClient (its options type is imported above); confirm in the implementation. */
private client;
/**
 * @param fields Combined OpenAI/Azure settings, base LLM params, and an
 *   optional `configuration` forwarded to the @azure/openai client.
 */
constructor(fields?: Partial<OpenAIInput> & Partial<AzureOpenAIInput> & BaseLLMParams & {
configuration?: AzureOpenAIClientOptions;
});
/** Streams generation chunks for a single prompt (BaseLLM streaming hook). */
_streamResponseChunks(input: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
/** Generates completions for a batch of prompts (BaseLLM core hook). */
_generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
/** Returns the LLM type tag used by LangChain (e.g. for tracing) — exact value not visible here. */
_llmType(): string;
}