aiwrapper
A Universal AI Wrapper for JavaScript & TypeScript
import { LangChatMessages, LangResultWithMessages, LangResultWithString, LanguageProvider } from "../language-provider";
import { Model } from "aimodels";
export type ReasoningEffort = "low" | "medium" | "high";
export type OpenAILikeConfig = {
    apiKey?: string;
    model: string;
    systemPrompt: string;
    maxTokens?: number;
    maxCompletionTokens?: number;
    baseURL: string;
    headers?: Record<string, string>;
    bodyProperties?: Record<string, unknown>;
    reasoningEffort?: ReasoningEffort;
};
export type ReasoningTokenDetails = {
    reasoningTokens?: number;
    audioTokens?: number;
    acceptedPredictionTokens?: number;
    rejectedPredictionTokens?: number;
};
export type TokenUsageDetails = {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
    promptTokensDetails?: {
        cachedTokens?: number;
        audioTokens?: number;
    };
    completionTokensDetails?: ReasoningTokenDetails;
};
export declare class OpenAILikeLang extends LanguageProvider {
    protected _config: OpenAILikeConfig;
    protected modelInfo?: Model;
    constructor(config: OpenAILikeConfig);
    /**
     * Creates an instance of OpenAILikeLang for custom OpenAI-compatible APIs
     * @param options Configuration options for the custom API
     * @returns A new OpenAILikeLang instance
     */
    static custom(options: {
        apiKey?: string;
        model: string;
        baseURL: string;
        systemPrompt?: string;
        maxTokens?: number;
        maxCompletionTokens?: number;
        headers?: Record<string, string>;
        bodyProperties?: Record<string, unknown>;
        reasoningEffort?: ReasoningEffort;
    }): OpenAILikeLang;
    ask(prompt: string, onResult?: (result: LangResultWithString) => void): Promise<LangResultWithString>;
    protected transformMessages(messages: LangChatMessages): LangChatMessages;
    protected transformBody(body: Record<string, unknown>): Record<string, unknown>;
    /**
     * Checks if the current model has reasoning capabilities
     * @returns True if the model supports reasoning, false otherwise
     */
    supportsReasoning(): boolean;
    chat(messages: LangChatMessages, onResult?: (result: LangResultWithMessages) => void): Promise<LangResultWithMessages>;
    /**
     * Handles streaming data from the API response
     * This method can be overridden by subclasses to add custom handling for different response formats
     * @param data The current data chunk from the stream
     * @param result The result object being built
     * @param messages The original messages array
     * @param onResult Optional callback for streaming results
     */
    protected handleStreamData(data: any, result: LangResultWithMessages, messages: LangChatMessages, onResult?: (result: LangResultWithMessages) => void): void;
    /**
     * Sets the reasoning effort level for the model
     * @param effort The reasoning effort level: "low", "medium", or "high"
     * @returns this instance for method chaining
     */
    setReasoningEffort(effort: ReasoningEffort): OpenAILikeLang;
    /**
     * Gets the current reasoning effort level
     * @returns The current reasoning effort level or undefined if not set
     */
    getReasoningEffort(): ReasoningEffort | undefined;
    /**
     * Sets the maximum number of tokens (including reasoning tokens) that can be generated
     * This is specific to reasoning models and controls the total token output
     * @param maxTokens The maximum number of tokens to generate
     * @returns this instance for method chaining
     */
    setMaxCompletionTokens(maxTokens: number): OpenAILikeLang;
    /**
     * Gets the current maximum completion tokens setting
     * @returns The current maximum completion tokens or undefined if not set
     */
    getMaxCompletionTokens(): number | undefined;
}
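
// --- Usage sketches (not part of the declaration above) ---
// A minimal sketch of creating a provider for an OpenAI-compatible endpoint with
// OpenAILikeLang.custom() and calling ask() with a streaming callback. The import
// path, endpoint URL, model id, and the `answer` field on the result are
// assumptions; check your install of aiwrapper for the exact exports and result shape.
import { OpenAILikeLang } from "aiwrapper";

async function askExample(): Promise<void> {
    const lang = OpenAILikeLang.custom({
        apiKey: process.env.OPENAI_API_KEY,           // optional, e.g. for keyless local servers
        model: "gpt-4o-mini",                         // placeholder model id
        baseURL: "https://api.openai.com/v1",         // any OpenAI-compatible base URL
        systemPrompt: "You are a concise assistant.",
    });

    const result = await lang.ask("Explain streaming responses in one sentence.", (partial) => {
        // Called repeatedly as tokens stream in; `answer` is the assumed text field
        // on LangResultWithString.
        console.log(partial.answer);
    });

    console.log(result.answer);
}

askExample().catch(console.error);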
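
// A sketch of a multi-turn conversation with chat(). The { role, content } message
// shape is assumed for LangChatMessages (it mirrors the OpenAI chat format), and the
// endpoint and model id are placeholders.
async function chatExample(): Promise<void> {
    const lang = OpenAILikeLang.custom({
        model: "llama-3.1-8b-instruct",               // placeholder model id
        baseURL: "http://localhost:11434/v1",         // e.g. a local OpenAI-compatible server
    });

    const messages = [
        { role: "system", content: "You are a helpful assistant." },
        { role: "user", content: "What is an OpenAI-compatible API?" },
    ];

    const result = await lang.chat(messages, (partial) => {
        // Receives the partially built LangResultWithMessages while streaming.
        console.log(partial.answer);
    });

    // `messages` on the result is assumed to hold the updated conversation history.
    console.log(result.messages);
}

chatExample().catch(console.error);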
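
// A sketch of the reasoning-related controls: setReasoningEffort() and
// setMaxCompletionTokens() return this instance, so they can be chained, and
// supportsReasoning() can gate them for models without reasoning support.
// The model id and endpoint are placeholders.
const reasoningLang = OpenAILikeLang.custom({
    model: "o3-mini",                                 // placeholder reasoning-capable model id
    baseURL: "https://api.openai.com/v1",
});

if (reasoningLang.supportsReasoning()) {
    reasoningLang
        .setReasoningEffort("high")                   // "low" | "medium" | "high"
        .setMaxCompletionTokens(4096);                // cap on generated tokens, reasoning included
}

console.log(reasoningLang.getReasoningEffort());      // "high" or undefined
console.log(reasoningLang.getMaxCompletionTokens());  // 4096 or undefined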