// uniai — bundled type declarations
// Version: (unspecified in this extract)
// "To unify AI models!"
// 1,672 lines (1,644 loc) • 55.2 kB
// text/typescript
import { ReasoningEffort, ChatCompletion, ChatCompletionChunk, EmbeddingCreateParams, CreateEmbeddingResponse, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, ImagesResponse, ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam, ChatCompletionToolMessageParam, ChatCompletionDeveloperMessageParam, ChatCompletionContentPart } from 'openai/resources';
import { ImageGenerateParamsBase } from 'openai/resources/images.js';
import * as stream from 'stream';
/** @format */
declare enum ChatModelProvider {
OpenAI = "openai",
Anthropic = "anthropic",
DeepSeek = "deepseek",
IFlyTek = "iflytek",
Baidu = "baidu",
Google = "google",
GLM = "glm",
MoonShot = "moonshot",
AliYun = "aliyun",
XAI = "xai",
ARK = "ark",// 火山
Other = "other"
}
declare enum EmbedModelProvider {
OpenAI = "openai",
Google = "google",
GLM = "glm",
AliYun = "aliyun",
Other = "other"
}
declare enum ImagineModelProvider {
OpenAI = "openai",
MidJourney = "midjourney",
StabilityAI = "stability.ai",
IFlyTek = "iflytek"
}
type ModelProvider = ChatModelProvider | EmbedModelProvider | ImagineModelProvider;
declare const ModelProvider: {
OpenAI: ImagineModelProvider.OpenAI;
MidJourney: ImagineModelProvider.MidJourney;
StabilityAI: ImagineModelProvider.StabilityAI;
IFlyTek: ImagineModelProvider.IFlyTek;
Google: EmbedModelProvider.Google;
GLM: EmbedModelProvider.GLM;
AliYun: EmbedModelProvider.AliYun;
Other: EmbedModelProvider.Other;
Anthropic: ChatModelProvider.Anthropic;
DeepSeek: ChatModelProvider.DeepSeek;
Baidu: ChatModelProvider.Baidu;
MoonShot: ChatModelProvider.MoonShot;
XAI: ChatModelProvider.XAI;
ARK: ChatModelProvider.ARK;
};
declare enum OpenAIEmbedModel {
ADA = "text-embedding-ada-002",
LARGE = "text-embedding-3-large",
SMALL = "text-embedding-3-small"
}
declare enum OtherEmbedModel {
BGE_M3 = "bge-m3",
BASE_CHN = "text2vec-base-chinese",
LARGE_CHN = "text2vec-large-chinese",
BASE_CHN_PARAPH = "text2vec-base-chinese-paraphrase",
BASE_CHN_SENTENCE = "text2vec-base-chinese-sentence",
BASE_MUL = "text2vec-base-multilingual",
PARAPH_MUL_MINI = "paraphrase-multilingual-MiniLM-L12-v2"
}
declare enum GLMEmbedModel {
EMBED_2 = "embedding-2",
EMBED_3 = "embedding-3"
}
declare enum GoogleEmbedModel {
GEM_EMBED = "gemini-embedding-001"
}
declare enum AliEmbedModel {
ALI_V3 = "text-embedding-v3",
ALI_V2 = "text-embedding-v2",
ALI_V1 = "text-embedding-v1",
ALI_ASYNC_V2 = "text-embedding-async-v2",
ALI_ASYNC_V1 = "text-embedding-async-v1"
}
type EmbedModel = OpenAIEmbedModel | OtherEmbedModel | GLMEmbedModel | GoogleEmbedModel | AliEmbedModel | string;
declare const EmbedModel: {
ALI_V3: AliEmbedModel.ALI_V3;
ALI_V2: AliEmbedModel.ALI_V2;
ALI_V1: AliEmbedModel.ALI_V1;
ALI_ASYNC_V2: AliEmbedModel.ALI_ASYNC_V2;
ALI_ASYNC_V1: AliEmbedModel.ALI_ASYNC_V1;
GEM_EMBED: GoogleEmbedModel.GEM_EMBED;
EMBED_2: GLMEmbedModel.EMBED_2;
EMBED_3: GLMEmbedModel.EMBED_3;
BGE_M3: OtherEmbedModel.BGE_M3;
BASE_CHN: OtherEmbedModel.BASE_CHN;
LARGE_CHN: OtherEmbedModel.LARGE_CHN;
BASE_CHN_PARAPH: OtherEmbedModel.BASE_CHN_PARAPH;
BASE_CHN_SENTENCE: OtherEmbedModel.BASE_CHN_SENTENCE;
BASE_MUL: OtherEmbedModel.BASE_MUL;
PARAPH_MUL_MINI: OtherEmbedModel.PARAPH_MUL_MINI;
ADA: OpenAIEmbedModel.ADA;
LARGE: OpenAIEmbedModel.LARGE;
SMALL: OpenAIEmbedModel.SMALL;
};
declare enum OpenAIChatModel {
GPT3 = "gpt-3.5-turbo",
GPT4 = "gpt-4",
GPT4_TURBO = "gpt-4-turbo",
GPT_4O_MINI = "gpt-4o-mini",
CHAT_GPT_4O = "chatgpt-4o-latest",
GPT_4O = "gpt-4o",
GPT_4O_AUDIO = "gpt-4o-audio-preview",
GPT_4_1 = "gpt-4.1",
GPT_4_1_MINI = "gpt-4.1-mini",
GPT_4_1_NANO = "gpt-4.1-nano",
GPT_5 = "gpt-5",
GPT_5_CODE = "gpt-5-codex",
GPT_5_1_CODE = "gpt-5.1-codex",
GPT_5_1_CODE_MAX = "gpt-5.1-codex-max",
GPT_5_CHAT = "gpt-5-chat-latest",
GPT_5_PRO = "gpt-5-pro",
GPT_5_MINI = "gpt-5-mini",
GPT_5_NANO = "gpt-5-nano",
GPT_5_1 = "gpt-5.1",
GPT_5_1_CHAT = "gpt-5.1-chat-latest",
O1 = "o1",
O1_MINI = "o1-mini",
O1_PRO = "o1-pro",
O3 = "o3",
O3_PRO = "o3-pro",
O3_DEEP = "o3-deep-research",
O3_MINI = "o3-mini",
O4_MINI = "o4-mini",
O4_DEEP = "o4-mini-deep-research",
GPT_OSS_120B = "gpt-oss-120b",
GPT_OSS_20B = "gpt-oss-20b"
}
declare enum AnthropicChatModel {
CLAUDE_4_5_SONNET = "claude-sonnet-4-5",
CLAUDE_4_5_HAIKU = "claude-haiku-4-5",
CLAUDE_4_5_OPUS = "claude-opus-4-5",
CLAUDE_4_1_OPUS = "claude-opus-4-1",
CLAUDE_4_OPUS = "claude-opus-4-0",
CLAUDE_4_SONNET = "claude-sonnet-4-0",
CLAUDE_3_7_SONNET = "claude-3-7-sonnet-latest",
CLAUDE_3_5_HAIKU = "claude-3-5-haiku-latest",
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
}
declare enum DeepSeekChatModel {
DEEPSEEK_V3 = "deepseek-chat",
DEEPSEEK_R1 = "deepseek-reasoner"
}
declare enum GoogleChatModel {
GEM_FLASH_2 = "gemini-2.0-flash",
GEM_FLASH_2_LITE = "gemini-2.0-flash-lite",
GEM_PRO_2_5 = "gemini-2.5-pro",
GEM_FLASH_2_5 = "gemini-2.5-flash",
GEM_FLASH_2_5_LITE = "gemini-2.5-flash-lite"
}
declare enum GLMChatModel {
GLM_3_TURBO = "glm-3-turbo",
GLM_4 = "glm-4",
GLM_4_AIR = "glm-4-air",
GLM_4_AIRX = "glm-4-airx",
GLM_4_FLASH = "glm-4-flash",
GLM_4_FLASHX = "glm-4-flashx",
GLM_4V = "glm-4v",
GLM_4V_PLUS = "glm-4v-plus",
GLM_4_LONG = "glm-4-long",
GLM_4_PLUS = "glm-4-plus"
}
declare enum BaiduChatModel {
ERNIE_3_5 = "completions",// 'ernie-3.5-8k', // ERNIE 3.5 8K
ERNIE_3_5_PRE = "ernie-3.5-8k-preview",// ERNIE 3.5 8K Preview
ERNIE_3_5_128K = "ernie-3.5-128k",// ERNIE 3.5 128K
ERNIE_4_0_LATEST = "ernie-4.0-8k-latest",// ERNIE 4.0 8K Latest
ERNIE_4_0_PREVIEW = "ernie-4.0-8k-preview",// ERNIE 4.0 8K Preview
ERNIE_4_0_8K = "completions_pro",// 'ernie-4.0-8k', // ERNIE 4.0 8K
ERNIE_4_0_TURBO_LATEST = "ernie-4.0-turbo-8k-latest",// ERNIE 4.0 Turbo 8K Latest
ERNIE_4_0_TURBO_PREVIEW = "ernie-4.0-turbo-8k-preview",// ERNIE 4.0 Turbo 8K Preview
ERNIE_4_0_TURBO_8K = "ernie-4.0-turbo-8k",// ERNIE 4.0 Turbo 8K
ERNIE_4_0_TURBO_128K = "ernie-4.0-turbo-128k",// ERNIE 4.0 Turbo 128K
ERNIE_SPEED_8K = "ernie_speed",// ERNIE Speed 8K
ERNIE_SPEED_128K = "ernie-speed-128k",// ERNIE Speed 128K
ERNIE_SPEED_PRO_128K = "ernie-speed-pro-128k",// ERNIE Speed Pro 128K
ERNIE_LITE_8K = "ernie-lite-8k",// ERNIE Lite 8K
ERNIE_LITE_PRO_128K = "ernie-lite-pro-128k",// ERNIE Lite Pro 128K
ERNIE_TINY_8K = "ernie-tiny-8k",// ERNIE Tiny 8K
ERNIE_CHAR_8K = "ernie-char-8k",// ERNIE Character 8K
ERNIE_CHAR_FICTION_8K = "ernie-char-fiction-8k",// ERNIE Character Fiction 8K
ERNIE_NOVEL_8K = "ernie-novel-8k"
}
declare enum IFlyTekChatModel {
SPARK_LITE = "lite",
SPARK_PRO = "generalv3",
SPARK_PRO_128K = "pro-128k",
SPARK_MAX = "generalv3.5",
SPARK_MAX_32K = "max-32k",
SPARK_ULTRA = "4.0Ultra"
}
declare enum MoonShotChatModel {
KIMI_K2_0711_PREVIEW = "kimi-k2-0711-preview",
MOON_V1_8K = "moonshot-v1-8k",
MOON_V1_32K = "moonshot-v1-32k",
MOON_V1_128K = "moonshot-v1-128k",
MOON_V1_AUTO = "moonshot-v1-auto",
KIMI_LATEST = "kimi-latest",
MOON_V1_8K_VISION_PREVIEW = "moonshot-v1-8k-vision-preview",
MOON_V1_32K_VISION_PREVIEW = "moonshot-v1-32k-vision-preview",
MOON_V1_128K_VISION_PREVIEW = "moonshot-v1-128k-vision-preview",
KIMI_THINKING_PREVIEW = "kimi-thinking-preview"
}
declare enum AliChatModel {
QWEN3_MAX = "qwen3-max",
QWEN_MAX = "qwen-max",
QWEN_PLUS = "qwen-plus",
QWEN_FLASH = "qwen-flash",
QWEN_TURBO = "qwen-turbo",
QWQ_PLUS = "qwq-plus",
QVQ_MAX = "qvq-max",
QVQ_PLUS = "qvq-plus",
QWEN_LONG = "qwen-long",
QWEN_CODE_TURBO = "qwen-coder-turbo",
QWEN_CODE_PLUS = "qwen3-coder-plus",
QWEN_CODE_FLASH = "qwen3-coder-flash",
QWEN_MATH = "qwen-math-plus",
QWEN_VL_MAX = "qwen-vl-max",
QWEN_VL_PLUS = "qwen-vl-plus"
}
declare enum XAIChatModel {
GROK_CODE_FAST_1 = "grok-code-fast-1",
GROK4_FAST_REASONING = "grok-4-fast-reasoning",
GROK4_FAST_NON_REASONING = "grok-4-fast-non-reasoning",
GROK4_0709 = "grok-4-0709",
GROK3_MINI = "grok-3-mini",
GROK3 = "grok-3",
GROK2_VISION_1212_US_EAST_1 = "grok-2-vision-1212us-east-1",
GROK2_VISION_1212_EU_WEST_1 = "grok-2-vision-1212eu-west-1"
}
declare enum ArkChatModel {
DOUBAO_1_5_THINKING_PRO = "doubao-1-5-thinking-pro-m-250428",
DOUBAO_1_5_THINKING_VISION_PRO = "doubao-1-5-thinking-vision-pro-250428",
DOUBAO_SEED_1_6 = "doubao-seed-1-6-251015",
DOUBAO_SEED_1_6_VISION = "doubao-seed-1-6-vision-250815",
DOUBAO_SEED_1_6_FLASH = "doubao-seed-1-6-flash-250828",
DOUBAO_SEED_1_6_LITE = "doubao-seed-1-6-lite-251015",
DOUBAO_SEED_1_6_THINKING = "doubao-seed-1-6-thinking-250715",
DOUBAO_SEED_CODE = "doubao-seed-code-preview-251028",
DEEPSEEK_V3 = "deepseek-v3-250324",
DEEPSEEK_V3_1 = "deepseek-v3-1-250821",
DEEPSEEK_V3_2 = "deepseek-v3-2-251201",
KIMI_K2 = "kimi-k2-250905",
KIMI_K2_THINK = "kimi-k2-thinking-251104"
}
type ChatModel = OpenAIChatModel | AnthropicChatModel | DeepSeekChatModel | BaiduChatModel | GLMChatModel | IFlyTekChatModel | GoogleChatModel | MoonShotChatModel | AliChatModel | XAIChatModel | ArkChatModel | string;
declare const ChatModel: {
DOUBAO_1_5_THINKING_PRO: ArkChatModel.DOUBAO_1_5_THINKING_PRO;
DOUBAO_1_5_THINKING_VISION_PRO: ArkChatModel.DOUBAO_1_5_THINKING_VISION_PRO;
DOUBAO_SEED_1_6: ArkChatModel.DOUBAO_SEED_1_6;
DOUBAO_SEED_1_6_VISION: ArkChatModel.DOUBAO_SEED_1_6_VISION;
DOUBAO_SEED_1_6_FLASH: ArkChatModel.DOUBAO_SEED_1_6_FLASH;
DOUBAO_SEED_1_6_LITE: ArkChatModel.DOUBAO_SEED_1_6_LITE;
DOUBAO_SEED_1_6_THINKING: ArkChatModel.DOUBAO_SEED_1_6_THINKING;
DOUBAO_SEED_CODE: ArkChatModel.DOUBAO_SEED_CODE;
DEEPSEEK_V3: ArkChatModel.DEEPSEEK_V3;
DEEPSEEK_V3_1: ArkChatModel.DEEPSEEK_V3_1;
DEEPSEEK_V3_2: ArkChatModel.DEEPSEEK_V3_2;
KIMI_K2: ArkChatModel.KIMI_K2;
KIMI_K2_THINK: ArkChatModel.KIMI_K2_THINK;
GROK_CODE_FAST_1: XAIChatModel.GROK_CODE_FAST_1;
GROK4_FAST_REASONING: XAIChatModel.GROK4_FAST_REASONING;
GROK4_FAST_NON_REASONING: XAIChatModel.GROK4_FAST_NON_REASONING;
GROK4_0709: XAIChatModel.GROK4_0709;
GROK3_MINI: XAIChatModel.GROK3_MINI;
GROK3: XAIChatModel.GROK3;
GROK2_VISION_1212_US_EAST_1: XAIChatModel.GROK2_VISION_1212_US_EAST_1;
GROK2_VISION_1212_EU_WEST_1: XAIChatModel.GROK2_VISION_1212_EU_WEST_1;
QWEN3_MAX: AliChatModel.QWEN3_MAX;
QWEN_MAX: AliChatModel.QWEN_MAX;
QWEN_PLUS: AliChatModel.QWEN_PLUS;
QWEN_FLASH: AliChatModel.QWEN_FLASH;
QWEN_TURBO: AliChatModel.QWEN_TURBO;
QWQ_PLUS: AliChatModel.QWQ_PLUS;
QVQ_MAX: AliChatModel.QVQ_MAX;
QVQ_PLUS: AliChatModel.QVQ_PLUS;
QWEN_LONG: AliChatModel.QWEN_LONG;
QWEN_CODE_TURBO: AliChatModel.QWEN_CODE_TURBO;
QWEN_CODE_PLUS: AliChatModel.QWEN_CODE_PLUS;
QWEN_CODE_FLASH: AliChatModel.QWEN_CODE_FLASH;
QWEN_MATH: AliChatModel.QWEN_MATH;
QWEN_VL_MAX: AliChatModel.QWEN_VL_MAX;
QWEN_VL_PLUS: AliChatModel.QWEN_VL_PLUS;
KIMI_K2_0711_PREVIEW: MoonShotChatModel.KIMI_K2_0711_PREVIEW;
MOON_V1_8K: MoonShotChatModel.MOON_V1_8K;
MOON_V1_32K: MoonShotChatModel.MOON_V1_32K;
MOON_V1_128K: MoonShotChatModel.MOON_V1_128K;
MOON_V1_AUTO: MoonShotChatModel.MOON_V1_AUTO;
KIMI_LATEST: MoonShotChatModel.KIMI_LATEST;
MOON_V1_8K_VISION_PREVIEW: MoonShotChatModel.MOON_V1_8K_VISION_PREVIEW;
MOON_V1_32K_VISION_PREVIEW: MoonShotChatModel.MOON_V1_32K_VISION_PREVIEW;
MOON_V1_128K_VISION_PREVIEW: MoonShotChatModel.MOON_V1_128K_VISION_PREVIEW;
KIMI_THINKING_PREVIEW: MoonShotChatModel.KIMI_THINKING_PREVIEW;
GPT3: OpenAIChatModel.GPT3;
GPT4: OpenAIChatModel.GPT4;
GPT4_TURBO: OpenAIChatModel.GPT4_TURBO;
GPT_4O_MINI: OpenAIChatModel.GPT_4O_MINI;
CHAT_GPT_4O: OpenAIChatModel.CHAT_GPT_4O;
GPT_4O: OpenAIChatModel.GPT_4O;
GPT_4O_AUDIO: OpenAIChatModel.GPT_4O_AUDIO;
GPT_4_1: OpenAIChatModel.GPT_4_1;
GPT_4_1_MINI: OpenAIChatModel.GPT_4_1_MINI;
GPT_4_1_NANO: OpenAIChatModel.GPT_4_1_NANO;
GPT_5: OpenAIChatModel.GPT_5;
GPT_5_CODE: OpenAIChatModel.GPT_5_CODE;
GPT_5_1_CODE: OpenAIChatModel.GPT_5_1_CODE;
GPT_5_1_CODE_MAX: OpenAIChatModel.GPT_5_1_CODE_MAX;
GPT_5_CHAT: OpenAIChatModel.GPT_5_CHAT;
GPT_5_PRO: OpenAIChatModel.GPT_5_PRO;
GPT_5_MINI: OpenAIChatModel.GPT_5_MINI;
GPT_5_NANO: OpenAIChatModel.GPT_5_NANO;
GPT_5_1: OpenAIChatModel.GPT_5_1;
GPT_5_1_CHAT: OpenAIChatModel.GPT_5_1_CHAT;
O1: OpenAIChatModel.O1;
O1_MINI: OpenAIChatModel.O1_MINI;
O1_PRO: OpenAIChatModel.O1_PRO;
O3: OpenAIChatModel.O3;
O3_PRO: OpenAIChatModel.O3_PRO;
O3_DEEP: OpenAIChatModel.O3_DEEP;
O3_MINI: OpenAIChatModel.O3_MINI;
O4_MINI: OpenAIChatModel.O4_MINI;
O4_DEEP: OpenAIChatModel.O4_DEEP;
GPT_OSS_120B: OpenAIChatModel.GPT_OSS_120B;
GPT_OSS_20B: OpenAIChatModel.GPT_OSS_20B;
GEM_FLASH_2: GoogleChatModel.GEM_FLASH_2;
GEM_FLASH_2_LITE: GoogleChatModel.GEM_FLASH_2_LITE;
GEM_PRO_2_5: GoogleChatModel.GEM_PRO_2_5;
GEM_FLASH_2_5: GoogleChatModel.GEM_FLASH_2_5;
GEM_FLASH_2_5_LITE: GoogleChatModel.GEM_FLASH_2_5_LITE;
SPARK_LITE: IFlyTekChatModel.SPARK_LITE;
SPARK_PRO: IFlyTekChatModel.SPARK_PRO;
SPARK_PRO_128K: IFlyTekChatModel.SPARK_PRO_128K;
SPARK_MAX: IFlyTekChatModel.SPARK_MAX;
SPARK_MAX_32K: IFlyTekChatModel.SPARK_MAX_32K;
SPARK_ULTRA: IFlyTekChatModel.SPARK_ULTRA;
GLM_3_TURBO: GLMChatModel.GLM_3_TURBO;
GLM_4: GLMChatModel.GLM_4;
GLM_4_AIR: GLMChatModel.GLM_4_AIR;
GLM_4_AIRX: GLMChatModel.GLM_4_AIRX;
GLM_4_FLASH: GLMChatModel.GLM_4_FLASH;
GLM_4_FLASHX: GLMChatModel.GLM_4_FLASHX;
GLM_4V: GLMChatModel.GLM_4V;
GLM_4V_PLUS: GLMChatModel.GLM_4V_PLUS;
GLM_4_LONG: GLMChatModel.GLM_4_LONG;
GLM_4_PLUS: GLMChatModel.GLM_4_PLUS;
ERNIE_3_5: BaiduChatModel.ERNIE_3_5;
ERNIE_3_5_PRE: BaiduChatModel.ERNIE_3_5_PRE;
ERNIE_3_5_128K: BaiduChatModel.ERNIE_3_5_128K;
ERNIE_4_0_LATEST: BaiduChatModel.ERNIE_4_0_LATEST;
ERNIE_4_0_PREVIEW: BaiduChatModel.ERNIE_4_0_PREVIEW;
ERNIE_4_0_8K: BaiduChatModel.ERNIE_4_0_8K;
ERNIE_4_0_TURBO_LATEST: BaiduChatModel.ERNIE_4_0_TURBO_LATEST;
ERNIE_4_0_TURBO_PREVIEW: BaiduChatModel.ERNIE_4_0_TURBO_PREVIEW;
ERNIE_4_0_TURBO_8K: BaiduChatModel.ERNIE_4_0_TURBO_8K;
ERNIE_4_0_TURBO_128K: BaiduChatModel.ERNIE_4_0_TURBO_128K;
ERNIE_SPEED_8K: BaiduChatModel.ERNIE_SPEED_8K;
ERNIE_SPEED_128K: BaiduChatModel.ERNIE_SPEED_128K;
ERNIE_SPEED_PRO_128K: BaiduChatModel.ERNIE_SPEED_PRO_128K;
ERNIE_LITE_8K: BaiduChatModel.ERNIE_LITE_8K;
ERNIE_LITE_PRO_128K: BaiduChatModel.ERNIE_LITE_PRO_128K;
ERNIE_TINY_8K: BaiduChatModel.ERNIE_TINY_8K;
ERNIE_CHAR_8K: BaiduChatModel.ERNIE_CHAR_8K;
ERNIE_CHAR_FICTION_8K: BaiduChatModel.ERNIE_CHAR_FICTION_8K;
ERNIE_NOVEL_8K: BaiduChatModel.ERNIE_NOVEL_8K;
DEEPSEEK_R1: DeepSeekChatModel.DEEPSEEK_R1;
CLAUDE_4_5_SONNET: AnthropicChatModel.CLAUDE_4_5_SONNET;
CLAUDE_4_5_HAIKU: AnthropicChatModel.CLAUDE_4_5_HAIKU;
CLAUDE_4_5_OPUS: AnthropicChatModel.CLAUDE_4_5_OPUS;
CLAUDE_4_1_OPUS: AnthropicChatModel.CLAUDE_4_1_OPUS;
CLAUDE_4_OPUS: AnthropicChatModel.CLAUDE_4_OPUS;
CLAUDE_4_SONNET: AnthropicChatModel.CLAUDE_4_SONNET;
CLAUDE_3_7_SONNET: AnthropicChatModel.CLAUDE_3_7_SONNET;
CLAUDE_3_5_HAIKU: AnthropicChatModel.CLAUDE_3_5_HAIKU;
CLAUDE_3_HAIKU: AnthropicChatModel.CLAUDE_3_HAIKU;
};
declare enum MidJourneyImagineModel {
MJ = "midjourney"
}
declare enum OpenAIImagineModel {
DALL_E_2 = "dall-e-2",
DALL_E_3 = "dall-e-3"
}
declare enum StabilityAIImagineModel {
SD_1_6 = "stable-diffusion-v1-6",
SD_XL_1024 = "stable-diffusion-xl-1024-v1-0"
}
declare enum IFlyTekImagineModel {
V2 = "v2.1"
}
declare const ImagineModel: {
V2: IFlyTekImagineModel.V2;
SD_1_6: StabilityAIImagineModel.SD_1_6;
SD_XL_1024: StabilityAIImagineModel.SD_XL_1024;
MJ: MidJourneyImagineModel.MJ;
DALL_E_2: OpenAIImagineModel.DALL_E_2;
DALL_E_3: OpenAIImagineModel.DALL_E_3;
};
type ImagineModel = OpenAIImagineModel | MidJourneyImagineModel | StabilityAIImagineModel | IFlyTekImagineModel;
type ModelModel = ChatModel | ImagineModel | EmbedModel;
declare const ModelModel: {
ALI_V3: AliEmbedModel.ALI_V3;
ALI_V2: AliEmbedModel.ALI_V2;
ALI_V1: AliEmbedModel.ALI_V1;
ALI_ASYNC_V2: AliEmbedModel.ALI_ASYNC_V2;
ALI_ASYNC_V1: AliEmbedModel.ALI_ASYNC_V1;
GEM_EMBED: GoogleEmbedModel.GEM_EMBED;
EMBED_2: GLMEmbedModel.EMBED_2;
EMBED_3: GLMEmbedModel.EMBED_3;
BGE_M3: OtherEmbedModel.BGE_M3;
BASE_CHN: OtherEmbedModel.BASE_CHN;
LARGE_CHN: OtherEmbedModel.LARGE_CHN;
BASE_CHN_PARAPH: OtherEmbedModel.BASE_CHN_PARAPH;
BASE_CHN_SENTENCE: OtherEmbedModel.BASE_CHN_SENTENCE;
BASE_MUL: OtherEmbedModel.BASE_MUL;
PARAPH_MUL_MINI: OtherEmbedModel.PARAPH_MUL_MINI;
ADA: OpenAIEmbedModel.ADA;
LARGE: OpenAIEmbedModel.LARGE;
SMALL: OpenAIEmbedModel.SMALL;
V2: IFlyTekImagineModel.V2;
SD_1_6: StabilityAIImagineModel.SD_1_6;
SD_XL_1024: StabilityAIImagineModel.SD_XL_1024;
MJ: MidJourneyImagineModel.MJ;
DALL_E_2: OpenAIImagineModel.DALL_E_2;
DALL_E_3: OpenAIImagineModel.DALL_E_3;
DOUBAO_1_5_THINKING_PRO: ArkChatModel.DOUBAO_1_5_THINKING_PRO;
DOUBAO_1_5_THINKING_VISION_PRO: ArkChatModel.DOUBAO_1_5_THINKING_VISION_PRO;
DOUBAO_SEED_1_6: ArkChatModel.DOUBAO_SEED_1_6;
DOUBAO_SEED_1_6_VISION: ArkChatModel.DOUBAO_SEED_1_6_VISION;
DOUBAO_SEED_1_6_FLASH: ArkChatModel.DOUBAO_SEED_1_6_FLASH;
DOUBAO_SEED_1_6_LITE: ArkChatModel.DOUBAO_SEED_1_6_LITE;
DOUBAO_SEED_1_6_THINKING: ArkChatModel.DOUBAO_SEED_1_6_THINKING;
DOUBAO_SEED_CODE: ArkChatModel.DOUBAO_SEED_CODE;
DEEPSEEK_V3: ArkChatModel.DEEPSEEK_V3;
DEEPSEEK_V3_1: ArkChatModel.DEEPSEEK_V3_1;
DEEPSEEK_V3_2: ArkChatModel.DEEPSEEK_V3_2;
KIMI_K2: ArkChatModel.KIMI_K2;
KIMI_K2_THINK: ArkChatModel.KIMI_K2_THINK;
GROK_CODE_FAST_1: XAIChatModel.GROK_CODE_FAST_1;
GROK4_FAST_REASONING: XAIChatModel.GROK4_FAST_REASONING;
GROK4_FAST_NON_REASONING: XAIChatModel.GROK4_FAST_NON_REASONING;
GROK4_0709: XAIChatModel.GROK4_0709;
GROK3_MINI: XAIChatModel.GROK3_MINI;
GROK3: XAIChatModel.GROK3;
GROK2_VISION_1212_US_EAST_1: XAIChatModel.GROK2_VISION_1212_US_EAST_1;
GROK2_VISION_1212_EU_WEST_1: XAIChatModel.GROK2_VISION_1212_EU_WEST_1;
QWEN3_MAX: AliChatModel.QWEN3_MAX;
QWEN_MAX: AliChatModel.QWEN_MAX;
QWEN_PLUS: AliChatModel.QWEN_PLUS;
QWEN_FLASH: AliChatModel.QWEN_FLASH;
QWEN_TURBO: AliChatModel.QWEN_TURBO;
QWQ_PLUS: AliChatModel.QWQ_PLUS;
QVQ_MAX: AliChatModel.QVQ_MAX;
QVQ_PLUS: AliChatModel.QVQ_PLUS;
QWEN_LONG: AliChatModel.QWEN_LONG;
QWEN_CODE_TURBO: AliChatModel.QWEN_CODE_TURBO;
QWEN_CODE_PLUS: AliChatModel.QWEN_CODE_PLUS;
QWEN_CODE_FLASH: AliChatModel.QWEN_CODE_FLASH;
QWEN_MATH: AliChatModel.QWEN_MATH;
QWEN_VL_MAX: AliChatModel.QWEN_VL_MAX;
QWEN_VL_PLUS: AliChatModel.QWEN_VL_PLUS;
KIMI_K2_0711_PREVIEW: MoonShotChatModel.KIMI_K2_0711_PREVIEW;
MOON_V1_8K: MoonShotChatModel.MOON_V1_8K;
MOON_V1_32K: MoonShotChatModel.MOON_V1_32K;
MOON_V1_128K: MoonShotChatModel.MOON_V1_128K;
MOON_V1_AUTO: MoonShotChatModel.MOON_V1_AUTO;
KIMI_LATEST: MoonShotChatModel.KIMI_LATEST;
MOON_V1_8K_VISION_PREVIEW: MoonShotChatModel.MOON_V1_8K_VISION_PREVIEW;
MOON_V1_32K_VISION_PREVIEW: MoonShotChatModel.MOON_V1_32K_VISION_PREVIEW;
MOON_V1_128K_VISION_PREVIEW: MoonShotChatModel.MOON_V1_128K_VISION_PREVIEW;
KIMI_THINKING_PREVIEW: MoonShotChatModel.KIMI_THINKING_PREVIEW;
GPT3: OpenAIChatModel.GPT3;
GPT4: OpenAIChatModel.GPT4;
GPT4_TURBO: OpenAIChatModel.GPT4_TURBO;
GPT_4O_MINI: OpenAIChatModel.GPT_4O_MINI;
CHAT_GPT_4O: OpenAIChatModel.CHAT_GPT_4O;
GPT_4O: OpenAIChatModel.GPT_4O;
GPT_4O_AUDIO: OpenAIChatModel.GPT_4O_AUDIO;
GPT_4_1: OpenAIChatModel.GPT_4_1;
GPT_4_1_MINI: OpenAIChatModel.GPT_4_1_MINI;
GPT_4_1_NANO: OpenAIChatModel.GPT_4_1_NANO;
GPT_5: OpenAIChatModel.GPT_5;
GPT_5_CODE: OpenAIChatModel.GPT_5_CODE;
GPT_5_1_CODE: OpenAIChatModel.GPT_5_1_CODE;
GPT_5_1_CODE_MAX: OpenAIChatModel.GPT_5_1_CODE_MAX;
GPT_5_CHAT: OpenAIChatModel.GPT_5_CHAT;
GPT_5_PRO: OpenAIChatModel.GPT_5_PRO;
GPT_5_MINI: OpenAIChatModel.GPT_5_MINI;
GPT_5_NANO: OpenAIChatModel.GPT_5_NANO;
GPT_5_1: OpenAIChatModel.GPT_5_1;
GPT_5_1_CHAT: OpenAIChatModel.GPT_5_1_CHAT;
O1: OpenAIChatModel.O1;
O1_MINI: OpenAIChatModel.O1_MINI;
O1_PRO: OpenAIChatModel.O1_PRO;
O3: OpenAIChatModel.O3;
O3_PRO: OpenAIChatModel.O3_PRO;
O3_DEEP: OpenAIChatModel.O3_DEEP;
O3_MINI: OpenAIChatModel.O3_MINI;
O4_MINI: OpenAIChatModel.O4_MINI;
O4_DEEP: OpenAIChatModel.O4_DEEP;
GPT_OSS_120B: OpenAIChatModel.GPT_OSS_120B;
GPT_OSS_20B: OpenAIChatModel.GPT_OSS_20B;
GEM_FLASH_2: GoogleChatModel.GEM_FLASH_2;
GEM_FLASH_2_LITE: GoogleChatModel.GEM_FLASH_2_LITE;
GEM_PRO_2_5: GoogleChatModel.GEM_PRO_2_5;
GEM_FLASH_2_5: GoogleChatModel.GEM_FLASH_2_5;
GEM_FLASH_2_5_LITE: GoogleChatModel.GEM_FLASH_2_5_LITE;
SPARK_LITE: IFlyTekChatModel.SPARK_LITE;
SPARK_PRO: IFlyTekChatModel.SPARK_PRO;
SPARK_PRO_128K: IFlyTekChatModel.SPARK_PRO_128K;
SPARK_MAX: IFlyTekChatModel.SPARK_MAX;
SPARK_MAX_32K: IFlyTekChatModel.SPARK_MAX_32K;
SPARK_ULTRA: IFlyTekChatModel.SPARK_ULTRA;
GLM_3_TURBO: GLMChatModel.GLM_3_TURBO;
GLM_4: GLMChatModel.GLM_4;
GLM_4_AIR: GLMChatModel.GLM_4_AIR;
GLM_4_AIRX: GLMChatModel.GLM_4_AIRX;
GLM_4_FLASH: GLMChatModel.GLM_4_FLASH;
GLM_4_FLASHX: GLMChatModel.GLM_4_FLASHX;
GLM_4V: GLMChatModel.GLM_4V;
GLM_4V_PLUS: GLMChatModel.GLM_4V_PLUS;
GLM_4_LONG: GLMChatModel.GLM_4_LONG;
GLM_4_PLUS: GLMChatModel.GLM_4_PLUS;
ERNIE_3_5: BaiduChatModel.ERNIE_3_5;
ERNIE_3_5_PRE: BaiduChatModel.ERNIE_3_5_PRE;
ERNIE_3_5_128K: BaiduChatModel.ERNIE_3_5_128K;
ERNIE_4_0_LATEST: BaiduChatModel.ERNIE_4_0_LATEST;
ERNIE_4_0_PREVIEW: BaiduChatModel.ERNIE_4_0_PREVIEW;
ERNIE_4_0_8K: BaiduChatModel.ERNIE_4_0_8K;
ERNIE_4_0_TURBO_LATEST: BaiduChatModel.ERNIE_4_0_TURBO_LATEST;
ERNIE_4_0_TURBO_PREVIEW: BaiduChatModel.ERNIE_4_0_TURBO_PREVIEW;
ERNIE_4_0_TURBO_8K: BaiduChatModel.ERNIE_4_0_TURBO_8K;
ERNIE_4_0_TURBO_128K: BaiduChatModel.ERNIE_4_0_TURBO_128K;
ERNIE_SPEED_8K: BaiduChatModel.ERNIE_SPEED_8K;
ERNIE_SPEED_128K: BaiduChatModel.ERNIE_SPEED_128K;
ERNIE_SPEED_PRO_128K: BaiduChatModel.ERNIE_SPEED_PRO_128K;
ERNIE_LITE_8K: BaiduChatModel.ERNIE_LITE_8K;
ERNIE_LITE_PRO_128K: BaiduChatModel.ERNIE_LITE_PRO_128K;
ERNIE_TINY_8K: BaiduChatModel.ERNIE_TINY_8K;
ERNIE_CHAR_8K: BaiduChatModel.ERNIE_CHAR_8K;
ERNIE_CHAR_FICTION_8K: BaiduChatModel.ERNIE_CHAR_FICTION_8K;
ERNIE_NOVEL_8K: BaiduChatModel.ERNIE_NOVEL_8K;
DEEPSEEK_R1: DeepSeekChatModel.DEEPSEEK_R1;
CLAUDE_4_5_SONNET: AnthropicChatModel.CLAUDE_4_5_SONNET;
CLAUDE_4_5_HAIKU: AnthropicChatModel.CLAUDE_4_5_HAIKU;
CLAUDE_4_5_OPUS: AnthropicChatModel.CLAUDE_4_5_OPUS;
CLAUDE_4_1_OPUS: AnthropicChatModel.CLAUDE_4_1_OPUS;
CLAUDE_4_OPUS: AnthropicChatModel.CLAUDE_4_OPUS;
CLAUDE_4_SONNET: AnthropicChatModel.CLAUDE_4_SONNET;
CLAUDE_3_7_SONNET: AnthropicChatModel.CLAUDE_3_7_SONNET;
CLAUDE_3_5_HAIKU: AnthropicChatModel.CLAUDE_3_5_HAIKU;
CLAUDE_3_HAIKU: AnthropicChatModel.CLAUDE_3_HAIKU;
};
declare enum MJTaskType {
IMAGINE = "IMAGINE",
UPSCALE = "UPSCALE",
VARIATION = "VARIATION",
REROLL = "REROLL",
DESCRIBE = "DESCRIBE",
BLEND = "BLEND"
}
declare enum DETaskType {
GENERATION = "generations",
EDIT = "edits",
VARIATION = "variation"
}
declare enum SDTaskType {
GENERATION = "generation"
}
declare enum SPKTaskType {
GENERATION = "generation"
}
declare const ImgTaskType: {
GENERATION: SPKTaskType.GENERATION;
EDIT: DETaskType.EDIT;
VARIATION: DETaskType.VARIATION;
IMAGINE: MJTaskType.IMAGINE;
UPSCALE: MJTaskType.UPSCALE;
REROLL: MJTaskType.REROLL;
DESCRIBE: MJTaskType.DESCRIBE;
BLEND: MJTaskType.BLEND;
};
type ImgTaskType = MJTaskType | DETaskType | SDTaskType | SPKTaskType;
declare enum ChatRoleEnum {
SYSTEM = "system",
USER = "user",
ASSISTANT = "assistant",
TOOL = "tool",
DEV = "developer"
}
declare enum GPTChatRoleEnum {
SYSTEM = "system",
USER = "user",
ASSISTANT = "assistant",
DEV = "developer",
TOOL = "tool"
}
declare enum AnthropicChatRoleEnum {
USER = "user",
ASSISTANT = "assistant"
}
declare enum DSChatRoleEnum {
SYSTEM = "system",
USER = "user",
ASSISTANT = "assistant",
TOOL = "tool"
}
declare enum SPKChatRoleEnum {
USER = "user",
ASSISTANT = "assistant",
SYSTEM = "system",
TOOL = "tool"
}
declare enum GLMChatRoleEnum {
SYSTEM = "system",
USER = "user",
ASSISTANT = "assistant",
TOOL = "tool"
}
declare enum GEMChatRoleEnum {
USER = "user",
MODEL = "model"
}
declare enum BDUChatRoleEnum {
USER = "user",
ASSISTANT = "assistant"
}
/** @format */
interface UniAIConfig {
OpenAI?: {
key: string | string[];
proxy?: string;
};
Anthropic?: {
key: string | string[];
proxy?: string;
};
DeepSeek?: {
key: string | string[];
proxy?: string;
};
Google?: {
key: string | string[];
proxy?: string;
};
GLM?: {
key?: string | string[];
proxy?: string;
};
IFlyTek?: {
apiPassword?: string | string[];
appId?: string;
apiKey?: string;
apiSecret?: string;
proxy?: string;
};
Baidu?: {
appId?: string;
apiKey?: string;
secretKey?: string;
proxy?: string;
};
MoonShot?: {
key: string | string[];
proxy?: string;
};
AliYun?: {
key: string | string[];
proxy?: string;
};
MidJourney?: {
proxy: string;
imgProxy?: string;
token?: string;
};
StabilityAI?: {
key: string | string[];
proxy?: string;
};
XAI?: {
key: string | string[];
proxy?: string;
};
Ark?: {
key: string | string[];
proxy?: string;
};
Other?: {
api?: string;
key?: string;
};
}
/** @format */
interface ChatMessage {
role: ChatRoleEnum;
content: string | string[];
name?: string;
img?: string | string[];
audio?: string | string[];
audioFormat?: string;
tool?: string;
}
interface EmbeddingResponse {
embedding: number[][];
model: EmbedModel;
object: 'embedding';
promptTokens: number;
totalTokens: number;
}
interface ChatResponse {
id: string;
content: string;
tools?: object[];
promptTokens: number;
completionTokens: number;
totalTokens: number;
model: ChatModel | string;
object: string;
}
type ReasoningLevel = ReasoningEffort;
interface ChatOption {
id?: string;
stream?: boolean;
provider?: ChatModelProvider;
model?: ChatModel;
top?: number;
temperature?: number;
maxLength?: number;
tools?: {
type: string;
[key: string]: any;
}[];
toolChoice?: string;
reasoning?: ReasoningLevel;
}
interface EmbedOption {
provider?: EmbedModelProvider;
model?: EmbedModel;
dimensions?: number;
}
type ModelList = Provider[];
interface Provider {
provider: keyof typeof ModelProvider;
value: ModelProvider;
models: ModelModel[];
}
interface ImagineOption {
provider?: ImagineModelProvider;
model?: ImagineModel;
negativePrompt?: string;
height?: number;
width?: number;
num?: number;
}
interface ImagineResponse {
taskId: string;
time: number;
}
interface TaskResponse {
id: string;
type: ImgTaskType;
imgs: string[];
info: string;
fail: string;
progress: number;
created: number;
model: ImagineModel;
}
/** @format */
interface GPTChatResponse extends ChatCompletion {
}
interface GPTChatStreamResponse extends ChatCompletionChunk {
}
interface OpenAIEmbedRequest extends EmbeddingCreateParams {
}
interface OpenAIEmbedResponse extends CreateEmbeddingResponse {
}
interface GPTChatRequest extends ChatCompletionCreateParamsNonStreaming {
}
interface GPTChatStreamRequest extends ChatCompletionCreateParamsStreaming {
}
interface OpenAIImagineRequest extends ImageGenerateParamsBase {
}
interface OpenAIImagineResponse extends ImagesResponse {
}
type GPTChatMessage = ChatCompletionSystemMessageParam | ChatCompletionUserMessageParam | ChatCompletionAssistantMessageParam | ChatCompletionToolMessageParam | ChatCompletionDeveloperMessageParam;
type GPTImagineSize = '256x256' | '512x512' | '1024x1024' | '1792x1024' | '1024x1792' | null;
/** @format */
interface AnthropicChatRequest {
model: string;
max_tokens: number;
messages: AnthropicChatMessage[];
system?: string;
stream?: boolean;
temperature?: number;
top_p?: number;
tools?: AnthropicTool[];
tool_choice?: AnthropicToolChoice;
}
interface AnthropicChatMessage {
role: AnthropicChatRoleEnum;
content: string | AnthropicContent[];
}
interface AnthropicContent {
type: 'text' | 'image';
text?: string;
source?: {
type: 'base64';
media_type: string;
data: string;
};
}
interface AnthropicTool {
name: string;
description: string;
input_schema: {
type: 'object';
properties: Record<string, any>;
required?: string[];
};
}
interface AnthropicToolChoice {
type: 'auto' | 'any' | 'tool';
name?: string;
}
interface AnthropicChatResponse {
id: string;
type: 'message';
role: 'assistant';
content: AnthropicResponseContent[];
model: string;
stop_reason?: 'end_turn' | 'max_tokens' | 'stop_sequence' | 'tool_use';
stop_sequence?: string;
usage: {
input_tokens: number;
output_tokens: number;
};
}
interface AnthropicResponseContent {
type: 'text' | 'tool_use';
text?: string;
id?: string;
name?: string;
input?: any;
}
interface AnthropicChatStreamResponse {
type: 'message_start' | 'message_delta' | 'content_block_start' | 'content_block_delta' | 'content_block_stop' | 'message_stop';
message?: Partial<AnthropicChatResponse>;
delta?: {
text?: string;
type?: string;
stop_reason?: string;
usage?: {
output_tokens: number;
};
};
content_block?: {
type: 'text' | 'tool_use';
text?: string;
id?: string;
name?: string;
input?: any;
};
index?: number;
}
/** @format */
type ARKReasoningEffort = 'minimal' | 'low' | 'medium' | 'high';
interface ARKThinkingConfig {
type: 'enabled' | 'disabled';
}
interface ARKReasoningConfig {
effort: ARKReasoningEffort;
}
interface ARKChatRequest extends GPTChatRequest {
stream?: false;
thinking?: ARKThinkingConfig;
reasoning?: ARKReasoningConfig;
}
interface ARKChatRequestStream extends GPTChatStreamRequest {
stream: true;
thinking?: ARKThinkingConfig;
reasoning?: ARKReasoningConfig;
}
/** @format */
interface GEMChatRequest {
contents: GEMChatMessage[];
safetySettings?: SafetySetting[];
generationConfig?: GenerationConfig;
system_instruction?: GemSystemInstruction;
}
interface GEMChatMessage {
role: GEMChatRoleEnum;
parts: Part[];
}
interface GemSystemInstruction {
parts: Part;
}
interface GoogleEmbedRequest {
model: string;
content: {
parts: {
text: string;
}[];
};
output_dimensionality: number;
}
interface GoogleEmbedResponse {
embedding: {
values: number[];
};
}
interface Part {
text?: string;
inline_data?: InlineData;
}
interface InlineData {
mime_type: string;
data: string;
}
interface SafetySetting {
category: string;
threshold: string;
}
interface GenerationConfig {
stopSequences?: string[];
temperature?: number;
maxOutputTokens?: number;
topP?: number;
topK?: number;
}
interface GEMChatResponse {
candidates?: Candidate[];
promptFeedback?: Feedback;
}
interface Candidate {
content?: GEMChatMessage;
finishReason: string;
index: number;
safetyRatings: Rating[];
}
interface Feedback {
blockReason?: string;
safetyRatings: Rating[];
}
interface Rating {
category: string;
probability: string;
}
/** @format */
interface SystemMessage$2 {
role: 'system';
content: string;
name?: string;
}
interface UserMessage$2 {
role: 'user';
content: string;
name?: string;
}
interface AssistantMessage$2 {
role: 'assistant';
content: string | null;
name?: string;
prefix?: boolean;
reasoning_content?: string | null;
}
interface ToolMessage$2 {
role: 'tool';
content: string;
tool_call_id: string;
}
type DSChatMessage = SystemMessage$2 | UserMessage$2 | AssistantMessage$2 | ToolMessage$2;
interface Tool {
type: 'function';
function: object;
}
type ToolChoice$1 = 'none' | 'auto' | 'required';
interface DSChatRequest {
messages: DSChatMessage[];
model: DeepSeekChatModel;
frequency_penalty?: number;
max_tokens?: number;
presence_penalty?: number;
response_format?: 'text' | 'json_object';
stop?: string | string[];
stream?: boolean;
stream_options?: {
include_usage: boolean;
};
temperature?: number;
top_p?: number;
tools?: Tool[];
tool_choice?: ToolChoice$1;
logprobs?: boolean;
top_logprobs?: number;
}
interface ToolCall$1 {
id: string;
type: 'function';
function: {
name: string;
arguments: string;
};
}
interface Delta {
role: 'assistant';
content?: string | null;
reasoning_content?: string | null;
}
interface Message$1 {
role: 'assistant';
content: string | null;
reasoning_content?: string | null;
tool_calls?: ToolCall$1[];
}
interface LogProbContent {
token: string;
logprob: number;
bytes: number[] | null;
}
interface LogProbs {
content: LogProbContent[] | null;
top_logprobs: LogProbContent[];
}
interface Choice$2 {
finish_reason: 'stop' | 'length' | 'content_filter' | 'tool_calls' | 'insufficient_system_resource';
index: number;
message?: Message$1;
delta?: Delta;
logprobs?: LogProbs | null;
}
interface Usage$2 {
completion_tokens: number;
prompt_tokens: number;
prompt_cache_hit_tokens: number;
prompt_cache_miss_tokens: number;
total_tokens: number;
completion_tokens_details?: {
reasoning_tokens: number;
};
}
interface DSChatResponse {
id: string;
choices: Choice$2[];
created: number;
model: string;
system_fingerprint: string;
object: 'chat.completion';
usage?: Usage$2;
}
/** @format */
/** Reference to an image by URL, with optional detail level. */
interface ImageUrlContent {
    url: string;
    detail?: 'high' | 'low';
}
/** Plain-text part of a multimodal message. */
interface MessageContentText {
    type: 'text';
    text: string;
}
/** Image part of a multimodal message. */
interface MessageContentImage {
    type: 'image_url';
    image_url: ImageUrlContent;
}
/** A single content part: text or image. */
type MessageContent = MessageContentText | MessageContentImage;
/** One turn in an xAI Grok conversation; content may be plain text or multimodal parts. */
interface GrokChatMessage {
    role: 'system' | 'user' | 'assistant';
    content: MessageContent[] | string;
}
/** Plain-text output format. */
interface ResponseFormatText {
    type: 'text';
}
/** Forces the model to emit a JSON object. */
interface ResponseFormatJson {
    type: 'json_object';
}
/** Constrains output to a caller-supplied JSON schema. */
interface ResponseFormatJsonSchema {
    type: 'json_schema';
    json_schema: Record<string, any>;
}
/** Output-format selector for Grok requests. */
type ResponseFormat = ResponseFormatText | ResponseFormatJson | ResponseFormatJsonSchema;
/** Names one specific function the model must call. */
interface ToolFunction {
    type: 'function';
    function: {
        name: string;
    };
}
/** Function-style tool definition passed to Grok. */
interface GrokTool {
    type: 'function';
    function: Record<string, any>;
}
/** Tool-selection strategy, or a specific function to force. */
type GrokToolChoice = 'none' | 'auto' | 'required' | ToolFunction;
/** Extra options that apply when `stream` is true. */
interface StreamOptions {
    include_usage: boolean;
}
/** Request payload for the xAI Grok chat-completions endpoint. */
interface GrokChatRequest {
    messages: GrokChatMessage[];
    model: XAIChatModel;
    /** NOTE(review): presumably enables xAI deferred completions — confirm against the xAI API docs. */
    deferred?: boolean | null;
    frequency_penalty?: number | null;
    logit_bias?: Record<string, number> | null;
    logprobs?: boolean | null;
    max_tokens?: number | null;
    /** Number of alternative completions to generate. */
    n?: number | null;
    presence_penalty?: number | null;
    response_format?: ResponseFormat | null;
    /** Seed for reproducible sampling. */
    seed?: number | null;
    stop?: string[] | null;
    stream?: boolean | null;
    stream_options?: StreamOptions | null;
    temperature?: number | null;
    tool_choice?: GrokToolChoice | null;
    tools?: GrokTool[] | null;
    top_logprobs?: number | null;
    top_p?: number | null;
    /** Opaque end-user identifier forwarded to the provider. */
    user?: string | null;
}
/** Non-streaming response from the Grok chat API. */
interface GrokChatResponse {
    id: string;
    object: 'chat.completion';
    created: number;
    model: string;
    choices: Array<{
        index: number;
        message: ChoiceMessage;
        finish_reason: string;
    }>;
    usage: Usage$1;
    system_fingerprint: string;
}
/** Incremental fragment of an assistant message in a stream chunk. */
interface ChoiceDelta {
    role?: 'assistant';
    content?: string;
}
/** Complete assistant message within a non-streaming Grok choice. */
interface ChoiceMessage {
    role: 'assistant';
    content: string;
    tool_calls?: Array<{
        id: string;
        function: {
            name: string;
            /** JSON-encoded argument object, as emitted by the model. */
            arguments: string;
        };
        type: string;
    }>;
    /** Refusal text when the model declines to answer; otherwise null. */
    refusal: string | null;
}
/** One choice within a streaming chunk. */
interface Choice$1 {
    index: number;
    delta: ChoiceDelta;
}
/** Per-modality breakdown of prompt tokens. */
interface PromptTokensDetails {
    text_tokens: number;
    audio_tokens: number;
    image_tokens: number;
    cached_tokens: number;
}
/** Token accounting for a Grok request/response pair. */
interface Usage$1 {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
    prompt_tokens_details: PromptTokensDetails;
}
/** One server-sent chunk of a streaming Grok response. */
interface GrokChatResponseChunk {
    id: string;
    object: 'chat.completion.chunk';
    created: number;
    model: string;
    choices: Choice$1[];
    usage: Usage$1;
    system_fingerprint: string;
}
/** @format */
/** Request payload for the Zhipu GLM chat endpoint. */
interface GLMChatRequest {
    model?: GLMChatModel;
    messages: GLMChatMessage[];
    stream?: boolean;
    temperature?: number;
    top_p?: number;
    max_tokens?: number;
    /** Caller-supplied id for request tracing. */
    request_id?: string;
    /** When false, sampling is disabled (greedy decoding per GLM convention — verify against GLM docs). */
    do_sample?: boolean;
    stop?: string[];
    tools?: GLMTool[];
    tool_choice?: GLMToolChoice;
}
/** GLM only exposes automatic tool selection. */
type GLMToolChoice = 'auto';
/** Request payload for the GLM embedding endpoint. */
interface GLMEmbedRequest {
    model: GLMEmbedModel;
    input: string | string[];
    /** Requested embedding dimensionality, when the model supports it. */
    dimensions?: number;
}
/** Response from the GLM embedding endpoint: one vector per input. */
interface GLMEmbedResponse {
    model: string;
    /** One entry per embedded input, in request order (by index). */
    data: {
        index: number;
        object: string;
        embedding: number[];
    }[];
    index: number;
    object: string;
    usage: {
        prompt_tokens: number;
        completion_tokens: number;
        total_tokens: number;
    };
}
/** System prompt in a GLM conversation. */
interface SystemMessage$1 {
    role: 'system';
    content: string;
}
/** User turn; may carry multimodal parts (reuses OpenAI content-part type). */
interface UserMessage$1 {
    role: 'user';
    content: string | Array<ChatCompletionContentPart>;
}
/** Assistant turn, optionally carrying tool calls. */
interface AssistantMessage$1 {
    role: 'assistant';
    content?: string;
    tool_calls?: ToolCall[];
}
/** Tool invocation recorded in an assistant message. */
interface ToolCall {
    id: string;
    type: 'web_search' | 'retrieval' | 'function';
    /** Populated only when type is 'function'. */
    function?: FunctionToolCall;
}
/** Name and JSON-encoded arguments of a called function. */
interface FunctionToolCall {
    name: string;
    arguments: string;
}
/** Tool result fed back to the model. */
interface ToolMessage$1 {
    role: 'tool';
    content: string;
    /** Id of the tool call this message answers. */
    tool_call_id: string;
}
/** Any message shape accepted by the GLM chat API. */
type GLMChatMessage = SystemMessage$1 | UserMessage$1 | AssistantMessage$1 | ToolMessage$1;
/** Tool available to GLM: a function, a knowledge-base retrieval, or web search. Exactly one of the optional members should match `type`. */
interface GLMTool {
    type: 'function' | 'retrieval' | 'web_search';
    function?: FunctionTool$1;
    retrieval?: RetrievalTool;
    web_search?: WebSearchTool$1;
}
/** JSON-schema-style function declaration. */
interface FunctionTool$1 {
    name: string;
    description: string;
    parameters?: {
        type: string;
        properties: object;
        required?: string[];
    };
}
/** Retrieval over a pre-registered knowledge base. */
interface RetrievalTool {
    knowledge_id: string;
    prompt_template?: string;
}
/** Built-in web-search tool configuration. */
interface WebSearchTool$1 {
    enable?: boolean;
    /** Override query to search for instead of the user message. */
    search_query?: string;
    /** Whether to return search results alongside the answer. */
    search_result?: boolean;
}
/** Response object returned by the GLM chat API (covers both stream chunks and full responses). */
interface GLMChatResponse {
    id: string;
    model: string;
    object?: string;
    created: number;
    choices: Choice[];
    usage: Usage;
    /** Search hits when the web_search tool was used. */
    web_search?: WebSearch[];
}
/** One generated alternative; `delta` on stream chunks, `message` otherwise. */
interface Choice {
    index: number;
    finish_reason: 'stop' | 'tool_calls' | 'length' | 'sensitive' | 'network_error';
    delta?: Message;
    message?: Message;
}
/** Assistant message (full or partial) within a choice. */
interface Message {
    role: 'assistant';
    content: string | null;
    tool_calls?: object[];
}
/** Token accounting for a GLM request/response pair. */
interface Usage {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
}
/** One web-search result attached to the response. */
interface WebSearch {
    icon: string;
    title: string;
    link: string;
    media: string;
    content: string;
}
/** @format */
/** Request envelope for iFlyTek Spark image generation (header/parameter/payload structure mirrors the Spark wire format). */
interface SPKImagineRequest {
    header: {
        app_id: string;
        uid?: string;
    };
    parameter: {
        chat: {
            domain: string;
            /** Requested image dimensions in pixels. */
            width: number;
            height: number;
        };
    };
    payload: {
        message: {
            text: {
                role: string;
                content: string;
            }[];
        };
    };
}
/** Response envelope for Spark image generation; payload is absent on error. */
interface SPKImagineResponse {
    header: {
        /** Provider status code; message carries the human-readable detail. */
        code: number;
        message: string;
        sid: string;
        status: number;
    };
    payload?: {
        choices: {
            status: number;
            seq: number;
            text: [
                {
                    /** NOTE(review): presumably base64 image data for imagine responses — confirm against Spark docs. */
                    content: string;
                    index: number;
                    role: string;
                }
            ];
        };
    };
}
/** Tool available to Spark: a function or built-in web search. */
interface SPKTool {
    type: 'function' | 'web_search';
    function?: FunctionTool;
    web_search?: WebSearchTool;
}
/** JSON-schema-style function declaration. */
interface FunctionTool {
    name: string;
    description?: string;
    parameters?: object;
}
/** Built-in web-search tool configuration. */
interface WebSearchTool {
    enable?: boolean;
    /** Whether to label citations that reference search results. */
    show_ref_label?: boolean;
}
/** System prompt in a Spark conversation. */
interface SystemMessage {
    role: 'system';
    content: string;
}
/** User turn in a Spark conversation. */
interface UserMessage {
    role: 'user';
    content: string;
}
/** Assistant turn in a Spark conversation. */
interface AssistantMessage {
    role: 'assistant';
    content: string;
}
/** Tool result fed back to the model. */
interface ToolMessage {
    role: 'tool';
    content: string;
}
/** Any message shape accepted by the Spark chat API. */
type SPKChatMessage = SystemMessage | UserMessage | AssistantMessage | ToolMessage;
/**
 * Request payload for the iFlyTek Spark chat endpoint.
 */
interface SparkChatRequest {
    model: IFlyTekChatModel;
    /** Opaque end-user identifier forwarded to the provider. */
    user?: string;
    messages: SPKChatMessage[];
    temperature?: number;
    top_k?: number;
    top_p?: number;
    presence_penalty?: number;
    frequency_penalty?: number;
    stream?: boolean;
    max_tokens?: number;
    response_format?: {
        type: 'text' | 'json_object';
    };
    tools?: SPKTool[];
    tool_choice?: SPKToolChoice;
    /** Built-in plugin names to disable for this request. */
    suppress_plugin?: string[];
}
/** Tool-selection strategy for a Spark request. */
type SPKToolChoice = 'auto' | 'none' | 'required';
/**
 * Response from the Spark chat endpoint. Despite the original "Non-Streaming"
 * label, this shape covers both modes: `message` is set on full responses,
 * `delta` on streaming chunks.
 */
interface SparkChatResponse {
    /** Provider status code; message carries the human-readable detail. */
    code: number;
    message: string;
    sid: string;
    choices: Array<{
        message?: {
            role: 'assistant';
            content: string | null;
            tool_calls?: object[];
        };
        delta?: {
            role: 'assistant';
            content: string | null;
            tool_calls?: object[];
        };
        index: number;
        finish_reason: string;
    }>;
    usage?: {
        prompt_tokens: number;
        completion_tokens: number;
        total_tokens: number;
    };
}
/** @format */
/** OAuth credentials for obtaining a Baidu access token. */
interface BaiduAccessTokenRequest {
    grant_type: string;
    client_id: string;
    client_secret: string;
}
/** Baidu OAuth token response; error fields are set on failure. */
interface BaiduAccessTokenResponse {
    access_token: string;
    /** Token lifetime in seconds. */
    expires_in: number;
    error?: string;
    error_description?: string;
    session_key?: string;
    refresh_token?: string;
    scope?: string;
    session_secret?: string;
}
/** One turn in a Baidu ERNIE conversation (no 'system' role — system text goes in BaiduChatRequest.system). */
interface BaiduChatMessage {
    role: 'user' | 'assistant' | 'function';
    content: string;
    name?: string;
    function_call?: FunctionCall;
}
/** Function invocation emitted by the model. */
interface FunctionCall {
    name: string;
    /** JSON-encoded argument object. */
    arguments: string;
    /** Model's stated reasoning for making this call, when provided. */
    thoughts?: string;
}
/** Request payload for the Baidu ERNIE chat endpoint. */
interface BaiduChatRequest {
    messages: BaiduChatMessage[];
    /* NOTE: `Function` below is the local interface declared in this file, which
       shadows the global `Function` type — do not confuse the two. */
    functions?: Function[];
    temperature?: number;
    top_p?: number;
    penalty_score?: number;
    stream?: boolean;
    /** System prompt (Baidu takes it here rather than as a message role). */
    system?: string;
    stop?: string[];
    disable_search?: boolean;
    enable_citation?: boolean;
    max_output_tokens?: number;
    user_id?: string;
    tool_choice?: ToolChoice;
}
/** Function declaration offered to the model (shadows the global `Function` type). */
interface Function {
    name: string;
    description: string;
    parameters: object;
    /** Schema describing the function's return value, when provided. */
    responses?: object;
    /** Few-shot examples of how the function should be invoked. */
    examples?: Example[];
}
/** Example turn used to illustrate function usage. */
interface Example {
    role: 'user' | 'assistant' | 'function';
    content: string;
    name?: string;
    function_call?: FunctionCall;
}
/** Forces the model to call one specific function. */
interface ToolChoice {
    type: 'function';
    function: Function;
    name: string;
}
/** Response from the Baidu ERNIE chat endpoint (full response or stream chunk). */
interface BaiduChatResponse {
    id: string;
    object: string;
    created: number;
    /** Chunk ordinal, present on streaming responses. */
    sentence_id?: number;
    /** True on the final chunk of a stream. */
    is_end?: boolean;
    /** True when output was cut short. */
    is_truncated: boolean;
    finish_reason: 'normal' | 'stop' | 'length' | 'content_filter' | 'function_call';
    search_info?: SearchInfo;
    /** Generated text for this response/chunk. */
    result: string;
    /** Provider's request that the conversation history be cleared. */
    need_clear_history: boolean;
    /** When history must be cleared, the round that triggered it. */
    ban_round?: number;
    usage?: {
        prompt_tokens: number;
        completion_tokens: number;
        total_tokens: number;
        plugins: PluginUsage[];
    };
    function_call?: FunctionCall;
    /** Set (with error_msg) when the call failed. */
    error_code?: number;
    error_msg?: string;
}
/** Web-search results consulted by the model. */
interface SearchInfo {
    search_results: SearchResult[];
}
/** One search hit referenced in the answer. */
interface SearchResult {
    index: number;
    url: string;
    title: string;
}
/** Token usage attributed to one plugin. */
interface PluginUsage {
    name: string;
    parse_tokens: number;
    abstract_tokens: number;
    search_tokens: number;
    total_tokens: number;
}
/** @format */
/** Imagine (text-to-image) submission for the MidJourney proxy API. */
interface MJImagineRequest {
    prompt: string;
    /** Base64-encoded reference images, when provided. */
    base64Array?: string[];
    /** Callback URL the proxy notifies on task updates. */
    notifyHook?: string;
    /** Opaque caller state echoed back in notifications. */
    state?: string;
}
/** Follow-up action (e.g. upscale/variation) on an existing MidJourney task. */
interface MJChangeRequest {
    taskId: string;
    action: MJTaskType;
    /** 1-based image index the action applies to, when relevant. */
    index?: number;
    notifyHook?: string;
    state?: string;
}
/** Submission acknowledgement; `result` carries the new task id. */
interface MJImagineResponse {
    code: number;
    description: string;
    result: string;
    properties: {
        discordInstanceId: string;
    };
}
/** Status record for a MidJourney task, polled until completion. */
interface MJTaskResponse {
    id: string;
    properties?: {
        notifyHook: string;
        discordInstanceId?: string;
        flags: number;
        messageId: string;
        messageHash: string;
        nonce: string;
        finalPrompt: string;
        progressMessageId: string;
    };
    action: MJTaskType;
    status: string;
    prompt: string;
    /** Prompt translated to English by the proxy. */
    promptEn: string;
    description: string;
    state: string;
    /** Epoch-millisecond timestamps for the task lifecycle (presumed unit — verify against proxy docs). */
    submitTime: number;
    startTime: number;
    finishTime: number;
    /** Null until the image is ready. */
    imageUrl: string | null;
    /** Progress indicator as reported by the proxy (string, e.g. a percentage). */
    progress: string;
    /** Null unless the task failed. */
    failReason: string | null;
}
/** @format */
/** Request payload for Stability AI text-to-image generation. */
interface StabilityImagineRequest {
    /** Output dimensions in pixels. */
    height: number;
    width: number;
    /** Weighted prompts; negative weights act as negative prompts. */
    text_prompts: Array<{
        text: string;
        weight: number;
    }>;
    /** Classifier-free guidance scale. */
    cfg_scale?: number;
    clip_guidance_preset?: 'FAST_BLUE' | 'FAST_GREEN' | 'NONE' | 'SIMPLE' | 'SLOW' | 'SLOWER' | 'SLOWEST';
    sampler?: 'DDIM' | 'DDPM' | 'K_DPMPP_2M' | 'K_DPMPP_2S_ANCESTRAL' | 'K_DPM_2' | 'K_DPM_2_ANCESTRAL' | 'K_EULER' | 'K_EULER_ANCESTRAL' | 'K_HEUN' | 'K_LMS';
    /** Number of images to generate. */
    samples?: number;
    seed?: number;
    /** Number of diffusion steps. */
    steps?: number;
    style_preset?: '3d-model' | 'analog-film' | 'anime' | 'cinematic' | 'comic-book' | 'digital-art' | 'enhance' | 'fantasy-art' | 'isometric' | 'line-art' | 'low-poly' | 'modeling-compound' | 'neon-punk' | 'origami' | 'photographic' | 'pixel-art' | 'tile-texture';
    extras?: object;
}
/** Stability AI response: one artifact per generated image (base64-encoded). */
interface StabilityImagineResponse {
    artifacts: [
        {
            base64: string;
            finishReason: 'CONTENT_FILTERED' | 'ERROR' | 'SUCCESS';
            seed: number;
        }
    ];
}
/**
 * Unified client that routes chat, embedding, and image-generation calls
 * to the configured provider (OpenAI, Anthropic, DeepSeek, GLM, Baidu, …).
 */
declare class UniAI {
    config: UniAIConfig | null;
    /** Combined catalogue of every available model. */
    models: ModelList;
    embedModels: ModelList;
    chatModels: ModelList;
    imgModels: ModelList;
    private openai;
    private anthropic;
    private deepseek;
    private google;
    private glm;
    private fly;
    private baidu;
    private other;
    private moon;
    private ali;
    private mj;
    private xai;
    private ark;
    private stability;
    constructor(config?: UniAIConfig);
    /**
     * Runs a chat completion.
     * @param messages Conversation history, or a single string treated as one message.
     * @param option   Provider/model selection and sampling options.
     * @returns A full ChatResponse, or a Readable when streaming is requested.
     */
    chat(messages?: ChatMessage[] | string, option?: ChatOption): Promise<ChatResponse | stream.Readable>;
    /** Embeds one string or a batch of strings into vectors. */
    embedding(content: string | string[], option?: EmbedOption): Promise<EmbeddingResponse>;
    /** Generates image(s) from a text prompt. */
    imagine(prompt: string, option?: ImagineOption): Promise<ImagineResponse>;
    /** Queries image-generation task status; all tasks when id is omitted. */
    task(provider: ImagineModelProvider, id?: string): Promise<TaskResponse[]>;
    /** Applies a follow-up action (e.g. upscale/variation) to an existing task. */
    change(provider: ImagineModelProvider, taskId: string, action: string, index?: number): Promise<ImagineResponse>;
}
/** @format */
/** Node in a parsed document tree (per field names: heading depth, text value, nested children — presumably Markdown; verify against the parser). */
interface IAST {
    type: string;
    depth?: number;
    value?: string;
    children?: IAST[];
}
/** @format */
/** @format
* Represents a node in a recursive Markdown tree structure,
* containing a title, content, and any number of sub-sections (children).
*/
declare class Prompt {
title: string;
content: string;
children: Prompt[];
/**
* Constructs a new Prompt instance.
* @param title - The heading/title of the node.
* @param content - The content/body text of the node. Defaults to an empty string.
* @param children - An array of Prompt children. Defaults to an empty array.
*/
constructor(title: string, content?: string, children?: Prompt[]);
/**
* Recursively finds all descendant nodes (including direct children) whose title matches the given string.
* @param title - The title to search for.
* @param deep - Whether to search recursively in all descendants. Defaults to false (only immediate children).
* @returns An array of all matching Prompt nodes.
*/
getByTitle(title: string, deep?: boolean): Prompt[];
/**
* Adds a child prompt node to the children of this node.
* Supports chainable calls.
* @param ch