/**
 * uniai — unified AI model type declarations.
 *
 * NOTE(review): the original first lines were npm registry page residue
 * ("uniai / Version: / To unify AI models! / 1,563 lines (1,534 loc) • 47.3 kB /
 * text/typescript") pasted as bare text, which is not valid TypeScript.
 * Converted to a comment so the file parses.
 */
import { ChatCompletion, ChatCompletionChunk, EmbeddingCreateParams, CreateEmbeddingResponse, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, ImageGenerateParams, ImagesResponse, ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam, ChatCompletionToolMessageParam, ChatCompletionDeveloperMessageParam, ChatCompletionContentPart } from 'openai/resources';
import * as stream from 'stream';
/** @format */
declare enum ChatModelProvider {
OpenAI = "openai",
Anthropic = "anthropic",
DeepSeek = "deepseek",
IFlyTek = "iflytek",
Baidu = "baidu",
Google = "google",
GLM = "glm",
MoonShot = "moonshot",
AliYun = "aliyun",
XAI = "xai",
Other = "other"
}
declare enum EmbedModelProvider {
OpenAI = "openai",
Google = "google",
GLM = "glm",
AliYun = "aliyun",
Other = "other"
}
declare enum ImagineModelProvider {
OpenAI = "openai",
MidJourney = "midjourney",
StabilityAI = "stability.ai",
IFlyTek = "iflytek"
}
type ModelProvider = ChatModelProvider | EmbedModelProvider | ImagineModelProvider;
declare const ModelProvider: {
OpenAI: ImagineModelProvider.OpenAI;
MidJourney: ImagineModelProvider.MidJourney;
StabilityAI: ImagineModelProvider.StabilityAI;
IFlyTek: ImagineModelProvider.IFlyTek;
Google: EmbedModelProvider.Google;
GLM: EmbedModelProvider.GLM;
AliYun: EmbedModelProvider.AliYun;
Other: EmbedModelProvider.Other;
Anthropic: ChatModelProvider.Anthropic;
DeepSeek: ChatModelProvider.DeepSeek;
Baidu: ChatModelProvider.Baidu;
MoonShot: ChatModelProvider.MoonShot;
XAI: ChatModelProvider.XAI;
};
declare enum OpenAIEmbedModel {
ADA = "text-embedding-ada-002",
LARGE = "text-embedding-3-large",
SMALL = "text-embedding-3-small"
}
declare enum OtherEmbedModel {
BGE_M3 = "bge-m3",
BASE_CHN = "text2vec-base-chinese",
LARGE_CHN = "text2vec-large-chinese",
BASE_CHN_PARAPH = "text2vec-base-chinese-paraphrase",
BASE_CHN_SENTENCE = "text2vec-base-chinese-sentence",
BASE_MUL = "text2vec-base-multilingual",
PARAPH_MUL_MINI = "paraphrase-multilingual-MiniLM-L12-v2"
}
declare enum GLMEmbedModel {
EMBED_2 = "embedding-2",
EMBED_3 = "embedding-3"
}
declare enum GoogleEmbedModel {
GEM_EMBED = "gemini-embedding-exp"
}
declare enum AliEmbedModel {
ALI_V3 = "text-embedding-v3",
ALI_V2 = "text-embedding-v2",
ALI_V1 = "text-embedding-v1",
ALI_ASYNC_V2 = "text-embedding-async-v2",
ALI_ASYNC_V1 = "text-embedding-async-v1"
}
type EmbedModel = OpenAIEmbedModel | OtherEmbedModel | GLMEmbedModel | GoogleEmbedModel | AliEmbedModel;
declare const EmbedModel: {
ALI_V3: AliEmbedModel.ALI_V3;
ALI_V2: AliEmbedModel.ALI_V2;
ALI_V1: AliEmbedModel.ALI_V1;
ALI_ASYNC_V2: AliEmbedModel.ALI_ASYNC_V2;
ALI_ASYNC_V1: AliEmbedModel.ALI_ASYNC_V1;
GEM_EMBED: GoogleEmbedModel.GEM_EMBED;
EMBED_2: GLMEmbedModel.EMBED_2;
EMBED_3: GLMEmbedModel.EMBED_3;
BGE_M3: OtherEmbedModel.BGE_M3;
BASE_CHN: OtherEmbedModel.BASE_CHN;
LARGE_CHN: OtherEmbedModel.LARGE_CHN;
BASE_CHN_PARAPH: OtherEmbedModel.BASE_CHN_PARAPH;
BASE_CHN_SENTENCE: OtherEmbedModel.BASE_CHN_SENTENCE;
BASE_MUL: OtherEmbedModel.BASE_MUL;
PARAPH_MUL_MINI: OtherEmbedModel.PARAPH_MUL_MINI;
ADA: OpenAIEmbedModel.ADA;
LARGE: OpenAIEmbedModel.LARGE;
SMALL: OpenAIEmbedModel.SMALL;
};
declare enum OpenAIChatModel {
GPT3 = "gpt-3.5-turbo",
GPT4 = "gpt-4",
GPT4_TURBO = "gpt-4-turbo",
GPT_4O_MINI = "gpt-4o-mini",
GPT_4_1_MINI = "gpt-4.1-mini",
GPT_4_1_NANO = "gpt-4.1-nano",
GPT_4_1 = "gpt-4.1",
CHAT_GPT_4O = "chatgpt-4o-latest",
GPT_4O = "gpt-4o",
GPT_4O_AUDIO = "gpt-4o-audio-preview",
O1 = "o1",
O1_MINI = "o1-mini",
O1_PRO = "o1-pro",
O3_MINI = "o3-mini"
}
declare enum AnthropicChatModel {
CLAUDE_4_SONNET = "claude-sonnet-4-20250514",
CLAUDE_4_OPUS = "claude-opus-4-20250514",
CLAUDE_3_7_SONNET = "claude-3-7-sonnet-20250219",
CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20241022",
CLAUDE_3_5_HAIKU = "claude-3-5-haiku-20241022",
CLAUDE_3_OPUS = "claude-3-opus-20240229",
CLAUDE_3_SONNET = "claude-3-sonnet-20240229",
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
}
declare enum DeepSeekChatModel {
DEEPSEEK_V3 = "deepseek-chat",
DEEPSEEK_R1 = "deepseek-reasoner"
}
declare enum GoogleChatModel {
GEM_PRO_1_5 = "gemini-1.5-pro",
GEM_FLASH_1_5 = "gemini-1.5-flash",
GEM_FLASH_1_5_8B = "gemini-1.5-flash-8b",
GEM_FLASH_2 = "gemini-2.0-flash",
GEM_FLASH_2_LITE = "gemini-2.0-flash-lite",
GEM_PRO_2_5 = "gemini-2.5-pro",
GEM_FLASH_2_5 = "gemini-2.5-flash",
GEM_FLASH_2_5_LITE = "gemini-2.5-flash-lite"
}
declare enum GLMChatModel {
GLM_3_TURBO = "glm-3-turbo",
GLM_4 = "glm-4",
GLM_4_AIR = "glm-4-air",
GLM_4_AIRX = "glm-4-airx",
GLM_4_FLASH = "glm-4-flash",
GLM_4_FLASHX = "glm-4-flashx",
GLM_4V = "glm-4v",
GLM_4V_PLUS = "glm-4v-plus",
GLM_4_LONG = "glm-4-long",
GLM_4_PLUS = "glm-4-plus"
}
declare enum BaiduChatModel {
ERNIE_3_5 = "completions",// 'ernie-3.5-8k', // ERNIE 3.5 8K
ERNIE_3_5_PRE = "ernie-3.5-8k-preview",// ERNIE 3.5 8K Preview
ERNIE_3_5_128K = "ernie-3.5-128k",// ERNIE 3.5 128K
ERNIE_4_0_LATEST = "ernie-4.0-8k-latest",// ERNIE 4.0 8K Latest
ERNIE_4_0_PREVIEW = "ernie-4.0-8k-preview",// ERNIE 4.0 8K Preview
ERNIE_4_0_8K = "completions_pro",// 'ernie-4.0-8k', // ERNIE 4.0 8K
ERNIE_4_0_TURBO_LATEST = "ernie-4.0-turbo-8k-latest",// ERNIE 4.0 Turbo 8K Latest
ERNIE_4_0_TURBO_PREVIEW = "ernie-4.0-turbo-8k-preview",// ERNIE 4.0 Turbo 8K Preview
ERNIE_4_0_TURBO_8K = "ernie-4.0-turbo-8k",// ERNIE 4.0 Turbo 8K
ERNIE_4_0_TURBO_128K = "ernie-4.0-turbo-128k",// ERNIE 4.0 Turbo 128K
ERNIE_SPEED_8K = "ernie_speed",// ERNIE Speed 8K
ERNIE_SPEED_128K = "ernie-speed-128k",// ERNIE Speed 128K
ERNIE_SPEED_PRO_128K = "ernie-speed-pro-128k",// ERNIE Speed Pro 128K
ERNIE_LITE_8K = "ernie-lite-8k",// ERNIE Lite 8K
ERNIE_LITE_PRO_128K = "ernie-lite-pro-128k",// ERNIE Lite Pro 128K
ERNIE_TINY_8K = "ernie-tiny-8k",// ERNIE Tiny 8K
ERNIE_CHAR_8K = "ernie-char-8k",// ERNIE Character 8K
ERNIE_CHAR_FICTION_8K = "ernie-char-fiction-8k",// ERNIE Character Fiction 8K
ERNIE_NOVEL_8K = "ernie-novel-8k"
}
declare enum IFlyTekChatModel {
SPARK_LITE = "lite",
SPARK_PRO = "generalv3",
SPARK_PRO_128K = "pro-128k",
SPARK_MAX = "generalv3.5",
SPARK_MAX_32K = "max-32k",
SPARK_ULTRA = "4.0Ultra"
}
declare enum MoonShotChatModel {
MOON_V1_8K = "moonshot-v1-8k",
MOON_V1_32K = "moonshot-v1-32k",
MOON_V1_128K = "moonshot-v1-128k"
}
declare enum AliChatModel {
QWEN_MAX = "qwen-max",
QWEN_PLUS = "qwen-plus",
QWEN_TURBO = "qwen-turbo",
QWEN_LONG = "qwen-long",
QWEN_CODE = "qwen-coder-turbo",
QWEN_MATH = "qwen-math-plus",
QWEN_VL_MAX = "qwen-vl-max",
QWEN_VL_PLUS = "qwen-vl-plus"
}
declare enum XAIChatModel {
GROK2 = "grok-2",
GROK2_VISION = "grok-2-vision",
GROK3 = "grok-3",
GROK3_VISION = "grok-3-vision"
}
type ChatModel = OpenAIChatModel | AnthropicChatModel | DeepSeekChatModel | BaiduChatModel | GLMChatModel | IFlyTekChatModel | GoogleChatModel | MoonShotChatModel | AliChatModel | XAIChatModel | string;
declare const ChatModel: {
GROK2: XAIChatModel.GROK2;
GROK2_VISION: XAIChatModel.GROK2_VISION;
GROK3: XAIChatModel.GROK3;
GROK3_VISION: XAIChatModel.GROK3_VISION;
QWEN_MAX: AliChatModel.QWEN_MAX;
QWEN_PLUS: AliChatModel.QWEN_PLUS;
QWEN_TURBO: AliChatModel.QWEN_TURBO;
QWEN_LONG: AliChatModel.QWEN_LONG;
QWEN_CODE: AliChatModel.QWEN_CODE;
QWEN_MATH: AliChatModel.QWEN_MATH;
QWEN_VL_MAX: AliChatModel.QWEN_VL_MAX;
QWEN_VL_PLUS: AliChatModel.QWEN_VL_PLUS;
MOON_V1_8K: MoonShotChatModel.MOON_V1_8K;
MOON_V1_32K: MoonShotChatModel.MOON_V1_32K;
MOON_V1_128K: MoonShotChatModel.MOON_V1_128K;
GPT3: OpenAIChatModel.GPT3;
GPT4: OpenAIChatModel.GPT4;
GPT4_TURBO: OpenAIChatModel.GPT4_TURBO;
GPT_4O_MINI: OpenAIChatModel.GPT_4O_MINI;
GPT_4_1_MINI: OpenAIChatModel.GPT_4_1_MINI;
GPT_4_1_NANO: OpenAIChatModel.GPT_4_1_NANO;
GPT_4_1: OpenAIChatModel.GPT_4_1;
CHAT_GPT_4O: OpenAIChatModel.CHAT_GPT_4O;
GPT_4O: OpenAIChatModel.GPT_4O;
GPT_4O_AUDIO: OpenAIChatModel.GPT_4O_AUDIO;
O1: OpenAIChatModel.O1;
O1_MINI: OpenAIChatModel.O1_MINI;
O1_PRO: OpenAIChatModel.O1_PRO;
O3_MINI: OpenAIChatModel.O3_MINI;
GEM_PRO_1_5: GoogleChatModel.GEM_PRO_1_5;
GEM_FLASH_1_5: GoogleChatModel.GEM_FLASH_1_5;
GEM_FLASH_1_5_8B: GoogleChatModel.GEM_FLASH_1_5_8B;
GEM_FLASH_2: GoogleChatModel.GEM_FLASH_2;
GEM_FLASH_2_LITE: GoogleChatModel.GEM_FLASH_2_LITE;
GEM_PRO_2_5: GoogleChatModel.GEM_PRO_2_5;
GEM_FLASH_2_5: GoogleChatModel.GEM_FLASH_2_5;
GEM_FLASH_2_5_LITE: GoogleChatModel.GEM_FLASH_2_5_LITE;
SPARK_LITE: IFlyTekChatModel.SPARK_LITE;
SPARK_PRO: IFlyTekChatModel.SPARK_PRO;
SPARK_PRO_128K: IFlyTekChatModel.SPARK_PRO_128K;
SPARK_MAX: IFlyTekChatModel.SPARK_MAX;
SPARK_MAX_32K: IFlyTekChatModel.SPARK_MAX_32K;
SPARK_ULTRA: IFlyTekChatModel.SPARK_ULTRA;
GLM_3_TURBO: GLMChatModel.GLM_3_TURBO;
GLM_4: GLMChatModel.GLM_4;
GLM_4_AIR: GLMChatModel.GLM_4_AIR;
GLM_4_AIRX: GLMChatModel.GLM_4_AIRX;
GLM_4_FLASH: GLMChatModel.GLM_4_FLASH;
GLM_4_FLASHX: GLMChatModel.GLM_4_FLASHX;
GLM_4V: GLMChatModel.GLM_4V;
GLM_4V_PLUS: GLMChatModel.GLM_4V_PLUS;
GLM_4_LONG: GLMChatModel.GLM_4_LONG;
GLM_4_PLUS: GLMChatModel.GLM_4_PLUS;
ERNIE_3_5: BaiduChatModel.ERNIE_3_5;
ERNIE_3_5_PRE: BaiduChatModel.ERNIE_3_5_PRE;
ERNIE_3_5_128K: BaiduChatModel.ERNIE_3_5_128K;
ERNIE_4_0_LATEST: BaiduChatModel.ERNIE_4_0_LATEST;
ERNIE_4_0_PREVIEW: BaiduChatModel.ERNIE_4_0_PREVIEW;
ERNIE_4_0_8K: BaiduChatModel.ERNIE_4_0_8K;
ERNIE_4_0_TURBO_LATEST: BaiduChatModel.ERNIE_4_0_TURBO_LATEST;
ERNIE_4_0_TURBO_PREVIEW: BaiduChatModel.ERNIE_4_0_TURBO_PREVIEW;
ERNIE_4_0_TURBO_8K: BaiduChatModel.ERNIE_4_0_TURBO_8K;
ERNIE_4_0_TURBO_128K: BaiduChatModel.ERNIE_4_0_TURBO_128K;
ERNIE_SPEED_8K: BaiduChatModel.ERNIE_SPEED_8K;
ERNIE_SPEED_128K: BaiduChatModel.ERNIE_SPEED_128K;
ERNIE_SPEED_PRO_128K: BaiduChatModel.ERNIE_SPEED_PRO_128K;
ERNIE_LITE_8K: BaiduChatModel.ERNIE_LITE_8K;
ERNIE_LITE_PRO_128K: BaiduChatModel.ERNIE_LITE_PRO_128K;
ERNIE_TINY_8K: BaiduChatModel.ERNIE_TINY_8K;
ERNIE_CHAR_8K: BaiduChatModel.ERNIE_CHAR_8K;
ERNIE_CHAR_FICTION_8K: BaiduChatModel.ERNIE_CHAR_FICTION_8K;
ERNIE_NOVEL_8K: BaiduChatModel.ERNIE_NOVEL_8K;
DEEPSEEK_V3: DeepSeekChatModel.DEEPSEEK_V3;
DEEPSEEK_R1: DeepSeekChatModel.DEEPSEEK_R1;
CLAUDE_4_SONNET: AnthropicChatModel.CLAUDE_4_SONNET;
CLAUDE_4_OPUS: AnthropicChatModel.CLAUDE_4_OPUS;
CLAUDE_3_7_SONNET: AnthropicChatModel.CLAUDE_3_7_SONNET;
CLAUDE_3_5_SONNET: AnthropicChatModel.CLAUDE_3_5_SONNET;
CLAUDE_3_5_HAIKU: AnthropicChatModel.CLAUDE_3_5_HAIKU;
CLAUDE_3_OPUS: AnthropicChatModel.CLAUDE_3_OPUS;
CLAUDE_3_SONNET: AnthropicChatModel.CLAUDE_3_SONNET;
CLAUDE_3_HAIKU: AnthropicChatModel.CLAUDE_3_HAIKU;
};
declare enum MidJourneyImagineModel {
MJ = "midjourney"
}
declare enum OpenAIImagineModel {
DALL_E_2 = "dall-e-2",
DALL_E_3 = "dall-e-3"
}
declare enum StabilityAIImagineModel {
SD_1_6 = "stable-diffusion-v1-6",
SD_XL_1024 = "stable-diffusion-xl-1024-v1-0"
}
declare enum IFlyTekImagineModel {
V2 = "v2.1"
}
declare const ImagineModel: {
V2: IFlyTekImagineModel.V2;
SD_1_6: StabilityAIImagineModel.SD_1_6;
SD_XL_1024: StabilityAIImagineModel.SD_XL_1024;
MJ: MidJourneyImagineModel.MJ;
DALL_E_2: OpenAIImagineModel.DALL_E_2;
DALL_E_3: OpenAIImagineModel.DALL_E_3;
};
type ImagineModel = OpenAIImagineModel | MidJourneyImagineModel | StabilityAIImagineModel | IFlyTekImagineModel;
type ModelModel = ChatModel | ImagineModel | EmbedModel;
declare const ModelModel: {
ALI_V3: AliEmbedModel.ALI_V3;
ALI_V2: AliEmbedModel.ALI_V2;
ALI_V1: AliEmbedModel.ALI_V1;
ALI_ASYNC_V2: AliEmbedModel.ALI_ASYNC_V2;
ALI_ASYNC_V1: AliEmbedModel.ALI_ASYNC_V1;
GEM_EMBED: GoogleEmbedModel.GEM_EMBED;
EMBED_2: GLMEmbedModel.EMBED_2;
EMBED_3: GLMEmbedModel.EMBED_3;
BGE_M3: OtherEmbedModel.BGE_M3;
BASE_CHN: OtherEmbedModel.BASE_CHN;
LARGE_CHN: OtherEmbedModel.LARGE_CHN;
BASE_CHN_PARAPH: OtherEmbedModel.BASE_CHN_PARAPH;
BASE_CHN_SENTENCE: OtherEmbedModel.BASE_CHN_SENTENCE;
BASE_MUL: OtherEmbedModel.BASE_MUL;
PARAPH_MUL_MINI: OtherEmbedModel.PARAPH_MUL_MINI;
ADA: OpenAIEmbedModel.ADA;
LARGE: OpenAIEmbedModel.LARGE;
SMALL: OpenAIEmbedModel.SMALL;
V2: IFlyTekImagineModel.V2;
SD_1_6: StabilityAIImagineModel.SD_1_6;
SD_XL_1024: StabilityAIImagineModel.SD_XL_1024;
MJ: MidJourneyImagineModel.MJ;
DALL_E_2: OpenAIImagineModel.DALL_E_2;
DALL_E_3: OpenAIImagineModel.DALL_E_3;
GROK2: XAIChatModel.GROK2;
GROK2_VISION: XAIChatModel.GROK2_VISION;
GROK3: XAIChatModel.GROK3;
GROK3_VISION: XAIChatModel.GROK3_VISION;
QWEN_MAX: AliChatModel.QWEN_MAX;
QWEN_PLUS: AliChatModel.QWEN_PLUS;
QWEN_TURBO: AliChatModel.QWEN_TURBO;
QWEN_LONG: AliChatModel.QWEN_LONG;
QWEN_CODE: AliChatModel.QWEN_CODE;
QWEN_MATH: AliChatModel.QWEN_MATH;
QWEN_VL_MAX: AliChatModel.QWEN_VL_MAX;
QWEN_VL_PLUS: AliChatModel.QWEN_VL_PLUS;
MOON_V1_8K: MoonShotChatModel.MOON_V1_8K;
MOON_V1_32K: MoonShotChatModel.MOON_V1_32K;
MOON_V1_128K: MoonShotChatModel.MOON_V1_128K;
GPT3: OpenAIChatModel.GPT3;
GPT4: OpenAIChatModel.GPT4;
GPT4_TURBO: OpenAIChatModel.GPT4_TURBO;
GPT_4O_MINI: OpenAIChatModel.GPT_4O_MINI;
GPT_4_1_MINI: OpenAIChatModel.GPT_4_1_MINI;
GPT_4_1_NANO: OpenAIChatModel.GPT_4_1_NANO;
GPT_4_1: OpenAIChatModel.GPT_4_1;
CHAT_GPT_4O: OpenAIChatModel.CHAT_GPT_4O;
GPT_4O: OpenAIChatModel.GPT_4O;
GPT_4O_AUDIO: OpenAIChatModel.GPT_4O_AUDIO;
O1: OpenAIChatModel.O1;
O1_MINI: OpenAIChatModel.O1_MINI;
O1_PRO: OpenAIChatModel.O1_PRO;
O3_MINI: OpenAIChatModel.O3_MINI;
GEM_PRO_1_5: GoogleChatModel.GEM_PRO_1_5;
GEM_FLASH_1_5: GoogleChatModel.GEM_FLASH_1_5;
GEM_FLASH_1_5_8B: GoogleChatModel.GEM_FLASH_1_5_8B;
GEM_FLASH_2: GoogleChatModel.GEM_FLASH_2;
GEM_FLASH_2_LITE: GoogleChatModel.GEM_FLASH_2_LITE;
GEM_PRO_2_5: GoogleChatModel.GEM_PRO_2_5;
GEM_FLASH_2_5: GoogleChatModel.GEM_FLASH_2_5;
GEM_FLASH_2_5_LITE: GoogleChatModel.GEM_FLASH_2_5_LITE;
SPARK_LITE: IFlyTekChatModel.SPARK_LITE;
SPARK_PRO: IFlyTekChatModel.SPARK_PRO;
SPARK_PRO_128K: IFlyTekChatModel.SPARK_PRO_128K;
SPARK_MAX: IFlyTekChatModel.SPARK_MAX;
SPARK_MAX_32K: IFlyTekChatModel.SPARK_MAX_32K;
SPARK_ULTRA: IFlyTekChatModel.SPARK_ULTRA;
GLM_3_TURBO: GLMChatModel.GLM_3_TURBO;
GLM_4: GLMChatModel.GLM_4;
GLM_4_AIR: GLMChatModel.GLM_4_AIR;
GLM_4_AIRX: GLMChatModel.GLM_4_AIRX;
GLM_4_FLASH: GLMChatModel.GLM_4_FLASH;
GLM_4_FLASHX: GLMChatModel.GLM_4_FLASHX;
GLM_4V: GLMChatModel.GLM_4V;
GLM_4V_PLUS: GLMChatModel.GLM_4V_PLUS;
GLM_4_LONG: GLMChatModel.GLM_4_LONG;
GLM_4_PLUS: GLMChatModel.GLM_4_PLUS;
ERNIE_3_5: BaiduChatModel.ERNIE_3_5;
ERNIE_3_5_PRE: BaiduChatModel.ERNIE_3_5_PRE;
ERNIE_3_5_128K: BaiduChatModel.ERNIE_3_5_128K;
ERNIE_4_0_LATEST: BaiduChatModel.ERNIE_4_0_LATEST;
ERNIE_4_0_PREVIEW: BaiduChatModel.ERNIE_4_0_PREVIEW;
ERNIE_4_0_8K: BaiduChatModel.ERNIE_4_0_8K;
ERNIE_4_0_TURBO_LATEST: BaiduChatModel.ERNIE_4_0_TURBO_LATEST;
ERNIE_4_0_TURBO_PREVIEW: BaiduChatModel.ERNIE_4_0_TURBO_PREVIEW;
ERNIE_4_0_TURBO_8K: BaiduChatModel.ERNIE_4_0_TURBO_8K;
ERNIE_4_0_TURBO_128K: BaiduChatModel.ERNIE_4_0_TURBO_128K;
ERNIE_SPEED_8K: BaiduChatModel.ERNIE_SPEED_8K;
ERNIE_SPEED_128K: BaiduChatModel.ERNIE_SPEED_128K;
ERNIE_SPEED_PRO_128K: BaiduChatModel.ERNIE_SPEED_PRO_128K;
ERNIE_LITE_8K: BaiduChatModel.ERNIE_LITE_8K;
ERNIE_LITE_PRO_128K: BaiduChatModel.ERNIE_LITE_PRO_128K;
ERNIE_TINY_8K: BaiduChatModel.ERNIE_TINY_8K;
ERNIE_CHAR_8K: BaiduChatModel.ERNIE_CHAR_8K;
ERNIE_CHAR_FICTION_8K: BaiduChatModel.ERNIE_CHAR_FICTION_8K;
ERNIE_NOVEL_8K: BaiduChatModel.ERNIE_NOVEL_8K;
DEEPSEEK_V3: DeepSeekChatModel.DEEPSEEK_V3;
DEEPSEEK_R1: DeepSeekChatModel.DEEPSEEK_R1;
CLAUDE_4_SONNET: AnthropicChatModel.CLAUDE_4_SONNET;
CLAUDE_4_OPUS: AnthropicChatModel.CLAUDE_4_OPUS;
CLAUDE_3_7_SONNET: AnthropicChatModel.CLAUDE_3_7_SONNET;
CLAUDE_3_5_SONNET: AnthropicChatModel.CLAUDE_3_5_SONNET;
CLAUDE_3_5_HAIKU: AnthropicChatModel.CLAUDE_3_5_HAIKU;
CLAUDE_3_OPUS: AnthropicChatModel.CLAUDE_3_OPUS;
CLAUDE_3_SONNET: AnthropicChatModel.CLAUDE_3_SONNET;
CLAUDE_3_HAIKU: AnthropicChatModel.CLAUDE_3_HAIKU;
};
declare enum MJTaskType {
IMAGINE = "IMAGINE",
UPSCALE = "UPSCALE",
VARIATION = "VARIATION",
REROLL = "REROLL",
DESCRIBE = "DESCRIBE",
BLEND = "BLEND"
}
declare enum DETaskType {
GENERATION = "generations",
EDIT = "edits",
VARIATION = "variation"
}
declare enum SDTaskType {
GENERATION = "generation"
}
declare enum SPKTaskType {
GENERATION = "generation"
}
declare const ImgTaskType: {
GENERATION: SPKTaskType.GENERATION;
EDIT: DETaskType.EDIT;
VARIATION: DETaskType.VARIATION;
IMAGINE: MJTaskType.IMAGINE;
UPSCALE: MJTaskType.UPSCALE;
REROLL: MJTaskType.REROLL;
DESCRIBE: MJTaskType.DESCRIBE;
BLEND: MJTaskType.BLEND;
};
type ImgTaskType = MJTaskType | DETaskType | SDTaskType | SPKTaskType;
declare enum ChatRoleEnum {
SYSTEM = "system",
USER = "user",
ASSISTANT = "assistant",
TOOL = "tool",
DEV = "developer"
}
declare enum GPTChatRoleEnum {
SYSTEM = "system",
USER = "user",
ASSISTANT = "assistant",
DEV = "developer",
TOOL = "tool"
}
declare enum AnthropicChatRoleEnum {
USER = "user",
ASSISTANT = "assistant"
}
declare enum DSChatRoleEnum {
SYSTEM = "system",
USER = "user",
ASSISTANT = "assistant",
TOOL = "tool"
}
declare enum SPKChatRoleEnum {
USER = "user",
ASSISTANT = "assistant",
SYSTEM = "system",
TOOL = "tool"
}
declare enum GLMChatRoleEnum {
SYSTEM = "system",
USER = "user",
ASSISTANT = "assistant",
TOOL = "tool"
}
declare enum GEMChatRoleEnum {
USER = "user",
MODEL = "model"
}
declare enum BDUChatRoleEnum {
USER = "user",
ASSISTANT = "assistant"
}
/** @format */
interface UniAIConfig {
OpenAI?: {
key: string | string[];
proxy?: string;
};
Anthropic?: {
key: string | string[];
proxy?: string;
};
DeepSeek?: {
key: string | string[];
proxy?: string;
};
Google?: {
key: string | string[];
proxy?: string;
};
GLM?: {
key?: string | string[];
proxy?: string;
};
IFlyTek?: {
apiPassword?: string | string[];
appId?: string;
apiKey?: string;
apiSecret?: string;
proxy?: string;
};
Baidu?: {
appId?: string;
apiKey?: string;
secretKey?: string;
proxy?: string;
};
MoonShot?: {
key: string | string[];
proxy?: string;
};
AliYun?: {
key: string | string[];
proxy?: string;
};
MidJourney?: {
proxy: string;
imgProxy?: string;
token?: string;
};
StabilityAI?: {
key: string | string[];
proxy?: string;
};
XAI?: {
key: string | string[];
proxy?: string;
};
Other?: {
api?: string;
key?: string;
};
}
/** @format */
interface ChatMessage {
role: ChatRoleEnum;
content: string;
img?: string;
audio?: string;
tool?: string;
}
interface EmbeddingResponse {
embedding: number[][];
model: EmbedModel;
object: 'embedding';
promptTokens: number;
totalTokens: number;
}
interface ChatResponse {
content: string;
tools?: object[];
promptTokens: number;
completionTokens: number;
totalTokens: number;
model: ChatModel | string;
object: string;
}
interface ChatOption {
stream?: boolean;
provider?: ChatModelProvider;
model?: ChatModel;
top?: number;
temperature?: number;
maxLength?: number;
tools?: {
type: string;
[key: string]: any;
}[];
toolChoice?: string;
}
interface EmbedOption {
provider?: EmbedModelProvider;
model?: EmbedModel;
dimensions?: number;
}
type ModelList = Provider[];
interface Provider {
provider: keyof typeof ModelProvider;
value: ModelProvider;
models: ModelModel[];
}
interface ImagineOption {
provider?: ImagineModelProvider;
model?: ImagineModel;
negativePrompt?: string;
height?: number;
width?: number;
num?: number;
}
interface ImagineResponse {
taskId: string;
time: number;
}
interface TaskResponse {
id: string;
type: ImgTaskType;
imgs: string[];
info: string;
fail: string;
progress: number;
created: number;
model: ImagineModel;
}
/** @format */
interface GPTChatResponse extends ChatCompletion {
}
interface GPTChatStreamResponse extends ChatCompletionChunk {
}
interface OpenAIEmbedRequest extends EmbeddingCreateParams {
}
interface OpenAIEmbedResponse extends CreateEmbeddingResponse {
}
interface GPTChatRequest extends ChatCompletionCreateParamsNonStreaming {
}
interface GPTChatStreamRequest extends ChatCompletionCreateParamsStreaming {
}
interface OpenAIImagineRequest extends ImageGenerateParams {
}
interface OpenAIImagineResponse extends ImagesResponse {
}
type GPTChatMessage = ChatCompletionSystemMessageParam | ChatCompletionUserMessageParam | ChatCompletionAssistantMessageParam | ChatCompletionToolMessageParam | ChatCompletionDeveloperMessageParam;
type GPTImagineSize = '256x256' | '512x512' | '1024x1024' | '1792x1024' | '1024x1792' | null;
/** @format */
interface AnthropicChatRequest {
model: string;
max_tokens: number;
messages: AnthropicChatMessage[];
system?: string;
stream?: boolean;
temperature?: number;
top_p?: number;
tools?: AnthropicTool[];
tool_choice?: AnthropicToolChoice;
}
interface AnthropicChatMessage {
role: AnthropicChatRoleEnum;
content: string | AnthropicContent[];
}
interface AnthropicContent {
type: 'text' | 'image';
text?: string;
source?: {
type: 'base64';
media_type: string;
data: string;
};
}
interface AnthropicTool {
name: string;
description: string;
input_schema: {
type: 'object';
properties: Record<string, any>;
required?: string[];
};
}
interface AnthropicToolChoice {
type: 'auto' | 'any' | 'tool';
name?: string;
}
interface AnthropicChatResponse {
id: string;
type: 'message';
role: 'assistant';
content: AnthropicResponseContent[];
model: string;
stop_reason?: 'end_turn' | 'max_tokens' | 'stop_sequence' | 'tool_use';
stop_sequence?: string;
usage: {
input_tokens: number;
output_tokens: number;
};
}
interface AnthropicResponseContent {
type: 'text' | 'tool_use';
text?: string;
id?: string;
name?: string;
input?: any;
}
interface AnthropicChatStreamResponse {
type: 'message_start' | 'message_delta' | 'content_block_start' | 'content_block_delta' | 'content_block_stop' | 'message_stop';
message?: Partial<AnthropicChatResponse>;
delta?: {
text?: string;
type?: string;
stop_reason?: string;
usage?: {
output_tokens: number;
};
};
content_block?: {
type: 'text' | 'tool_use';
text?: string;
id?: string;
name?: string;
input?: any;
};
index?: number;
}
/** @format */
interface GEMChatRequest {
contents: GEMChatMessage[];
safetySettings?: SafetySetting[];
generationConfig?: GenerationConfig;
system_instruction?: GemSystemInstruction;
}
interface GEMChatMessage {
role: GEMChatRoleEnum;
parts: Part[];
}
interface GemSystemInstruction {
parts: Part;
}
interface GoogleEmbedRequest {
model: string;
content: {
parts: {
text: string;
}[];
};
}
interface GoogleEmbedResponse {
embedding: {
values: number[];
};
}
interface Part {
text?: string;
inline_data?: InlineData;
}
interface InlineData {
mime_type: string;
data: string;
}
interface SafetySetting {
category: string;
threshold: string;
}
interface GenerationConfig {
stopSequences?: string[];
temperature?: number;
maxOutputTokens?: number;
topP?: number;
topK?: number;
}
interface GEMChatResponse {
candidates?: Candidate[];
promptFeedback?: Feedback;
}
interface Candidate {
content?: GEMChatMessage;
finishReason: string;
index: number;
safetyRatings: Rating[];
}
interface Feedback {
blockReason?: string;
safetyRatings: Rating[];
}
interface Rating {
category: string;
probability: string;
}
/** @format */
interface SystemMessage$2 {
role: 'system';
content: string;
name?: string;
}
interface UserMessage$2 {
role: 'user';
content: string;
name?: string;
}
interface AssistantMessage$2 {
role: 'assistant';
content: string | null;
name?: string;
prefix?: boolean;
reasoning_content?: string | null;
}
interface ToolMessage$2 {
role: 'tool';
content: string;
tool_call_id: string;
}
type DSChatMessage = SystemMessage$2 | UserMessage$2 | AssistantMessage$2 | ToolMessage$2;
interface Tool {
type: 'function';
function: object;
}
type ToolChoice$1 = 'none' | 'auto' | 'required';
interface DSChatRequest {
messages: DSChatMessage[];
model: DeepSeekChatModel;
frequency_penalty?: number;
max_tokens?: number;
presence_penalty?: number;
response_format?: 'text' | 'json_object';
stop?: string | string[];
stream?: boolean;
stream_options?: {
include_usage: boolean;
};
temperature?: number;
top_p?: number;
tools?: Tool[];
tool_choice?: ToolChoice$1;
logprobs?: boolean;
top_logprobs?: number;
}
interface ToolCall$1 {
id: string;
type: 'function';
function: {
name: string;
arguments: string;
};
}
interface Delta {
role: 'assistant';
content?: string | null;
reasoning_content?: string | null;
}
interface Message$1 {
role: 'assistant';
content: string | null;
reasoning_content?: string | null;
tool_calls?: ToolCall$1[];
}
interface LogProbContent {
token: string;
logprob: number;
bytes: number[] | null;
}
interface LogProbs {
content: LogProbContent[] | null;
top_logprobs: LogProbContent[];
}
interface Choice$2 {
finish_reason: 'stop' | 'length' | 'content_filter' | 'tool_calls' | 'insufficient_system_resource';
index: number;
message?: Message$1;
delta?: Delta;
logprobs?: LogProbs | null;
}
interface Usage$2 {
completion_tokens: number;
prompt_tokens: number;
prompt_cache_hit_tokens: number;
prompt_cache_miss_tokens: number;
total_tokens: number;
completion_tokens_details?: {
reasoning_tokens: number;
};
}
interface DSChatResponse {
id: string;
choices: Choice$2[];
created: number;
model: string;
system_fingerprint: string;
object: 'chat.completion';
usage?: Usage$2;
}
/** @format */
interface ImageUrlContent {
url: string;
detail?: 'high' | 'low';
}
interface MessageContentText {
type: 'text';
text: string;
}
interface MessageContentImage {
type: 'image_url';
image_url: ImageUrlContent;
}
type MessageContent = MessageContentText | MessageContentImage;
interface GrokChatMessage {
role: 'system' | 'user' | 'assistant';
content: MessageContent[] | string;
}
interface ResponseFormatText {
type: 'text';
}
interface ResponseFormatJson {
type: 'json_object';
}
interface ResponseFormatJsonSchema {
type: 'json_schema';
json_schema: Record<string, any>;
}
type ResponseFormat = ResponseFormatText | ResponseFormatJson | ResponseFormatJsonSchema;
interface ToolFunction {
type: 'function';
function: {
name: string;
};
}
interface GrokTool {
type: 'function';
function: Record<string, any>;
}
type GrokToolChoice = 'none' | 'auto' | 'required' | ToolFunction;
interface StreamOptions {
include_usage: boolean;
}
interface GrokChatRequest {
messages: GrokChatMessage[];
model: XAIChatModel;
deferred?: boolean | null;
frequency_penalty?: number | null;
logit_bias?: Record<string, number> | null;
logprobs?: boolean | null;
max_tokens?: number | null;
n?: number | null;
presence_penalty?: number | null;
response_format?: ResponseFormat | null;
seed?: number | null;
stop?: string[] | null;
stream?: boolean | null;
stream_options?: StreamOptions | null;
temperature?: number | null;
tool_choice?: GrokToolChoice | null;
tools?: GrokTool[] | null;
top_logprobs?: number | null;
top_p?: number | null;
user?: string | null;
}
interface GrokChatResponse {
id: string;
object: 'chat.completion';
created: number;
model: string;
choices: Array<{
index: number;
message: ChoiceMessage;
finish_reason: string;
}>;
usage: Usage$1;
system_fingerprint: string;
}
interface ChoiceDelta {
role?: 'assistant';
content?: string;
}
interface ChoiceMessage {
role: 'assistant';
content: string;
tool_calls?: Array<{
id: string;
function: {
name: string;
arguments: string;
};
type: string;
}>;
refusal: string | null;
}
interface Choice$1 {
index: number;
delta: ChoiceDelta;
}
interface PromptTokensDetails {
text_tokens: number;
audio_tokens: number;
image_tokens: number;
cached_tokens: number;
}
interface Usage$1 {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
prompt_tokens_details: PromptTokensDetails;
}
interface GrokChatResponseChunk {
id: string;
object: 'chat.completion.chunk';
created: number;
model: string;
choices: Choice$1[];
usage: Usage$1;
system_fingerprint: string;
}
/** @format */
interface GLMChatRequest {
model?: GLMChatModel;
messages: GLMChatMessage[];
stream?: boolean;
temperature?: number;
top_p?: number;
max_tokens?: number;
request_id?: string;
do_sample?: boolean;
stop?: string[];
tools?: GLMTool[];
tool_choice?: GLMToolChoice;
}
type GLMToolChoice = 'auto';
interface GLMEmbedRequest {
model: GLMEmbedModel;
input: string | string[];
dimensions?: number;
}
interface GLMEmbedResponse {
model: string;
data: {
index: number;
object: string;
embedding: number[];
}[];
index: number;
object: string;
usage: {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
};
}
interface SystemMessage$1 {
role: 'system';
content: string;
}
interface UserMessage$1 {
role: 'user';
content: string | Array<ChatCompletionContentPart>;
}
interface AssistantMessage$1 {
role: 'assistant';
content?: string;
tool_calls?: ToolCall[];
}
interface ToolCall {
id: string;
type: 'web_search' | 'retrieval' | 'function';
function?: FunctionToolCall;
}
interface FunctionToolCall {
name: string;
arguments: string;
}
interface ToolMessage$1 {
role: 'tool';
content: string;
tool_call_id: string;
}
type GLMChatMessage = SystemMessage$1 | UserMessage$1 | AssistantMessage$1 | ToolMessage$1;
interface GLMTool {
type: 'function' | 'retrieval' | 'web_search';
function?: FunctionTool$1;
retrieval?: RetrievalTool;
web_search?: WebSearchTool$1;
}
interface FunctionTool$1 {
name: string;
description: string;
parameters?: {
type: string;
properties: object;
required?: string[];
};
}
interface RetrievalTool {
knowledge_id: string;
prompt_template?: string;
}
interface WebSearchTool$1 {
enable?: boolean;
search_query?: string;
search_result?: boolean;
}
interface GLMChatResponse {
id: string;
model: string;
object?: string;
created: number;
choices: Choice[];
usage: Usage;
web_search?: WebSearch[];
}
interface Choice {
index: number;
finish_reason: 'stop' | 'tool_calls' | 'length' | 'sensitive' | 'network_error';
delta?: Message;
message?: Message;
}
interface Message {
role: 'assistant';
content: string | null;
tool_calls?: object[];
}
interface Usage {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
}
interface WebSearch {
icon: string;
title: string;
link: string;
media: string;
content: string;
}
/** @format */
interface SPKImagineRequest {
header: {
app_id: string;
uid?: string;
};
parameter: {
chat: {
domain: string;
width: number;
height: number;
};
};
payload: {
message: {
text: {
role: string;
content: string;
}[];
};
};
}
interface SPKImagineResponse {
header: {
code: number;
message: string;
sid: string;
status: number;
};
payload?: {
choices: {
status: number;
seq: number;
text: [
{
content: string;
index: number;
role: string;
}
];
};
};
}
interface SPKTool {
type: 'function' | 'web_search';
function?: FunctionTool;
web_search?: WebSearchTool;
}
interface FunctionTool {
name: string;
description?: string;
parameters?: object;
}
interface WebSearchTool {
enable?: boolean;
show_ref_label?: boolean;
}
interface SystemMessage {
role: 'system';
content: string;
}
interface UserMessage {
role: 'user';
content: string;
}
interface AssistantMessage {
role: 'assistant';
content: string;
}
interface ToolMessage {
role: 'tool';
content: string;
}
type SPKChatMessage = SystemMessage | UserMessage | AssistantMessage | ToolMessage;
/**
* Request Parameters
*/
interface SparkChatRequest {
model: IFlyTekChatModel;
user?: string;
messages: SPKChatMessage[];
temperature?: number;
top_k?: number;
top_p?: number;
presence_penalty?: number;
frequency_penalty?: number;
stream?: boolean;
max_tokens?: number;
response_format?: {
type: 'text' | 'json_object';
};
tools?: SPKTool[];
tool_choice?: SPKToolChoice;
suppress_plugin?: string[];
}
type SPKToolChoice = 'auto' | 'none' | 'required';
/**
* Non-Streaming Response
*/
interface SparkChatResponse {
code: number;
message: string;
sid: string;
choices: Array<{
message?: {
role: 'assistant';
content: string | null;
tool_calls?: object[];
};
delta?: {
role: 'assistant';
content: string | null;
tool_calls?: object[];
};
index: number;
finish_reason: string;
}>;
usage?: {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
};
}
/** @format */
interface BaiduAccessTokenRequest {
grant_type: string;
client_id: string;
client_secret: string;
}
interface BaiduAccessTokenResponse {
access_token: string;
expires_in: number;
error?: string;
error_description?: string;
session_key?: string;
refresh_token?: string;
scope?: string;
session_secret?: string;
}
/** A single message in a Baidu ERNIE conversation. */
interface BaiduChatMessage {
role: 'user' | 'assistant' | 'function';
content: string;
/** Function name — presumably required when role === 'function'; confirm in Baidu docs. */
name?: string;
/** Present when the assistant requested a function call. */
function_call?: FunctionCall;
}
/** A function invocation produced (or echoed back) in a Baidu conversation. */
interface FunctionCall {
name: string;
/** Arguments as a string — presumably JSON-encoded; confirm in Baidu docs. */
arguments: string;
/** Model's stated reasoning for the call, if provided. */
thoughts?: string;
}
/** Request body for Baidu's ERNIE chat-completion endpoint. */
interface BaiduChatRequest {
/** Conversation history, oldest first. */
messages: BaiduChatMessage[];
/** Functions the model may call (the local `Function` interface below, not the global type). */
functions?: Function[];
temperature?: number;
top_p?: number;
penalty_score?: number;
/** When true the server streams incremental chunks. */
stream?: boolean;
/** System prompt — Baidu accepts it separately from `messages`. */
system?: string;
/** Stop sequences. */
stop?: string[];
/** Disable Baidu's built-in web search. */
disable_search?: boolean;
/** Include citations (see `search_info` in the response) in the answer. */
enable_citation?: boolean;
max_output_tokens?: number;
/** End-user identifier. */
user_id?: string;
tool_choice?: ToolChoice;
}
/**
 * Function definition for Baidu ERNIE function calling.
 * NOTE(review): this name shadows the global `Function` type inside this
 * declaration file; it is exported as-is (see the export list), so renaming
 * it would be a breaking API change — flagged rather than fixed.
 */
interface Function {
name: string;
description: string;
/** Argument schema — presumably a JSON Schema object; confirm in Baidu docs. */
parameters: object;
/** Schema of the function's return value. */
responses?: object;
/** Example exchanges demonstrating how the function is used. */
examples?: Example[];
}
/** One example message used to illustrate a function's usage. */
interface Example {
role: 'user' | 'assistant' | 'function';
content: string;
name?: string;
function_call?: FunctionCall;
}
/** Forces the model to call a specific function. */
interface ToolChoice {
type: 'function';
function: Function;
name: string;
}
/**
 * Response from the Baidu ERNIE chat endpoint. Streamed replies populate
 * `sentence_id`/`is_end` per chunk; failures populate `error_code`/`error_msg`.
 */
interface BaiduChatResponse {
id: string;
object: string;
/** Creation time — presumably a Unix timestamp; confirm units in Baidu docs. */
created: number;
/** Chunk index within a streamed reply. */
sentence_id?: number;
/** True on the final chunk of a streamed reply. */
is_end?: boolean;
/** True when the reply was cut short. */
is_truncated: boolean;
finish_reason: 'normal' | 'stop' | 'length' | 'content_filter' | 'function_call';
/** Web-search citations, when search was used. */
search_info?: SearchInfo;
/** Generated text. */
result: string;
/** Server hint that the conversation history should be cleared. */
need_clear_history: boolean;
/** Related to moderation when history must be cleared — TODO confirm semantics. */
ban_round?: number;
usage?: {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
plugins: PluginUsage[];
};
/** Present when the model requests a function call. */
function_call?: FunctionCall;
error_code?: number;
error_msg?: string;
}
/** Container for web-search citations returned by Baidu. */
interface SearchInfo {
search_results: SearchResult[];
}
/** A single cited search hit. */
interface SearchResult {
index: number;
url: string;
title: string;
}
/** Per-plugin token usage reported in Baidu's `usage.plugins`. */
interface PluginUsage {
name: string;
parse_tokens: number;
abstract_tokens: number;
search_tokens: number;
total_tokens: number;
}
/** @format */
/** Request body for submitting a MidJourney imagine task. */
interface MJImagineRequest {
prompt: string;
/** Base64-encoded reference images. */
base64Array?: string[];
/** Callback URL notified on task progress. */
notifyHook?: string;
/** Opaque client state — presumably echoed back in callbacks; confirm against proxy docs. */
state?: string;
}
/** Request body for acting on an existing MidJourney task (e.g. upscale/variation). */
interface MJChangeRequest {
/** Id of the task to act on (see MJTaskResponse.id). */
taskId: string;
action: MJTaskType;
/** Image index the action applies to — TODO confirm whether 0- or 1-based. */
index?: number;
notifyHook?: string;
state?: string;
}
/** Response returned when a MidJourney task is submitted. */
interface MJImagineResponse {
/** Submission status code. */
code: number;
description: string;
/** Task id — use it to poll status via the task endpoint. */
result: string;
properties: {
discordInstanceId: string;
};
}
/** Status and details of a MidJourney task, as returned by the task query endpoint. */
interface MJTaskResponse {
id: string;
properties?: {
notifyHook: string;
discordInstanceId?: string;
flags: number;
messageId: string;
messageHash: string;
nonce: string;
finalPrompt: string;
progressMessageId: string;
};
action: MJTaskType;
status: string;
prompt: string;
/** Prompt translated to English. */
promptEn: string;
description: string;
/** Opaque client state supplied at submission time. */
state: string;
/** Timestamps — presumably epoch milliseconds; confirm units against the API docs. */
submitTime: number;
startTime: number;
finishTime: number;
/** Null until the task has produced an image. */
imageUrl: string | null;
/** Progress indicator string — TODO confirm format (e.g. percentage). */
progress: string;
/** Null unless the task failed. */
failReason: string | null;
}
/** @format */
/** Request body for Stability AI's text-to-image generation endpoint. */
interface StabilityImagineRequest {
height: number;
width: number;
/** Weighted prompts — presumably negative weights act as negative prompts; confirm in docs. */
text_prompts: Array<{
text: string;
weight: number;
}>;
/** Classifier-free guidance scale. */
cfg_scale?: number;
clip_guidance_preset?: 'FAST_BLUE' | 'FAST_GREEN' | 'NONE' | 'SIMPLE' | 'SLOW' | 'SLOWER' | 'SLOWEST';
sampler?: 'DDIM' | 'DDPM' | 'K_DPMPP_2M' | 'K_DPMPP_2S_ANCESTRAL' | 'K_DPM_2' | 'K_DPM_2_ANCESTRAL' | 'K_EULER' | 'K_EULER_ANCESTRAL' | 'K_HEUN' | 'K_LMS';
/** Number of images to generate. */
samples?: number;
seed?: number;
/** Number of diffusion steps. */
steps?: number;
style_preset?: '3d-model' | 'analog-film' | 'anime' | 'cinematic' | 'comic-book' | 'digital-art' | 'enhance' | 'fantasy-art' | 'isometric' | 'line-art' | 'low-poly' | 'modeling-compound' | 'neon-punk' | 'origami' | 'photographic' | 'pixel-art' | 'tile-texture';
/** Engine-specific extra parameters. */
extras?: object;
}
/** Response from Stability AI: one artifact per generated image. */
interface StabilityImagineResponse {
artifacts: [
{
/** Base64-encoded image data. */
base64: string;
finishReason: 'CONTENT_FILTERED' | 'ERROR' | 'SUCCESS';
seed: number;
}
];
}
/**
 * Unified entry point over multiple AI providers: chat completion, text
 * embedding, image generation and image-task management.
 */
declare class UniAI {
/** Resolved configuration, or null when constructed without one. */
config: UniAIConfig | null;
/** All models known to this instance. */
models: ModelList;
/** Models usable for embeddings. */
embedModels: ModelList;
/** Models usable for chat. */
chatModels: ModelList;
/** Models usable for image generation. */
imgModels: ModelList;
// Per-provider client instances (one field per supported provider).
private openai;
private anthropic;
private deepseek;
private google;
private glm;
private fly;
private baidu;
private other;
private moon;
private ali;
private mj;
private xai;
private stability;
constructor(config?: UniAIConfig);
/**
 * Runs a chat completion.
 * @param messages - Conversation messages, or a single user prompt string.
 * @param option - Provider/model selection and sampling options.
 * @returns The full response, or a Readable stream when streaming is enabled.
 */
chat(messages?: ChatMessage[] | string, option?: ChatOption): Promise<ChatResponse | stream.Readable>;
/**
 * Computes embeddings for one or more texts.
 * @param content - A single text or an array of texts to embed.
 */
embedding(content: string | string[], option?: EmbedOption): Promise<EmbeddingResponse>;
/** Generates image(s) from a text prompt. */
imagine(prompt: string, option?: ImagineOption): Promise<ImagineResponse>;
/** Queries image-generation task status; `id` is optional — presumably omitting it lists tasks. */
task(provider: ImagineModelProvider, id?: string): Promise<TaskResponse[]>;
/** Applies an action (e.g. upscale/variation) to an existing image task. */
change(provider: ImagineModelProvider, taskId: string, action: string, index?: number): Promise<ImagineResponse>;
}
/** @format */
/** Minimal mdast-compatible AST node, as produced by Prompt.getAST(). */
interface IAST {
/** Node type — presumably mdast types such as 'root'/'heading'/'text'; confirm in mdast spec. */
type: string;
/** Heading depth, for heading nodes. */
depth?: number;
/** Literal text value, for text nodes. */
value?: string;
/** Child nodes, for container nodes. */
children?: IAST[];
}
/** @format */
/**
 * Represents a node in a recursive Markdown tree structure,
 * containing a title, content, and any number of sub-sections (children).
 */
declare class Prompt {
/** The heading/title of this node. */
title: string;
/** The content/body text of this node. */
content: string;
/** Direct sub-sections of this node. */
children: Prompt[];
/**
 * Constructs a new Prompt instance.
 * @param title - The heading/title of the node.
 * @param content - The content/body text of the node. Defaults to an empty string.
 * @param children - An array of Prompt children. Defaults to an empty array.
 */
constructor(title: string, content?: string, children?: Prompt[]);
/**
 * Recursively finds all descendant nodes (including direct children) whose title matches the given string.
 * @param title - The title to search for.
 * @param deep - Whether to search recursively in all descendants. Defaults to false (only immediate children).
 * @returns An array of all matching Prompt nodes.
 */
getByTitle(title: string, deep?: boolean): Prompt[];
/**
 * Adds a child prompt node to the children of this node.
 * Supports chainable calls.
 * @param child - The Prompt instance to add.
 * @returns This Prompt instance (for chaining).
 */
add(child: Prompt): this;
/**
 * Recursively removes all nodes (including direct and indirect children) whose title matches the given string.
 * @param title - The title of the nodes to remove.
 * @param deep - Whether to remove matching nodes at any depth. Defaults to false (only immediate children).
 * @returns An array of all removed Prompt nodes.
 */
remove(title: string, deep?: boolean): Prompt[];
/**
 * Recursively exports the structure as Markdown text.
 * @param level - The Markdown heading level (used internally for recursion).
 * @returns The generated Markdown string.
 */
toMarkdown(level?: number): string;
/**
 * Creates a deep copy (clone) of this Prompt node, including all descendants.
 * @returns A new Prompt instance that is a deep copy of this node.
 */
clone(): Prompt;
/**
 * Serializes this node and all descendants into a plain JSON object structure.
 * Only the title, content, and children structure are preserved.
 * @returns The JSON representation of this Prompt tree.
 */
toJSON(): object;
/**
 * Converts this Prompt node and all descendants into a Markdown string.
 * @returns The Markdown representation of this Prompt tree.
 */
toString(): string;
/**
 * Deserializes a JSON object (or string) into a Prompt tree.
 * @param jsonObj - The structured data (from toJSON) or a compatible JSON string.
 * @returns The root Prompt instance.
 */
static fromJSON(jsonObj: any): Prompt;
/**
 * Parses a Markdown source string and constructs a Prompt tree representing its structure.
 * Only basic heading recognition (# ... ######) is implemented.
 * @param markdown - The Markdown source string.
 * @returns The root Prompt node parsed from the document.
 */
static fromMarkdown(markdown: string): Prompt;
/**
 * Generates an Abstract Syntax Tree (AST) representing this node and all descendants,
 * compatible with mdast structure.
 * @returns The generated AST object.
 */
getAST(): IAST;
/**
 * Prints the tree structure of this Prompt node and all descendants to the console,
 * with clear indentation and branch markers for visualization.
 * @param indent - The indentation prefix for current level (for internal recursion use).
 * @param isLast - Whether this is the last child node (for internal recursion use).
 */
printTree(indent?: string, isLast?: boolean): void;
}
/** @format */
export { AliChatModel, AliEmbedModel, type AnthropicChatMessage, AnthropicChatModel, type AnthropicChatRequest, type AnthropicChatResponse, AnthropicChatRoleEnum, type AnthropicChatStreamResponse, type AnthropicContent, type AnthropicResponseContent, type AnthropicTool, type AnthropicToolChoice, BDUChatRoleEnum, type BaiduAccessTokenRequest, type BaiduAccessTokenResponse, type BaiduChatMessage, BaiduChatModel, type BaiduChatRequest, type BaiduChatResponse, type ChatMessage, ChatModel, ChatModelProvider, type ChatOption, type ChatResponse, ChatRoleEnum, DETaskType, type DSChatMessage, type DSChatRequest, type DSChatResponse, DSChatRoleEnum, DeepSeekChatModel, EmbedModel, EmbedModelProvider, type EmbedOption, type EmbeddingResponse, type Example, type Function, type FunctionCall, type GEMChatMessage, type GEMChatRequest, type GEMChatResponse, GEMChatRoleEnum, type GLMChatMessage, GLMChatModel, type GLMChatRequest, type GLMChatResponse, GLMChatRoleEnum, GLMEmbedModel, type GLMEmbedRequest, type GLMEmbedResponse, type GLMTool, type GLMToolChoice, type GPTChatMessage, type GPTChatRequest, type GPTChatResponse, GPTChatRoleEnum, type GPTChatStreamRequest, type GPTChatStreamResponse, type GPTImagineSize, type GemSystemInstruction, GoogleChatModel, GoogleEmbedModel, type GoogleEmbedRequest, type GoogleEmbedResponse, type GrokChatMessage, type GrokChatRequest, type GrokChatResponse, type GrokChatResponseChunk, type GrokTool, type GrokToolChoice, IFlyTekChatModel, IFlyTekImagineModel, ImagineModel, ImagineModelProvider, type ImagineOption, type ImagineResponse, ImgTaskType, type MJChangeRequest, type MJImagineRequest, type MJImagineResponse, type MJTaskResponse, MJTaskType, MidJourneyImagineModel, type ModelList, ModelModel, ModelProvider, MoonShotChatModel, OpenAIChatModel, OpenAIEmbedModel, type OpenAIEmbedRequest, type OpenAIEmbedResponse, OpenAIImagineModel, type OpenAIImagineRequest, type OpenAIImagineResponse, OtherEmbedModel, type PluginUsage, Prompt, type Provider, 
SDTaskType, type SPKChatMessage, SPKChatRoleEnum, type SPKImagineRequest, type SPKImagineResponse, SPKTaskType, type SPKTool, type SPKToolChoice, type SearchInfo, type SearchResult, type SparkChatRequest, type SparkChatResponse, StabilityAIImagineModel, type StabilityImagineRequest, type StabilityImagineResponse, type TaskResponse, type ToolChoice, type UniAIConfig, XAIChatModel, UniAI as default };