/**
 * @react-native-ai/mlc
 * Version: (unspecified in this listing)
 * MLC LLM provider for Vercel AI SDK
 * 68 lines • 2.34 kB — TypeScript
 */
import type { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3FinishReason, LanguageModelV3Prompt, LanguageModelV3StreamPart } from '@ai-sdk/provider';
import { DownloadProgress } from './NativeMLCEngine';
/**
 * MLC provider entry point for the Vercel AI SDK.
 *
 * `languageModel(modelId?)` constructs a {@link MlcChatLanguageModel} bound to
 * the given MLC model id. The id is optional here, so the implementation
 * presumably falls back to a default model — confirm against the source.
 */
export declare const mlc: {
languageModel: (modelId?: string) => MlcChatLanguageModel;
};
/**
 * On-device chat language model backed by the native MLC LLM engine,
 * implementing the Vercel AI SDK `LanguageModelV3` provider contract
 * (specification version "v3", provider id "mlc").
 *
 * Lifecycle: `download` → `prepare` → `doGenerate`/`doStream` → `unload`,
 * with `remove` to delete local model artifacts. The exact runtime semantics
 * of these methods are defined in the implementation, not visible in this
 * declaration file — NOTE(review): descriptions below are inferred from
 * names and signatures; verify against `ai-sdk.ts`.
 */
declare class MlcChatLanguageModel implements LanguageModelV3 {
// AI SDK provider-spec version this model implements.
readonly specificationVersion = "v3";
// No URL-addressed media inputs are supported (empty map).
readonly supportedUrls: {};
// Provider identifier surfaced to the AI SDK.
readonly provider = "mlc";
// MLC model identifier this instance is bound to.
readonly modelId: string;
constructor(modelId: string);
// Presumably loads/initializes the model in the native engine — TODO confirm.
prepare(): Promise<void>;
/**
 * Fetches the model artifacts, reporting progress through the optional
 * callback with `DownloadProgress` events from the native engine module.
 */
download(progressCallback?: (event: DownloadProgress) => void): Promise<void>;
// Presumably releases the loaded model from memory — TODO confirm.
unload(): Promise<void>;
// Presumably deletes downloaded model files from disk — TODO confirm.
remove(): Promise<void>;
// Internal conversion of AI SDK prompt messages to the engine's format (private; untyped here).
private prepareMessages;
/**
 * Non-streaming generation. Resolves with the produced content parts
 * (text and/or tool calls), a finish reason, token usage, and MLC-specific
 * timing/throughput metrics under `providerMetadata.mlc.extraUsage`.
 * `warnings` is always empty (`never[]`) per this declared type.
 */
doGenerate(options: LanguageModelV3CallOptions): Promise<{
content: ({
type: "tool-call";
toolCallId: string;
toolName: string;
input: string;
} | {
type: "text";
text: string;
})[];
finishReason: LanguageModelV3FinishReason;
usage: {
inputTokens: {
total: number;
// Cache accounting is not reported by this provider (always undefined).
noCache: undefined;
cacheRead: undefined;
cacheWrite: undefined;
};
outputTokens: {
total: number;
// Text/reasoning token split is not reported (always undefined).
text: undefined;
reasoning: undefined;
};
};
providerMetadata: {
mlc: {
// Raw engine performance counters (latency, throughput, token counts).
extraUsage: {
ttft_s: number;
prefill_tokens_per_s: number;
prompt_tokens: number;
jump_forward_tokens: number;
completion_tokens: number;
end_to_end_latency_s: number;
prefill_tokens: number;
inter_token_latency_s: number;
decode_tokens_per_s: number;
decode_tokens: number;
};
};
};
warnings: never[];
}>;
/**
 * Streaming generation. Resolves with a `ReadableStream` of AI SDK stream
 * parts plus the raw prompt/settings echoed back for debugging.
 */
doStream(options: LanguageModelV3CallOptions): Promise<{
stream: ReadableStream<LanguageModelV3StreamPart>;
rawCall: {
rawPrompt: LanguageModelV3Prompt;
rawSettings: {};
};
}>;
}
export {};
//# sourceMappingURL=ai-sdk.d.ts.map