@built-in-ai/core
Browser Built-in AI API provider for Vercel AI SDK v5+ (Chrome & Edge)
import { LanguageModelV2, LanguageModelV2CallOptions, LanguageModelV2Content, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2StreamPart, EmbeddingModelV2, EmbeddingModelV2Embedding, ProviderV2 } from '@ai-sdk/provider';
import { TextEmbedder } from '@mediapipe/tasks-text';
import { UIMessage } from 'ai';
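// Note: ambient types referenced below (LanguageModel, LanguageModelCreateOptions,
// LanguageModelMessage, LanguageModelPromptOptions, LanguageModelCreateCoreOptions,
// Availability) are assumed to come from the Chrome Prompt API type definitions
// (e.g. @types/dom-chromium-ai) and are intentionally not imported here.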
type BuiltInAIChatModelId = "text";
interface BuiltInAIChatSettings extends LanguageModelCreateOptions {
/**
* Expected input types for the session, used to enable multimodal inputs.
*/
expectedInputs?: Array<{
type: "text" | "image" | "audio";
languages?: string[];
}>;
}
/**
* Check if the browser supports the built-in AI API
* @returns true if the browser supports the built-in AI API, false otherwise
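* @example
* A typical guard before creating a model:
* ```typescript
* if (doesBrowserSupportBuiltInAI()) {
*   const model = builtInAI();
* }
* ```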
*/
declare function doesBrowserSupportBuiltInAI(): boolean;
/**
* Check if the Prompt API is available
* @deprecated Use `doesBrowserSupportBuiltInAI()` instead for clearer naming
* @returns true if the browser supports the built-in AI API, false otherwise
*/
declare function isBuiltInAIModelAvailable(): boolean;
declare class BuiltInAIChatLanguageModel implements LanguageModelV2 {
readonly specificationVersion = "v2";
readonly modelId: BuiltInAIChatModelId;
readonly provider = "browser-ai";
private readonly config;
private session;
constructor(modelId: BuiltInAIChatModelId, options?: BuiltInAIChatSettings);
readonly supportedUrls: Record<string, RegExp[]>;
private getSession;
private getArgs;
/**
* Generates a complete text response using the browser's built-in Prompt API
* @param options The standardized call options from the AI SDK
* @returns Promise resolving to the generated content with finish reason, usage stats, and any warnings
* @throws {LoadSettingError} When the Prompt API is not available or model needs to be downloaded
* @throws {UnsupportedFunctionalityError} When unsupported features like file input are used
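* @example
* Typically invoked indirectly through the AI SDK; a minimal sketch:
* ```typescript
* import { generateText } from "ai";
* import { builtInAI } from "@built-in-ai/core";
*
* const { text } = await generateText({
*   model: builtInAI(),
*   prompt: "Write a haiku about browsers.",
* });
* ```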
*/
doGenerate(options: LanguageModelV2CallOptions): Promise<{
content: LanguageModelV2Content[];
finishReason: LanguageModelV2FinishReason;
usage: {
inputTokens: undefined;
outputTokens: undefined;
totalTokens: undefined;
};
request: {
body: {
messages: LanguageModelMessage[];
options: LanguageModelPromptOptions & LanguageModelCreateCoreOptions;
};
};
warnings: LanguageModelV2CallWarning[];
}>;
/**
* Check the availability of the built-in AI model
* @returns Promise resolving to an Availability value: "unavailable", "downloadable", "downloading", or "available"
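* @example
* A sketch checking availability before use:
* ```typescript
* const model = builtInAI();
* const status = await model.availability();
* if (status === "unavailable") {
*   console.warn("Built-in AI is not available in this browser");
* }
* ```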
*/
availability(): Promise<Availability>;
/**
* Creates a session with download progress monitoring.
*
* @example
* ```typescript
* const session = await model.createSessionWithProgress(
* (progress) => {
* console.log(`Download progress: ${Math.round(progress * 100)}%`);
* }
* );
* ```
*
* @param onDownloadProgress Optional callback receiving progress values 0-1 during model download
* @returns Promise resolving to a configured LanguageModel session
* @throws {LoadSettingError} When the Prompt API is not available or model is unavailable
*/
createSessionWithProgress(onDownloadProgress?: (progress: number) => void): Promise<LanguageModel>;
/**
* Generates a streaming text response using the browser's built-in Prompt API
* @param options The standardized call options from the AI SDK
* @returns Promise resolving to a readable stream of stream parts plus request metadata
* @throws {LoadSettingError} When the Prompt API is not available or model needs to be downloaded
* @throws {UnsupportedFunctionalityError} When unsupported features like file input are used
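* @example
* Typically invoked indirectly through the AI SDK; a minimal sketch:
* ```typescript
* import { streamText } from "ai";
* import { builtInAI } from "@built-in-ai/core";
*
* const result = streamText({
*   model: builtInAI(),
*   prompt: "Explain the Prompt API in one paragraph.",
* });
* for await (const chunk of result.textStream) {
*   console.log(chunk); // render incrementally in the UI
* }
* ```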
*/
doStream(options: LanguageModelV2CallOptions): Promise<{
stream: ReadableStream<LanguageModelV2StreamPart>;
request: {
body: {
messages: LanguageModelMessage[];
options: LanguageModelPromptOptions & LanguageModelCreateCoreOptions;
};
};
}>;
}
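/**
 * Settings for the MediaPipe-backed text embedding model.
 *
 * @example
 * A sketch overriding the default CDN assets with self-hosted copies (paths are illustrative):
 * ```typescript
 * const embeddingModel = new BuiltInAIEmbeddingModel({
 *   wasmLoaderPath: "/wasm/text_wasm_internal.js",
 *   wasmBinaryPath: "/wasm/text_wasm_internal.wasm",
 *   modelAssetPath: "/models/universal_sentence_encoder.tflite",
 *   delegate: "GPU",
 * });
 * ```
 */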
interface BuiltInAIEmbeddingModelSettings {
/**
* The path to the Wasm loader script.
* @default 'https://pub-ddcfe353995744e89b8002f16bf98575.r2.dev/text_wasm_internal.js'
*/
wasmLoaderPath?: string;
/**
* The path to the Wasm binary. It's about 6 MB before gzip.
* @default 'https://pub-ddcfe353995744e89b8002f16bf98575.r2.dev/text_wasm_internal.wasm'
*/
wasmBinaryPath?: string;
/**
* The path to the model asset file.
* It's about 6.1 MB before gzip.
* @default 'https://pub-ddcfe353995744e89b8002f16bf98575.r2.dev/universal_sentence_encoder.tflite'
*/
modelAssetPath?: string;
/**
* Whether to normalize the returned feature vector with L2 norm. Use this
* option only if the model does not already contain a native L2_NORMALIZATION
* TF Lite Op. In most cases, this is already the case and L2 norm is thus
* achieved through TF Lite inference.
* @default false
*/
l2Normalize?: boolean;
/**
* Whether the returned embedding should be quantized to bytes via scalar
* quantization. Embeddings are implicitly assumed to be unit-norm and
* therefore any dimension is guaranteed to have a value in [-1.0, 1.0]. Use
* the l2_normalize option if this is not the case.
* @default false
*/
quantize?: boolean;
/**
* Overrides the default backend to use for the provided model.
*/
delegate?: "CPU" | "GPU";
}
declare class BuiltInAIEmbeddingModel implements EmbeddingModelV2<string> {
readonly specificationVersion = "v2";
readonly provider = "google-mediapipe";
readonly modelId: string;
readonly supportsParallelCalls = true;
readonly maxEmbeddingsPerCall: undefined;
private settings;
private modelAssetBuffer;
private textEmbedder;
constructor(settings?: BuiltInAIEmbeddingModelSettings);
protected getTextEmbedder: () => Promise<TextEmbedder>;
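/**
 * Embeds the given strings with MediaPipe's TextEmbedder.
 *
 * @example
 * Typically invoked indirectly through the AI SDK; a minimal sketch:
 * ```typescript
 * import { embedMany } from "ai";
 * import { builtInAI } from "@built-in-ai/core";
 *
 * const { embeddings } = await embedMany({
 *   model: builtInAI.textEmbedding("embedding"),
 *   values: ["sunny day at the beach", "rainy afternoon in the city"],
 * });
 * ```
 */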
doEmbed: (options: {
values: string[];
abortSignal?: AbortSignal;
}) => Promise<{
embeddings: Array<EmbeddingModelV2Embedding>;
rawResponse?: Record<PropertyKey, any>;
}>;
}
interface BuiltInAIProvider extends ProviderV2 {
(modelId?: BuiltInAIChatModelId, settings?: BuiltInAIChatSettings): BuiltInAIChatLanguageModel;
/**
* Creates a model for text generation.
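* @example
* A sketch; typically equivalent to calling the provider directly:
* ```typescript
* const model = builtInAI.languageModel("text");
* ```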
*/
languageModel(modelId: BuiltInAIChatModelId, settings?: BuiltInAIChatSettings): BuiltInAIChatLanguageModel;
/**
* Creates a model for text generation.
*/
chat(modelId: BuiltInAIChatModelId, settings?: BuiltInAIChatSettings): BuiltInAIChatLanguageModel;
textEmbedding(modelId: "embedding", settings?: BuiltInAIEmbeddingModelSettings): EmbeddingModelV2<string>;
textEmbeddingModel: (modelId: "embedding", settings?: BuiltInAIEmbeddingModelSettings) => EmbeddingModelV2<string>;
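// The following modalities are not supported by this provider (note the `never` return types).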
imageModel(modelId: string): never;
speechModel(modelId: string): never;
transcriptionModel(modelId: string): never;
}
interface BuiltInAIProviderSettings {
}
/**
* Create a BuiltInAI provider instance.
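* @example
* A minimal sketch:
* ```typescript
* const browserAI = createBuiltInAI();
* const model = browserAI("text");
* ```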
*/
declare function createBuiltInAI(options?: BuiltInAIProviderSettings): BuiltInAIProvider;
/**
* Default BuiltInAI provider instance.
*/
declare const builtInAI: BuiltInAIProvider;
/**
* UI message type for built-in AI features with custom data parts.
*
* Extends base UIMessage to include specific data part schemas
* for built-in AI functionality such as model download progress tracking and notifications.
*
* @example
* // Import and use with useChat hook from @ai-sdk/react
* ```typescript
* import { useChat } from "@ai-sdk/react";
* import { BuiltInAIUIMessage } from "@built-in-ai/core";
*
* const { messages, sendMessage } = useChat<BuiltInAIUIMessage>({
* onData: (dataPart) => {
* if (dataPart.type === 'data-modelDownloadProgress') {
* console.log(`Download: ${dataPart.data.progress}%`);
* }
* if (dataPart.type === 'data-notification') {
* console.log(`${dataPart.data.level}: ${dataPart.data.message}`);
* }
* }
* });
* ```
*
* @see {@link https://v5.ai-sdk.dev/docs/reference/ai-sdk-ui/use-chat | useChat hook documentation}
*/
type BuiltInAIUIMessage = UIMessage<
never, // No custom metadata type
{
/**
* Model download progress data part for tracking browser AI model download status.
* Used to display download progress bars and status messages to users.
*/
modelDownloadProgress: {
/** Current download/initialization status */
status: "downloading" | "complete" | "error";
/** Download progress percentage (0-100), undefined for non-downloading states */
progress?: number;
/** Human-readable status message to display to users */
message: string;
};
/**
* User notification data part for displaying temporary messages and alerts.
* These are typically transient and not persisted in message history.
*/
notification: {
/** The notification message text */
message: string;
/** Notification severity level for styling and priority */
level: "info" | "warning" | "error";
};
}
>;
export { BuiltInAIChatLanguageModel, type BuiltInAIChatSettings, BuiltInAIEmbeddingModel, type BuiltInAIEmbeddingModelSettings, type BuiltInAIProvider, type BuiltInAIProviderSettings, type BuiltInAIUIMessage, builtInAI, createBuiltInAI, doesBrowserSupportBuiltInAI, isBuiltInAIModelAvailable };