@blinkdotnew/sdk
Blink TypeScript SDK for client-side applications - Zero-boilerplate CRUD + auth + AI + analytics + notifications for modern SaaS/AI apps
/**
* Core type definitions for Blink SDK
*/
interface BlinkClientConfig {
projectId: string;
authRequired?: boolean;
}
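/**
* Example: creating a client (a minimal sketch; the project ID is a
* placeholder). `createClient` is declared near the end of this file, and the
* examples below reuse this `blink` instance.
*
* ```ts
* import { createClient } from '@blinkdotnew/sdk';
*
* const blink = createClient({
*   projectId: 'my-project-id',
*   authRequired: true
* });
* ```
*/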
interface BlinkUser {
id: string;
email: string;
displayName?: string;
photoURL?: string;
emailVerified?: boolean;
createdAt?: string;
lastSignInAt?: string;
}
interface AuthTokens {
access_token: string;
refresh_token?: string;
token_type: 'Bearer';
expires_in: number;
refresh_expires_in?: number;
issued_at?: number;
}
interface AuthState {
user: BlinkUser | null;
tokens: AuthTokens | null;
isAuthenticated: boolean;
isLoading: boolean;
}
interface FilterOperators {
eq?: any;
neq?: any;
gt?: any;
gte?: any;
lt?: any;
lte?: any;
in?: any[];
not_in?: any[];
like?: string;
ilike?: string;
is?: null | boolean;
not?: any;
}
interface LogicalOperators {
AND?: FilterCondition[];
OR?: FilterCondition[];
}
type FilterCondition = Record<string, any> | FilterOperators | LogicalOperators;
interface QueryOptions {
where?: FilterCondition;
orderBy?: Record<string, 'asc' | 'desc'> | string;
limit?: number;
offset?: number;
cursor?: string;
select?: string[];
}
interface CreateOptions {
returning?: boolean;
}
interface UpdateOptions {
returning?: boolean;
}
interface UpsertOptions {
onConflict?: string;
returning?: boolean;
}
interface TableOperations<T = any> {
create(data: Partial<T>, options?: CreateOptions): Promise<T>;
createMany(data: Partial<T>[], options?: CreateOptions): Promise<T[]>;
upsert(data: Partial<T>, options?: UpsertOptions): Promise<T>;
upsertMany(data: Partial<T>[], options?: UpsertOptions): Promise<T[]>;
get(id: string): Promise<T | null>;
list(options?: QueryOptions): Promise<T[]>;
update(id: string, data: Partial<T>, options?: UpdateOptions): Promise<T>;
updateMany(updates: Array<{
id: string;
} & Partial<T>>, options?: UpdateOptions): Promise<T[]>;
delete(id: string): Promise<void>;
deleteMany(options: {
where: FilterCondition;
}): Promise<void>;
count(options?: {
where?: FilterCondition;
}): Promise<number>;
exists(options: {
where: FilterCondition;
}): Promise<boolean>;
}
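/**
* Example: typed CRUD and querying (a minimal sketch; the `todos` table and
* its fields are illustrative, not part of the SDK).
*
* ```ts
* interface Todo { id: string; title: string; done: boolean; createdAt: string; }
* const todos = blink.db.table<Todo>('todos');
*
* // Create a record, then query with filter operators and pagination
* await todos.create({ title: 'Ship v1', done: false });
* const open = await todos.list({
*   where: { AND: [{ done: { eq: false } }, { createdAt: { gte: '2024-01-01' } }] },
*   orderBy: { createdAt: 'desc' },
*   limit: 20
* });
* const remaining = await todos.count({ where: { done: { eq: false } } });
* ```
*/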
declare class BlinkError extends Error {
code?: string | undefined;
status?: number | undefined;
details?: any;
constructor(message: string, code?: string | undefined, status?: number | undefined, details?: any);
}
interface StorageUploadOptions {
upsert?: boolean;
onProgress?: (percent: number) => void;
}
interface StorageUploadResponse {
publicUrl: string;
}
interface FileObject {
id: string;
name: string;
bucket_id: string;
owner?: string | null;
owner_id?: string | null;
version?: string | null;
created_at: string;
updated_at: string;
last_accessed_at: string;
metadata: {
size: number;
mimetype: string;
cacheControl?: string;
};
user_metadata?: Record<string, any>;
}
interface BlinkStorage {
upload(file: File | Blob | Buffer, path: string, options?: StorageUploadOptions): Promise<StorageUploadResponse>;
download(path: string, options?: {
filename?: string;
}): Promise<StorageDownloadResponse>;
remove(...paths: string[]): Promise<void>;
}
interface StorageDownloadResponse {
downloadUrl: string;
filename: string;
contentType?: string;
size?: number;
}
interface TokenUsage {
promptTokens: number;
completionTokens: number;
totalTokens: number;
}
interface TextContent {
type: 'text';
text: string;
}
interface ImageContent {
type: 'image';
image: string;
}
type MessageContent = TextContent | ImageContent;
interface Message {
role: 'system' | 'user' | 'assistant';
content: string | MessageContent[];
}
interface TextGenerationRequest {
model?: string;
prompt?: string;
messages?: Message[];
stream?: boolean;
search?: boolean;
maxSteps?: number;
experimental_continueSteps?: boolean;
maxTokens?: number;
temperature?: number;
signal?: AbortSignal;
}
interface TextGenerationResponse {
text: string;
finishReason?: 'stop' | 'length' | 'content_filter' | 'tool_calls';
usage?: TokenUsage;
files?: any[];
reasoningDetails?: any[];
toolCalls?: any[];
toolResults?: any[];
warnings?: string[];
request?: {
body?: string;
};
response?: any;
steps?: Array<{
stepType?: string;
text?: string;
finishReason?: string;
usage?: TokenUsage;
}>;
sources?: any[];
providerMetadata?: any;
experimental_providerMetadata?: any;
}
interface ObjectGenerationRequest {
model?: string;
prompt: string;
output?: 'object' | 'array' | 'enum';
schema?: any;
enum?: string[];
stream?: boolean;
signal?: AbortSignal;
}
interface ObjectGenerationResponse {
object: any;
finishReason?: 'stop' | 'length' | 'content_filter';
usage?: TokenUsage;
warnings?: string[];
providerMetadata?: {
openai?: {
reasoningTokens?: number;
acceptedPredictionTokens?: number;
rejectedPredictionTokens?: number;
cachedPromptTokens?: number;
};
};
experimental_providerMetadata?: any;
response?: {
id?: string;
timestamp?: string;
modelId?: string;
headers?: any;
body?: any;
};
request?: {
body?: string;
};
}
interface ImageGenerationRequest {
model?: string;
prompt: string;
images?: string[];
size?: string;
quality?: 'auto' | 'low' | 'medium' | 'high';
background?: 'auto' | 'transparent' | 'opaque';
n?: number;
response_format?: 'url' | 'b64_json';
output_format?: 'png' | 'jpeg' | 'webp';
output_compression?: number;
moderation?: 'auto' | 'low';
signal?: AbortSignal;
}
interface ImageGenerationResponse {
data: Array<{
url?: string;
b64_json?: string;
}>;
}
interface SpeechGenerationRequest {
model?: string;
text: string;
voice?: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
response_format?: 'mp3' | 'opus' | 'aac' | 'flac' | 'wav' | 'pcm';
speed?: number;
signal?: AbortSignal;
}
interface SpeechGenerationResponse {
url: string;
voice: string;
format: string;
mimeType: string;
}
interface TranscriptionRequest {
model?: string;
audio: string | number[] | ArrayBuffer | Uint8Array;
language?: string;
response_format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt';
signal?: AbortSignal;
}
interface TranscriptionResponse {
text: string;
transcript?: string;
segments?: Array<{
id: number;
seek: number;
start: number;
end: number;
text: string;
tokens: number[];
temperature: number;
avg_logprob: number;
compression_ratio: number;
no_speech_prob: number;
}>;
language?: string;
duration?: number;
words?: Array<{
word: string;
start: number;
end: number;
}>;
}
interface BlinkAI {
generateText(options: TextGenerationRequest): Promise<TextGenerationResponse>;
streamText(options: TextGenerationRequest, onChunk: (chunk: string) => void): Promise<TextGenerationResponse>;
generateObject(options: ObjectGenerationRequest): Promise<ObjectGenerationResponse>;
streamObject(options: ObjectGenerationRequest, onPartial: (partial: any) => void): Promise<ObjectGenerationResponse>;
generateImage(options: ImageGenerationRequest): Promise<ImageGenerationResponse>;
modifyImage(options: {
images: string[];
prompt: string;
size?: string;
quality?: "auto" | "low" | "medium" | "high";
n?: number;
background?: "auto" | "transparent" | "opaque";
signal?: AbortSignal;
}): Promise<ImageGenerationResponse>;
generateSpeech(options: SpeechGenerationRequest): Promise<SpeechGenerationResponse>;
transcribeAudio(options: TranscriptionRequest): Promise<TranscriptionResponse>;
}
interface DataExtraction {
chunks: string[];
}
interface ExtractFromUrlRequest {
url: string;
chunking?: boolean;
chunkSize?: number;
}
interface ExtractFromUrlResponse {
chunks?: string[];
text?: string;
}
interface ExtractFromBlobResponse {
chunks?: string[];
text?: string;
}
interface ScrapeRequest {
url: string;
formats?: ('markdown' | 'html' | 'rawHtml' | 'links' | 'extract' | 'metadata')[];
}
interface ScrapeResponse {
markdown?: string;
html?: string;
rawHtml?: string;
links?: Array<{
text: string;
url: string;
type: string;
}>;
extract?: {
title?: string;
description?: string;
headings?: string[];
text?: string;
};
metadata?: {
title?: string;
description?: string;
url?: string;
domain?: string;
favicon?: string;
image?: string;
author?: string;
publishedTime?: string;
modifiedTime?: string;
type?: string;
siteName?: string;
locale?: string;
keywords?: string[];
};
}
interface ScrapeResult {
markdown: string;
html: string;
metadata: {
title: string;
description: string;
url: string;
domain: string;
favicon?: string;
image?: string;
author?: string;
publishedTime?: string;
modifiedTime?: string;
type?: string;
siteName?: string;
locale?: string;
keywords?: string[];
};
links: Array<{
text: string;
url: string;
type: string;
}>;
extract: {
title: string;
description: string;
headings: string[];
text: string;
};
}
interface ScreenshotRequest {
url: string;
fullPage?: boolean;
width?: number;
height?: number;
}
interface ScreenshotResponse {
url: string;
}
interface FetchRequest {
url: string;
method?: string;
headers?: Record<string, string>;
body?: any;
query?: Record<string, string>;
async?: boolean;
}
interface FetchResponse {
status: number;
headers: Record<string, string>;
body: any;
durationMs: number;
}
interface AsyncFetchResponse {
status: 'triggered';
message: string;
}
interface SearchRequest {
q: string;
location?: string;
hl?: string;
tbm?: string;
num?: number;
}
interface SearchResponse {
organic_results: Array<{
position: number;
title: string;
link: string;
snippet: string;
}>;
total_results?: string;
related_searches?: string[];
people_also_ask?: Array<{
question: string;
snippet: string;
link: string;
}>;
local_results?: Array<{
title: string;
address: string;
rating: number;
reviews: number;
phone?: string;
}>;
ads?: Array<{
title: string;
link: string;
snippet: string;
}>;
shopping_results?: Array<{
title: string;
price: string;
source: string;
link: string;
}>;
news_results?: Array<{
title: string;
link: string;
snippet: string;
date: string;
source: string;
}>;
image_results?: Array<{
title: string;
link: string;
original: string;
thumbnail: string;
}>;
}
interface RealtimeMessage {
id: string;
type: string;
data: any;
timestamp: number;
userId?: string;
metadata?: Record<string, any>;
}
interface PresenceUser {
userId: string;
metadata?: Record<string, any>;
joinedAt: number;
lastSeen: number;
}
interface RealtimeChannel {
subscribe(options?: {
userId?: string;
metadata?: Record<string, any>;
}): Promise<void>;
unsubscribe(): Promise<void>;
publish(type: string, data: any, options?: {
userId?: string;
metadata?: Record<string, any>;
}): Promise<string>;
onMessage(callback: (message: RealtimeMessage) => void): () => void;
onPresence(callback: (users: PresenceUser[]) => void): () => void;
getPresence(): Promise<PresenceUser[]>;
getMessages(options?: {
limit?: number;
before?: string;
after?: string;
}): Promise<RealtimeMessage[]>;
isReady(): boolean;
}
interface RealtimeSubscribeOptions {
userId?: string;
metadata?: Record<string, any>;
}
interface RealtimePublishOptions {
userId?: string;
metadata?: Record<string, any>;
}
interface RealtimeGetMessagesOptions {
limit?: number;
before?: string;
after?: string;
}
interface BlinkRealtime {
channel(name: string): RealtimeChannel;
subscribe(channelName: string, callback: (message: RealtimeMessage) => void, options?: RealtimeSubscribeOptions): Promise<() => void>;
publish(channelName: string, type: string, data: any, options?: RealtimePublishOptions): Promise<string>;
presence(channelName: string): Promise<PresenceUser[]>;
onPresence(channelName: string, callback: (users: PresenceUser[]) => void): () => void;
}
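/**
* Example: realtime pub/sub and presence (a minimal sketch; the channel name
* and message type are illustrative).
*
* ```ts
* const room = blink.realtime.channel('room-42');
* await room.subscribe({ userId: 'user_1', metadata: { name: 'Ada' } });
*
* const offMessage = room.onMessage((msg) => console.log(msg.type, msg.data));
* const offPresence = room.onPresence((users) => console.log(`${users.length} online`));
*
* await room.publish('chat', { text: 'hello' });
*
* // Later: detach listeners and leave the channel
* offMessage();
* offPresence();
* await room.unsubscribe();
* ```
*/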
declare class BlinkRealtimeError extends BlinkError {
constructor(message: string, status?: number, details?: any);
}
interface SendEmailAttachment {
filename: string;
url: string;
type?: string;
content?: string;
disposition?: 'attachment' | 'inline';
cid?: string;
}
interface SendEmailRequest {
to: string | string[];
subject: string;
html?: string;
text?: string;
from?: string;
replyTo?: string;
cc?: string | string[];
bcc?: string | string[];
attachments?: SendEmailAttachment[];
}
interface SendEmailResponse {
success: boolean;
messageId: string;
}
interface BlinkNotifications {
email(params: SendEmailRequest): Promise<SendEmailResponse>;
}
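/**
* Example: sending a transactional email (a minimal sketch; addresses and
* content are placeholders).
*
* ```ts
* const { success, messageId } = await blink.notifications.email({
*   to: 'user@example.com',
*   subject: 'Welcome aboard',
*   html: '<h1>Hi!</h1><p>Thanks for signing up.</p>',
*   text: 'Hi! Thanks for signing up.'
* });
* if (success) console.log('Sent:', messageId);
* ```
*/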
/**
* HTTP client for Blink API requests
* Handles authentication, error responses, and request/response processing
*/
interface RequestOptions {
method?: 'GET' | 'POST' | 'PATCH' | 'DELETE';
headers?: Record<string, string>;
body?: any;
searchParams?: Record<string, string>;
signal?: AbortSignal;
}
interface BlinkResponse<T = any> {
data: T;
status: number;
headers: Headers;
}
declare class HttpClient {
private readonly authUrl;
private readonly coreUrl;
readonly projectId: string;
private getToken;
private getValidToken?;
constructor(config: BlinkClientConfig, getToken: () => string | null, getValidToken?: () => Promise<string | null>);
/**
* Make an authenticated request to the Blink API
*/
request<T = any>(path: string, options?: RequestOptions): Promise<BlinkResponse<T>>;
/**
* GET request
*/
get<T = any>(path: string, searchParams?: Record<string, string>): Promise<BlinkResponse<T>>;
/**
* POST request
*/
post<T = any>(path: string, body?: any, headers?: Record<string, string>): Promise<BlinkResponse<T>>;
/**
* PATCH request
*/
patch<T = any>(path: string, body?: any, headers?: Record<string, string>): Promise<BlinkResponse<T>>;
/**
* DELETE request
*/
delete<T = any>(path: string, searchParams?: Record<string, string>): Promise<BlinkResponse<T>>;
/**
* Database-specific requests
*/
dbGet<T = any>(table: string, searchParams?: Record<string, string>): Promise<BlinkResponse<T[]>>;
dbPost<T = any>(table: string, body: any, options?: {
returning?: boolean;
}): Promise<BlinkResponse<T | T[]>>;
dbPatch<T = any>(table: string, body: any, searchParams?: Record<string, string>, options?: {
returning?: boolean;
}): Promise<BlinkResponse<T[]>>;
dbDelete<T = any>(table: string, searchParams?: Record<string, string>, options?: {
returning?: boolean;
}): Promise<BlinkResponse<T[]>>;
dbSql<T = any>(query: string, params?: any[]): Promise<BlinkResponse<{
rows: T[];
columns: string[];
rowCount: number;
executionTime: number;
}>>;
dbBatch<T = any>(statements: Array<{
sql: string;
args?: any[];
}>, mode?: 'read' | 'write'): Promise<BlinkResponse<{
results: Array<{
rows: T[];
columns: string[];
rowCount: number;
}>;
executionTime: number;
success: boolean;
}>>;
/**
* Upload file with progress tracking
*/
uploadFile(path: string, file: File | Blob | Buffer, filePath: string, options?: {
upsert?: boolean;
onProgress?: (percent: number) => void;
contentType?: string;
}): Promise<BlinkResponse<any>>;
/**
* Upload with progress tracking using XMLHttpRequest
*/
private uploadWithProgress;
/**
* AI-specific requests
*/
aiText(prompt: string, options?: {
model?: string;
messages?: Array<{
role: string;
content: string | any[];
}>;
stream?: boolean;
search?: boolean;
maxSteps?: number;
experimental_continueSteps?: boolean;
maxTokens?: number;
temperature?: number;
signal?: AbortSignal;
}): Promise<BlinkResponse<any>>;
/**
* Stream AI text generation with Vercel AI SDK data stream format
*/
streamAiText(prompt: string, options: {
model?: string | undefined;
messages?: {
role: string;
content: string | any[];
}[] | undefined;
search?: boolean | undefined;
maxSteps?: number | undefined;
experimental_continueSteps?: boolean | undefined;
maxTokens?: number | undefined;
temperature?: number | undefined;
signal?: AbortSignal | undefined;
} | undefined, onChunk: (chunk: string) => void): Promise<any>;
aiObject(prompt: string, options?: {
model?: string;
output?: 'object' | 'array' | 'enum';
schema?: any;
enum?: string[];
stream?: boolean;
signal?: AbortSignal;
}): Promise<BlinkResponse<any>>;
/**
* Stream AI object generation with Vercel AI SDK data stream format
*/
streamAiObject(prompt: string, options: {
model?: string | undefined;
output?: "object" | "array" | "enum" | undefined;
schema?: any;
enum?: string[] | undefined;
signal?: AbortSignal | undefined;
} | undefined, onPartial: (partial: any) => void): Promise<any>;
aiImage(prompt: string, options?: {
model?: string;
images?: string[];
size?: string;
quality?: 'auto' | 'low' | 'medium' | 'high';
background?: 'auto' | 'transparent' | 'opaque';
n?: number;
response_format?: 'url' | 'b64_json';
output_format?: 'png' | 'jpeg' | 'webp';
output_compression?: number;
moderation?: 'auto' | 'low';
signal?: AbortSignal;
}): Promise<BlinkResponse<any>>;
aiSpeech(text: string, options?: {
model?: string;
voice?: string;
response_format?: string;
speed?: number;
signal?: AbortSignal;
}): Promise<BlinkResponse<any>>;
aiTranscribe(audio: string | number[] | ArrayBuffer | Uint8Array, options?: {
model?: string;
language?: string;
response_format?: string;
signal?: AbortSignal;
}): Promise<BlinkResponse<any>>;
/**
* Data-specific requests
*/
dataExtractFromUrl(projectId: string, request: ExtractFromUrlRequest): Promise<BlinkResponse<ExtractFromUrlResponse>>;
dataExtractFromBlob(projectId: string, file: File, chunking?: boolean, chunkSize?: number): Promise<BlinkResponse<ExtractFromBlobResponse>>;
dataScrape(projectId: string, request: ScrapeRequest): Promise<BlinkResponse<ScrapeResponse>>;
dataScreenshot(projectId: string, request: ScreenshotRequest): Promise<BlinkResponse<ScreenshotResponse>>;
dataFetch(projectId: string, request: FetchRequest): Promise<BlinkResponse<FetchResponse | AsyncFetchResponse>>;
dataSearch(projectId: string, request: SearchRequest): Promise<BlinkResponse<SearchResponse>>;
/**
* Realtime-specific requests
*/
realtimePublish(projectId: string, request: {
channel: string;
type: string;
data: any;
userId?: string;
metadata?: Record<string, any>;
}): Promise<BlinkResponse<{
messageId: string;
channel: string;
timestamp: number;
}>>;
realtimeGetPresence(projectId: string, channel: string): Promise<BlinkResponse<{
channel: string;
users: any[];
count: number;
}>>;
realtimeGetMessages(projectId: string, options: {
channel: string;
limit?: number;
start?: string;
end?: string;
}): Promise<BlinkResponse<{
channel: string;
messages: any[];
count: number;
hasMore: boolean;
}>>;
/**
* Private helper methods
*/
private buildUrl;
private parseResponse;
private handleErrorResponse;
/**
* Parse Vercel AI SDK data stream format
* Handles text chunks (0:"text"), partial objects (2:[...]), and metadata (d:, e:)
*/
private parseDataStream;
}
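/**
* Note: HttpClient is the shared transport used by the modules in this file;
* a client created via `createClient` presumably constructs it internally. A
* direct construction sketch follows (the `/health` path is a hypothetical
* endpoint, not a documented API).
*
* ```ts
* const http = new HttpClient(
*   { projectId: 'my-project-id' },
*   () => blink.auth.getToken(),
*   () => blink.auth.getValidToken()
* );
* const res = await http.get<{ status: string }>('/health'); // hypothetical path
* console.log(res.status, res.data);
* ```
*/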
/**
* Blink Auth Module - Client-side authentication management
* Handles token storage, user state, and authentication flows
*/
type AuthStateChangeCallback = (state: AuthState) => void;
declare class BlinkAuth {
private config;
private authState;
private listeners;
private readonly authUrl;
private parentWindowTokens;
private isIframe;
private initializationPromise;
private isInitialized;
constructor(config: BlinkClientConfig);
/**
* Wait for authentication initialization to complete
*/
private waitForInitialization;
/**
* Setup listener for tokens from parent window
*/
private setupParentWindowListener;
/**
* Initialize authentication from stored tokens or URL fragments
*/
initialize(): Promise<void>;
/**
* Redirect to Blink auth page
*/
login(nextUrl?: string): void;
/**
* Logout and clear stored tokens
*/
logout(redirectUrl?: string): void;
/**
* Check if user is authenticated
*/
isAuthenticated(): boolean;
/**
* Get current user (sync)
*/
currentUser(): BlinkUser | null;
/**
* Get current access token
*/
getToken(): string | null;
/**
* Check if access token is expired based on timestamp
*/
private isAccessTokenExpired;
/**
* Check if refresh token is expired based on timestamp
*/
private isRefreshTokenExpired;
/**
* Get a valid access token, refreshing if necessary
*/
getValidToken(): Promise<string | null>;
/**
* Fetch current user profile from API
* Gracefully waits for auth initialization to complete before throwing errors
*/
me(): Promise<BlinkUser>;
/**
* Update user profile
*/
updateMe(updates: Partial<BlinkUser>): Promise<BlinkUser>;
/**
* Manually set tokens (for server-side usage)
*/
setToken(jwt: string, persist?: boolean): Promise<void>;
/**
* Refresh access token using refresh token
*/
refreshToken(): Promise<boolean>;
/**
* Add auth state change listener
*/
onAuthStateChanged(callback: AuthStateChangeCallback): () => void;
/**
* Private helper methods
*/
private validateStoredTokens;
private setTokens;
private clearTokens;
private getStoredTokens;
private extractTokensFromUrl;
private clearUrlTokens;
private redirectToAuth;
private setLoading;
private updateAuthState;
}
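/**
* Example: reacting to auth state and reading the current user (a minimal
* sketch; `renderApp` and `renderLogin` are placeholder UI hooks).
*
* ```ts
* const unsubscribe = blink.auth.onAuthStateChanged((state) => {
*   if (state.isLoading) return;
*   state.isAuthenticated ? renderApp(state.user) : renderLogin();
* });
*
* if (blink.auth.isAuthenticated()) {
*   const me = await blink.auth.me();
*   await blink.auth.updateMe({ displayName: 'Ada Lovelace' });
* } else {
*   blink.auth.login(window.location.href); // redirect back here after auth
* }
* ```
*/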
/**
* Blink Database Module - Table operations and query interface
* Provides CRUD operations with a PostgREST-compatible API
*/
declare class BlinkTable<T = any> implements TableOperations<T> {
private tableName;
private httpClient;
private readonly actualTableName;
constructor(tableName: string, httpClient: HttpClient);
/**
* Create a single record
*/
create(data: Partial<T>, options?: CreateOptions): Promise<T>;
/**
* Create multiple records
*/
createMany(data: Partial<T>[], options?: CreateOptions): Promise<T[]>;
/**
* Upsert a single record (insert or update on conflict)
*/
upsert(data: Partial<T>, options?: UpsertOptions): Promise<T>;
/**
* Upsert multiple records
*/
upsertMany(data: Partial<T>[], options?: UpsertOptions): Promise<T[]>;
/**
* Get a single record by ID
*/
get(id: string): Promise<T | null>;
/**
* List records with filtering, sorting, and pagination
*/
list(options?: QueryOptions): Promise<T[]>;
/**
* Update a single record by ID
*/
update(id: string, data: Partial<T>, options?: UpdateOptions): Promise<T>;
/**
* Update multiple records
*/
updateMany(updates: Array<{
id: string;
} & Partial<T>>, options?: UpdateOptions): Promise<T[]>;
/**
* Delete a single record by ID
*/
delete(id: string): Promise<void>;
/**
* Delete multiple records based on filter
*/
deleteMany(options: {
where: FilterCondition;
}): Promise<void>;
/**
* Count records matching filter
*/
count(options?: {
where?: FilterCondition;
}): Promise<number>;
/**
* Check if any records exist matching filter
*/
exists(options: {
where: FilterCondition;
}): Promise<boolean>;
/**
* Raw SQL query on this table (for advanced use cases)
*/
sql<R = any>(query: string, params?: any[]): Promise<{
rows: R[];
columns: string[];
rowCount: number;
executionTime: number;
}>;
/**
* Private helper methods
*/
private extractCursor;
}
declare class BlinkDatabase {
private httpClient;
private tables;
constructor(httpClient: HttpClient);
/**
* Get a table instance for any table name
*/
table<T = any>(tableName: string): BlinkTable<T>;
/**
* Execute raw SQL query
*/
sql<T = any>(query: string, params?: any[]): Promise<{
rows: T[];
columns: string[];
rowCount: number;
executionTime: number;
}>;
/**
* Execute batch SQL operations
*/
batch<T = any>(statements: Array<{
sql: string;
args?: any[];
}>, mode?: 'read' | 'write'): Promise<{
results: Array<{
rows: T[];
columns: string[];
rowCount: number;
}>;
executionTime: number;
success: boolean;
}>;
}
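/**
* Example: raw SQL and batched statements (a minimal sketch; the schema is
* illustrative and positional `?` placeholders are assumed).
*
* ```ts
* const { rows, rowCount } = await blink.db.sql<{ id: string; title: string }>(
*   'SELECT id, title FROM todos WHERE done = ? LIMIT ?', [false, 10]
* );
*
* const batch = await blink.db.batch([
*   { sql: 'UPDATE todos SET done = ? WHERE id = ?', args: [true, 'todo_1'] },
*   { sql: 'DELETE FROM todos WHERE done = ?', args: [true] }
* ], 'write');
* console.log(batch.success, batch.executionTime);
* ```
*/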
interface BlinkData {
extractFromUrl(url: string, options?: {
chunking?: boolean;
chunkSize?: number;
}): Promise<string | string[]>;
extractFromBlob(file: File, options?: {
chunking?: boolean;
chunkSize?: number;
}): Promise<string | string[]>;
scrape(url: string): Promise<ScrapeResult>;
screenshot(url: string, options?: {
fullPage?: boolean;
width?: number;
height?: number;
}): Promise<string>;
fetch(request: FetchRequest): Promise<FetchResponse>;
fetchAsync(request: Omit<FetchRequest, 'async'>): Promise<AsyncFetchResponse>;
search(query: string, options?: {
location?: string;
type?: 'news' | 'images' | 'image' | 'videos' | 'video' | 'shopping' | 'shop';
language?: string;
limit?: number;
}): Promise<SearchResponse>;
}
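/**
* Example: web data helpers (a minimal sketch; URLs are placeholders).
*
* ```ts
* // Scrape a page into markdown plus metadata
* const page = await blink.data.scrape('https://example.com/blog/post');
* console.log(page.metadata.title, page.markdown.slice(0, 200));
*
* // Extract document text in chunks suitable for embeddings
* const chunks = await blink.data.extractFromUrl('https://example.com/whitepaper.pdf', {
*   chunking: true,
*   chunkSize: 1000
* });
*
* // Search the web
* const { organic_results } = await blink.data.search('typescript sdk design', { limit: 5 });
* ```
*/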
declare class BlinkDataImpl implements BlinkData {
private httpClient;
private projectId;
constructor(httpClient: HttpClient, projectId: string);
extractFromUrl(url: string, options?: {
chunking?: boolean;
chunkSize?: number;
}): Promise<string | string[]>;
extractFromBlob(file: File, options?: {
chunking?: boolean;
chunkSize?: number;
}): Promise<string | string[]>;
scrape(url: string): Promise<ScrapeResult>;
screenshot(url: string, options?: {
fullPage?: boolean;
width?: number;
height?: number;
}): Promise<string>;
fetch(request: FetchRequest): Promise<FetchResponse>;
fetchAsync(request: Omit<FetchRequest, 'async'>): Promise<AsyncFetchResponse>;
search(query: string, options?: {
location?: string;
type?: 'news' | 'images' | 'image' | 'videos' | 'video' | 'shopping' | 'shop';
language?: string;
limit?: number;
}): Promise<SearchResponse>;
}
/**
* Blink Analytics Module
* Provides automatic pageview tracking and custom event logging
*/
interface AnalyticsEvent {
type: string;
timestamp?: string;
user_id?: string | null;
user_email?: string | null;
session_id?: string | null;
pathname?: string | null;
referrer?: string | null;
screen_width?: number | null;
channel?: string | null;
utm_source?: string | null;
utm_medium?: string | null;
utm_campaign?: string | null;
utm_content?: string | null;
utm_term?: string | null;
[key: string]: any;
}
interface BlinkAnalytics {
log(eventName: string, data?: Record<string, any>): void;
disable(): void;
enable(): void;
isEnabled(): boolean;
setUserId(userId: string | null): void;
setUserEmail(email: string | null): void;
clearAttribution(): void;
destroy(): void;
}
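/**
* Example: custom event logging (a minimal sketch; the event name and fields
* are illustrative). Pageviews are tracked automatically.
*
* ```ts
* blink.analytics.setUserId('user_1');
* blink.analytics.log('checkout_completed', { plan: 'pro', value: 49 });
*
* // Honor an opt-out preference
* blink.analytics.disable();
* console.log(blink.analytics.isEnabled()); // false
* ```
*/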
declare class BlinkAnalyticsImpl implements BlinkAnalytics {
private httpClient;
private projectId;
private queue;
private timer;
private enabled;
private userId;
private userEmail;
private hasTrackedPageview;
private utmParams;
private persistedAttribution;
constructor(httpClient: HttpClient, projectId: string);
/**
* Log a custom analytics event
*/
log(eventName: string, data?: Record<string, any>): void;
/**
* Disable analytics tracking
*/
disable(): void;
/**
* Cleanup analytics instance (remove from global tracking)
*/
destroy(): void;
/**
* Enable analytics tracking
*/
enable(): void;
/**
* Check if analytics is enabled
*/
isEnabled(): boolean;
/**
* Set the user ID for analytics events
*/
setUserId(userId: string | null): void;
/**
* Set the user email for analytics events
*/
setUserEmail(email: string | null): void;
/**
* Clear persisted attribution data
*/
clearAttribution(): void;
private buildEvent;
private sanitizeData;
private enqueue;
private flush;
private clearTimer;
private getOrCreateSessionId;
private createNewSession;
private loadQueue;
private persistQueue;
private trackPageview;
private setupRouteChangeListener;
private setupUnloadListener;
private captureUTMParams;
private loadPersistedAttribution;
private persistAttribution;
private detectChannel;
}
/**
* Blink Client - Main SDK entry point
* Factory function and client class for the Blink SDK
*/
interface BlinkClient {
auth: BlinkAuth;
db: BlinkDatabase;
storage: BlinkStorage;
ai: BlinkAI;
data: BlinkData;
realtime: BlinkRealtime;
notifications: BlinkNotifications;
analytics: BlinkAnalytics;
}
/**
* Create a new Blink client instance
*/
declare function createClient(config: BlinkClientConfig): BlinkClient;
/**
* Blink Storage Module
* Handles file upload, download, and removal
*/
declare class BlinkStorageImpl implements BlinkStorage {
private httpClient;
constructor(httpClient: HttpClient);
/**
* Upload a file to project storage
*
* @param file - File, Blob, or Buffer to upload
* @param path - Destination path within project storage (extension will be auto-corrected to match file type)
* @param options - Upload options including upsert and progress callback
* @returns Promise resolving to upload response with public URL
*
* @example
* ```ts
* // Extension automatically corrected to match actual file type
* const { publicUrl } = await blink.storage.upload(
* pngFile,
* `avatars/${user.id}`, // No extension needed!
* { upsert: true }
* );
* // If file is PNG, final path will be: avatars/user123.png
*
* // Or with extension (will be corrected if wrong)
* const { publicUrl } = await blink.storage.upload(
* pngFile,
* `avatars/${user.id}.jpg`, // Wrong extension
* { upsert: true }
* );
* // Final path will be: avatars/user123.png (auto-corrected!)
* ```
*/
upload(file: File | Blob | Buffer, path: string, options?: StorageUploadOptions): Promise<StorageUploadResponse>;
/**
* Detect file type from actual file content and correct path extension
* This ensures the path extension always matches the actual file type
*/
private detectFileTypeAndCorrectPath;
/**
* Get the first few bytes of a file to analyze its signature
*/
private getFileSignature;
/**
* Detect file type from file signature (magic numbers)
* This is the most reliable way to detect actual file type
*/
private detectFileTypeFromSignature;
/**
* Get file extension from MIME type as fallback
*/
private getExtensionFromMimeType;
/**
* Get a download URL for a file that triggers browser download
*
* @param path - Path to the file in project storage
* @param options - Download options including custom filename
* @returns Promise resolving to download response with download URL
*
* @example
* ```ts
* // Download with original filename
* const { downloadUrl, filename } = await blink.storage.download('images/photo.jpg');
* window.open(downloadUrl, '_blank');
*
* // Download with custom filename
* const { downloadUrl } = await blink.storage.download(
* 'images/photo.jpg',
* { filename: 'my-photo.jpg' }
* );
*
* // Create download link in React
* <a href={downloadUrl} download={filename}>Download Image</a>
* ```
*/
download(path: string, options?: {
filename?: string;
}): Promise<StorageDownloadResponse>;
/**
* Remove one or more files from project storage
*
* @param paths - File paths to remove
* @returns Promise that resolves when files are removed
*
* @example
* ```ts
* await blink.storage.remove('avatars/user1.png');
* await blink.storage.remove('file1.pdf', 'file2.pdf', 'file3.pdf');
* ```
*/
remove(...paths: string[]): Promise<void>;
}
/**
* Blink AI Module
* Provides AI generation capabilities with Vercel AI SDK compatibility
*/
declare class BlinkAIImpl implements BlinkAI {
private httpClient;
constructor(httpClient: HttpClient);
private readonly SUPPORTED_IMAGE_FORMATS;
/**
* Validates if a URL is a valid HTTPS image URL
*/
private validateImageUrl;
/**
* Validates messages for image content
*/
private validateMessages;
/**
* Get MIME type for audio format
*/
private getMimeTypeForFormat;
/**
* Generates a text response using the Blink AI engine.
*
* @param options - An object containing either:
* - `prompt`: a simple string prompt
* - OR `messages`: an array of chat messages for conversation
* - Plus optional model, search, maxSteps, experimental_continueSteps, maxTokens, temperature, signal parameters
*
* @example
* ```ts
* // Simple prompt
* const { text } = await blink.ai.generateText({
* prompt: "Write a poem about coding"
* });
*
* // Chat messages (text only)
* const { text } = await blink.ai.generateText({
* messages: [
* { role: "system", content: "You are a helpful assistant" },
* { role: "user", content: "Explain quantum computing" }
* ]
* });
*
* // With image content
* const { text } = await blink.ai.generateText({
* messages: [
* {
* role: "user",
* content: [
* { type: "text", text: "What do you see in this image?" },
* { type: "image", image: "https://example.com/photo.jpg" }
* ]
* }
* ]
* });
*
* // Mixed content with multiple images
* const { text } = await blink.ai.generateText({
* messages: [
* {
* role: "user",
* content: [
* { type: "text", text: "Compare these two images:" },
* { type: "image", image: "https://example.com/image1.jpg" },
* { type: "image", image: "https://example.com/image2.jpg" }
* ]
* }
* ]
* });
*
* // With options
* const { text, usage } = await blink.ai.generateText({
* prompt: "Summarize this article",
* model: "gpt-4o-mini",
* maxTokens: 150,
* temperature: 0.7
* });
*
* // With web search (OpenAI models only)
* const { text, sources } = await blink.ai.generateText({
* prompt: "What are the latest developments in AI?",
* model: "gpt-4o-mini",
* search: true // Enables web search
* });
*
* // With advanced multi-step configuration
* const { text } = await blink.ai.generateText({
* prompt: "Research and analyze recent tech trends",
* model: "gpt-4o",
* search: true,
* maxSteps: 10, // Allow up to 10 reasoning steps
* experimental_continueSteps: true // Enable continued reasoning
* });
* ```
*
* @returns Promise<TextGenerationResponse> - Object containing:
* - `text`: Generated text string
* - `usage`: Token usage information
* - `finishReason`: Why generation stopped ("stop", "length", etc.)
*/
generateText(options: TextGenerationRequest): Promise<TextGenerationResponse>;
/**
* Streams text generation with real-time updates as the AI generates content.
*
* @param options - Same as generateText: either `prompt` or `messages` with optional parameters including search, maxSteps, experimental_continueSteps
* @param onChunk - Callback function that receives each text chunk as it's generated
*
* @example
* ```ts
* // Stream with prompt
* await blink.ai.streamText(
* { prompt: "Write a short story about space exploration" },
* (chunk) => {
* process.stdout.write(chunk); // Real-time output
* }
* );
*
* // Stream with messages
* await blink.ai.streamText(
* {
* messages: [
* { role: "system", content: "You are a creative writer" },
* { role: "user", content: "Write a haiku about programming" }
* ]
* },
* (chunk) => updateUI(chunk)
* );
* ```
*
* @returns Promise<TextGenerationResponse> - Final complete response with full text and metadata
*/
streamText(options: TextGenerationRequest, onChunk: (chunk: string) => void): Promise<TextGenerationResponse>;
/**
* Generates structured JSON objects using AI with schema validation.
*
* @param options - Object containing:
* - `prompt`: Description of what object to generate (required)
* - `schema`: JSON Schema to validate the generated object
* - `output`: Type of output ("object", "array", "enum")
* - `enum`: Array of allowed values for enum output
* - Plus optional model, signal parameters
*
* @example
* ```ts
* // Generate user profile
* const { object } = await blink.ai.generateObject({
* prompt: "Generate a user profile for a software developer",
* schema: {
* type: "object",
* properties: {
* name: { type: "string" },
* age: { type: "number" },
* skills: { type: "array", items: { type: "string" } },
* experience: { type: "number" }
* },
* required: ["name", "skills"]
* }
* });
*
* // Generate array of items
* const { object } = await blink.ai.generateObject({
* prompt: "List 5 programming languages",
* output: "array",
* schema: {
* type: "array",
* items: { type: "string" }
* }
* });
*
* // Generate enum value
* const { object } = await blink.ai.generateObject({
* prompt: "Choose the best programming language for web development",
* output: "enum",
* enum: ["JavaScript", "Python", "TypeScript", "Go"]
* });
* ```
*
* @returns Promise<ObjectGenerationResponse> - Object containing:
* - `object`: The generated and validated JSON object/array/enum
* - `usage`: Token usage information
* - `finishReason`: Why generation stopped
*/
generateObject(options: ObjectGenerationRequest): Promise<ObjectGenerationResponse>;
/**
* Streams structured object generation with real-time partial updates as the AI builds the object.
*
* @param options - Same as generateObject: prompt, schema, output type, etc.
* @param onPartial - Callback function that receives partial object updates as they're generated
*
* @example
* ```ts
* // Stream object generation with schema
* await blink.ai.streamObject(
* {
* prompt: "Generate a detailed product catalog entry",
* schema: {
* type: "object",
* properties: {
* name: { type: "string" },
* price: { type: "number" },
* description: { type: "string" },
* features: { type: "array", items: { type: "string" } }
* }
* }
* },
* (partial) => {
* console.log("Partial update:", partial);
* updateProductForm(partial); // Update UI in real-time
* }
* );
* ```
*
* @returns Promise<ObjectGenerationResponse> - Final complete object with metadata
*/
streamObject(options: ObjectGenerationRequest, onPartial: (partial: any) => void): Promise<ObjectGenerationResponse>;
/**
* Generates images from text descriptions using AI.
*
* @param options - Object containing:
* - `prompt`: Text description of the desired image (required)
* - `size`: Image dimensions (default: "1024x1024")
* - `quality`: Image quality ("auto", "low", "medium", or "high", default: "auto")
* - `n`: Number of images to generate (default: 1)
* - `background`: Background handling ("auto", "transparent", "opaque", default: "auto")
* - Plus optional signal parameter
*
* @example
* ```ts
* // Basic image generation
* const { data } = await blink.ai.generateImage({
* prompt: "A serene landscape with mountains and a lake at sunset"
* });
* console.log("Image URL:", data[0].url);
*
* // High-quality image with specific size
* const { data } = await blink.ai.generateImage({
* prompt: "A futuristic city skyline with flying cars",
* size: "1536x1024",
* quality: "high",
* background: "transparent"
* });
*
* // Multiple images
* const { data } = await blink.ai.generateImage({
* prompt: "A cute robot mascot for a tech company",
* n: 3,
* size: "1024x1024",
* quality: "high"
* });
* data.forEach((img, i) => console.log(`Image ${i+1}:`, img.url));
* ```
*
* @returns Promise<ImageGenerationResponse> - Object containing:
* - `data`: Array of generated images, each with a `url` or `b64_json` payload
*/
generateImage(options: {
prompt: string;
size?: string;
quality?: "auto" | "low" | "medium" | "high";
n?: number;
background?: "auto" | "transparent" | "opaque";
signal?: AbortSignal;
}): Promise<ImageGenerationResponse>;
/**
* Modifies existing images using AI with text prompts for image-to-image editing.
*
* @param options - Object containing:
* - `images`: Array of public image URLs to modify (required, up to 16 images)
* - `prompt`: Text description of desired modifications (required)
* - `size`: Output image dimensions (default: "auto")
* - `quality`: Image quality ("auto", "low", "medium", or "high", default: "auto")
* - `n`: Number of output images to generate (default: 1)
* - `background`: Background handling ("auto", "transparent", "opaque", default: "auto")
* - Plus optional signal parameter
*
* @example
* ```ts
* // Professional headshots from casual photos
* const { data } = await blink.ai.modifyImage({
* images: [
* "https://storage.example.com/user-photo-1.jpg",
* "https://storage.example.com/user-photo-2.jpg"
* ],
* prompt: "Transform into professional business headshots with studio lighting",
* quality: "high",
* n: 4
* });
* data.forEach((img, i) => console.log(`Headshot ${i+1}:`, img.url));
*
* // Artistic style transformation
* const { data } = await blink.ai.modifyImage({
* images: ["https://storage.example.com/portrait.jpg"],
* prompt: "Transform into oil painting style with dramatic lighting",
* quality: "high",
* size: "1024x1024"
* });
*
* // Background replacement
* const { data } = await blink.ai.modifyImage({
* images: ["https://storage.example.com/product.jpg"],
* prompt: "Remove background and place on clean white studio background",
* background: "transparent",
* n: 2
* });
*
* // Batch processing multiple photos
* const userPhotos = [
* "https://storage.example.com/photo1.jpg",
* "https://storage.example.com/photo2.jpg",
* "https://storage.example.com/photo3.jpg"
* ];
* const { data } = await blink.ai.modifyImage({
* images: userPhotos,
* prompt: "Convert to black and white vintage style photographs",
* quality: "high"
* });
* ```
*
* @returns Promise<ImageGenerationResponse> - Object containing:
* - `data`: Array of modified images, each with a `url` or `b64_json` payload
*/
modifyImage(options: {
images: string[];
prompt: string;
size?: string;
quality?: "auto" | "low" | "medium" | "high";
n?: number;
background?: "auto" | "transparent" | "opaque";
signal?: AbortSignal;
}): Promise<ImageGenerationResponse>;
/**
* Converts text to speech using AI voice synthesis models.
*
* @param options - Object containing:
* - `text`: Text content to convert to speech (required)
* - `voice`: Voice to use ("alloy", "echo", "fable", "onyx", "nova", "shimmer")
* - `response_format`: Audio format ("mp3", "opus", "aac", "flac", "wav", "pcm")
* - `speed`: Speech speed (0.25 to 4.0, default: 1.0)
* - Plus optional model, signal parameters
*
* @example
* ```ts
* // Basic text-to-speech
* const { url } = await blink.ai.generateSpeech({
* text: "Hello, welcome to our AI-powered application!"
* });
* console.log("Audio URL:", url);
*
* // Custom voice and format
* const { url, voice, format } = await blink.ai.generateSpeech({
* text: "This is a demonstration of our speech synthesis capabilities.",
* voice: "nova",
* response_format: "wav",
* speed: 1.2
* });
* console.log(`Generated ${format} audio with ${voice} voice:`, url);
*
* // Slow, clear speech for accessibility
* const { url } = await blink.ai.generateSpeech({
* text: "Please listen carefully to these important instructions.",
* voice: "echo",
* speed: 0.8
* });
* ```
*
* @returns Promise<SpeechGenerationResponse> - Object containing:
* - `url`: URL to the generated audio file
* - `voice`: Voice used for generation
* - `format`: Audio format
* - `mimeType`: MIME type of the audio
*/
generateSpeech(options: SpeechGenerationRequest): Promise<SpeechGenerationResponse>;
/**
* Transcribes audio content to text using AI speech recognition models.
*
* @param options - Object containing:
* - `audio`: Audio input as a URL string, base64 string, number array, ArrayBuffer, or Uint8Array (required)
* - `language`: Language code for transcription (e.g., "en", "es", "fr")
* - `response_format`: Output format ("json", "text", "srt", "verbose_json", "vtt")
* - Plus optional model, signal parameters
*
* @example
* ```ts
* // Transcribe from URL
* const { text } = await blink.ai.transcribeAudio({
* audio: "https://example.com/meeting-recording.mp3"
* });
* console.log("Transcription:", text);
*
* // Transcribe with language hint
* const { text, language } = await blink.ai.transcribeAudio({
* audio: "https://example.com/spanish-audio.wav",
* language: "es"
* });
* console.log(`Transcribed ${language}:`, text);
*
* // Transcribe with timestamps (verbose format)
* const result = await blink.ai.transcribeAudio({
* audio: audioFileU