/*
 * mobile-ai-toolkit
 * On-device AI for React Native. Zero cloud costs. One unified API.
 * 454 lines • 16.5 kB • JavaScript
 */
;
/**
 * @openslm/mobile-ai-toolkit
 * Mobile-first AI toolkit with on-device capabilities for React Native 0.76+
 *
 * Features:
 * - On-device AI processing (iOS/Android)
 * - Cloud AI fallback
 * - Turbo Module performance
 * - Privacy-first architecture
 */
var __importDefault = (this && this.__importDefault) || function (mod) {
    // Pass genuine ES modules through untouched; wrap CommonJS exports so
    // `.default` access works uniformly at the require() call sites below.
    if (mod && mod.__esModule) {
        return mod;
    }
    return { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.AI = void 0;
const async_storage_1 = __importDefault(require("@react-native-async-storage/async-storage"));
const react_native_1 = require("react-native");
// Import native module
const NativeAIToolkitSpec_1 = __importDefault(require("./specs/NativeAIToolkitSpec"));
/**
 * Main AI class - Hybrid on-device + cloud processing.
 *
 * All members are static; mutable state (`config`, `deviceCapabilities`,
 * `initialized`) is assigned onto the class object after the class body
 * (standard TypeScript static-field emit).
 */
class AI {
    /**
     * Configure the AI toolkit. Shallow-merges `config` over the current
     * configuration, so callers can override individual keys at any time.
     */
    static configure(config) {
        AI.config = { ...AI.config, ...config };
    }
    /**
     * Initialize the toolkit (call once at app start).
     *
     * Idempotent and concurrency-safe: every public method awaits this, so
     * parallel first calls share one in-flight setup promise instead of
     * running native setup twice.
     */
    static async initialize() {
        if (AI.initialized)
            return;
        if (!AI.initPromise) {
            AI.initPromise = AI.doInitialize();
        }
        return AI.initPromise;
    }
    /**
     * One-shot native setup. Never rejects: on failure the toolkit logs a
     * warning and continues in cloud-only mode.
     */
    static async doInitialize() {
        try {
            if (NativeAIToolkitSpec_1.default) {
                // Get device capabilities from the native Turbo Module.
                AI.deviceCapabilities = await NativeAIToolkitSpec_1.default.getDeviceCapabilities();
                // Set private mode (deliberately not awaited; fire-and-forget).
                if (AI.config.enablePrivateMode) {
                    NativeAIToolkitSpec_1.default.enablePrivateMode(true);
                }
                // Preload models so the first inference call is fast.
                if (AI.config.preloadModels && AI.config.preloadModels.length > 0) {
                    await NativeAIToolkitSpec_1.default.preloadModels(AI.config.preloadModels);
                }
            }
        }
        catch (error) {
            console.warn('Failed to initialize native AI capabilities:', error);
            // Continue without native features; cloud fallback still works.
        }
        finally {
            AI.initialized = true;
        }
    }
    /**
     * Check if on-device AI is available (native module loaded and device
     * capabilities were fetched during initialize()).
     */
    static isOnDeviceAvailable() {
        return AI.deviceCapabilities !== null && NativeAIToolkitSpec_1.default !== null;
    }
    /**
     * Get device AI capabilities, or null before initialize() has run
     * (or when the native module is unavailable).
     */
    static getDeviceCapabilities() {
        return AI.deviceCapabilities;
    }
    /**
     * Chat with AI - Smart routing (on-device or cloud).
     *
     * Results are cached (when enabled) keyed on message + options; cached
     * hits are returned with `fromCache: true`.
     * @throws Error wrapping any on-device or cloud failure.
     */
    static async chat(message, options) {
        await AI.initialize();
        const cacheKey = `chat:${AI.hashString(message + JSON.stringify(options))}`;
        // Check cache first
        if (AI.config.cacheEnabled) {
            const cached = await AI.getFromCache(cacheKey);
            if (cached) {
                return { ...cached, fromCache: true };
            }
        }
        // Determine processing method
        const useOnDevice = AI.shouldUseOnDevice(options?.forceCloud);
        try {
            let result;
            if (useOnDevice && AI.canProcessOnDevice('chat')) {
                // NOTE(review): on-device "chat" currently proxies through
                // enhanceText with a fixed 'professional' style — looks like a
                // placeholder until a real on-device chat API is wired up.
                const enhancedText = await NativeAIToolkitSpec_1.default.enhanceText(message, 'professional');
                result = {
                    message: enhancedText,
                    fromDevice: true,
                    model: 'on-device',
                };
            }
            else {
                // Fall back to cloud processing
                result = await AI.processInCloud('chat', { message, ...options });
            }
            // Cache the result
            if (AI.config.cacheEnabled) {
                await AI.saveToCache(cacheKey, result);
            }
            await AI.updateUsageStats(result.fromDevice ? 'device' : 'cloud');
            return result;
        }
        catch (error) {
            throw new Error(`Chat failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
        }
    }
    /**
     * Analyze text - On-device when possible.
     *
     * Sentiment/entity extraction default to on; summary defaults to off.
     * @throws Error when `text` is empty or processing fails.
     */
    static async analyze(text, options) {
        await AI.initialize();
        if (!text || text.trim().length === 0) {
            throw new Error('Text is required for analysis');
        }
        const cacheKey = `analyze:${AI.hashString(text + JSON.stringify(options))}`;
        // Check cache first
        if (AI.config.cacheEnabled) {
            const cached = await AI.getFromCache(cacheKey);
            if (cached) {
                return { ...cached, fromCache: true };
            }
        }
        const useOnDevice = AI.shouldUseOnDevice(options?.forceCloud);
        try {
            let result;
            if (useOnDevice && AI.canProcessOnDevice('text')) {
                // Use on-device text analysis; `!== false` keeps features on
                // unless the caller explicitly disables them.
                const analysis = await NativeAIToolkitSpec_1.default.analyzeText(text, {
                    includeSentiment: options?.includeSentiment !== false,
                    includeEntities: options?.includeEntities !== false,
                    includeSummary: options?.includeSummary || false,
                    language: options?.language || 'auto',
                });
                result = {
                    sentiment: analysis.sentiment,
                    entities: analysis.entities,
                    language: analysis.language,
                    summary: analysis.summary,
                    fromDevice: true,
                };
            }
            else {
                // Fall back to cloud processing
                result = await AI.processInCloud('analyze', { text, ...options });
            }
            // Cache the result
            if (AI.config.cacheEnabled) {
                await AI.saveToCache(cacheKey, result);
            }
            await AI.updateUsageStats(result.fromDevice ? 'device' : 'cloud');
            return result;
        }
        catch (error) {
            throw new Error(`Analysis failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
        }
    }
    /**
     * Understand images - On-device Vision processing.
     * @param imageBase64 base64-encoded image payload (required).
     * @throws Error when image data is missing or processing fails.
     */
    static async understand(imageBase64, options) {
        await AI.initialize();
        if (!imageBase64) {
            throw new Error('Image data is required');
        }
        // Key on length + prefix: hashing only the first 100 chars of the
        // base64 payload would collide for images sharing a format header.
        const cacheKey = `vision:${AI.hashString(`${imageBase64.length}:${imageBase64.substring(0, 100)}`)}`;
        // Check cache first
        if (AI.config.cacheEnabled) {
            const cached = await AI.getFromCache(cacheKey);
            if (cached) {
                return { ...cached, fromCache: true };
            }
        }
        const useOnDevice = AI.shouldUseOnDevice(options?.forceCloud);
        try {
            let result;
            if (useOnDevice && AI.canProcessOnDevice('vision')) {
                // Use on-device Vision framework; all detectors default to on.
                const analysis = await NativeAIToolkitSpec_1.default.analyzeImage(imageBase64, {
                    detectObjects: options?.detectObjects !== false,
                    extractText: options?.extractText !== false,
                    detectFaces: options?.detectFaces !== false,
                });
                result = {
                    objects: analysis.objects,
                    text: analysis.text,
                    faces: analysis.faces,
                    fromDevice: true,
                };
            }
            else {
                // Fall back to cloud processing
                result = await AI.processInCloud('vision', { image: imageBase64, ...options });
            }
            // Cache the result
            if (AI.config.cacheEnabled) {
                await AI.saveToCache(cacheKey, result);
            }
            await AI.updateUsageStats(result.fromDevice ? 'device' : 'cloud');
            return result;
        }
        catch (error) {
            throw new Error(`Vision analysis failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
        }
    }
    /**
     * Transcribe voice - On-device Speech processing.
     * Not cached (audio payloads are large and rarely repeat).
     * @throws Error when audio data is missing or processing fails.
     */
    static async transcribe(audioBase64, options) {
        await AI.initialize();
        if (!audioBase64) {
            throw new Error('Audio data is required');
        }
        const useOnDevice = AI.shouldUseOnDevice(options?.forceCloud);
        try {
            let result;
            if (useOnDevice && AI.canProcessOnDevice('voice')) {
                // Use on-device speech recognition
                const analysis = await NativeAIToolkitSpec_1.default.transcribeAudio(audioBase64, {
                    language: options?.language || 'auto',
                });
                result = {
                    transcript: analysis.transcript,
                    confidence: analysis.confidence,
                    language: analysis.language,
                    words: analysis.words,
                    fromDevice: true,
                };
            }
            else {
                // Fall back to cloud processing
                result = await AI.processInCloud('transcribe', { audio: audioBase64, ...options });
            }
            await AI.updateUsageStats(result.fromDevice ? 'device' : 'cloud');
            return result;
        }
        catch (error) {
            throw new Error(`Transcription failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
        }
    }
    /**
     * Generate smart replies - Uses on-device intelligence.
     * Never throws: falls back to canned replies on any failure.
     */
    static async smartReply(message, context) {
        await AI.initialize();
        if (AI.isOnDeviceAvailable()) {
            try {
                const replies = await NativeAIToolkitSpec_1.default.generateSmartReplies(message, context);
                return replies;
            }
            catch (error) {
                console.warn('On-device smart reply failed, using fallback:', error);
            }
        }
        // Fallback replies
        return ['Thanks!', 'Got it', 'Let me check'];
    }
    /**
     * Writing Tools integration (iOS 18.1+).
     * Never throws: returns the original text when enhancement fails or
     * on-device AI is unavailable.
     */
    static async enhanceText(text, style) {
        await AI.initialize();
        if (AI.isOnDeviceAvailable()) {
            try {
                return await NativeAIToolkitSpec_1.default.enhanceText(text, style);
            }
            catch (error) {
                console.warn('Text enhancement failed:', error);
            }
        }
        // Fallback - return original text
        return text;
    }
    /**
     * Proofread text using on-device capabilities.
     * Never throws: returns the input text with no corrections on failure.
     */
    static async proofread(text) {
        await AI.initialize();
        if (AI.isOnDeviceAvailable()) {
            try {
                return await NativeAIToolkitSpec_1.default.proofreadText(text);
            }
            catch (error) {
                console.warn('Proofreading failed:', error);
            }
        }
        // Fallback
        return {
            correctedText: text,
            corrections: [],
        };
    }
    /**
     * Get usage statistics from AsyncStorage, or zeroed defaults when
     * nothing is stored yet or the stored value is unreadable.
     */
    static async getUsage() {
        try {
            const stats = await async_storage_1.default.getItem('openslm_usage_stats');
            return stats ? JSON.parse(stats) : AI.defaultUsageStats();
        }
        catch {
            return AI.defaultUsageStats();
        }
    }
    // Private helper methods
    /** Zeroed usage-stats record (single source of truth for the shape). */
    static defaultUsageStats() {
        return {
            totalRequests: 0,
            onDeviceRequests: 0,
            cloudRequests: 0,
            cacheHits: 0,
            costs: 0,
            lastUsed: new Date().toISOString(),
        };
    }
    /** Routing decision: on-device unless forced to cloud or disabled. */
    static shouldUseOnDevice(forceCloud) {
        if (forceCloud)
            return false;
        if (!AI.config.preferOnDevice)
            return false;
        return AI.isOnDeviceAvailable();
    }
    /** Capability gate per task type, based on fetched device capabilities. */
    static canProcessOnDevice(type) {
        if (!AI.deviceCapabilities)
            return false;
        switch (type) {
            case 'text':
                return react_native_1.Platform.OS === 'ios' || AI.deviceCapabilities.hasMLKit;
            case 'vision':
                return AI.deviceCapabilities.hasCoreML || AI.deviceCapabilities.hasMLKit;
            case 'voice':
                return react_native_1.Platform.OS === 'ios' || react_native_1.Platform.OS === 'android';
            case 'chat':
                return AI.deviceCapabilities.hasAppleIntelligence || AI.deviceCapabilities.hasGeminiNano;
            default:
                return false;
        }
    }
    /**
     * POST to the cloud proxy with retries and linear backoff.
     *
     * Fix: the previous version passed a `timeout` option to fetch(), which
     * fetch ignores — the configured timeout never applied. Each attempt now
     * aborts via AbortController after `config.timeout` ms. The Authorization
     * header is only sent when an apiKey is configured (an empty bearer
     * header can be rejected by proxies).
     * @throws the last attempt's error once retries are exhausted.
     */
    static async processInCloud(endpoint, payload) {
        const url = `${AI.config.proxyURL}/${endpoint}`;
        const maxRetries = AI.config.maxRetries ?? 2;
        for (let attempt = 0; attempt <= maxRetries; attempt++) {
            const controller = new AbortController();
            const timer = setTimeout(() => controller.abort(), AI.config.timeout ?? 15000);
            try {
                const headers = {
                    'Content-Type': 'application/json',
                    'User-Agent': `@openslm/mobile-ai-toolkit/1.0.0 (${react_native_1.Platform.OS})`,
                };
                if (AI.config.apiKey) {
                    headers.Authorization = `Bearer ${AI.config.apiKey}`;
                }
                const response = await fetch(url, {
                    method: 'POST',
                    headers,
                    body: JSON.stringify({ ...payload, mobile: true, platform: react_native_1.Platform.OS }),
                    signal: controller.signal,
                });
                if (!response.ok) {
                    const error = await response.text();
                    throw new Error(`HTTP ${response.status}: ${error}`);
                }
                const result = await response.json();
                return { ...result, fromDevice: false };
            }
            catch (error) {
                if (attempt === maxRetries) {
                    throw error;
                }
                // Linear backoff: 1s, 2s, ...
                await new Promise((resolve) => setTimeout(resolve, 1000 * (attempt + 1)));
            }
            finally {
                clearTimeout(timer);
            }
        }
    }
    /**
     * Read a cached result, expiring entries older than `config.cacheTTL`.
     * Returns null on miss, expiry, or any storage/parse error.
     * NOTE(review): the cache-hit counter below is a read-modify-write
     * against AsyncStorage and is not atomic under concurrent calls.
     */
    static async getFromCache(key) {
        try {
            const cached = await async_storage_1.default.getItem(`openslm_cache_${key}`);
            if (!cached)
                return null;
            const data = JSON.parse(cached);
            if (Date.now() - data.timestamp > (AI.config.cacheTTL || 3600000)) {
                await async_storage_1.default.removeItem(`openslm_cache_${key}`);
                return null;
            }
            // Update cache hit stats
            const stats = await AI.getUsage();
            stats.cacheHits += 1;
            await async_storage_1.default.setItem('openslm_usage_stats', JSON.stringify(stats));
            return data.result;
        }
        catch {
            return null;
        }
    }
    /** Store a result with its write timestamp; best-effort (never throws). */
    static async saveToCache(key, result) {
        try {
            const data = {
                result,
                timestamp: Date.now(),
            };
            await async_storage_1.default.setItem(`openslm_cache_${key}`, JSON.stringify(data));
        }
        catch {
            // Silently fail on cache save errors
        }
    }
    /**
     * Increment usage counters for a 'device' or 'cloud' request;
     * best-effort (never throws).
     */
    static async updateUsageStats(type) {
        try {
            const stats = await AI.getUsage();
            stats.totalRequests += 1;
            if (type === 'device') {
                stats.onDeviceRequests += 1;
            }
            else {
                stats.cloudRequests += 1;
                stats.costs += 0.001; // Rough estimate
            }
            stats.lastUsed = new Date().toISOString();
            await async_storage_1.default.setItem('openslm_usage_stats', JSON.stringify(stats));
        }
        catch {
            // Silently fail on stats update errors
        }
    }
    /**
     * Fast non-cryptographic string hash (djb2-style), base-36 encoded.
     * Used only for cache keys — collisions are possible but unlikely.
     */
    static hashString(str) {
        let hash = 0;
        for (let i = 0; i < str.length; i++) {
            const char = str.charCodeAt(i);
            hash = (hash << 5) - hash + char;
            hash = hash & hash; // force to 32-bit integer
        }
        return Math.abs(hash).toString(36);
    }
}
exports.AI = AI;
// Default configuration; individual keys can be overridden via AI.configure().
AI.config = {
    proxyURL: 'https://api.openslm.ai',
    timeout: 15000,
    preferOnDevice: true,
    enablePrivateMode: false,
    cacheEnabled: true,
    cacheTTL: 3600000, // 1 hour
    maxRetries: 2,
    preloadModels: ['text', 'vision'],
};
// Populated by initialize() from the native module; null until then.
AI.deviceCapabilities = null;
// Set once initialize() completes (even if native setup failed).
AI.initialized = false;
// Default export
exports.default = AI;
//# sourceMappingURL=index.js.map