magically-sdk

Official SDK for Magically - Build mobile apps with AI

"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.MagicallyLLM = void 0; const Logger_1 = require("./Logger"); const APIClient_1 = require("./APIClient"); class MagicallyLLM { constructor(config, auth) { this.config = config; this.auth = auth; this.logger = new Logger_1.Logger(config.debug || false, 'MagicallyLLM'); this.apiClient = new APIClient_1.APIClient(config, 'MagicallyLLM'); } /** * Generate text or structured output using AI * @param prompt - The text prompt for generation * @param options - Optional parameters (model, temperature, response_json_schema, images) * @returns Generated text or structured object based on schema */ async invoke(prompt, options) { try { // Validate images if provided if (options?.images) { if (!Array.isArray(options.images)) { throw new Error('Images must be an array'); } if (options.images.length > 3) { throw new Error('Maximum 3 images allowed per request'); } } this.logger.debug('Starting LLM invoke', { prompt: prompt.substring(0, 100), options: { ...options, images: options?.images ? `${options.images.length} images` : undefined } }); const token = await this.auth.getValidToken(); const result = await this.apiClient.request(`/api/project/${this.config.projectId}/llm/invoke`, { method: 'POST', body: { prompt, ...options }, operation: 'invoke' }, token); this.logger.success('LLM invoke completed', { hasResult: !!result }); return result; } catch (error) { throw error; } } /** * Chat with AI using conversation messages * @param messages - Array of conversation messages (supports images in content) * @param options - Optional parameters (model, temperature, stream) * @returns AI response message with usage info */ async chat(messages, options) { try { // Validate images in messages let totalImages = 0; for (const message of messages) { if (Array.isArray(message.content)) { const imageCount = message.content.filter(item => item.type === 'image').length; totalImages += imageCount; } } if (totalImages > 3) { throw new Error('Maximum 3 images allowed across all messages in the conversation'); } this.logger.debug('Starting LLM chat', { messageCount: messages.length, totalImages, options }); const token = await this.auth.getValidToken(); const result = await this.apiClient.request(`/api/project/${this.config.projectId}/llm/chat`, { method: 'POST', body: { messages, ...options }, operation: 'chat' }, token); this.logger.success('LLM chat completed', { responseLength: result.message?.content?.length }); return result; } catch (error) { throw error; } } /** * Generate images using AI * @param prompt - Description of the image to generate * @param options - Optional parameters (model, size, quality, n) * @returns Image URL(s) and metadata */ async image(prompt, options) { try { this.logger.debug('Starting LLM image generation', { prompt: prompt.substring(0, 100), options }); const token = await this.auth.getValidToken(); const result = await this.apiClient.request(`/api/project/${this.config.projectId}/llm/image`, { method: 'POST', body: { prompt, ...options }, operation: 'image:generate' }, token); this.logger.success('LLM image generation completed', { hasUrl: !!('url' in result ? result.url : result.images), imageCount: 'images' in result ? result.images.length : 1 }); return result; } catch (error) { throw error; } } /** * Transcribe audio to text using AI * @param audio - Audio data as base64 string (data:audio/webm;base64,...) or file:// URL * @param options - Optional parameters (model, language, format, etc.) 
* @returns Transcribed text and metadata */ async transcribe(audio, options) { try { this.logger.debug('Starting LLM transcribe', { audioType: audio.startsWith('data:') ? 'base64' : 'url', options }); const token = await this.auth.getValidToken(); const result = await this.apiClient.request(`/api/project/${this.config.projectId}/llm/transcribe`, { method: 'POST', body: { audio, ...options }, operation: 'transcribe' }, token); this.logger.success('LLM transcribe completed', { textLength: result.text?.length, language: result.language, duration: result.duration }); return result; } catch (error) { throw error; } } /** * Get available AI models * @returns List of available text and image models with defaults */ async models() { try { this.logger.debug('Getting available LLM models'); const token = await this.auth.getValidToken(); const result = await this.apiClient.request(`/api/project/${this.config.projectId}/llm/models`, { method: 'GET', operation: 'list:models' }, token); this.logger.success('LLM models retrieved', { textModels: result.text?.length, imageModels: result.image?.length, transcriptionModels: result.transcription?.length }); return result; } catch (error) { throw error; } } } exports.MagicallyLLM = MagicallyLLM;
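For orientation, the sketch below shows one way the class above could be exercised directly. It is an assumption-based example, not the SDK's documented entry point: the file only reveals that `config` needs a `projectId` (plus an optional `debug` flag) and that `auth` must expose an async `getValidToken()`, and the require path is a guess at where the compiled module lives. In the published package these objects are normally wired up by the SDK's own initialization code.

// Hypothetical wiring - the config/auth shapes are inferred from the constructor above,
// and the require path is assumed, not taken from the package documentation.
const { MagicallyLLM } = require('magically-sdk/dist/MagicallyLLM'); // assumed module path

const config = { projectId: 'my-project-id', debug: true };       // placeholder project config
const auth = { getValidToken: async () => 'my-access-token' };    // placeholder token provider

const llm = new MagicallyLLM(config, auth);

async function demo() {
    // Plain text generation
    const text = await llm.invoke('Write a haiku about mobile apps');

    // Multi-turn chat; message content may also be an array containing image items
    // (at most 3 images across the whole conversation, per the validation above)
    const reply = await llm.chat([
        { role: 'user', content: 'Suggest a name for a fitness app' }
    ]);

    // List the text, image, and transcription models available to the project
    const available = await llm.models();

    console.log(text, reply.message?.content, available);
}

demo().catch(console.error);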