magically-sdk
Version:
Official SDK for Magically - Build mobile apps with AI
237 lines (213 loc) • 6.7 kB
text/typescript
import { MagicallyAuth } from './MagicallyAuth';
import {
SDKConfig,
LLMMessage,
InvokeOptions,
ChatOptions,
ImageOptions,
TranscribeOptions,
InvokeTextResponse,
ChatResponse,
SingleImageResponse,
MultipleImageResponse,
ModelsResponse,
TranscribeResponse
} from './types';
import { Logger } from './Logger';
import { APIClient } from './APIClient';
import { getAuthToken } from './utils';
/**
 * Client for Magically's LLM endpoints: text/structured generation, chat,
 * image generation, audio transcription, and model discovery.
 *
 * Every call obtains an auth token via `getAuthToken` and issues the request
 * through the shared `APIClient`, scoped to `config.projectId`. Errors from
 * the API client propagate unchanged to the caller.
 */
export class MagicallyLLM {
  private readonly logger: Logger;
  private readonly apiClient: APIClient;

  constructor(private config: SDKConfig, private auth: MagicallyAuth) {
    // `debug` may be undefined on SDKConfig; default to quiet logging.
    this.logger = new Logger(config.debug ?? false, 'MagicallyLLM');
    this.apiClient = new APIClient(config, 'MagicallyLLM');
  }

  /**
   * Generate text or structured output using AI.
   * @param prompt - The text prompt for generation
   * @param options - Optional parameters (model, temperature, response_json_schema, images)
   * @returns Generated text or structured object based on schema
   * @throws Error if `options.images` is not an array or exceeds 3 images
   */
  async invoke<T = InvokeTextResponse>(
    prompt: string,
    options?: InvokeOptions
  ): Promise<T> {
    // Fail fast on invalid image payloads before spending a network round-trip.
    if (options?.images) {
      if (!Array.isArray(options.images)) {
        throw new Error('Images must be an array');
      }
      if (options.images.length > 3) {
        throw new Error('Maximum 3 images allowed per request');
      }
    }
    this.logger.debug('Starting LLM invoke', {
      // Truncate the prompt and summarize images so debug logs stay small.
      prompt: prompt.substring(0, 100),
      options: { ...options, images: options?.images ? `${options.images.length} images` : undefined }
    });
    const token = await getAuthToken(this.apiClient, this.auth);
    const result = await this.apiClient.request<T>(
      `/api/project/${this.config.projectId}/llm/invoke`,
      {
        method: 'POST',
        body: {
          prompt,
          ...options
        },
        operation: 'invoke'
      },
      token
    );
    this.logger.success('LLM invoke completed', { hasResult: !!result });
    return result;
  }

  /**
   * Chat with AI using conversation messages.
   * @param messages - Array of conversation messages (supports images in content)
   * @param options - Optional parameters (model, temperature, stream)
   * @returns AI response message with usage info
   * @throws Error if more than 3 images appear across all messages
   */
  async chat(
    messages: LLMMessage[],
    options?: ChatOptions
  ): Promise<ChatResponse> {
    // Count image parts across all messages; array-content messages are the
    // multimodal form, plain-string content carries no images.
    let totalImages = 0;
    for (const message of messages) {
      if (Array.isArray(message.content)) {
        const imageCount = message.content.filter(item => item.type === 'image').length;
        totalImages += imageCount;
      }
    }
    if (totalImages > 3) {
      throw new Error('Maximum 3 images allowed across all messages in the conversation');
    }
    this.logger.debug('Starting LLM chat', {
      messageCount: messages.length,
      totalImages,
      options
    });
    const token = await getAuthToken(this.apiClient, this.auth);
    const result = await this.apiClient.request<ChatResponse>(
      `/api/project/${this.config.projectId}/llm/chat`,
      {
        method: 'POST',
        body: {
          messages,
          ...options
        },
        operation: 'chat'
      },
      token
    );
    this.logger.success('LLM chat completed', { responseLength: result.message?.content?.length });
    return result;
  }

  /**
   * Generate images using AI.
   * @param prompt - Description of the image to generate
   * @param options - Optional parameters (model, size, quality, n)
   * @returns Image URL(s) and metadata — a single- or multi-image response
   *          depending on the request (e.g. `n`)
   */
  async image(
    prompt: string,
    options?: ImageOptions
  ): Promise<SingleImageResponse | MultipleImageResponse> {
    this.logger.debug('Starting LLM image generation', { prompt: prompt.substring(0, 100), options });
    const token = await getAuthToken(this.apiClient, this.auth);
    const result = await this.apiClient.request<SingleImageResponse | MultipleImageResponse>(
      `/api/project/${this.config.projectId}/llm/image`,
      {
        method: 'POST',
        body: {
          prompt,
          ...options
        },
        operation: 'image:generate'
      },
      token
    );
    this.logger.success('LLM image generation completed', {
      // Discriminate the union by shape: single responses carry `url`,
      // multi responses carry `images`.
      hasUrl: !!('url' in result ? result.url : result.images),
      imageCount: 'images' in result ? result.images.length : 1
    });
    return result;
  }

  /**
   * Transcribe audio to text using AI.
   * @param audio - Audio data as base64 string (data:audio/webm;base64,...) or file:// URL
   * @param options - Optional parameters (model, language, format, etc.)
   * @returns Transcribed text and metadata
   */
  async transcribe(
    audio: string,
    options?: TranscribeOptions
  ): Promise<TranscribeResponse> {
    this.logger.debug('Starting LLM transcribe', {
      // Log only the payload kind, never the audio data itself.
      audioType: audio.startsWith('data:') ? 'base64' : 'url',
      options
    });
    const token = await getAuthToken(this.apiClient, this.auth);
    const result = await this.apiClient.request<TranscribeResponse>(
      `/api/project/${this.config.projectId}/llm/transcribe`,
      {
        method: 'POST',
        body: {
          audio,
          ...options
        },
        operation: 'transcribe'
      },
      token
    );
    this.logger.success('LLM transcribe completed', {
      textLength: result.text?.length,
      language: result.language,
      duration: result.duration
    });
    return result;
  }

  /**
   * Get available AI models.
   * @returns List of available text, image, and transcription models with defaults
   */
  async models(): Promise<ModelsResponse> {
    this.logger.debug('Getting available LLM models');
    const token = await getAuthToken(this.apiClient, this.auth);
    const result = await this.apiClient.request<ModelsResponse>(
      `/api/project/${this.config.projectId}/llm/models`,
      {
        method: 'GET',
        operation: 'list:models'
      },
      token
    );
    this.logger.success('LLM models retrieved', {
      textModels: result.text?.length,
      imageModels: result.image?.length,
      transcriptionModels: result.transcription?.length
    });
    return result;
  }
}