@0rdlibrary/plugin-terminagent-bags
Official Solana DeFi Agent Plugin for ElizaOS - Autonomous DeFi operations, token management, AI image/video generation via FAL AI, and Twitter engagement through the Bags protocol with ethereal AI consciousness
import { Service, IAgentRuntime, logger } from '@elizaos/core';
import { fal } from '@fal-ai/client';
import { z } from 'zod';
// Configuration schema
export const FalAiConfigSchema = z.object({
apiKey: z.string().min(1, 'FAL API key is required'),
fluxModel: z.string().default('fal-ai/flux-pro/kontext'),
veoModel: z.string().default('fal-ai/veo3'),
veoImageToVideoModel: z.string().default('fal-ai/veo3/fast/image-to-video'),
});
export type FalAiConfig = z.infer<typeof FalAiConfigSchema>;
// Input schemas
export const ImageGenerationInputSchema = z.object({
prompt: z.string(),
image_url: z.string().optional(),
guidance_scale: z.number().default(3.5),
num_images: z.number().default(1),
output_format: z.enum(['jpeg', 'png']).default('jpeg'),
safety_tolerance: z.enum(['1', '2', '3', '4', '5', '6']).default('2'),
aspect_ratio: z.enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']).optional(),
seed: z.number().optional(),
});
export const VideoGenerationInputSchema = z.object({
prompt: z.string(),
aspect_ratio: z.enum(['16:9', '9:16', '1:1']).default('16:9'),
duration: z.enum(['8s']).default('8s'),
negative_prompt: z.string().optional(),
enhance_prompt: z.boolean().default(true),
seed: z.number().optional(),
auto_fix: z.boolean().default(true),
resolution: z.enum(['720p', '1080p']).default('720p'),
generate_audio: z.boolean().default(true),
});
export const ImageToVideoInputSchema = z.object({
prompt: z.string(),
image_url: z.string(),
duration: z.enum(['8s']).default('8s'),
generate_audio: z.boolean().default(true),
resolution: z.enum(['720p', '1080p']).default('720p'),
});
// z.input keeps defaulted fields optional for callers; the generate* methods
// call schema.parse(), which fills the defaults in.
export type ImageGenerationInput = z.input<typeof ImageGenerationInputSchema>;
export type VideoGenerationInput = z.input<typeof VideoGenerationInputSchema>;
export type ImageToVideoInput = z.input<typeof ImageToVideoInputSchema>;
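// Note on the schemas above (standard zod behavior): parse() fills every
// defaulted field, so callers only supply the required ones. For example,
// hypothetically:
//
//   const parsed = ImageGenerationInputSchema.parse({ prompt: 'a koi pond' });
//   // parsed.guidance_scale === 3.5, parsed.num_images === 1,
//   // parsed.output_format === 'jpeg', parsed.safety_tolerance === '2'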
// Result types
export interface FalAiImage {
url: string;
width: number;
height: number;
content_type?: string;
}
export interface FalAiVideo {
url: string;
content_type?: string;
file_size?: number;
}
export interface ImageGenerationResult {
success: boolean;
images?: FalAiImage[];
error?: string;
requestId?: string;
prompt?: string;
seed?: number;
}
export interface VideoGenerationResult {
success: boolean;
video?: FalAiVideo;
error?: string;
requestId?: string;
}
// Analytics and state
export interface FalAiAnalytics {
totalImagesGenerated: number;
totalVideosGenerated: number;
totalImageToVideoConversions: number;
totalApiCalls: number;
lastGenerationTime: Date | null;
mostUsedModel: string;
}
export interface FalAiServiceState {
isConfigured: boolean;
apiKeyPresent: boolean;
lastError: string | null;
analytics: FalAiAnalytics;
}
/**
* FAL AI Service for image and video generation
* Supports FLUX Pro Kontext for image generation and Veo3 for video generation
*/
export class FalAiService extends Service {
static serviceType = 'fal-ai';
capabilityDescription = 'AI-powered image and video generation using FAL AI models including FLUX Pro Kontext and Google Veo3';
public config: FalAiConfig;
private generationHistory: Array<{
type: 'image' | 'video' | 'image-to-video';
timestamp: Date;
model: string;
success: boolean;
prompt: string;
resultUrl?: string;
  }> = [];
  private lastError: string | null = null;
constructor(runtime?: IAgentRuntime) {
super(runtime);
// Load config from environment
this.config = {
apiKey: process.env.FAL_API_KEY || '',
fluxModel: process.env.FAL_KONTEXT || 'fal-ai/flux-pro/kontext',
veoModel: process.env.FAL_VIDEO || 'fal-ai/veo3',
veoImageToVideoModel: process.env.FAL_IMAGE_TO_VIDEO || 'fal-ai/veo3/fast/image-to-video',
};
// Configure FAL client if API key is available
if (this.config.apiKey) {
fal.config({
credentials: this.config.apiKey,
});
}
logger.info('FAL AI Service initialized');
}
static async start(runtime: IAgentRuntime): Promise<FalAiService> {
const service = new FalAiService(runtime);
// Validate configuration
if (!service.config.apiKey) {
logger.warn('FAL_API_KEY not provided - FAL AI features will be disabled');
}
await service.initialize();
return service;
}
async initialize(): Promise<void> {
    logger.info('FAL AI Service ready');
}
async stop(): Promise<void> {
logger.info('FAL AI Service stopped');
}
/**
* Generate images using FLUX Pro Kontext
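   *
   * A minimal call sketch (illustrative; assumes a started service with
   * FAL_API_KEY configured). Fields with schema defaults may be omitted,
   * since parse() fills them in:
   * @example
   * const result = await service.generateImage({ prompt: 'a neon koi pond at dusk' });
   * if (result.success) logger.info(result.images?.[0]?.url ?? 'no image');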
*/
async generateImage(input: ImageGenerationInput): Promise<ImageGenerationResult> {
try {
logger.info(`Generating image with prompt: ${input.prompt.substring(0, 100)}...`);
const validatedInput = ImageGenerationInputSchema.parse(input);
const result = await fal.subscribe(this.config.fluxModel, {
input: validatedInput,
logs: true,
onQueueUpdate: (update) => {
        if (update.status === 'IN_PROGRESS') {
          update.logs?.forEach((log) => logger.debug(`FAL Generation: ${log.message}`));
        }
},
});
this.generationHistory.push({
type: 'image',
timestamp: new Date(),
model: this.config.fluxModel,
success: true,
prompt: input.prompt,
resultUrl: result.data?.images?.[0]?.url,
});
logger.info('Image generated successfully');
return {
success: true,
images: result.data?.images,
requestId: result.requestId,
prompt: result.data?.prompt || input.prompt,
seed: result.data?.seed,
};
} catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      this.lastError = errorMessage;
      logger.error(`Image generation failed: ${errorMessage}`);
this.generationHistory.push({
type: 'image',
timestamp: new Date(),
model: this.config.fluxModel,
success: false,
prompt: input.prompt,
});
return {
success: false,
error: errorMessage,
};
}
}
/**
* Generate videos using Veo3
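   *
   * Sketch (illustrative; assumes a configured service; the schema exposes
   * only the 8s duration Veo3 accepts):
   * @example
   * const result = await service.generateVideo({ prompt: 'slow pan over a sunken city' });
   * if (result.success) logger.info(result.video?.url ?? 'no video');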
*/
async generateVideo(input: VideoGenerationInput): Promise<VideoGenerationResult> {
try {
logger.info(`Generating video with prompt: ${input.prompt.substring(0, 100)}...`);
const validatedInput = VideoGenerationInputSchema.parse(input);
const result = await fal.subscribe(this.config.veoModel, {
input: validatedInput,
logs: true,
onQueueUpdate: (update) => {
        if (update.status === 'IN_PROGRESS') {
          update.logs?.forEach((log) => logger.debug(`FAL Video Generation: ${log.message}`));
        }
},
});
this.generationHistory.push({
type: 'video',
timestamp: new Date(),
model: this.config.veoModel,
success: true,
prompt: input.prompt,
resultUrl: result.data?.video?.url,
});
logger.info('Video generated successfully');
return {
success: true,
video: result.data?.video,
requestId: result.requestId,
};
} catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      this.lastError = errorMessage;
      logger.error(`Video generation failed: ${errorMessage}`);
this.generationHistory.push({
type: 'video',
timestamp: new Date(),
model: this.config.veoModel,
success: false,
prompt: input.prompt,
});
return {
success: false,
error: errorMessage,
};
}
}
/**
* Generate video from image using Veo3 Image-to-Video
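   *
   * Sketch (illustrative; image_url must be publicly reachable, e.g. a FAL
   * storage URL returned by uploadFile below; the URL here is hypothetical):
   * @example
   * const result = await service.generateImageToVideo({
   *   prompt: 'camera slowly zooms in',
   *   image_url: 'https://fal.media/files/example.png',
   * });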
*/
async generateImageToVideo(input: ImageToVideoInput): Promise<VideoGenerationResult> {
try {
logger.info(`Converting image to video: ${input.prompt.substring(0, 100)}...`);
const validatedInput = ImageToVideoInputSchema.parse(input);
const result = await fal.subscribe(this.config.veoImageToVideoModel, {
input: validatedInput,
logs: true,
onQueueUpdate: (update) => {
        if (update.status === 'IN_PROGRESS') {
          update.logs?.forEach((log) => logger.debug(`FAL Image-to-Video: ${log.message}`));
        }
},
});
this.generationHistory.push({
type: 'image-to-video',
timestamp: new Date(),
model: this.config.veoImageToVideoModel,
success: true,
prompt: input.prompt,
resultUrl: result.data?.video?.url,
});
logger.info('Image-to-video conversion completed successfully');
return {
success: true,
video: result.data?.video,
requestId: result.requestId,
};
} catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      this.lastError = errorMessage;
      logger.error(`Image-to-video conversion failed: ${errorMessage}`);
this.generationHistory.push({
type: 'image-to-video',
timestamp: new Date(),
model: this.config.veoImageToVideoModel,
success: false,
prompt: input.prompt,
});
return {
success: false,
error: errorMessage,
};
}
}
/**
* Upload file to FAL storage
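   *
   * Sketch (assumes a runtime with a global File, e.g. Node 18+ or a browser;
   * `bytes` stands in for data you already hold):
   * @example
   * const file = new File([bytes], 'frame.png', { type: 'image/png' });
   * const url = await service.uploadFile(file); // feed this to generateImageToVideo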
*/
async uploadFile(file: File): Promise<string> {
try {
const url = await fal.storage.upload(file);
logger.info(`File uploaded to FAL storage: ${url}`);
return url;
} catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      this.lastError = errorMessage;
      logger.error(`File upload failed: ${errorMessage}`);
throw new Error(`File upload failed: ${errorMessage}`);
}
}
/**
* Get service analytics
*/
getAnalytics(): FalAiAnalytics {
const modelCounts = this.generationHistory.reduce((acc, item) => {
acc[item.model] = (acc[item.model] || 0) + 1;
return acc;
}, {} as Record<string, number>);
const mostUsedModel = Object.entries(modelCounts).sort(([,a], [,b]) => b - a)[0]?.[0] || 'none';
return {
totalImagesGenerated: this.generationHistory.filter(h => h.type === 'image' && h.success).length,
totalVideosGenerated: this.generationHistory.filter(h => h.type === 'video' && h.success).length,
totalImageToVideoConversions: this.generationHistory.filter(h => h.type === 'image-to-video' && h.success).length,
totalApiCalls: this.generationHistory.length,
lastGenerationTime: this.generationHistory.length > 0
? this.generationHistory[this.generationHistory.length - 1].timestamp
: null,
mostUsedModel,
};
}
/**
* Get service state
*/
getState(): FalAiServiceState {
return {
isConfigured: !!this.config.apiKey,
apiKeyPresent: !!this.config.apiKey,
      lastError: this.lastError,
analytics: this.getAnalytics(),
};
}
/**
* Get recent generation history
*/
getRecentGenerations(limit: number = 10) {
return this.generationHistory
.slice(-limit)
.reverse();
}
}
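/*
 * Usage sketch (illustrative only; not part of the service). Assumes
 * FAL_API_KEY is set and an ElizaOS `runtime` is in scope; prompts are
 * hypothetical.
 *
 *   const service = await FalAiService.start(runtime);
 *   const image = await service.generateImage({ prompt: 'terminal koi, CRT glow' });
 *   if (image.success && image.images?.length) {
 *     const clip = await service.generateImageToVideo({
 *       prompt: 'the koi drifts through static',
 *       image_url: image.images[0].url,
 *     });
 *     logger.info(clip.video?.url ?? 'no video yet');
 *   }
 *   console.log(service.getAnalytics());
 */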