n8n-nodes-piapi

Community n8n nodes for PiAPI - integrate generative AI capabilities (image, video, audio, 3D) into your workflows

LLMTextToImage.node.js
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.LLMTextToImage = void 0; const GenericFunctions_1 = require("../shared/GenericFunctions"); const Constants_1 = require("../shared/Constants"); class LLMTextToImage { constructor() { this.description = { displayName: 'PiAPI GPT-4o Text to Image', name: 'llmTextToImage', icon: 'file:../piapi.svg', group: ['transform'], version: 1, description: 'Generate images using PiAPI GPT-4o Image Generation API', defaults: { name: 'GPT-4o Text to Image', }, inputs: ["main"], outputs: ["main"], credentials: [ { name: 'piAPIApi', required: true, }, ], properties: [ { displayName: 'Model', name: 'model', type: 'options', options: [ { name: 'GPT-4o Image', value: 'gpt-4o-image', }, { name: 'GPT-4o Image Preview (Legacy)', value: 'gpt-4o-image-preview', }, ], default: 'gpt-4o-image', description: 'The model to use for image generation', }, { displayName: 'Prompt', name: 'prompt', type: 'string', typeOptions: { rows: 4, }, default: '', required: true, description: 'Text prompt for image generation', }, { displayName: 'Aspect Ratio', name: 'aspectRatio', type: 'options', options: Constants_1.ASPECT_RATIO_OPTIONS, default: '1024:1024', description: 'Aspect ratio for the generated image', }, { displayName: 'Custom Width', name: 'width', type: 'number', displayOptions: { show: { aspectRatio: ['custom'], }, }, default: 1024, description: 'Custom width of the generated image', }, { displayName: 'Custom Height', name: 'height', type: 'number', displayOptions: { show: { aspectRatio: ['custom'], }, }, default: 1024, description: 'Custom height of the generated image', }, { displayName: 'Image Style', name: 'imageStyle', type: 'options', options: Constants_1.LORA_OPTIONS, default: 'none', description: 'Style to apply to the generated image', }, ], }; } async execute() { const items = this.getInputData(); const returnData = []; for (let i = 0; i < items.length; i++) { const model = this.getNodeParameter('model', i); const prompt = this.getNodeParameter('prompt', i); const aspectRatio = this.getNodeParameter('aspectRatio', i, '1:1'); const body = { model, messages: [ { role: 'user', content: model === 'gpt-4o-image' ? [ { type: 'text', text: prompt, } ] : prompt, }, ], stream: true, }; const imageStyle = this.getNodeParameter('imageStyle', i, 'none'); let aspectRatioText = ''; if (aspectRatio === 'custom') { const width = this.getNodeParameter('width', i, 1024); const height = this.getNodeParameter('height', i, 1024); aspectRatioText = `Image size: ${width}x${height}. `; } else if (aspectRatio !== 'square_header' && aspectRatio !== 'landscape_header' && aspectRatio !== 'portrait_header') { aspectRatioText = `Image size: ${aspectRatio}. `; } let styleText = ''; if (imageStyle !== 'none') { styleText = `Image style: ${imageStyle}. `; } body.messages[0].content = `${aspectRatioText}${styleText}${prompt}`; try { const credentials = await this.getCredentials('piAPIApi'); const options = { method: 'POST', body, url: 'https://api.piapi.ai/v1/chat/completions', headers: { 'Content-Type': 'application/json', 'Authorization': `Bearer ${credentials.apiKey}`, }, json: true, returnFullResponse: true, }; const response = await this.helpers.httpRequestWithAuthentication.call(this, 'piAPIApi', options); const rawStreamResponse = response.body; const processedContent = (0, GenericFunctions_1.processStreamedResponse)(rawStreamResponse); const failed = (0, GenericFunctions_1.isGenerationFailed)(processedContent); const status = failed ? 
'failed' : 'completed'; let simplifiedResponse; if (failed) { const { reason, suggestion } = (0, GenericFunctions_1.extractFailureDetails)(processedContent); simplifiedResponse = { prompt, status, error: { reason, suggestion, full_message: processedContent, }, original_response: rawStreamResponse }; } else { const imageUrl = (0, GenericFunctions_1.extractImageUrlFromResponse)(rawStreamResponse); simplifiedResponse = { prompt, status, image_url: imageUrl || null, processed_content: processedContent, original_response: rawStreamResponse }; } returnData.push({ json: simplifiedResponse, }); } catch (error) { if (this.continueOnFail()) { returnData.push({ json: { error: error.message, }, }); continue; } throw error; } } return [returnData]; } } exports.LLMTextToImage = LLMTextToImage; //# sourceMappingURL=LLMTextToImage.node.js.map
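
For reference, each item that execute() pushes to returnData has roughly the shape below. This is a sketch inferred from the code above, written as a TypeScript interface for readability; the interface name and the exact runtime types are assumptions, not typings shipped with the package.

// Sketch of the per-item JSON produced by this node (inferred from execute() above;
// not an official type from n8n-nodes-piapi).
interface LLMTextToImageItem {
    prompt: string;
    status: 'completed' | 'failed';
    image_url?: string | null;     // set on success; null if no URL could be extracted
    processed_content?: string;    // set on success: flattened text of the streamed reply
    error?: {                      // set on failure
        reason: string;
        suggestion: string;
        full_message: string;
    };
    original_response: unknown;    // raw streamed body from api.piapi.ai
}

When the HTTP request itself throws and "Continue On Fail" is enabled, the item instead carries only an error message string, as shown in the catch block.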