n8n-nodes-piapi
Community n8n nodes for PiAPI: integrate generative AI capabilities (image, video, audio, 3D) into your workflows.
LLMImageToImage.node.js (JavaScript)
Object.defineProperty(exports, "__esModule", { value: true });
exports.LLMImageToImage = void 0;
const GenericFunctions_1 = require("../shared/GenericFunctions");
const Constants_1 = require("../shared/Constants");
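/**
 * Builds the final prompt sent to the model by prefixing size and style hints
 * to the user's text. Aspect ratio values ending in '_header' (presumably group
 * labels inside ASPECT_RATIO_OPTIONS) add no size hint.
 * Example, assuming aspectRatio = 'custom', width = 512, height = 768 and a
 * hypothetical style value 'anime':
 *   "Image size: 512x768. Image style: anime. <user prompt>"
 */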
function generatePromptWithAspectRatio(executeFunctions, itemIndex, prompt) {
const aspectRatio = executeFunctions.getNodeParameter('aspectRatio', itemIndex);
const imageStyle = executeFunctions.getNodeParameter('imageStyle', itemIndex, 'none');
let aspectRatioText = '';
if (aspectRatio === 'custom') {
const width = executeFunctions.getNodeParameter('width', itemIndex, 1024);
const height = executeFunctions.getNodeParameter('height', itemIndex, 1024);
aspectRatioText = `Image size: ${width}x${height}. `;
}
else if (aspectRatio !== 'square_header' && aspectRatio !== 'landscape_header' && aspectRatio !== 'portrait_header') {
aspectRatioText = `Image size: ${aspectRatio}. `;
}
let styleText = '';
if (imageStyle !== 'none') {
styleText = `Image style: ${imageStyle}. `;
}
return `${aspectRatioText}${styleText}${prompt}`;
}
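/**
 * n8n node: PiAPI GPT-4o Image to Image.
 * Sends one or more input images plus a text prompt to PiAPI's OpenAI-compatible
 * chat completions endpoint and returns a simplified result object
 * (prompt, status, generated image URL or failure details).
 */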
class LLMImageToImage {
constructor() {
this.description = {
displayName: 'PiAPI GPT-4o Image to Image',
name: 'llmImageToImage',
icon: 'file:../piapi.svg',
group: ['transform'],
version: 1,
description: 'Transform images using PiAPI GPT-4o Image Generation API',
defaults: {
name: 'GPT-4o Image to Image',
},
inputs: ["main"],
outputs: ["main"],
credentials: [
{
name: 'piAPIApi',
required: true,
},
],
properties: [
{
displayName: 'Model',
name: 'model',
type: 'options',
options: [
{
name: 'GPT-4o Image',
value: 'gpt-4o-image',
},
{
name: 'GPT-4o Image Preview (Legacy)',
value: 'gpt-4o-image-preview',
},
],
default: 'gpt-4o-image',
description: 'The model to use for image transformation',
},
{
displayName: 'Image Source',
name: 'imageSource',
type: 'options',
options: [
{
name: 'URL',
value: 'url',
},
{
name: 'Binary Data',
value: 'binaryData',
},
],
default: 'url',
description: 'The source of the input image',
},
{
displayName: 'Multiple Images Input',
name: 'multipleImages',
type: 'boolean',
default: false,
description: 'Whether to use multiple images as input (only supported by GPT-4o Image)',
displayOptions: {
show: {
imageSource: ['url'],
model: ['gpt-4o-image'],
},
},
},
{
displayName: 'Additional Image URLs',
name: 'additionalImageUrls',
type: 'string',
typeOptions: {
multipleValues: true,
},
default: [],
description: 'Additional image URLs to include in the request',
displayOptions: {
show: {
imageSource: ['url'],
multipleImages: [true],
model: ['gpt-4o-image'],
},
},
},
{
displayName: 'Image URL',
name: 'imageUrl',
type: 'string',
default: '',
required: true,
displayOptions: {
show: {
imageSource: ['url'],
},
},
description: 'URL of the image to transform',
},
{
displayName: 'Binary Property',
name: 'binaryPropertyName',
type: 'string',
default: 'data',
required: true,
displayOptions: {
show: {
imageSource: ['binaryData'],
},
},
description: 'Name of the binary property containing the image data',
},
{
displayName: 'Prompt',
name: 'prompt',
type: 'string',
typeOptions: {
rows: 4,
},
default: '',
required: true,
description: 'Text prompt for image transformation',
},
{
displayName: 'Aspect Ratio',
name: 'aspectRatio',
type: 'options',
options: Constants_1.ASPECT_RATIO_OPTIONS,
default: '1024:1024',
description: 'Aspect ratio for the output image',
},
{
displayName: 'Custom Width',
name: 'width',
type: 'number',
displayOptions: {
show: {
aspectRatio: ['custom'],
},
},
default: 1024,
description: 'Custom width of the output image',
},
{
displayName: 'Custom Height',
name: 'height',
type: 'number',
displayOptions: {
show: {
aspectRatio: ['custom'],
},
},
default: 1024,
description: 'Custom height of the output image',
},
{
displayName: 'Image Style',
name: 'imageStyle',
type: 'options',
options: Constants_1.LORA_OPTIONS,
default: 'none',
description: 'Style to apply to the generated image',
},
],
};
}
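    /**
     * For each input item: collect the image URL(s) or binary data, build an
     * OpenAI-style multimodal message, call the PiAPI endpoint, and reduce the
     * streamed response to a simplified JSON result.
     */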
async execute() {
const items = this.getInputData();
const returnData = [];
for (let i = 0; i < items.length; i++) {
const model = this.getNodeParameter('model', i);
const prompt = this.getNodeParameter('prompt', i);
const imageSource = this.getNodeParameter('imageSource', i);
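// Collect the input image reference(s): one or more plain URLs (additional URLs
// are only offered for the gpt-4o-image model), or a single binary property that
// is embedded as a base64 data URI (or passed through as a URL when the binary
// item only carries one).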
let imageUrls = [];
if (imageSource === 'url') {
const mainImageUrl = this.getNodeParameter('imageUrl', i);
imageUrls.push(mainImageUrl);
if (model === 'gpt-4o-image') {
const useMultipleImages = this.getNodeParameter('multipleImages', i, false);
if (useMultipleImages) {
const additionalUrls = this.getNodeParameter('additionalImageUrls', i, []);
imageUrls = imageUrls.concat(additionalUrls);
}
}
}
else {
const binaryPropertyName = this.getNodeParameter('binaryPropertyName', i);
const binaryData = this.helpers.assertBinaryData(i, binaryPropertyName);
if (binaryData.mimeType && !binaryData.mimeType.includes('image/')) {
throw new Error('The provided binary data is not an image');
}
if (binaryData.data) {
const dataBuffer = Buffer.from(binaryData.data, 'base64');
imageUrls.push(`data:${binaryData.mimeType};base64,${dataBuffer.toString('base64')}`);
}
else if (binaryData.url) {
imageUrls.push(binaryData.url);
}
else {
throw new Error('No usable image data found in the provided binary property');
}
}
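// Build OpenAI-style multimodal message content: one image_url part per input
// image, followed by a single text part carrying the prompt with the size/style
// hints prepended by generatePromptWithAspectRatio.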
const messageContent = [];
for (const url of imageUrls) {
messageContent.push({
type: 'image_url',
image_url: {
url: url,
},
});
}
messageContent.push({
type: 'text',
text: generatePromptWithAspectRatio(this, i, prompt),
});
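// Chat-completions style request body; stream is set to true because PiAPI
// appears to deliver the image generation result as streamed chunks.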
const body = {
model,
messages: [
{
role: 'user',
content: messageContent,
},
],
stream: true,
};
try {
const credentials = await this.getCredentials('piAPIApi');
const options = {
method: 'POST',
body,
url: 'https://api.piapi.ai/v1/chat/completions',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${credentials.apiKey}`,
},
json: true,
returnFullResponse: true,
};
const response = await this.helpers.httpRequestWithAuthentication.call(this, 'piAPIApi', options);
const rawStreamResponse = response.body;
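// The streamed body is buffered whole (returnFullResponse) and handed to the
// shared GenericFunctions helpers, which (judging by their names) flatten the
// chunks to text, detect generation failures, and extract the generated image URL.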
const processedContent = (0, GenericFunctions_1.processStreamedResponse)(rawStreamResponse);
const failed = (0, GenericFunctions_1.isGenerationFailed)(processedContent);
const status = failed ? 'failed' : 'completed';
let simplifiedResponse;
if (failed) {
const { reason, suggestion } = (0, GenericFunctions_1.extractFailureDetails)(processedContent);
simplifiedResponse = {
prompt,
status,
error: {
reason,
suggestion,
full_message: processedContent,
},
original_response: rawStreamResponse
};
}
else {
const imageUrl = (0, GenericFunctions_1.extractImageUrlFromResponse)(rawStreamResponse);
simplifiedResponse = {
prompt,
status,
source_image: imageUrl,
image_url: imageUrl || null,
processed_content: processedContent,
original_response: rawStreamResponse
};
}
returnData.push({
json: simplifiedResponse,
});
}
catch (error) {
if (this.continueOnFail()) {
returnData.push({
json: {
error: error.message,
},
});
continue;
}
throw error;
}
}
return [returnData];
}
}
exports.LLMImageToImage = LLMImageToImage;
//# sourceMappingURL=LLMImageToImage.node.js.map