@302ai/ai-sdk

The **[302AI provider](https://sdk.vercel.ai/providers/ai-sdk-providers/)** for the [AI SDK](https://sdk.vercel.ai/docs) adds language model, text embedding model, and image model support for the [302AI](https://302.ai) platform.
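Based on the `ai302-provider.ts` source embedded in the map below, the package exports a ready-made `ai302` instance and a `createAI302` factory. A minimal setup sketch follows; the `@302ai/ai-sdk` import specifier is assumed from the package name, so verify it against the package's actual entry point.

```ts
// Sketch: configuring a 302AI provider instance.
// createAI302 and ai302 come from src/ai302-provider.ts; the import
// path below assumes the package root re-exports them.
import { createAI302 } from '@302ai/ai-sdk';

const provider = createAI302({
  // Optional: falls back to the AI302_API_KEY environment variable.
  apiKey: process.env.AI302_API_KEY,
  // Optional: defaults to https://api.302.ai when omitted.
  // baseURL: 'https://api.302.ai',
  // Optional: custom headers and a custom fetch implementation
  // (e.g. for testing or request middleware) can also be passed.
});
```

The exported `ai302` default instance is simply `createAI302()` with no options, so it relies entirely on the `AI302_API_KEY` environment variable.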

The file shown is the source map for the compiled bundle: a single line of JSON, roughly 230 kB. Its `sources` array lists the TypeScript modules bundled into the package: the provider entry point (`src/ai302-provider.ts`), the image model wrapper with its settings and types (`ai302-image-model.ts`, `ai302-image-settings.ts`, `ai302-types.ts`), shared response handlers (`utils/api-handlers.ts`), and one handler per backend under `models/` (`base-model.ts`, `model-factory.ts`, plus `auraflow`, `bagel`, `cogview`, `dalle`, `doubao`, `flux-pro-dev`, `flux-kontext`, `google-imagen-3`, `google-imagen-4`, `gpt-image`, `hidream`, `ideogram`, `irag`, `kling`, `kolors`, `luma-photon`, `lumina-image`, `midjourney`, `minimax`, `omnigen`, `playground`, `recraft`, `sd3-ultra`, `sd35`, `sd3v2`, `sdxl`, `sdxl-lightning`, `soul`).

The embedded `sourcesContent` carries the original TypeScript. From `ai302-provider.ts`, the public surface (condensed here to declaration form; imports from `@ai-sdk/provider`, `@ai-sdk/provider-utils`, `@ai-sdk/openai-compatible`, `zod`, and the local settings modules are omitted) is a callable provider with chat, embedding, and image factories:

```ts
export interface AI302ProviderSettings {
  /** AI302 API key. Default value is taken from the `AI302_API_KEY` environment variable. */
  apiKey?: string;
  /** Base URL for the API calls (defaults to `https://api.302.ai`). */
  baseURL?: string;
  /** Custom headers to include in the requests. */
  headers?: Record<string, string>;
  /** Custom fetch implementation, e.g. as middleware or for testing. */
  fetch?: FetchFunction;
}

export interface AI302Provider {
  /** Creates a model for text generation. */
  (modelId: AI302ChatModelId, settings?: AI302ChatSettings): LanguageModelV1;

  /** Creates a chat model for text generation. */
  chatModel(
    modelId: AI302ChatModelId,
    settings?: AI302ChatSettings,
  ): LanguageModelV1;

  /** Creates a text embedding model. */
  textEmbeddingModel(
    modelId: AI302EmbeddingModelId,
    settings?: AI302EmbeddingSettings,
  ): EmbeddingModelV1<string>;

  /** Creates a model for image generation. */
  image(
    modelId: AI302ImageModelId,
    settings?: AI302ImageSettings,
  ): ImageModelV1;
}

export declare function createAI302(
  options?: AI302ProviderSettings,
): AI302Provider;

/** Default provider instance, equivalent to `createAI302()`. */
export declare const ai302: AI302Provider;
```

Chat and embedding models are thin wrappers around `OpenAICompatibleChatLanguageModel` and `OpenAICompatibleEmbeddingModel` from `@ai-sdk/openai-compatible`, configured with a 302AI error schema (`{ error: { message } }`) and `defaultObjectGenerationMode: 'json'`. Every request carries an `Authorization: Bearer <key>` header plus an `mj-api-secret` header, both filled from the same API key. URL routing sends Jina embedding models to `${baseURL}/jina/v1`, other embedding models to `${baseURL}/v1`, and all remaining requests to the handler-specified path under the base URL (default `https://api.302.ai`).

Image generation is dispatched through `AI302ImageModel` to per-model handlers that extend `BaseModelHandler`. The base class provides the shared plumbing: parsing `WIDTHxHEIGHT` size strings, converting between sizes and aspect ratios, snapping dimensions to multiples of 32 within the 32-4096 range, choosing the closest supported size or aspect ratio (pushing warnings whenever it adjusts the request), and downloading result URLs to base64 with up to five retries, a 120-second timeout, and exponential backoff capped at 30 seconds. Each handler (AuraFlow, Bagel, CogView-4, DALL-E 3, Flux Pro/Dev, Flux Kontext, GPT Image, HiDream, Ideogram, and so on) emits `unsupported-setting` warnings for options its backend ignores (for example `n > 1` or `seed`), posts to its 302AI endpoint (such as `/302/submit/aura-flow`, `/v1/images/generations`, `/bigmodel/api/paas/v4/images/generations`, or `/ideogram/generate`), and returns the downloaded images together with the warnings and response metadata. Flux Kontext submits a task to `/flux/v1/flux-kontext-pro` or `/flux/v1/flux-kontext-max` and then polls `/flux/v1/get_result` every 2 seconds for up to 5 minutes. The `AI302ImageModelId` union in `ai302-image-settings.ts` enumerates the supported model IDs (Flux, Ideogram, DALL-E 3, Recraft, SDXL/SD3/SD3.5, Kolors, AuraFlow, Luma Photon, Midjourney/Nijijourney, Google Imagen 3/4, Doubao, Lumina, OmniGen, Playground, CogView-4, MiniMax, iRAG, HiDream, GPT Image, Bagel, Soul, and Kling), and `modelToBackendConfig` records per model whether an explicit size is supported. The source-map excerpt reproduced here is truncated partway through the Ideogram handler.
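Putting the provider together with the AI SDK core helpers, usage would look roughly like the sketch below. It assumes the `ai` package's v4 line (which matches the `LanguageModelV1`/`ImageModelV1`/`EmbeddingModelV1` interfaces used above); the helper names `experimental_generateImage`, `generateText`, and `embed` should be verified against the installed version, and the chat and embedding model IDs are illustrative placeholders rather than values confirmed by this source map.

```ts
import { ai302 } from '@302ai/ai-sdk';
import {
  embed,
  experimental_generateImage as generateImage,
  generateText,
} from 'ai';

async function main() {
  // Image generation: 'flux-pro' is one of the AI302ImageModelId values listed above.
  const { image } = await generateImage({
    model: ai302.image('flux-pro'),
    prompt: 'A lighthouse at dusk, in watercolor',
    size: '1024x1024',
  });
  console.log('image bytes:', image.uint8Array.length);

  // Text generation: the provider object is callable and returns an
  // OpenAI-compatible chat model ('gpt-4o' is a hypothetical model ID).
  const { text } = await generateText({
    model: ai302('gpt-4o'),
    prompt: 'Describe the 302.AI platform in one sentence.',
  });
  console.log(text);

  // Embeddings: model IDs containing 'jina' are routed to the /jina/v1 path
  // ('jina-clip-v2' is likewise a hypothetical model ID).
  const { embedding } = await embed({
    model: ai302.textEmbeddingModel('jina-clip-v2'),
    value: 'source maps connect a bundle to its original sources',
  });
  console.log('embedding dimensions:', embedding.length);
}

main().catch(console.error);
```

Because every handler downloads the generated images and returns them as base64, `generateImage` yields the image data directly rather than a URL, even for backends that respond with hosted links.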