inference-server

Libraries and a server for building AI applications. Adapters to various native bindings enable local inference. Integrate it into your application, or run it as a standalone microservice.
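As a rough sketch of the library-style integration, the snippet below drives the stable-diffusion engine whose typings follow: it prepares the model files, creates an instance, and renders a single image. Only the function and parameter names come from the declarations below; the import path, model URL, logger, and the exact shape of the EngineContext wiring are assumptions made for illustration.

import { prepareModel, createInstance, type StableDiffusionModelConfig } from './engine.js'; // import path assumed

async function main() {
    // Hypothetical model config; `location` may point at a local file or a download URL.
    const config = {
        location: 'https://example.com/models/sd-v1-5.gguf', // hypothetical checkpoint
        device: { gpu: 'auto' },
    } as StableDiffusionModelConfig; // base ModelConfig fields omitted for brevity

    // EngineContext shape assumed here: the typings destructure { config, log } from it.
    const ctx = { config, log: console.log } as any;

    // Download/verify the model files, then load them into a native context.
    await prepareModel(ctx, (progress) => console.log('downloading', progress));
    const { context } = await createInstance(ctx);

    const [image] = await context.txt2img({
        prompt: 'a lighthouse at dusk, oil painting',
        width: 512,
        height: 512,
        sampleSteps: 20,
        cfgScale: 7,
        seed: 42,
    });
    console.log('generated one image', image);

    await context.dispose(); // free the native stable-diffusion.cpp context
}

main().catch(console.error);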

import StableDiffusion from '@lmagder/node-stable-diffusion-cpp';
import {
    EngineContext,
    FileDownloadProgress,
    ModelConfig,
    TextToImageTaskResult,
    ModelFileSource,
    TextToImageTaskArgs,
    EngineTaskContext,
    ImageToImageTaskArgs,
} from '../../types/index.js';
import { StableDiffusionSamplingMethod, StableDiffusionSchedule, StableDiffusionWeightType } from './types.js';

/** A loaded stable-diffusion.cpp context wrapped as an engine instance. */
export interface StableDiffusionInstance {
    context: StableDiffusion.Context;
}

/** Model configuration: the main checkpoint plus optional auxiliary weights (CLIP, VAE, T5-XXL, ControlNet, TAESD, LoRAs). */
export interface StableDiffusionModelConfig extends ModelConfig {
    location: string;
    sha256?: string;
    clipL?: ModelFileSource;
    clipG?: ModelFileSource;
    vae?: ModelFileSource;
    t5xxl?: ModelFileSource;
    controlNet?: ModelFileSource;
    taesd?: ModelFileSource;
    diffusionModel?: boolean;
    model?: ModelFileSource;
    loras?: ModelFileSource[];
    samplingMethod?: StableDiffusionSamplingMethod;
    weightType?: StableDiffusionWeightType;
    schedule?: StableDiffusionSchedule;
    device?: {
        gpu?: boolean | 'auto' | (string & {});
        cpuThreads?: number;
    };
}

interface StableDiffusionModelMeta {
    gguf: any;
}

export declare const autoGpu = true;

/** Resolves the configured model files (downloading as needed), reporting progress and honoring cancellation. */
export declare function prepareModel({ config, log }: EngineContext<StableDiffusionModelConfig, StableDiffusionModelMeta>, onProgress?: (progress: FileDownloadProgress) => void, signal?: AbortSignal): Promise<any>;

/** Loads the model into a native context exposing txt2img, img2img, and img2vid, plus dispose() to release it. */
export declare function createInstance({ config, log }: EngineContext<StableDiffusionModelConfig>, signal?: AbortSignal): Promise<{
    context: Readonly<{
        dispose: () => Promise<void>;
        txt2img: (params: {
            prompt: string;
            negativePrompt?: string;
            clipSkip?: number;
            cfgScale?: number;
            guidance?: number;
            width?: number;
            height?: number;
            sampleMethod?: StableDiffusion.SampleMethod;
            sampleSteps?: number;
            seed?: number;
            batchCount?: number;
            controlCond?: StableDiffusion.Image;
            controlStrength?: number;
            styleRatio?: number;
            normalizeInput?: boolean;
            inputIdImagesPath?: string;
        }) => Promise<StableDiffusion.Image[]>;
        img2img: (params: {
            initImage: StableDiffusion.Image;
            prompt: string;
            negativePrompt?: string;
            clipSkip?: number;
            cfgScale?: number;
            guidance?: number;
            width?: number;
            height?: number;
            sampleMethod?: StableDiffusion.SampleMethod;
            sampleSteps?: number;
            strength?: number;
            seed?: number;
            batchCount?: number;
            controlCond?: StableDiffusion.Image;
            controlStrength?: number;
            styleRatio?: number;
            normalizeInput?: boolean;
            inputIdImagesPath?: string;
        }) => Promise<StableDiffusion.Image[]>;
        img2vid: (params: {
            initImage: StableDiffusion.Image;
            width?: number;
            height?: number;
            videoFrames?: number;
            motionBucketId?: number;
            fps?: number;
            augmentationLevel?: number;
            minCfg?: number;
            cfgScale?: number;
            sampleMethod?: StableDiffusion.SampleMethod;
            sampleSteps?: number;
            strength?: number;
            seed?: number;
        }) => Promise<StableDiffusion.Image[]>;
    }>;
}>;

/** Runs a text-to-image task against a loaded instance. */
export declare function processTextToImageTask(task: TextToImageTaskArgs, ctx: EngineTaskContext<StableDiffusionInstance, StableDiffusionModelConfig, StableDiffusionModelMeta>, signal?: AbortSignal): Promise<TextToImageTaskResult>;

/** Runs an image-to-image task against a loaded instance. */
export declare function processImageToImageTask(task: ImageToImageTaskArgs, ctx: EngineTaskContext<StableDiffusionInstance, StableDiffusionModelConfig, StableDiffusionModelMeta>, signal?: AbortSignal): Promise<TextToImageTaskResult>;

export {};
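The typings above cover the library path; the "run it as a microservice" option mentioned in the description is served by the package's own server, which this file does not show. Purely as an illustrative stand-in, the following sketch exposes an engine instance's txt2img behind a minimal HTTP endpoint using Node's built-in http module; the route, port, and payload shape are invented for the example.

import { createServer } from 'node:http';

// `context` stands for the instance from the earlier sketch (the result of
// createInstance); only the txt2img surface needed here is declared.
declare const context: {
    txt2img: (params: { prompt: string; width?: number; height?: number }) => Promise<unknown[]>;
};

const server = createServer((req, res) => {
    if (req.method !== 'POST' || req.url !== '/txt2img') {
        res.writeHead(404).end();
        return;
    }
    let body = '';
    req.on('data', (chunk) => (body += chunk));
    req.on('end', async () => {
        try {
            const { prompt } = JSON.parse(body);
            const images = await context.txt2img({ prompt, width: 512, height: 512 });
            // StableDiffusion.Image's field layout is not shown in this file, so only
            // a count is returned; a real endpoint would encode the pixel data.
            res.writeHead(200, { 'Content-Type': 'application/json' });
            res.end(JSON.stringify({ generated: images.length }));
        } catch (err) {
            res.writeHead(500).end(String(err));
        }
    });
});

server.listen(3000); // hypothetical port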