inference-server
Libraries and a server for building AI applications, with adapters to various native bindings for local inference. Integrate it with your application, or run it as a microservice.
/** Weight / quantization format of the model tensors (ggml-style type names, e.g. 'f16', 'q4_k'). */
export type StableDiffusionWeightType =
  | 'f32'
  | 'f16'
  | 'q4_0'
  | 'q4_1'
  | 'q5_0'
  | 'q5_1'
  | 'q8_0'
  | 'q2_k'
  | 'q3_k'
  | 'q4_k'
  | 'q5_k'
  | 'q6_k'
  | 'q8_k'
  | 'iq2_xxs'
  | 'iq2_xs'
  | 'iq3_xxs'
  | 'iq1_s'
  | 'iq4_nl'
  | 'iq3_s'
  | 'iq2_s'
  | 'iq4_xs'
  | 'i8'
  | 'i16'
  | 'i32'
  | 'i64'
  | 'f64'
  | 'iq1_m'
  | 'bf16'
  | 'q4_0_4_4'
  | 'q4_0_4_8'
  | 'q4_0_8_8'
  // accepts any other string while keeping autocompletion for the known literals
  | (string & {})
/** Noise schedule used by the denoiser (e.g. 'karras', 'exponential'). */
export type StableDiffusionSchedule =
  | 'discrete'
  | 'karras'
  | 'exponential'
  | 'ays'
  | 'gits'
  | (string & {})
/** Sampling method (sampler) used during image generation (e.g. 'euler_a', 'dpm++2m'). */
export type StableDiffusionSamplingMethod =
  | 'euler'
  | 'euler_a'
  | 'lcm'
  | 'heun'
  | 'dpm2'
  | 'dpm++2s_a'
  | 'dpm++2m'
  | 'dpm++2mv2'
  | 'ipndm'
  | 'ipndm_v'
  | (string & {})