inference-server
Version:
Libraries and a server for building AI applications, with adapters for various native bindings that enable local inference. Integrate it into your application, or run it as a microservice.
8 lines (7 loc) • 566 B
TypeScript
import type { PreTrainedModel, PreTrainedTokenizer, PretrainedMixin, AutoProcessor, AutoTokenizer, AutoModel } from '@huggingface/transformers';
/**
 * Any Transformers.js model class usable for loading a model — either the generic
 * `AutoModel` / `PretrainedMixin` entry points or a concrete `PreTrainedModel` subclass.
 * Matched by the static side of the class (`typeof`), i.e. its `from_pretrained` factory.
 */
export type TransformersJsModelClass = typeof PretrainedMixin | typeof PreTrainedModel | typeof AutoModel;
/**
 * Any Transformers.js tokenizer class usable for loading a tokenizer — the generic
 * `AutoTokenizer` entry point or a concrete `PreTrainedTokenizer` subclass
 * (static/class side, not an instance).
 */
export type TransformersJsTokenizerClass = typeof PreTrainedTokenizer | typeof AutoTokenizer;
/**
 * Quantization / weight data types accepted when loading a Transformers.js model
 * (the `dtype` option): full/half precision floats and the various 8-bit / 4-bit
 * quantization schemes.
 * NOTE(review): upstream Transformers.js also accepts 'auto' as a dtype — confirm
 * whether it is intentionally excluded here before widening this union.
 */
export type TransformersJsDataType = 'fp32' | 'fp16' | 'q8' | 'int8' | 'uint8' | 'q4' | 'bnb4' | 'q4f16';
/**
 * Structural contract for a Transformers.js processor class: anything exposing a
 * static `from_pretrained` factory with the same signature as `AutoProcessor`'s.
 * Using a structural interface (rather than `typeof AutoProcessor`) lets any
 * compatible processor class satisfy this type.
 */
export interface TransformersJsProcessorClass {
	from_pretrained: typeof AutoProcessor.from_pretrained;
}