@huggingface/inference
TypeScript client for the Hugging Face Inference Providers and Inference Endpoints
/**
 * If you want to run inference locally for a new model before it is registered on huggingface.co
 * for a given Inference Provider, you can add it to the following dictionary for dev purposes.
 *
 * The test suite also injects entries into this dictionary.
 */
export const HARDCODED_MODEL_INFERENCE_MAPPING = {
/**
* "HF model ID" => "Model ID on Inference Provider's side"
*
* Example:
* "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
*/
"black-forest-labs": {},
cerebras: {},
cohere: {},
"fal-ai": {},
"featherless-ai": {},
"fireworks-ai": {},
groq: {},
"hf-inference": {},
hyperbolic: {},
nebius: {},
novita: {},
nscale: {},
openai: {},
ovhcloud: {},
replicate: {},
sambanova: {},
together: {},
};
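
For illustration, here is a minimal sketch of what injecting a dev-time entry could look like, in the spirit of the test injection mentioned in the comment above. The import path, the cast, and both model IDs are assumptions made for this example, not part of the library's documented API; per the inline comment, each entry maps an HF model ID to the model ID on the Inference Provider's side.

import { HARDCODED_MODEL_INFERENCE_MAPPING } from "@huggingface/inference";

// Hypothetical dev-time override: route the HF model "my-org/my-model"
// to the provider-side ID "my-model-v1" on fal-ai before the model is
// registered on huggingface.co. The cast reflects the string-to-string
// shape shown in the inline example above.
(HARDCODED_MODEL_INFERENCE_MAPPING["fal-ai"] as Record<string, string>)[
	"my-org/my-model"
] = "my-model-v1";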