@huggingface/inference
TypeScript client for the Hugging Face Inference Providers and Inference Endpoints.
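Type declarations from getInferenceProviderMapping.d.ts: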
```typescript
import type { WidgetType } from "@huggingface/tasks";
import type { InferenceProvider, InferenceProviderOrPolicy, ModelId } from "../types";
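// In-memory cache of per-model provider mappings, avoiding repeated lookups.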
export declare const inferenceProviderMappingCache: Map<string, Partial<Record<"black-forest-labs" | "cerebras" | "cohere" | "fal-ai" | "featherless-ai" | "fireworks-ai" | "groq" | "hf-inference" | "hyperbolic" | "nebius" | "novita" | "nscale" | "openai" | "ovhcloud" | "replicate" | "sambanova" | "together", Omit<InferenceProviderModelMapping, "hfModelId">>>>;
export type InferenceProviderMapping = Partial<Record<InferenceProvider, Omit<InferenceProviderModelMapping, "hfModelId">>>;
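// Describes how a single provider exposes a given Hugging Face model.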
export interface InferenceProviderModelMapping {
adapter?: string;
adapterWeightsPath?: string;
hfModelId: ModelId;
providerId: string;
status: "live" | "staging";
task: WidgetType;
}
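// Fetches the full provider mapping for a model from the Hugging Face Hub.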
export declare function fetchInferenceProviderMappingForModel(modelId: ModelId, accessToken?: string, options?: {
fetch?: (input: RequestInfo, init?: RequestInit) => Promise<Response>;
}): Promise<InferenceProviderMapping>;
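// Returns the mapping entry for one provider and task, or null when the model
// is not deployed on that provider.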
export declare function getInferenceProviderMapping(params: {
accessToken?: string;
modelId: ModelId;
provider: InferenceProvider;
task: WidgetType;
}, options: {
fetch?: (input: RequestInfo, init?: RequestInit) => Promise<Response>;
}): Promise<InferenceProviderModelMapping | null>;
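// Resolves a provider name or policy (e.g. "auto") to a concrete provider.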
export declare function resolveProvider(provider?: InferenceProviderOrPolicy, modelId?: string, endpointUrl?: string): Promise<InferenceProvider>;
//# sourceMappingURL=getInferenceProviderMapping.d.ts.map
```
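A minimal usage sketch (not from the package docs): fetch the full mapping for a model and list the providers that serve it. The import path is an assumption (these helpers may be internal rather than re-exported from the package root), and the model ID and HF_TOKEN environment variable are placeholders:

```typescript
import { fetchInferenceProviderMappingForModel } from "@huggingface/inference";

// Fetch the provider mapping for a model (placeholder ID) from the Hub.
const mapping = await fetchInferenceProviderMappingForModel(
  "meta-llama/Llama-3.1-8B-Instruct",
  process.env.HF_TOKEN
);

for (const [provider, entry] of Object.entries(mapping)) {
  if (!entry) continue; // keys are optional in the Partial record
  console.log(`${provider}: ${entry.providerId} (${entry.status}, task: ${entry.task})`);
}
```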
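To look up a single provider instead, getInferenceProviderMapping returns that provider's entry for the given task, or null if the model is not deployed there. A sketch under the same import assumption:

```typescript
import { getInferenceProviderMapping } from "@huggingface/inference";

const entry = await getInferenceProviderMapping(
  {
    accessToken: process.env.HF_TOKEN,
    modelId: "meta-llama/Llama-3.1-8B-Instruct", // placeholder model ID
    provider: "together",
    task: "conversational",
  },
  {} // `options` is required by the signature; a custom fetch can be passed here
);

if (entry) {
  // `providerId` is the model identifier the provider itself expects.
  console.log(`Provider-side model: ${entry.providerId} (status: ${entry.status})`);
}
```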
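Finally, resolveProvider turns a provider name or policy into a concrete provider. A sketch assuming "auto" is the policy value covered by InferenceProviderOrPolicy, which typically selects a live provider from the model's mapping:

```typescript
import { resolveProvider } from "@huggingface/inference";

// Pass a provider explicitly, or let the "auto" policy pick one from the
// model's mapping. The model ID below is a placeholder.
const provider = await resolveProvider("auto", "meta-llama/Llama-3.1-8B-Instruct");
console.log(`Resolved provider: ${provider}`);
```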