@huggingface/inference

TypeScript client for the Hugging Face Inference Providers and Inference Endpoints

import type { ChatCompletionInput, ChatCompletionOutput } from "@huggingface/tasks";
import { resolveProvider } from "../../lib/getInferenceProviderMapping";
import { getProviderHelper } from "../../lib/getProviderHelper";
import type { BaseArgs, Options } from "../../types";
import { innerRequest } from "../../utils/request";

/**
 * Use the chat completion endpoint to generate a response to a prompt, using the
 * OpenAI-compatible message completion API (non-streaming).
 */
export async function chatCompletion(
	args: BaseArgs & ChatCompletionInput,
	options?: Options
): Promise<ChatCompletionOutput> {
	// Resolve which inference provider should handle this model/request.
	const provider = await resolveProvider(args.provider, args.model, args.endpointUrl);
	// Get the provider-specific helper for the conversational task.
	const providerHelper = getProviderHelper(provider, "conversational");
	// Send the request and receive the raw provider response.
	const { data: response } = await innerRequest<ChatCompletionOutput>(args, providerHelper, {
		...options,
		task: "conversational",
	});
	// Normalize the provider response into the standard ChatCompletionOutput shape.
	return providerHelper.getResponse(response);
}
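For reference, a minimal usage sketch of this function, assuming the package's top-level re-export of chatCompletion; the access token and model id below are placeholders, not values from this file:

import { chatCompletion } from "@huggingface/inference";

// Minimal sketch: accessToken and model are placeholder values.
const output = await chatCompletion({
	accessToken: "hf_xxx",
	model: "meta-llama/Llama-3.1-8B-Instruct",
	provider: "hf-inference",
	messages: [{ role: "user", content: "What is the capital of France?" }],
	max_tokens: 128,
});

// ChatCompletionOutput follows the OpenAI response shape: choices[].message.content.
console.log(output.choices[0]?.message?.content);

Passing endpointUrl in the args instead routes the request to a dedicated Inference Endpoint, which is why resolveProvider receives it alongside provider and model.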