@huggingface/inference
TypeScript client for the Hugging Face Inference Providers and Inference Endpoints
import { resolveProvider } from "../../lib/getInferenceProviderMapping.js";
import { getProviderHelper } from "../../lib/getProviderHelper.js";
import { innerRequest } from "../../utils/request.js";
/**
 * Use the chat completion endpoint to generate a response to a prompt, following the OpenAI-compatible chat completion API (non-streaming).
 */
export async function chatCompletion(args, options) {
    // Determine which inference provider the request should be routed to,
    // based on the explicit provider, the model mapping, or a custom endpoint URL.
    const provider = await resolveProvider(args.provider, args.model, args.endpointUrl);
    // Helper that knows how to build requests and parse responses for this provider and task.
    const providerHelper = getProviderHelper(provider, "conversational");
    const { data: response } = await innerRequest(args, providerHelper, {
        ...options,
        task: "conversational",
    });
    // Normalize the provider-specific payload into the common chat completion output shape.
    return providerHelper.getResponse(response);
}
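
A minimal usage sketch of this function, assuming chatCompletion is re-exported from the package root (the file above sits in the package's compiled output) and that you have a valid Hugging Face access token; the token, model, and provider values below are placeholders, not part of the source above:

import { chatCompletion } from "@huggingface/inference";

// Placeholder credentials and model; replace with your own values.
const response = await chatCompletion({
    accessToken: "hf_xxx",
    model: "meta-llama/Llama-3.1-8B-Instruct",
    provider: "hf-inference",
    messages: [{ role: "user", content: "What is the capital of France?" }],
});

// The returned object follows the OpenAI-style chat completion shape.
console.log(response.choices[0].message.content);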