UNPKG

@burncloud/inference

Version:

TypeScript client for the Hugging Face Inference Providers and Inference Endpoints

120 lines (119 loc) 5.17 kB
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.NovitaTextToVideoTask = exports.NovitaConversationalTask = exports.NovitaTextGenerationTask = void 0;
/**
 * See the registered mapping of HF model ID => Novita model ID here:
 *
 * https://huggingface.co/api/partners/novita/models
 *
 * This is a publicly available mapping.
 *
 * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
 * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
 *
 * - If you work at Novita and want to update this mapping, please use the model mapping API we provide on huggingface.co
 * - If you're a community member and want to add a new supported HF model to Novita, please open an issue on the present repo
 *   and we will tag Novita team members.
 *
 * Thanks!
 */
const InferenceOutputError_js_1 = require("../lib/InferenceOutputError.js");
const isUrl_js_1 = require("../lib/isUrl.js");
const delay_js_1 = require("../utils/delay.js");
const omit_js_1 = require("../utils/omit.js");
const providerHelper_js_1 = require("./providerHelper.js");

/** Base URL for direct calls to the Novita API. */
const NOVITA_API_BASE_URL = "https://api.novita.ai";

/** Poll interval, in ms, while waiting for an async text-to-video task. */
const TASK_POLL_INTERVAL_MS = 500;

/**
 * Text-generation task routed through Novita's OpenAI-compatible endpoint.
 */
class NovitaTextGenerationTask extends providerHelper_js_1.BaseTextGenerationTask {
    constructor() {
        super("novita", NOVITA_API_BASE_URL);
    }
    /** @returns {string} Route for chat-completion style text generation. */
    makeRoute() {
        return "/v3/openai/chat/completions";
    }
}
exports.NovitaTextGenerationTask = NovitaTextGenerationTask;

/**
 * Conversational (chat) task; shares the same OpenAI-compatible route as
 * text generation.
 */
class NovitaConversationalTask extends providerHelper_js_1.BaseConversationalTask {
    constructor() {
        super("novita", NOVITA_API_BASE_URL);
    }
    /** @returns {string} Route for chat completions. */
    makeRoute() {
        return "/v3/openai/chat/completions";
    }
}
exports.NovitaConversationalTask = NovitaConversationalTask;

/**
 * Asynchronous text-to-video task. Submits a job, then polls Novita's
 * task-result endpoint until the task reaches a terminal status, and finally
 * downloads the produced video as a Blob.
 */
class NovitaTextToVideoTask extends providerHelper_js_1.TaskProviderHelper {
    constructor() {
        super("novita", NOVITA_API_BASE_URL);
    }
    /**
     * @param {{ model: string }} params
     * @returns {string} Async submission route for the given model.
     */
    makeRoute(params) {
        return `/v3/async/${params.model}`;
    }
    /**
     * Builds the request payload: flattens `parameters` into the top level,
     * renames `num_inference_steps` to Novita's `steps`, and maps `inputs`
     * to `prompt`.
     * (If `num_inference_steps` is absent, `steps` is `undefined` and is
     * dropped by JSON serialization.)
     */
    preparePayload(params) {
        const { num_inference_steps, ...restParameters } = params.args.parameters ?? {};
        return {
            ...(0, omit_js_1.omit)(params.args, ["inputs", "parameters"]),
            ...restParameters,
            steps: num_inference_steps,
            prompt: params.args.inputs,
        };
    }
    /**
     * Polls the async task until it succeeds or fails, then fetches the video.
     *
     * @param {{ task_id?: string }} response - Submission response containing the task id.
     * @param {string} url - The URL the submission was sent to (used to derive the polling base URL).
     * @param {Record<string, string>} headers - Auth headers to reuse for polling.
     * @returns {Promise<Blob>} The generated video.
     * @throws {InferenceOutputError} On missing task id, fetch/parse failures,
     *   task failure, or an unexpected result shape.
     */
    async getResponse(response, url, headers) {
        if (!url || !headers) {
            throw new InferenceOutputError_js_1.InferenceOutputError("URL and headers are required for text-to-video task");
        }
        const taskId = response.task_id;
        if (!taskId) {
            throw new InferenceOutputError_js_1.InferenceOutputError("No task ID found in the response");
        }
        // When routed via the HF router, polling must go through the /novita prefix.
        const parsedUrl = new URL(url);
        const baseUrl = `${parsedUrl.protocol}//${parsedUrl.host}${parsedUrl.host === "router.huggingface.co" ? "/novita" : ""}`;
        const resultUrl = `${baseUrl}/v3/async/task-result?task_id=${taskId}`;
        let status = "";
        let taskResult;
        // NOTE(review): no timeout/abort here — a task stuck in a non-terminal
        // status polls forever; consider adding a deadline.
        while (status !== "TASK_STATUS_SUCCEED" && status !== "TASK_STATUS_FAILED") {
            await (0, delay_js_1.delay)(TASK_POLL_INTERVAL_MS);
            const resultResponse = await fetch(resultUrl, { headers });
            if (!resultResponse.ok) {
                throw new InferenceOutputError_js_1.InferenceOutputError("Failed to fetch task result");
            }
            // BUGFIX: guard ONLY the JSON parse. The original wrapped the status
            // validation in the same try/catch, so its deliberate
            // "Failed to get task status" error was swallowed and re-reported
            // as the misleading "Failed to parse task result".
            try {
                taskResult = await resultResponse.json();
            } catch (error) {
                throw new InferenceOutputError_js_1.InferenceOutputError("Failed to parse task result");
            }
            if (taskResult &&
                typeof taskResult === "object" &&
                "task" in taskResult &&
                taskResult.task &&
                typeof taskResult.task === "object" &&
                "status" in taskResult.task &&
                typeof taskResult.task.status === "string") {
                status = taskResult.task.status;
            }
            else {
                throw new InferenceOutputError_js_1.InferenceOutputError("Failed to get task status");
            }
        }
        if (status === "TASK_STATUS_FAILED") {
            throw new InferenceOutputError_js_1.InferenceOutputError("Task failed");
        }
        // Validate the success payload shape before downloading the video.
        if (typeof taskResult === "object" &&
            !!taskResult &&
            "videos" in taskResult &&
            typeof taskResult.videos === "object" &&
            !!taskResult.videos &&
            Array.isArray(taskResult.videos) &&
            taskResult.videos.length > 0 &&
            "video_url" in taskResult.videos[0] &&
            typeof taskResult.videos[0].video_url === "string" &&
            (0, isUrl_js_1.isUrl)(taskResult.videos[0].video_url)) {
            const urlResponse = await fetch(taskResult.videos[0].video_url);
            return await urlResponse.blob();
        }
        else {
            throw new InferenceOutputError_js_1.InferenceOutputError("Expected { videos: [{ video_url: string }] }");
        }
    }
}
exports.NovitaTextToVideoTask = NovitaTextToVideoTask;