// @huggingface/tasks: list of ML tasks for huggingface.co/tasks
import type { TaskDataCustom } from "../index.js";
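// Task data for the video-to-video task page: example datasets, a demo
// input/output pair, representative models, Spaces, and a task summary.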
const taskData: TaskDataCustom = {
	datasets: [
		{
			description: "Dataset with detailed annotations for training and benchmarking video instance editing.",
			id: "suimu/VIRESET",
		},
		{
			description: "Dataset to evaluate models on long video generation and understanding.",
			id: "zhangsh2001/LongV-EVAL",
		},
		{
			description: "Collection of 104 demo videos from the SeedVR/SeedVR2 series showcasing model outputs.",
			id: "Iceclear/SeedVR_VideoDemos",
		},
	],
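	// Demo assets rendered on the task page: GIF previews of an example input video and its edited output.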
	demo: {
		inputs: [
			{
				filename: "input.gif",
				type: "img",
			},
		],
		outputs: [
			{
				filename: "output.gif",
				type: "img",
			},
		],
	},
	metrics: [],
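	// Representative models spanning instruction-based editing, mesh-guided editing, controllable generation, and upscaling.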
	models: [
		{
			description: "Model for editing outfits, characters, and scenery in videos.",
			id: "decart-ai/Lucy-Edit-Dev",
		},
		{
			description: "Framework that uses 3D mesh proxies for precise, consistent video editing.",
			id: "LeoLau/Shape-for-Motion",
		},
		{
			description: "Model for generating physics-aware videos from input videos and control conditions.",
			id: "nvidia/Cosmos-Transfer2.5-2B",
		},
		{
			description: "Model for upscaling input videos, designed for seamless use with ComfyUI.",
			id: "numz/SeedVR2_comfyUI",
		},
	],
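	// Spaces with interactive demos of the models above.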
	spaces: [
		{
			description: "Interactive demo space for Lucy-Edit-Dev video editing.",
			id: "decart-ai/lucy-edit-dev",
		},
		{
			description: "Demo space for SeedVR2-3B showcasing video upscaling and restoration.",
			id: "ByteDance-Seed/SeedVR2-3B",
		},
	],
	summary:
		"Video-to-video models take one or more videos as input and generate new videos as output. They can enhance quality, interpolate frames, modify styles, or create new motion dynamics, enabling creative applications, video production, and research.",
	widgetModels: [],
	youtubeId: "",
};
export default taskData;
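
// A minimal consumer sketch (illustrative only): the relative import path and the
// "video-to-video" registry key are assumptions about how the package wires task
// data together, not something defined in this file.
//
// import videoToVideo from "./video-to-video/data.js";
//
// const TASKS_DATA = {
// 	"video-to-video": videoToVideo,
// };
//
// console.log(TASKS_DATA["video-to-video"].summary);
// console.log(TASKS_DATA["video-to-video"].models.map((m) => m.id));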