llm-checker
Version: 
Intelligent CLI tool with AI-powered model selection that analyzes your hardware and recommends optimal LLM models for your system
2,023 lines (2,022 loc) • 362 kB
JSON
{
  "models": [
    {
      "model_identifier": "gpt-oss",
      "model_name": "gpt-oss",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "gpt-oss:latest",
        "gpt-oss:20b",
        "gpt-oss:120b"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/gpt-oss",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "gpt-oss:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull gpt-oss:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 14,
          "categories": [
            "chat",
            "general",
            "reasoning"
          ]
        },
        {
          "tag": "gpt-oss:20b",
          "size": "20b",
          "quantization": "Q4_0",
          "command": "ollama pull gpt-oss:20b",
          "estimated_size_gb": 20,
          "real_size_gb": 14,
          "categories": [
            "chat",
            "general",
            "reasoning"
          ]
        },
        {
          "tag": "gpt-oss:120b",
          "size": "120b",
          "quantization": "Q4_0",
          "command": "ollama pull gpt-oss:120b",
          "estimated_size_gb": 120,
          "real_size_gb": 65,
          "categories": [
            "chat",
            "general",
            "reasoning"
          ]
        }
      ],
      "detailed_description": "OpenAI’s open-weight models designed for powerful reasoning, agentic tasks, and versatile developer use cases.",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "20b",
        "120b",
        "14gb",
        "65gb",
        "16gb",
        "80gb"
      ],
      "category": "general",
      "use_cases": [
        "general",
        "assistant"
      ],
      "main_size": "20b",
      "actual_pulls": 0,
      "context_length": "128K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:15.908Z",
      "categories": [
        "chat",
        "general",
        "reasoning"
      ],
      "primary_category": "reasoning"
    },
    {
      "model_identifier": "deepseek-r1",
      "model_name": "deepseek-r1",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "deepseek-r1:671b",
        "deepseek-r1:8b",
        "deepseek-r1:1.5b",
        "deepseek-r1:7b",
        "deepseek-r1:14b",
        "deepseek-r1:32b",
        "deepseek-r1:70b",
        "deepseek-r1:latest"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/deepseek-r1",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "deepseek-r1:671b",
          "size": "671b",
          "quantization": "Q4_0",
          "command": "ollama pull deepseek-r1:671b",
          "estimated_size_gb": 671,
          "real_size_gb": 404,
          "categories": [
            "chat",
            "reasoning"
          ]
        },
        {
          "tag": "deepseek-r1:8b",
          "size": "8b",
          "quantization": "Q4_0",
          "command": "ollama pull deepseek-r1:8b",
          "estimated_size_gb": 8,
          "real_size_gb": 5.2,
          "categories": [
            "chat",
            "reasoning"
          ]
        },
        {
          "tag": "deepseek-r1:1.5b",
          "size": "1.5b",
          "quantization": "Q4_0",
          "command": "ollama pull deepseek-r1:1.5b",
          "estimated_size_gb": 1.5,
          "real_size_gb": 1.1,
          "categories": [
            "chat",
            "reasoning"
          ]
        },
        {
          "tag": "deepseek-r1:7b",
          "size": "7b",
          "quantization": "Q4_0",
          "command": "ollama pull deepseek-r1:7b",
          "estimated_size_gb": 7,
          "real_size_gb": 4.7,
          "categories": [
            "chat",
            "reasoning"
          ]
        },
        {
          "tag": "deepseek-r1:14b",
          "size": "14b",
          "quantization": "Q4_0",
          "command": "ollama pull deepseek-r1:14b",
          "estimated_size_gb": 14,
          "real_size_gb": 9,
          "categories": [
            "chat",
            "reasoning"
          ]
        },
        {
          "tag": "deepseek-r1:32b",
          "size": "32b",
          "quantization": "Q4_0",
          "command": "ollama pull deepseek-r1:32b",
          "estimated_size_gb": 32,
          "real_size_gb": 20,
          "categories": [
            "chat",
            "reasoning"
          ]
        },
        {
          "tag": "deepseek-r1:70b",
          "size": "70b",
          "quantization": "Q4_0",
          "command": "ollama pull deepseek-r1:70b",
          "estimated_size_gb": 70,
          "real_size_gb": 43,
          "categories": [
            "chat",
            "reasoning"
          ]
        },
        {
          "tag": "deepseek-r1:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull deepseek-r1:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 5.2,
          "categories": [
            "chat",
            "reasoning"
          ]
        }
      ],
      "detailed_description": "DeepSeek-R1 is a family of open reasoning models with performance approaching that of leading models, such as O3 and Gemini 2.5 Pro.",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "1.5b",
        "7b",
        "8b",
        "14b",
        "32b",
        "70b",
        "671b",
        "5.2gb",
        "1.1gb",
        "4.7gb",
        "9.0gb",
        "20gb",
        "43gb",
        "404gb"
      ],
      "category": "reasoning",
      "use_cases": [
        "reasoning",
        "mathematics",
        "logic"
      ],
      "main_size": "1.5b",
      "actual_pulls": 0,
      "context_length": "160K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:15.849Z",
      "categories": [
        "chat",
        "reasoning"
      ],
      "primary_category": "reasoning"
    },
    {
      "model_identifier": "gemma3",
      "model_name": "gemma3",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "gemma3:1b",
        "gemma3:4b",
        "gemma3:12b",
        "gemma3:27b",
        "gemma3:1b-it-qat",
        "gemma3:4b-it-qat",
        "gemma3:12b-it-qat",
        "gemma3:27b-it-qat",
        "gemma3:latest"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/gemma3",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "gemma3:1b",
          "size": "1b",
          "quantization": "Q4_0",
          "command": "ollama pull gemma3:1b",
          "estimated_size_gb": 1,
          "real_size_gb": 0.7958984375,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "gemma3:4b",
          "size": "4b",
          "quantization": "Q4_0",
          "command": "ollama pull gemma3:4b",
          "estimated_size_gb": 4,
          "real_size_gb": 3.3,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "gemma3:12b",
          "size": "12b",
          "quantization": "Q4_0",
          "command": "ollama pull gemma3:12b",
          "estimated_size_gb": 12,
          "real_size_gb": 8.1,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "gemma3:27b",
          "size": "27b",
          "quantization": "Q4_0",
          "command": "ollama pull gemma3:27b",
          "estimated_size_gb": 27,
          "real_size_gb": 17,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "gemma3:1b-it-qat",
          "size": "1b",
          "quantization": "Q4_0",
          "command": "ollama pull gemma3:1b-it-qat",
          "estimated_size_gb": 1,
          "real_size_gb": 1,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "gemma3:4b-it-qat",
          "size": "4b",
          "quantization": "Q4_0",
          "command": "ollama pull gemma3:4b-it-qat",
          "estimated_size_gb": 4,
          "real_size_gb": 4,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "gemma3:12b-it-qat",
          "size": "12b",
          "quantization": "Q4_0",
          "command": "ollama pull gemma3:12b-it-qat",
          "estimated_size_gb": 12,
          "real_size_gb": 12,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "gemma3:27b-it-qat",
          "size": "27b",
          "quantization": "Q4_0",
          "command": "ollama pull gemma3:27b-it-qat",
          "estimated_size_gb": 27,
          "real_size_gb": 27,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "gemma3:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull gemma3:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 3.3,
          "categories": [
            "chat"
          ]
        }
      ],
      "detailed_description": "The current, most capable model that runs on a single GPU.",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "1b",
        "4b",
        "12b",
        "27b",
        "3.3gb",
        "8.1gb",
        "17gb"
      ],
      "category": "talking",
      "use_cases": [
        "chat",
        "conversation",
        "assistant"
      ],
      "main_size": "1b",
      "actual_pulls": 0,
      "context_length": "128K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:15.839Z",
      "categories": [
        "chat"
      ],
      "primary_category": "chat"
    },
    {
      "model_identifier": "qwen3",
      "model_name": "qwen3",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "qwen3:30b",
        "qwen3:235b",
        "qwen3:latest",
        "qwen3:0.6b",
        "qwen3:1.7b",
        "qwen3:4b",
        "qwen3:8b",
        "qwen3:14b",
        "qwen3:32b"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/qwen3",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "qwen3:30b",
          "size": "30b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen3:30b",
          "estimated_size_gb": 30,
          "real_size_gb": 19,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen3:235b",
          "size": "235b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen3:235b",
          "estimated_size_gb": 235,
          "real_size_gb": 142,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen3:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull qwen3:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 5.2,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen3:0.6b",
          "size": "0.6b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen3:0.6b",
          "estimated_size_gb": 0.6,
          "real_size_gb": 0.5107421875,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen3:1.7b",
          "size": "1.7b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen3:1.7b",
          "estimated_size_gb": 1.7,
          "real_size_gb": 1.4,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen3:4b",
          "size": "4b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen3:4b",
          "estimated_size_gb": 4,
          "real_size_gb": 2.5,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen3:8b",
          "size": "8b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen3:8b",
          "estimated_size_gb": 8,
          "real_size_gb": 5.2,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen3:14b",
          "size": "14b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen3:14b",
          "estimated_size_gb": 14,
          "real_size_gb": 9.3,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen3:32b",
          "size": "32b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen3:32b",
          "estimated_size_gb": 32,
          "real_size_gb": 20,
          "categories": [
            "chat"
          ]
        }
      ],
      "detailed_description": "Qwen3 is the latest generation of large language models in Qwen series, offering a comprehensive suite of dense and mixture-of-experts (MoE) models.",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "0.6b",
        "1.7b",
        "4b",
        "8b",
        "14b",
        "30b",
        "32b",
        "235b",
        "5.2gb",
        "1.4gb",
        "2.5gb",
        "9.3gb",
        "19gb",
        "20gb",
        "142gb",
        "72b"
      ],
      "category": "talking",
      "use_cases": [
        "chat",
        "conversation",
        "assistant"
      ],
      "main_size": "0.6b",
      "actual_pulls": 0,
      "context_length": "256K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:15.906Z",
      "categories": [
        "chat"
      ],
      "primary_category": "chat"
    },
    {
      "model_identifier": "llama3.1",
      "model_name": "llama3.1",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "llama3.1:latest",
        "llama3.1:8b",
        "llama3.1:70b",
        "llama3.1:405b"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/llama3.1",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "llama3.1:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull llama3.1:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 4.9,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "llama3.1:8b",
          "size": "8b",
          "quantization": "Q4_0",
          "command": "ollama pull llama3.1:8b",
          "estimated_size_gb": 8,
          "real_size_gb": 4.9,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "llama3.1:70b",
          "size": "70b",
          "quantization": "Q4_0",
          "command": "ollama pull llama3.1:70b",
          "estimated_size_gb": 70,
          "real_size_gb": 43,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "llama3.1:405b",
          "size": "405b",
          "quantization": "Q4_0",
          "command": "ollama pull llama3.1:405b",
          "estimated_size_gb": 405,
          "real_size_gb": 243,
          "categories": [
            "chat"
          ]
        }
      ],
      "detailed_description": "Llama 3.1 is a new state-of-the-art model from Meta available in 8B, 70B and 405B parameter sizes.",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "8b",
        "70b",
        "405b",
        "4.9gb",
        "43gb",
        "243gb",
        "952b"
      ],
      "category": "talking",
      "use_cases": [
        "chat",
        "conversation",
        "assistant"
      ],
      "main_size": "8b",
      "actual_pulls": 0,
      "context_length": "128K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:15.978Z",
      "categories": [
        "chat"
      ],
      "primary_category": "chat"
    },
    {
      "model_identifier": "nomic-embed-text",
      "model_name": "nomic-embed-text",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "nomic-embed-text:latest",
        "nomic-embed-text:v1.5"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/nomic-embed-text",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "nomic-embed-text:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull nomic-embed-text:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 0.267578125,
          "categories": [
            "chat",
            "embeddings"
          ]
        },
        {
          "tag": "nomic-embed-text:v1.5",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull nomic-embed-text:v1.5",
          "estimated_size_gb": 1,
          "real_size_gb": 0.267578125,
          "categories": [
            "chat",
            "embeddings"
          ]
        }
      ],
      "detailed_description": "A high-performing open embedding model with a large token context window.",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [],
      "category": "embeddings",
      "use_cases": [
        "embeddings",
        "search",
        "similarity"
      ],
      "main_size": "Unknown",
      "actual_pulls": 0,
      "context_length": "2K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:17.267Z",
      "categories": [
        "chat",
        "embeddings"
      ],
      "primary_category": "embeddings"
    },
    {
      "model_identifier": "llama3.2",
      "model_name": "llama3.2",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "llama3.2:1b",
        "llama3.2:latest",
        "llama3.2:3b"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/llama3.2",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "llama3.2:1b",
          "size": "1b",
          "quantization": "Q4_0",
          "command": "ollama pull llama3.2:1b",
          "estimated_size_gb": 1,
          "real_size_gb": 1.3,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "llama3.2:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull llama3.2:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 2,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "llama3.2:3b",
          "size": "3b",
          "quantization": "Q4_0",
          "command": "ollama pull llama3.2:3b",
          "estimated_size_gb": 3,
          "real_size_gb": 2,
          "categories": [
            "chat"
          ]
        }
      ],
      "detailed_description": "Meta's Llama 3.2 goes small with 1B and 3B models.",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "1b",
        "3b",
        "2.0gb",
        "1.3gb",
        "929b",
        "2.6b"
      ],
      "category": "talking",
      "use_cases": [
        "chat",
        "conversation",
        "assistant"
      ],
      "main_size": "1b",
      "actual_pulls": 0,
      "context_length": "128K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:17.301Z",
      "categories": [
        "chat"
      ],
      "primary_category": "chat"
    },
    {
      "model_identifier": "mistral",
      "model_name": "mistral",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "mistral:latest",
        "mistral:7b"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/mistral",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "mistral:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull mistral:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 4.4,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "mistral:7b",
          "size": "7b",
          "quantization": "Q4_0",
          "command": "ollama pull mistral:7b",
          "estimated_size_gb": 7,
          "real_size_gb": 4.4,
          "categories": [
            "chat"
          ]
        }
      ],
      "detailed_description": "The 7B model released by Mistral AI, updated to version 0.3.",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "7b",
        "4.4gb",
        "417b",
        "13b",
        "34b"
      ],
      "category": "talking",
      "use_cases": [
        "chat",
        "conversation",
        "assistant"
      ],
      "main_size": "7b",
      "actual_pulls": 0,
      "context_length": "32K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:17.312Z",
      "categories": [
        "chat"
      ],
      "primary_category": "chat"
    },
    {
      "model_identifier": "qwen2.5",
      "model_name": "qwen2.5",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "qwen2.5:latest",
        "qwen2.5:0.5b",
        "qwen2.5:1.5b",
        "qwen2.5:3b",
        "qwen2.5:7b",
        "qwen2.5:14b",
        "qwen2.5:32b",
        "qwen2.5:72b"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/qwen2.5",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "qwen2.5:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull qwen2.5:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 4.7,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen2.5:0.5b",
          "size": "0.5b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen2.5:0.5b",
          "estimated_size_gb": 0.5,
          "real_size_gb": 0.388671875,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen2.5:1.5b",
          "size": "1.5b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen2.5:1.5b",
          "estimated_size_gb": 1.5,
          "real_size_gb": 0.962890625,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen2.5:3b",
          "size": "3b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen2.5:3b",
          "estimated_size_gb": 3,
          "real_size_gb": 1.9,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen2.5:7b",
          "size": "7b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen2.5:7b",
          "estimated_size_gb": 7,
          "real_size_gb": 4.7,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen2.5:14b",
          "size": "14b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen2.5:14b",
          "estimated_size_gb": 14,
          "real_size_gb": 9,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen2.5:32b",
          "size": "32b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen2.5:32b",
          "estimated_size_gb": 32,
          "real_size_gb": 20,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen2.5:72b",
          "size": "72b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen2.5:72b",
          "estimated_size_gb": 72,
          "real_size_gb": 47,
          "categories": [
            "chat"
          ]
        }
      ],
      "detailed_description": "Qwen2.5 models are pretrained on Alibaba's latest large-scale dataset, encompassing up to 18 trillion tokens. The model supports up to 128K tokens and has multilingual support.",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "0.5b",
        "1.5b",
        "3b",
        "7b",
        "14b",
        "32b",
        "72b",
        "4.7gb",
        "1.9gb",
        "9.0gb",
        "20gb",
        "47gb"
      ],
      "category": "talking",
      "use_cases": [
        "chat",
        "conversation",
        "assistant"
      ],
      "main_size": "0.5b",
      "actual_pulls": 0,
      "context_length": "32K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:17.192Z",
      "categories": [
        "chat"
      ],
      "primary_category": "chat"
    },
    {
      "model_identifier": "llama3",
      "model_name": "llama3",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "llama3:70b",
        "llama3:text",
        "llama3:70b-text",
        "llama3:latest",
        "llama3:8b"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/llama3",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "llama3:70b",
          "size": "70b",
          "quantization": "Q4_0",
          "command": "ollama pull llama3:70b",
          "estimated_size_gb": 70,
          "real_size_gb": 40,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "llama3:text",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull llama3:text",
          "estimated_size_gb": 1,
          "real_size_gb": 1,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "llama3:70b-text",
          "size": "70b",
          "quantization": "Q4_0",
          "command": "ollama pull llama3:70b-text",
          "estimated_size_gb": 70,
          "real_size_gb": 70,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "llama3:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull llama3:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 4.7,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "llama3:8b",
          "size": "8b",
          "quantization": "Q4_0",
          "command": "ollama pull llama3:8b",
          "estimated_size_gb": 8,
          "real_size_gb": 4.7,
          "categories": [
            "chat"
          ]
        }
      ],
      "detailed_description": "Meta Llama 3: The most capable openly available LLM to date",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "8b",
        "70b",
        "4.7gb",
        "40gb",
        "876b"
      ],
      "category": "talking",
      "use_cases": [
        "chat",
        "conversation",
        "assistant"
      ],
      "main_size": "8b",
      "actual_pulls": 0,
      "context_length": "8K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:17.279Z",
      "categories": [
        "chat"
      ],
      "primary_category": "chat"
    },
    {
      "model_identifier": "llava",
      "model_name": "llava",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/llava",
      "namespace": null,
      "model_type": "official",
      "categories": [
        "chat",
        "multimodal"
      ],
      "primary_category": "multimodal",
      "variants": []
    },
    {
      "model_identifier": "phi3",
      "model_name": "phi3",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/phi3",
      "namespace": null,
      "model_type": "official",
      "categories": [
        "chat"
      ],
      "primary_category": "chat",
      "variants": []
    },
    {
      "model_identifier": "gemma2",
      "model_name": "gemma2",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/gemma2",
      "namespace": null,
      "model_type": "official",
      "categories": [
        "chat"
      ],
      "primary_category": "chat",
      "variants": []
    },
    {
      "model_identifier": "qwen2.5-coder",
      "model_name": "qwen2.5-coder",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/qwen2.5-coder",
      "namespace": null,
      "model_type": "official",
      "categories": [
        "general"
      ],
      "primary_category": "general",
      "variants": []
    },
    {
      "model_identifier": "gemma",
      "model_name": "gemma",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/gemma",
      "namespace": null,
      "model_type": "official",
      "categories": [
        "chat"
      ],
      "primary_category": "chat",
      "variants": []
    },
    {
      "model_identifier": "qwen",
      "model_name": "qwen",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "qwen:0.5b",
        "qwen:1.8b",
        "qwen:4b",
        "qwen:7b",
        "qwen:14b",
        "qwen:32b",
        "qwen:72b",
        "qwen:110b",
        "qwen:latest"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/qwen",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "qwen:0.5b",
          "size": "0.5b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen:0.5b",
          "estimated_size_gb": 0.5,
          "real_size_gb": 0.3857421875,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen:1.8b",
          "size": "1.8b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen:1.8b",
          "estimated_size_gb": 1.8,
          "real_size_gb": 1.1,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen:4b",
          "size": "4b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen:4b",
          "estimated_size_gb": 4,
          "real_size_gb": 2.3,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen:7b",
          "size": "7b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen:7b",
          "estimated_size_gb": 7,
          "real_size_gb": 4.5,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen:14b",
          "size": "14b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen:14b",
          "estimated_size_gb": 14,
          "real_size_gb": 8.2,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen:32b",
          "size": "32b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen:32b",
          "estimated_size_gb": 32,
          "real_size_gb": 18,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen:72b",
          "size": "72b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen:72b",
          "estimated_size_gb": 72,
          "real_size_gb": 41,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen:110b",
          "size": "110b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen:110b",
          "estimated_size_gb": 110,
          "real_size_gb": 63,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull qwen:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 2.3,
          "categories": [
            "chat"
          ]
        }
      ],
      "detailed_description": "Qwen 1.5 is a series of large language models by Alibaba Cloud spanning from 0.5B to 110B parameters",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "0.5b",
        "110b",
        "1.8b",
        "4b",
        "7b",
        "14b",
        "32b",
        "72b",
        "2.3gb",
        "1.1gb",
        "4.5gb",
        "8.2gb",
        "18gb",
        "41gb",
        "63gb",
        "2gb"
      ],
      "category": "talking",
      "use_cases": [
        "chat",
        "conversation",
        "assistant"
      ],
      "main_size": "0.5b",
      "actual_pulls": 0,
      "context_length": "32K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:57.838Z",
      "categories": [
        "chat"
      ],
      "primary_category": "chat"
    },
    {
      "model_identifier": "mxbai-embed-large",
      "model_name": "mxbai-embed-large",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "mxbai-embed-large:latest",
        "mxbai-embed-large:335m"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/mxbai-embed-large",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "mxbai-embed-large:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull mxbai-embed-large:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 0.654296875,
          "categories": [
            "chat",
            "embeddings"
          ]
        },
        {
          "tag": "mxbai-embed-large:335m",
          "size": "335m",
          "quantization": "Q4_0",
          "command": "ollama pull mxbai-embed-large:335m",
          "estimated_size_gb": 1,
          "real_size_gb": 0.654296875,
          "categories": [
            "chat",
            "embeddings"
          ]
        }
      ],
      "detailed_description": "State-of-the-art large embedding model from mixedbread.ai",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "335m"
      ],
      "category": "embeddings",
      "use_cases": [
        "embeddings",
        "search",
        "similarity"
      ],
      "main_size": "335m",
      "actual_pulls": 0,
      "context_length": "512",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:57.951Z",
      "categories": [
        "chat",
        "embeddings"
      ],
      "primary_category": "embeddings"
    },
    {
      "model_identifier": "qwen2",
      "model_name": "qwen2",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "qwen2:latest",
        "qwen2:0.5b",
        "qwen2:1.5b",
        "qwen2:7b",
        "qwen2:72b"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/qwen2",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "qwen2:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull qwen2:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 4.4,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen2:0.5b",
          "size": "0.5b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen2:0.5b",
          "estimated_size_gb": 0.5,
          "real_size_gb": 0.34375,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen2:1.5b",
          "size": "1.5b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen2:1.5b",
          "estimated_size_gb": 1.5,
          "real_size_gb": 0.9130859375,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen2:7b",
          "size": "7b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen2:7b",
          "estimated_size_gb": 7,
          "real_size_gb": 4.4,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "qwen2:72b",
          "size": "72b",
          "quantization": "Q4_0",
          "command": "ollama pull qwen2:72b",
          "estimated_size_gb": 72,
          "real_size_gb": 41,
          "categories": [
            "chat"
          ]
        }
      ],
      "detailed_description": "Qwen2 is a new series of large language models from Alibaba group",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "0.5b",
        "1.5b",
        "7b",
        "72b",
        "4.4gb",
        "41gb",
        "0.49b",
        "1.54b",
        "7.07b",
        "72.71b",
        "0.35b",
        "1.31b",
        "5.98b",
        "70.21b"
      ],
      "category": "talking",
      "use_cases": [
        "chat",
        "conversation",
        "assistant"
      ],
      "main_size": "0.5b",
      "actual_pulls": 0,
      "context_length": "32K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:57.924Z",
      "categories": [
        "chat"
      ],
      "primary_category": "chat"
    },
    {
      "model_identifier": "llama2",
      "model_name": "llama2",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "llama2:text",
        "llama2:latest",
        "llama2:7b",
        "llama2:13b",
        "llama2:70b"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/llama2",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "llama2:text",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull llama2:text",
          "estimated_size_gb": 1,
          "real_size_gb": 1,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "llama2:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull llama2:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 3.8,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "llama2:7b",
          "size": "7b",
          "quantization": "Q4_0",
          "command": "ollama pull llama2:7b",
          "estimated_size_gb": 7,
          "real_size_gb": 3.8,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "llama2:13b",
          "size": "13b",
          "quantization": "Q4_0",
          "command": "ollama pull llama2:13b",
          "estimated_size_gb": 13,
          "real_size_gb": 7.4,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "llama2:70b",
          "size": "70b",
          "quantization": "Q4_0",
          "command": "ollama pull llama2:70b",
          "estimated_size_gb": 70,
          "real_size_gb": 39,
          "categories": [
            "chat"
          ]
        }
      ],
      "detailed_description": "Llama 2 is a collection of foundation language models ranging from 7B to 70B parameters.",
      "parameters": {},
      "quantizations": [
        "Q4"
      ],
      "model_sizes": [
        "7b",
        "70b",
        "13b",
        "3.8gb",
        "7.4gb",
        "39gb",
        "8gb",
        "16gb",
        "64gb"
      ],
      "category": "talking",
      "use_cases": [
        "chat",
        "conversation",
        "assistant"
      ],
      "main_size": "7b",
      "actual_pulls": 0,
      "context_length": "4K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:58.102Z",
      "categories": [
        "chat"
      ],
      "primary_category": "chat"
    },
    {
      "model_identifier": "phi4",
      "model_name": "phi4",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "phi4:latest",
        "phi4:14b"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/phi4",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "phi4:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull phi4:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 9.1,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "phi4:14b",
          "size": "14b",
          "quantization": "Q4_0",
          "command": "ollama pull phi4:14b",
          "estimated_size_gb": 14,
          "real_size_gb": 9.1,
          "categories": [
            "chat"
          ]
        }
      ],
      "detailed_description": "Phi-4 is a 14B parameter, state-of-the-art open model from Microsoft.",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "14b",
        "9.1gb"
      ],
      "category": "talking",
      "use_cases": [
        "chat",
        "conversation",
        "assistant"
      ],
      "main_size": "14b",
      "actual_pulls": 0,
      "context_length": "16K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:57.937Z",
      "categories": [
        "chat"
      ],
      "primary_category": "chat"
    },
    {
      "model_identifier": "minicpm-v",
      "model_name": "minicpm-v",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "minicpm-v:latest",
        "minicpm-v:8b"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/minicpm-v",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "minicpm-v:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull minicpm-v:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 5.5,
          "categories": [
            "chat",
            "general",
            "multimodal"
          ]
        },
        {
          "tag": "minicpm-v:8b",
          "size": "8b",
          "quantization": "Q4_0",
          "command": "ollama pull minicpm-v:8b",
          "estimated_size_gb": 8,
          "real_size_gb": 5.5,
          "categories": [
            "chat",
            "general",
            "multimodal"
          ]
        }
      ],
      "detailed_description": "A series of multimodal LLMs (MLLMs) designed for vision-language understanding.",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "8b",
        "5.5gb",
        "7b"
      ],
      "category": "general",
      "use_cases": [
        "general",
        "assistant"
      ],
      "main_size": "8b",
      "actual_pulls": 0,
      "context_length": "32K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:59.346Z",
      "categories": [
        "chat",
        "general",
        "multimodal"
      ],
      "primary_category": "multimodal"
    },
    {
      "model_identifier": "codellama",
      "model_name": "codellama",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "codellama:7b",
        "codellama:13b",
        "codellama:34b",
        "codellama:70b",
        "codellama:7b-instruct",
        "codellama:7b-code",
        "codellama:latest"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/codellama",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "codellama:7b",
          "size": "7b",
          "quantization": "Q4_0",
          "command": "ollama pull codellama:7b",
          "estimated_size_gb": 7,
          "real_size_gb": 3.8,
          "categories": [
            "chat",
            "coding"
          ]
        },
        {
          "tag": "codellama:13b",
          "size": "13b",
          "quantization": "Q4_0",
          "command": "ollama pull codellama:13b",
          "estimated_size_gb": 13,
          "real_size_gb": 7.4,
          "categories": [
            "chat",
            "coding"
          ]
        },
        {
          "tag": "codellama:34b",
          "size": "34b",
          "quantization": "Q4_0",
          "command": "ollama pull codellama:34b",
          "estimated_size_gb": 34,
          "real_size_gb": 19,
          "categories": [
            "chat",
            "coding"
          ]
        },
        {
          "tag": "codellama:70b",
          "size": "70b",
          "quantization": "Q4_0",
          "command": "ollama pull codellama:70b",
          "estimated_size_gb": 70,
          "real_size_gb": 39,
          "categories": [
            "chat",
            "coding"
          ]
        },
        {
          "tag": "codellama:7b-instruct",
          "size": "7b",
          "quantization": "Q4_0",
          "command": "ollama pull codellama:7b-instruct",
          "estimated_size_gb": 7,
          "real_size_gb": 7,
          "categories": [
            "chat",
            "coding"
          ]
        },
        {
          "tag": "codellama:7b-code",
          "size": "7b",
          "quantization": "Q4_0",
          "command": "ollama pull codellama:7b-code",
          "estimated_size_gb": 7,
          "real_size_gb": 7,
          "categories": [
            "chat",
            "coding"
          ]
        },
        {
          "tag": "codellama:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull codellama:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 3.8,
          "categories": [
            "chat",
            "coding"
          ]
        }
      ],
      "detailed_description": "A large language model that can use text prompts to generate and discuss code.",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "7b",
        "13b",
        "34b",
        "70b",
        "3.8gb",
        "7.4gb",
        "19gb",
        "39gb",
        "100b"
      ],
      "category": "coding",
      "use_cases": [
        "coding",
        "programming",
        "development"
      ],
      "main_size": "7b",
      "actual_pulls": 0,
      "context_length": "16K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:59.458Z",
      "categories": [
        "chat",
        "coding"
      ],
      "primary_category": "coding"
    },
    {
      "model_identifier": "tinyllama",
      "model_name": "tinyllama",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "tinyllama:latest",
        "tinyllama:1.1b"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/tinyllama",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "tinyllama:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull tinyllama:latest",
          "estimated_size_gb": 1,
          "real_size_gb": 0.623046875,
          "categories": [
            "chat"
          ]
        },
        {
          "tag": "tinyllama:1.1b",
          "size": "1.1b",
          "quantization": "Q4_0",
          "command": "ollama pull tinyllama:1.1b",
          "estimated_size_gb": 1.1,
          "real_size_gb": 0.623046875,
          "categories": [
            "chat"
          ]
        }
      ],
      "detailed_description": "The TinyLlama project is an open endeavor to train a compact 1.1B Llama model on 3 trillion tokens.",
      "parameters": {},
      "quantizations": [],
      "model_sizes": [
        "1.1b"
      ],
      "category": "talking",
      "use_cases": [
        "chat",
        "conversation",
        "assistant"
      ],
      "main_size": "1.1b",
      "actual_pulls": 0,
      "context_length": "2K",
      "input_types": [
        "text",
        "image",
        "code"
      ],
      "detailed_scraped_at": "2025-08-14T08:51:59.385Z",
      "categories": [
        "chat"
      ],
      "primary_category": "chat"
    },
    {
      "model_identifier": "llama3.3",
      "model_name": "llama3.3",
      "description": "",
      "labels": [],
      "pulls": 0,
      "tags": [
        "llama3.3:latest",
        "llama3.3:70b"
      ],
      "last_updated": "Unknown",
      "url": "https://ollama.com/library/llama3.3",
      "namespace": null,
      "model_type": "official",
      "variants": [
        {
          "tag": "llama3.3:latest",
          "size": "unknown",
          "quantization": "Q4_0",
          "command": "ollama pull llama3.3:latest",
          "estimated_size_gb": 1,